code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64 3–1.05M)
---|---|---|---|---|---|
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from .config import _cfg, _cfgi
engine = create_engine(_cfg('connection-string'))
db = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
Base.query = db.query_property()
def init_db():
import truecraft.objects
Base.metadata.create_all(bind=engine)
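
# Editor's usage sketch, not part of the original module: roughly how init_db(),
# the scoped session and Base.query fit together. `Server` is a hypothetical
# model assumed to be declared on Base inside truecraft.objects.
def _usage_sketch():  # illustration only, never called by the application
    from truecraft.objects import Server  # hypothetical mapped class
    init_db()                              # create tables for every imported model
    db.add(Server(name='example'))
    db.commit()                            # scoped_session proxies a thread-local session
    return Server.query.all()              # Base.query exposes .query on mapped classes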
| MaxLeiter/truecraft.io | truecraft/database.py | Python | mit | 461 |
from setuptools import setup
setup(
name='djmicro',
version='0.0.2',
author='Andrew Pendleton',
py_modules = ['djmicro'],
install_requires=['Django>=1.6'],
)
| apendleton/djmicro | setup.py | Python | bsd-3-clause | 178 |
from __future__ import division, print_function, unicode_literals
import re, collections
import hindkit as kit
SCRIPT_PREFIX = 'dv'
STEM_ANCHOR_NAMES = ['abvm.e', 'abvm']
def glyph_filter_matra_i_alts(glyph):
return glyph.name.startswith(SCRIPT_PREFIX + 'mI.alt')
def glyph_filter_bases_for_matra_i(glyph):
name = glyph.name
    is_filtered = False
if name.startswith(SCRIPT_PREFIX):
name = name[2:]
if name.endswith('.MAR'):
name = name[:-4]
if name.endswith('.traditional'):
name = name[:-12]
if name.endswith('.simplified'):
name = name[:-11]
        is_filtered = name in POTENTIAL_BASES_FOR_MATRA_I
    return is_filtered
def glyph_filter_bases_for_wide_matra_ii(glyph):
name = glyph.name
    is_filtered = False
if name.startswith(SCRIPT_PREFIX) and SCRIPT_PREFIX == 'dv':
name = name[2:]
        is_filtered = name in POTENTIAL_BASES_FOR_WIDE_MATRA_II
    return is_filtered
def get_stem_position(glyph, stem_right_margin):
has_stem_anchor = False
for anchor in glyph.anchors:
if anchor.name in STEM_ANCHOR_NAMES:
has_stem_anchor = True
stem_anchor = anchor
break
if has_stem_anchor:
stem_position = stem_anchor.x
else:
stem_position = glyph.width - stem_right_margin
return stem_position
def restore_abvm_content(abvm_content):
if re.search(
r'# lookup MARK_BASE_abvm.i \{',
abvm_content
):
abvm_content = re.sub(
r'(?m)\n\n\n^lookup MARK_BASE_abvm.i \{\n(^.+\n)+^\} MARK_BASE_abvm.i;',
r'',
abvm_content
)
commented_abvm_lookup = re.search(
r'(?m)^# lookup MARK_BASE_abvm.i \{\n(^# .+\n)+^# \} MARK_BASE_abvm.i;',
abvm_content
).group()
uncommented_abvm_lookup = '\n'.join([
line[2:] for line in commented_abvm_lookup.splitlines()
])
original_abvm_content = abvm_content.replace(
commented_abvm_lookup,
uncommented_abvm_lookup
)
else:
original_abvm_content = abvm_content
return original_abvm_content
def write_mI_matches_to_files(directory, mI_table, long_base_names):
with open(directory + '/abvm.fea', 'r') as f:
abvm_content = f.read()
original_abvm_content = restore_abvm_content(abvm_content)
original_abvm_lookup = re.search(
r'(?m)^lookup MARK_BASE_abvm.i {\n(.+\n)+^} MARK_BASE_abvm.i;',
original_abvm_content
).group()
modified_abvm_lookup = original_abvm_lookup.replace(
'pos base {}mI.alt'.format(SCRIPT_PREFIX),
'pos base @generated_MATRA_I_BASES_'
)
Reph_positioning_offset = mI_table[0].glyph.width
class_def_lines = []
class_def_lines.extend(
kit.builder.compose_glyph_class_def_lines('generated_MATRA_I_BASES_TOO_LONG', long_base_names)
)
substitute_rule_lines = []
lookup_name = 'GENERATED_matra_i_matching'
substitute_rule_lines.append('lookup {} {{'.format(lookup_name))
for mI in mI_table:
mI_number = mI.glyph.name[-2:]
to_comment_substitute_rule = False
if not mI.matches:
print('\t `%s` is not used.' % mI.glyph.name)
to_comment_substitute_rule = True
modified_abvm_lookup = modified_abvm_lookup.replace(
'\tpos base @generated_MATRA_I_BASES_' + mI_number,
'#\tpos base @generated_MATRA_I_BASES_' + mI_number
)
locator = '@generated_MATRA_I_BASES_%s <anchor ' % mI_number
search_result = re.search(
locator + r'\-?\d+',
modified_abvm_lookup
)
if search_result:
x = search_result.group().split(' ')[-1]
modified_x = str(int(x) - Reph_positioning_offset)
modified_abvm_lookup = modified_abvm_lookup.replace(
locator + x,
locator + modified_x,
)
else:
print("\t[!] `%s` doesn't have the anchor for Reph." % mI.glyph.name)
class_def_lines.extend(
kit.builder.compose_glyph_class_def_lines(
'generated_MATRA_I_BASES_' + mI_number,
mI.matches
)
)
substitute_rule_lines.append(
"{}sub {}mI' @generated_MATRA_I_BASES_{} by {};".format(
'# ' if to_comment_substitute_rule else ' ',
SCRIPT_PREFIX,
mI_number,
mI.glyph.name
)
)
substitute_rule_lines.append('}} {};'.format(lookup_name))
commented_original_abvm_lookup = '# ' + original_abvm_lookup.replace('\n', '\n# ')
modified_abvm_content = original_abvm_content.replace(
original_abvm_lookup,
commented_original_abvm_lookup + '\n\n\n' + modified_abvm_lookup
)
with open(directory + '/abvm.fea', 'w') as f:
f.write(modified_abvm_content)
with open(directory + '/matra_i_matching.fea', 'w') as f:
result_lines = (
['# CLASSES', ''] + class_def_lines +
['# RULES', ''] + substitute_rule_lines
)
f.write('\n'.join(result_lines) + '\n')
def match_matra_i_alts(style, offset_range = (0, 0)):
font = style.open_font()
mI_list = [font[glyph_name] for glyph_name in sorted(font.groups['generated_MATRA_I_ALTS'])]
base_list = [font[glyph_name] for glyph_name in font.groups['generated_BASES_FOR_MATRA_I']]
MatchRow = collections.namedtuple('MatchRow', 'glyph, stretch, matches')
mI_table = [
MatchRow(
glyph = mI,
stretch = abs(mI.rightMargin),
matches = []
) for mI in mI_list
]
for anchor in font[SCRIPT_PREFIX + 'mE'].anchors:
if anchor.name in ['_' + name for name in STEM_ANCHOR_NAMES]:
stem_right_margin = abs(anchor.x)
break
else:
print("Error: Can't find the stem anchor in glyph `mE`!")
    tolerance_of_mI_stretch_shortage = (font[SCRIPT_PREFIX + 'VA'].width - stem_right_margin) / 2
long_base_names = []
stem_positions = [get_stem_position(b, stem_right_margin) for b in base_list]
stem_position_min = min(stem_positions)
stem_position_max = max(stem_positions)
stem_positions_with_offset = []
for stem_position in stem_positions:
ratio = (stem_position - stem_position_min) / (stem_position_max - stem_position_min)
adjusted_offset = offset_range[0] + (offset_range[-1] - offset_range[0]) * ratio
stem_position_with_offset = stem_position + int(adjusted_offset)
stem_positions_with_offset.append(stem_position_with_offset)
for i, base in enumerate(base_list):
base_name = base.name
stem_position = stem_positions_with_offset[i]
if stem_position < mI_table[0].stretch:
mI_table[0].matches.append(base_name)
elif stem_position >= mI_table[-1].stretch:
            if stem_position < mI_table[-1].stretch + tolerance_of_mI_stretch_shortage:
mI_table[-1].matches.append(base_name)
else:
long_base_names.append(base_name)
else:
for index, mI in enumerate(mI_table):
if stem_position < mI.stretch:
if mI.stretch - stem_position < abs(mI_table[index - 1].stretch - stem_position):
mI.matches.append(base_name)
else:
mI_table[index - 1].matches.append(base_name)
break
write_mI_matches_to_files(style.directory, mI_table, long_base_names)
POTENTIAL_BASES_FOR_WIDE_MATRA_II = '''\
KA
PHA
KxA
PHxA
K_RA
PH_RA
Kx_RA
PHx_RA
J_KA
K_KA
K_PHA
Kx_KxA
Kx_PHA
Kx_PHxA
L_KA
L_PHA
N_KA
N_PHA
N_PH_RA
PH_PHA
PHx_PHxA
P_PHA
SH_KA
SH_KxA
SS_KA
SS_K_RA
SS_PHA
S_KA
S_K_RA
S_PHA
T_KA
T_K_RA
T_PHA
K_TA.traditional
'''.splitlines()
POTENTIAL_BASES_FOR_MATRA_I = '''\
KA
KHA
GA
GHA
NGA
CA
CHA
JA
JHA
NYA
TTA
TTHA
DDA
DDHA
NNA
TA
THA
DA
DHA
NA
PA
PHA
BA
BHA
MA
YA
RA
LA
VA
SHA
SSA
SA
HA
LLA
K_SSA
J_NYA
KxA
KHxA
GxA
GHxA
NGxA
CxA
CHxA
JxA
JHxA
NYxA
TTxA
TTHxA
DDxA
DDHxA
NNxA
TxA
THxA
DxA
DHxA
NxA
PxA
PHxA
BxA
BHxA
MxA
YxA
RxA
LxA
VxA
SHxA
SSxA
SxA
HxA
LLxA
GAbar
JAbar
DDAbar
BAbar
ZHA
YAheavy
DDAmarwari
K_RA
KH_RA
G_RA
GH_RA
NG_RA
C_RA
CH_RA
J_RA
JH_RA
NY_RA
TT_RA
TTH_RA
DD_RA
DDH_RA
NN_RA
T_RA
TH_RA
D_RA
DH_RA
N_RA
P_RA
PH_RA
B_RA
BH_RA
M_RA
Y_RA
L_RA
V_RA
SH_RA
SS_RA
S_RA
H_RA
LL_RA
K_SS_RA
J_NY_RA
Kx_RA
KHx_RA
Gx_RA
GHx_RA
NGx_RA
Cx_RA
CHx_RA
Jx_RA
JHx_RA
NYx_RA
TTx_RA
TTHx_RA
DDx_RA
DDHx_RA
NNx_RA
Tx_RA
THx_RA
Dx_RA
DHx_RA
Nx_RA
Px_RA
PHx_RA
Bx_RA
BHx_RA
Mx_RA
Yx_RA
Lx_RA
Vx_RA
SHx_RA
SSx_RA
Sx_RA
Hx_RA
LLx_RA
K_KA
Kx_KxA
K_KHA
K_CA
K_JA
K_TTA
K_TT_RA
K_NNA
K_TA
Kx_TA
K_T_YA
K_T_RA
K_T_VA
K_THA
K_DA
K_NA
Kx_NA
K_PA
K_P_RA
K_PHA
Kx_PHA
Kx_PHxA
K_BA
Kx_BA
K_MA
Kx_MA
K_YA
K_LA
K_VA
K_V_YA
K_SHA
Kx_SHA
K_SS_MA
K_SS_M_YA
K_SS_YA
K_SS_VA
K_SA
K_S_TTA
K_S_DDA
K_S_TA
K_S_P_RA
K_S_P_LA
KH_KHA
KH_TA
KHx_TA
KH_NA
KHx_NA
KH_MA
KHx_MA
KH_YA
KHx_YA
KH_VA
KHx_VA
KH_SHA
KHx_SHA
KHx_SA
G_GA
G_GHA
G_JA
G_NNA
G_DA
G_DHA
G_DH_YA
G_DH_VA
G_NA
Gx_NA
G_N_YA
G_BA
G_BHA
G_BH_YA
G_MA
G_YA
G_R_YA
G_LA
G_VA
G_SA
GH_NA
GH_MA
GH_YA
NG_KA
NG_KHA
NG_GA
NG_GHA
NG_NGA
NG_YA
NG_VA
C_CA
C_CHA
C_CH_VA
C_NA
C_MA
C_YA
CH_YA
CH_R_YA
CH_VA
J_KA
J_JA
Jx_JxA
J_J_NYA
J_J_YA
J_J_VA
J_JHA
J_NY_YA
J_TTA
J_DDA
J_TA
J_DA
J_NA
Jx_NA
J_BA
J_MA
J_YA
Jx_YA
J_VA
JH_NA
JH_MA
JH_YA
NY_CA
NY_CHA
NY_JA
NY_NYA
NY_SHA
TT_TTA
TT_TTHA
TT_YA
TT_VA
TTH_TTHA
TTH_YA
TTH_VA
DD_DDA
DD_DDHA
DD_YA
DD_VA
DDH_DDHA
DDH_YA
DDH_VA
NN_TTA
NN_TTHA
NN_DDA
NN_DDHA
NN_NNA
NN_MA
NN_YA
NN_VA
T_KA
T_K_YA
T_K_RA
T_K_VA
T_K_SSA
T_KHA
T_KH_NA
T_KH_RA
T_TA
T_T_YA
T_T_VA
T_THA
T_NA
T_N_YA
T_PA
T_P_RA
T_P_LA
T_PHA
T_MA
T_M_YA
T_YA
T_R_YA
T_LA
T_VA
T_SA
T_S_NA
T_S_YA
T_S_VA
TH_NA
TH_YA
TH_VA
D_GA
D_G_RA
D_GHA
D_DA
D_DHA
D_DH_YA
D_NA
D_BA
D_B_RA
D_BHA
D_BH_YA
D_MA
D_YA
D_R_YA
D_VA
D_V_YA
DH_NA
DH_N_YA
DH_MA
DH_YA
DH_VA
N_KA
N_K_SA
N_CA
N_CHA
N_JA
N_TTA
N_DDA
N_TA
N_T_YA
N_T_RA
N_T_SA
N_THA
N_TH_YA
N_TH_VA
N_DA
N_D_RA
N_D_VA
N_DHA
N_DH_YA
N_DH_RA
N_DH_VA
N_NA
N_N_YA
N_PA
N_P_RA
N_PHA
N_PH_RA
N_BHA
N_BH_YA
N_BH_VA
N_MA
N_M_YA
N_YA
N_VA
N_SHA
N_SA
N_S_TTA
N_S_M_YA
N_S_YA
N_HA
P_CA
P_TTA
P_TTHA
P_TA
P_T_YA
P_NA
P_PA
P_PHA
P_MA
P_YA
P_LA
P_VA
P_SHA
P_SA
PH_JA
PHx_JxA
PH_TTA
PHx_TTA
PH_TA
PHx_TA
PH_NA
PHx_NA
PH_PA
PH_PHA
PHx_PHxA
PH_YA
PH_LA
PH_SHA
PHx_SA
B_JA
B_JxA
B_J_YA
B_JHA
B_TA
B_DA
B_DHA
B_DH_VA
B_NA
B_BA
B_BHA
B_BH_RA
B_YA
B_LA
B_L_YA
B_VA
B_SHA
B_SA
BH_NA
BH_YA
BH_R_YA
BH_LA
BH_VA
M_TA
M_DA
M_NA
M_PA
M_P_RA
M_BA
M_B_YA
M_B_RA
M_BHA
M_BH_YA
M_BH_RA
M_BH_VA
M_MA
M_YA
M_LA
M_VA
M_SHA
M_SA
M_HA
Y_NA
Y_YA
Eyelash_YA
Eyelash_HA
L_KA
L_K_YA
L_KHA
L_GA
L_CA
L_JA
L_JxA
L_TTA
L_TTHA
L_DDA
L_DDHA
L_TA
L_THA
L_TH_YA
L_DA
L_D_RA
L_NA
L_PA
L_PHA
L_BA
L_BHA
L_MA
L_YA
L_LA
L_L_YA
L_VA
L_V_DDA
L_SA
L_HA
V_NA
V_YA
V_LA
V_VA
V_HA
SH_KA
SH_KxA
SH_CA
SH_CHA
SH_TTA
SH_TA
SH_NA
SH_MA
SH_YA
SH_LA
SH_VA
SH_SHA
SS_KA
SS_K_RA
SS_TTA
SS_TT_YA
SS_TT_RA
SS_TT_VA
SS_TTHA
SS_TTH_YA
SS_TTH_RA
SS_NNA
SS_NN_YA
SS_NA
SS_PA
SS_P_RA
SS_PHA
SS_MA
SS_M_YA
SS_YA
SS_VA
SS_SSA
S_KA
S_K_RA
S_K_VA
S_KHA
S_JA
S_TTA
S_TA
S_T_YA
S_T_RA
S_T_VA
S_THA
S_TH_YA
S_DA
S_NA
S_PA
S_P_RA
S_PHA
S_BA
S_MA
S_M_YA
S_YA
S_LA
S_VA
S_SA
H_NNA
H_NA
H_MA
H_YA
H_LA
H_VA
LL_YA
NG_NA
NG_MA
CH_NA
TT_NA
TTH_NA
DD_NA
'''.splitlines()
| mooniak/hindkit | hindkit/scripts/devanagari.py | Python | mit | 11,394 |
"""Test txstripe."""
from mock import patch, Mock
from twisted.trial.unittest import TestCase
from twisted.internet import defer
class BaseTest(TestCase):
"""Default settings for all tests."""
def _json_mock(self):
return self.mocked_resp
def _request_mock(self, *args, **kwargs):
return defer.succeed(self.resp_mock)
def setUp(self):
        self.mocked_resp = {}
self.resp_mock = Mock()
self.resp_mock.json = self._json_mock
treq_patch = patch('txstripe.resource.treq')
        self.treq_mock = treq_patch.start()
        self.addCleanup(treq_patch.stop)  # stop the patch so treq is not left mocked for other tests
        self.treq_mock.request.side_effect = self._request_mock
import txstripe
txstripe.api_key = 'ABC123'
self.txstripe = txstripe
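

# Editor's sketch, not part of the original file: shows how the treq mock wired up
# in BaseTest.setUp() behaves. Any code that calls treq.request() gets resp_mock
# back, and resp_mock.json() returns whatever the test assigned to mocked_resp.
class ExampleUsageTest(BaseTest):
    """Illustrative only; real test cases live in the other test modules."""

    @defer.inlineCallbacks
    def test_mocked_response_roundtrip(self):
        self.mocked_resp = {'id': 'ch_123', 'object': 'charge'}
        resp = yield self.treq_mock.request('GET', '/v1/charges/ch_123')
        self.assertEqual(resp.json(), {'id': 'ch_123', 'object': 'charge'})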
| lextoumbourou/txstripe | txstripe/test/__init__.py | Python | mit | 741 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_encap_pool
short_description: Manage encap pools on Cisco ACI fabrics (fvns:VlanInstP, fvns:VxlanInstP, fvns:VsanInstP)
description:
- Manage vlan, vxlan, and vsan pools on Cisco ACI fabrics.
- More information from the internal APIC class
I(fvns:VlanInstP), I(fvns:VxlanInstP), and I(fvns:VsanInstP) at
U(https://developer.cisco.com/site/aci/docs/apis/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.5'
options:
allocation_mode:
description:
- The method used for allocating encaps to resources.
- Only vlan and vsan support allocation modes.
aliases: [ mode ]
choices: [ dynamic, static]
description:
description:
- Description for the C(pool).
aliases: [ descr ]
pool:
description:
- The name of the pool.
aliases: [ name, pool_name ]
pool_type:
description:
- The encap type of C(pool).
required: yes
aliases: [ type ]
choices: [ vlan, vxlan, vsan]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new vlan pool
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
description: Production VLANs
state: present
- name: Remove a vlan pool
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
state: absent
- name: Query a vlan pool
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
state: query
- name: Query all vlan pools
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool_type: vlan
state: query
'''
RETURN = r'''
#
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
ACI_MAPPING = dict(
vlan=dict(
aci_class='fvnsVlanInstP',
aci_mo='infra/vlanns-',
),
vxlan=dict(
aci_class='fvnsVxlanInstP',
aci_mo='infra/vxlanns-',
),
vsan=dict(
aci_class='fvnsVsanInstP',
aci_mo='infra/vsanns-',
),
)
def main():
argument_spec = aci_argument_spec
argument_spec.update(
allocation_mode=dict(type='str', aliases=['mode'], choices=['dynamic', 'static']),
description=dict(type='str', aliases=['descr']),
pool=dict(type='str', aliases=['name', 'pool_name']),
pool_type=dict(type='str', aliases=['type'], choices=['vlan', 'vxlan', 'vsan'], required=True),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['pool']],
['state', 'present', ['pool']],
],
)
allocation_mode = module.params['allocation_mode']
description = module.params['description']
pool = module.params['pool']
pool_type = module.params['pool_type']
state = module.params['state']
aci_class = ACI_MAPPING[pool_type]["aci_class"]
aci_mo = ACI_MAPPING[pool_type]["aci_mo"]
pool_name = pool
# ACI Pool URL requires the allocation mode for vlan and vsan pools (ex: uni/infra/vlanns-[poolname]-static)
if pool_type != 'vxlan' and pool is not None:
if allocation_mode is not None:
pool_name = '[{0}]-{1}'.format(pool, allocation_mode)
else:
module.fail_json(msg='ACI requires the "allocation_mode" for "pool_type" of "vlan" and "vsan" when the "pool" is provided')
# Vxlan pools do not support allocation modes
if pool_type == 'vxlan' and allocation_mode is not None:
module.fail_json(msg='vxlan pools do not support setting the allocation_mode; please remove this parameter from the task')
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class=aci_class,
aci_rn='{0}{1}'.format(aci_mo, pool_name),
filter_target='eq({0}.name, "{1}")'.format(aci_class, pool),
module_object=pool,
),
)
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class=aci_class,
class_config=dict(
allocMode=allocation_mode,
descr=description,
name=pool,
)
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class=aci_class)
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
| le9i0nx/ansible | lib/ansible/modules/network/aci/aci_encap_pool.py | Python | gpl-3.0 | 5,480 |
from fake_switches.switch_configuration import Port, AggregatedPort
_unique_port_index = 20000
def _unique_port():
global _unique_port_index
_unique_port_index += 1
return _unique_port_index
def _juniper_ports_with_less_ae():
return [Port("ge-0/0/{}".format(i)) for i in range(1, 5)] + \
[AggregatedPort("ae{}".format(i)) for i in range(1, 5)]
| internap/fake-switches | tests/util/__init__.py | Python | apache-2.0 | 376 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import win32api
import win32con
import winerror
import win32gui_struct
import win32ui
from win32com.shell import shell, shellcon
try:
import winxpgui as win32gui
except ImportError:
import win32gui
import struct, array
import commctrl
IDC_SEARCHTEXT = 1024
IDC_BUTTON_CANCEL = 1025
IDC_BUTTON_OK = 1026
desktop_pidl = shell.SHGetFolderLocation (0, shellcon.CSIDL_DESKTOP, 0, 0)
WM_SEARCH_RESULT = win32con.WM_USER + 512
WM_SEARCH_FINISHED = win32con.WM_USER + 513
class _WIN32MASKEDSTRUCT:
def __init__(self, **kw):
full_fmt = ""
for name, fmt, default, mask in self._struct_items_:
self.__dict__[name] = None
if fmt == "z":
full_fmt += "pi"
else:
full_fmt += fmt
for name, val in kw.iteritems():
if name not in self.__dict__:
raise ValueError("LVITEM structures do not have an item '%s'" % (name,))
self.__dict__[name] = val
def __setattr__(self, attr, val):
if not attr.startswith("_") and attr not in self.__dict__:
raise AttributeError(attr)
self.__dict__[attr] = val
def toparam(self):
self._buffs = []
full_fmt = ""
vals = []
mask = 0
# calc the mask
for name, fmt, default, this_mask in self._struct_items_:
if this_mask is not None and self.__dict__.get(name) is not None:
mask |= this_mask
self.mask = mask
for name, fmt, default, this_mask in self._struct_items_:
val = self.__dict__[name]
if fmt == "z":
fmt = "Pi"
if val is None:
vals.append(0)
vals.append(0)
else:
# Note this demo still works with byte strings. An
# alternate strategy would be to use unicode natively
# and use the 'W' version of the messages - eg,
# LVM_SETITEMW etc.
val = val + "\0"
if isinstance(val, unicode):
val = val.encode("mbcs")
str_buf = array.array("b", val)
vals.append(str_buf.buffer_info()[0])
vals.append(len(val))
self._buffs.append(str_buf) # keep alive during the call.
else:
if val is None:
val = default
vals.append(val)
full_fmt += fmt
return struct.pack(*(full_fmt,) + tuple(vals))
# NOTE: See the win32gui_struct module for an alternative way of dealing
# with these structures
class LVITEM(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("iItem", "i", 0, None),
("iSubItem", "i", 0, None),
("state", "I", 0, commctrl.LVIF_STATE),
("stateMask", "I", 0, None),
("text", "z", None, commctrl.LVIF_TEXT),
("iImage", "i", 0, commctrl.LVIF_IMAGE),
("lParam", "i", 0, commctrl.LVIF_PARAM),
("iIdent", "i", 0, None),
]
class LVCOLUMN(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("fmt", "i", 0, commctrl.LVCF_FMT),
("cx", "i", 0, commctrl.LVCF_WIDTH),
("text", "z", None, commctrl.LVCF_TEXT),
("iSubItem", "i", 0, commctrl.LVCF_SUBITEM),
("iImage", "i", 0, commctrl.LVCF_IMAGE),
("iOrder", "i", 0, commctrl.LVCF_ORDER),
]
def MakeLoginDlgTemplate(title):
style = win32con.DS_MODALFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
# Window frame and title
dlg = [[title, (0, 0, 184, 40), style, None, (8, "MS Sans Serif")], ]
# ID label and text box
dlg.append([130, "User ID:", -1, (7, 9, 69, 9), cs | win32con.SS_LEFT])
s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
dlg.append(['EDIT', None, win32ui.IDC_EDIT1, (50, 7, 60, 12), s])
# Password label and text box
dlg.append([130, "Password:", -1, (7, 22, 69, 9), cs | win32con.SS_LEFT])
s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
dlg.append(['EDIT', None, win32ui.IDC_EDIT2, (50, 20, 60, 12),
s | win32con.ES_PASSWORD])
# OK/Cancel Buttons
s = cs | win32con.WS_TABSTOP
dlg.append([128, "OK", win32con.IDOK, (124, 5, 50, 14),
s | win32con.BS_DEFPUSHBUTTON])
s = win32con.BS_PUSHBUTTON | s
dlg.append([128, "Cancel", win32con.IDCANCEL, (124, 20, 50, 14), s])
return dlg
def MakePasswordDlgTemplate(title):
style = win32con.DS_MODALFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
# Window frame and title
dlg = [[title, (0, 0, 177, 45), style, None, (8, "MS Sans Serif")], ]
# Password label and text box
dlg.append([130, "Password:", -1, (7, 7, 69, 9), cs | win32con.SS_LEFT])
s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
dlg.append(['EDIT', None, win32ui.IDC_EDIT1, (50, 7, 60, 12),
s | win32con.ES_PASSWORD])
# OK/Cancel Buttons
s = cs | win32con.WS_TABSTOP | win32con.BS_PUSHBUTTON
dlg.append([128, "OK", win32con.IDOK, (124, 5, 50, 14),
s | win32con.BS_DEFPUSHBUTTON])
dlg.append([128, "Cancel", win32con.IDCANCEL, (124, 22, 50, 14), s])
return dlg
class InputDialog(object):
def __init__(self, title='Input Dialog', message='Enter text:'):
win32gui.InitCommonControls()
self.hinst = win32gui.dllhandle
self.list_data = {}
self.className = "AvaInputDialog"
self.title = title
self.message = message
self.hwnd = None
self.value = None
def _RegisterWndClass(self):
message_map = {}
wc = win32gui.WNDCLASS()
wc.SetDialogProc() # Make it a dialog class.
wc.hInstance = self.hinst
wc.lpszClassName = self.className
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
wc.hbrBackground = win32con.COLOR_WINDOW + 1
wc.lpfnWndProc = message_map # could also specify a wndproc.
# C code: wc.cbWndExtra = DLGWINDOWEXTRA + sizeof(HBRUSH) + (sizeof(COLORREF));
wc.cbWndExtra = win32con.DLGWINDOWEXTRA + struct.calcsize("Pi")
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
## py.ico went away in python 2.5, load from executable instead
this_app = win32api.GetModuleHandle(None)
try:
wc.hIcon = win32gui.LoadIcon(this_app, 1) ## python.exe and pythonw.exe
except win32gui.error:
wc.hIcon=win32gui.LoadIcon(this_app, 135) ## pythonwin's icon
try:
classAtom = win32gui.RegisterClass(wc)
except win32gui.error, err_info:
if err_info.winerror!=winerror.ERROR_CLASS_ALREADY_EXISTS:
raise
return self.className
def _GetDialogTemplate(self, dlgClassName):
style = win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
# Window frame and title
dlg = [ [self.title, (0, 0, 210, 60), style, None, (8, "MS Sans Serif"), None, dlgClassName], ]
# ID label and text box
dlg.append([130, self.message, -1, (5, 5, 200, 9), cs | win32con.SS_LEFT])
s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
dlg.append(['EDIT', None, IDC_SEARCHTEXT, (5, 15, 200, 12), s])
# Search/Display Buttons
# (x positions don't matter here)
s = cs | win32con.WS_TABSTOP
dlg.append([128, "Cancel", IDC_BUTTON_CANCEL, (100, 35, 50, 14), s])
s = win32con.BS_PUSHBUTTON | s
dlg.append([128, "OK", IDC_BUTTON_OK, (100, 35, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
return dlg
def _DoCreate(self, fn):
message_map = {
win32con.WM_SIZE: self.OnSize,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_NOTIFY: self.OnNotify,
win32con.WM_INITDIALOG: self.OnInitDialog,
win32con.WM_CLOSE: self.OnClose,
win32con.WM_DESTROY: self.OnDestroy,
}
dlgClassName = self._RegisterWndClass()
template = self._GetDialogTemplate(dlgClassName)
return fn(self.hinst, template, 0, message_map)
def OnInitDialog(self, hwnd, msg, wparam, lparam):
self.hwnd = hwnd
# centre the dialog
desktop = win32gui.GetDesktopWindow()
l,t,r,b = win32gui.GetWindowRect(self.hwnd)
dt_l, dt_t, dt_r, dt_b = win32gui.GetWindowRect(desktop)
centre_x, centre_y = win32gui.ClientToScreen( desktop, ( (dt_r-dt_l)//2, (dt_b-dt_t)//2) )
win32gui.MoveWindow(hwnd, centre_x-(r//2), centre_y-(b//2), r-l, b-t, 0)
# self._SetupList()
l,t,r,b = win32gui.GetClientRect(self.hwnd)
self._DoSize(r-l, b-t, 1)
def _DoSize(self, cx, cy, repaint = 1):
# right-justify the textbox.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_SEARCHTEXT)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
win32gui.MoveWindow(ctrl, l, t, cx-l-5, b-t, repaint)
# The button.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_BUTTON_OK)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
list_y = b + 10
w = r - l
win32gui.MoveWindow(ctrl, cx - 5 - w, t, w, b-t, repaint)
def OnSize(self, hwnd, msg, wparam, lparam):
x = win32api.LOWORD(lparam)
y = win32api.HIWORD(lparam)
self._DoSize(x,y)
return 1
def OnNotify(self, hwnd, msg, wparam, lparam):
info = win32gui_struct.UnpackNMITEMACTIVATE(lparam)
if info.code == commctrl.NM_DBLCLK:
print("Double click on item", info.iItem+1)
return 1
def OnCommand(self, hwnd, msg, wparam, lparam):
id = win32api.LOWORD(wparam)
if id == IDC_BUTTON_CANCEL:
print("Cancel button pressed.")
win32gui.EndDialog(hwnd, 0)
elif id == IDC_BUTTON_OK:
print("Ok button pressed")
self.value = win32gui.GetDlgItemText(self.hwnd, IDC_SEARCHTEXT)
print(self.value)
win32gui.EndDialog(hwnd, 1)
def OnDestroy(self, hwnd, msg, wparam, lparam):
pass
def DoModal(self):
if self._DoCreate(win32gui.DialogBoxIndirect):
return self.value
else:
return None
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.EndDialog(hwnd, 0)
def getTextInput(title='Input Dialog', message='Enter text:'):
w=InputDialog(title, message)
return w.DoModal()
def chooseOpenFolder():
"""
Open a dialog for user to choose a folder/directory.
:return: the path to the folder, or None if not selected.
"""
pidl, display_name, image_list = shell.SHBrowseForFolder (
win32gui.GetDesktopWindow (),
desktop_pidl,
"Choose a folder",
0,
None,
None
)
if pidl is None:
return None
return shell.SHGetPathFromIDList(pidl)
# Test code
if __name__ == '__main__':
# print(chooseOpenFolder())
print(getTextInput())
| nickchen-mitac/fork | src/avashell/win32/simpledialog.py | Python | apache-2.0 | 11,791 |
#!/usr/local/bin/python
'''PSwAaDS 1.12 Self Check - Inf Monkeys w/ Hill Climb'''
import random as rand
TARGET = 'methinks it is like a weasel'
def generate(instr, matchset):
'''generate based on matchset'''
outstr = ''
for i, _ in enumerate(TARGET):
if i in matchset:
outstr += instr[i]
else:
val = rand.randint(0, 26)
if val == 26:
outstr += ' '
else:
outstr += chr(val+ord('a'))
return outstr
def check(instr):
'''check and update matchset'''
matchlist = []
for i, _ in enumerate(TARGET):
if instr[i] == TARGET[i]:
matchlist.append(i)
return matchlist
def main():
'''main area'''
done = False
ctr = 0
workingstr = ''
matchlist = []
while not done:
workingstr = generate(workingstr, matchlist)
matchlist = check(workingstr)
ctr = ctr + 1
print str(ctr), str(len(matchlist)), workingstr
if len(matchlist) == len(TARGET):
print 'Perfect match after', ctr, 'iterations'
return
main()
| jonjon33/sandbox | python/dsbook/1.12-func/hcmonkey.py | Python | mit | 1,125 |
""" conf test cases """
import unittest
from zk_shell.conf import Conf, ConfVar
class ConfTestCase(unittest.TestCase):
""" test conf code """
def setUp(self):
""" nothing for now """
pass
def test_conf(self):
""" basic tests """
conf = Conf(
ConfVar(
"foo",
"A foo variable",
10
),
ConfVar(
"bar",
"A bar variable",
"some value"
)
)
self.assertEqual(conf.get_int("foo"), 10)
self.assertEqual(conf.get_str("bar"), "some value")
self.assertEqual(len(list(conf.get_all())), 2)
| harlowja/zk_shell | zk_shell/tests/test_conf.py | Python | apache-2.0 | 701 |
from .__about__ import __version__
__all__ = ['__version__']
| DataDog/integrations-extras | resin/datadog_checks/resin/__init__.py | Python | bsd-3-clause | 62 |
from __future__ import absolute_import, unicode_literals
import datetime
import json
import logging
import requests
import six
from abc import ABCMeta, abstractmethod
from . import __version__, CLIENT_NAME
from .utils import format_iso8601, parse_iso8601
logger = logging.getLogger(__name__)
# =====================================================================
# Exceptions
# =====================================================================
class TembaException(Exception):
def __unicode__(self): # pragma: no cover
return self.message
def __str__(self):
return str(self.__unicode__())
class TembaNoSuchObjectError(TembaException):
message = "Request for single object returned no objects"
class TembaMultipleResultsError(TembaException):
message = "Request for single object returned multiple objects"
class TembaAPIError(TembaException):
"""
Errors returned by the Temba API
"""
message = "API request error"
def __init__(self, caused_by):
self.caused_by = caused_by
self.errors = {}
# if error was caused by a HTTP 400 response, we may have a useful validation error
if isinstance(caused_by, requests.HTTPError) and caused_by.response.status_code == 400:
try:
self.errors = caused_by.response.json()
except ValueError:
self.errors = {'non_field_errors': [caused_by.response.content]}
pass
def __unicode__(self):
if self.errors:
msgs = []
for field, field_errors in six.iteritems(self.errors):
for error in field_errors:
msgs.append(error)
cause = msgs[0] if len(msgs) == 1 else ". ".join(msgs)
return "%s. Caused by: %s" % (self.message, cause)
else:
return "%s. Caused by: %s" % (self.message, six.text_type(self.caused_by))
class TembaConnectionError(TembaException):
message = "Unable to connect to host"
# =====================================================================
# Paging
# =====================================================================
class TembaPager(object):
def __init__(self, start_page):
self.start_page = start_page
self.count = None
self.next_url = None
def update(self, response):
self.count = response['count']
self.next_url = response['next']
@property
def total(self):
return self.count
def has_more(self):
return bool(self.next_url)
# =====================================================================
# Domain objects
# =====================================================================
class TembaObject(object):
"""
Base class for objects returned by the Temba API
"""
__metaclass__ = ABCMeta
@classmethod
def create(cls, **kwargs):
source = kwargs.copy()
instance = cls()
for attr_name, field in six.iteritems(cls._get_fields()):
if attr_name in source:
field_value = source.pop(attr_name)
else:
field_value = None
setattr(instance, attr_name, field_value)
for remaining in source:
raise ValueError("Class %s has no attribute '%s'" % (cls.__name__, remaining))
return instance
@classmethod
def deserialize(cls, item):
instance = cls()
for attr_name, field in six.iteritems(cls._get_fields()):
field_source = field.src if field.src else attr_name
if field_source not in item and not field.optional:
raise TembaException("Serialized %s item is missing field '%s'" % (cls.__name__, field_source))
field_value = item.get(field_source, None)
attr_value = field.deserialize(field_value)
setattr(instance, attr_name, attr_value)
return instance
@classmethod
def deserialize_list(cls, item_list):
return [cls.deserialize(item) for item in item_list]
def serialize(self):
item = {}
for attr_name, field in six.iteritems(self._get_fields()):
attr_value = getattr(self, attr_name, None)
field_value = field.serialize(attr_value)
field_source = field.src if field.src else six.text_type(attr_name)
item[field_source] = field_value
return item
@classmethod
def _get_fields(cls):
return {k: v for k, v in six.iteritems(cls.__dict__) if isinstance(v, TembaField)}
# =====================================================================
# Field types
# =====================================================================
class TembaField(object):
__metaclass__ = ABCMeta
def __init__(self, src=None, optional=False):
self.src = src
self.optional = optional
@abstractmethod
def deserialize(self, value): # pragma: no cover
pass
@abstractmethod
def serialize(self, value): # pragma: no cover
pass
class SimpleField(TembaField):
def deserialize(self, value):
return value
def serialize(self, value):
return value
class IntegerField(SimpleField):
def deserialize(self, value):
if value and type(value) not in six.integer_types:
raise TembaException("Value '%s' field is not an integer" % six.text_type(value))
return value
class DatetimeField(TembaField):
def deserialize(self, value):
return parse_iso8601(value)
def serialize(self, value):
return format_iso8601(value)
class ObjectField(TembaField):
def __init__(self, item_class, src=None):
super(ObjectField, self).__init__(src)
self.item_class = item_class
def deserialize(self, value):
return self.item_class.deserialize(value)
def serialize(self, value):
return self.item_class.serialize(value)
class ObjectListField(ObjectField):
def deserialize(self, value):
if not isinstance(value, list):
raise TembaException("Value '%s' field is not a list" % six.text_type(value))
return self.item_class.deserialize_list(value)
def serialize(self, value):
if not isinstance(value, list):
raise TembaException("Value '%s' field is not a list" % six.text_type(value))
return [self.item_class.serialize(item) for item in value]
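

# Editor's sketch, not part of the original module: how TembaObject and the field
# classes above combine. ExampleContact is illustrative only; the real client
# defines its concrete types elsewhere.
def _field_usage_sketch():  # illustration only, never called
    class ExampleContact(TembaObject):
        uuid = SimpleField()
        name = SimpleField()
        created_on = DatetimeField()

    contact = ExampleContact.deserialize({
        'uuid': 'bfff9984-38f4-4e59-998d-3663ec3c650d',
        'name': 'Joe',
        'created_on': '2014-10-01T06:54:09.817Z',
    })
    assert contact.name == 'Joe'  # each declared field becomes an attribute
    return contact.serialize()    # round-trips back to a plain dict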
# =====================================================================
# Client base
# =====================================================================
class AbstractTembaClient(object):
"""
Abstract and version agnostic base client class
"""
__metaclass__ = ABCMeta
def __init__(self, host, token, user_agent=None):
if host.startswith('http'):
if host.endswith('/'):
self.root_url = host[:-1]
else:
self.root_url = host
else:
self.root_url = 'https://%s/api/v1' % host
self.headers = self._headers(token, user_agent)
@staticmethod
def _headers(token, user_agent):
if user_agent:
user_agent_header = '%s %s/%s' % (user_agent, CLIENT_NAME, __version__)
else:
user_agent_header = '%s/%s' % (CLIENT_NAME, __version__)
return {'Content-type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Token %s' % token,
'User-Agent': user_agent_header}
def pager(self, start_page=1):
"""
Returns a new pager
:param int start_page: the starting page number
:return: the pager
"""
return TembaPager(start_page)
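
    # Editor's note (sketch, not part of the original): a typical paging loop
    # against a concrete client subclass. `client.get_contacts` and `process`
    # are hypothetical names used only for illustration.
    #
    #   pager = client.pager()
    #   while True:
    #       batch = client.get_contacts(pager=pager)   # hypothetical endpoint method
    #       process(batch)
    #       if not pager.has_more():
    #           break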
def _get_single(self, endpoint, params, from_results=True):
"""
GETs a single result from the given endpoint. Throws an exception if there are no or multiple results.
"""
url = '%s/%s.json' % (self.root_url, endpoint)
response = self._request('get', url, params=params)
if from_results:
num_results = len(response['results'])
if num_results > 1:
raise TembaMultipleResultsError()
elif num_results == 0:
raise TembaNoSuchObjectError()
else:
return response['results'][0]
else:
return response
def _get_multiple(self, endpoint, params, pager):
"""
GETs multiple results from the given endpoint
"""
if pager:
return self._get_page(endpoint, params, pager)
else:
return self._get_all(endpoint, params)
def _get_page(self, endpoint, params, pager):
"""
GETs a page of results from the given endpoint
"""
if pager.next_url:
url = pager.next_url
params = None
else:
url = '%s/%s.json' % (self.root_url, endpoint)
if pager.start_page != 1:
params['page'] = pager.start_page
response = self._request('get', url, params=params)
pager.update(response)
return response['results']
def _get_all(self, endpoint, params):
"""
GETs all results from the given endpoint using multiple requests to fetch all pages
"""
results = []
url = '%s/%s.json' % (self.root_url, endpoint)
while url:
response = self._request('get', url, params=params)
results += response['results']
url = response.get('next', None)
params = {}
return results
def _post(self, endpoint, payload):
"""
POSTs to the given endpoint which must return a single item or list of items
"""
url = '%s/%s.json' % (self.root_url, endpoint)
return self._request('post', url, body=payload)
def _delete(self, endpoint, params):
"""
DELETEs to the given endpoint which won't return anything
"""
url = '%s/%s.json' % (self.root_url, endpoint)
self._request('delete', url, params=params)
def _request(self, method, url, body=None, params=None):
"""
Makes a GET or POST request to the given URL and returns the parsed JSON
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug("%s %s %s" % (method.upper(), url, json.dumps(params if params else body)))
try:
kwargs = {'headers': self.headers}
if body:
kwargs['data'] = body
if params:
kwargs['params'] = params
response = request(method, url, **kwargs)
response.raise_for_status()
return response.json() if response.content else None
except requests.HTTPError as ex:
raise TembaAPIError(ex)
except requests.exceptions.ConnectionError:
raise TembaConnectionError()
@classmethod
def _build_params(cls, **kwargs):
"""
Helper method to build params for a POST body or query string. Converts Temba objects to ids and UUIDs and
removes None values.
"""
params = {}
for kwarg, value in six.iteritems(kwargs):
if value is None:
continue
else:
params[kwarg] = cls._serialize_value(value)
return params
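
    # Editor's sketch, not part of the original: what _build_params produces.
    # Assuming `contact` is a TembaObject with a `uuid` attribute:
    #
    #   cls._build_params(contact=contact, urns=None, archived=False)
    #   # -> {'contact': contact.uuid, 'archived': 0}
    #
    # None values are dropped, booleans become 1/0, datetimes go through
    # format_iso8601, and Temba objects collapse to their uuid (or id).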
@classmethod
def _serialize_value(cls, value):
if isinstance(value, list) or isinstance(value, tuple):
serialized = []
for item in value:
serialized.append(cls._serialize_value(item))
return serialized
elif isinstance(value, TembaObject):
if hasattr(value, 'uuid'):
return value.uuid
elif hasattr(value, 'id'):
return value.id
elif isinstance(value, datetime.datetime):
return format_iso8601(value)
elif isinstance(value, bool):
return 1 if value else 0
else:
return value
def request(method, url, **kwargs): # pragma: no cover
"""
For the purposes of testing, all calls to requests.request go through here before JSON bodies are encoded. It's
easier to mock this and verify request data before it's encoded.
"""
if 'data' in kwargs:
kwargs['data'] = json.dumps(kwargs['data'])
return requests.request(method, url, **kwargs)
| caktus/rapidpro-python | temba_client/base.py | Python | bsd-3-clause | 12,474 |
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
class Usuario(models.Model):
""" Este modelo one-to-one es frecuentemente llamado modelo Perfil """
usuario = models.OneToOneField(User, on_delete=models.CASCADE)
# pip install Pillow
foto = models.ImageField(upload_to = 'imagenes/', default = 'imagenes/None/no-img.jpg')
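
# Editor's sketch, not part of the original file: creating the profile row for an
# existing auth User. The names below (some_user, perfil) are illustrative only.
#
#   from django.contrib.auth.models import User
#
#   some_user = User.objects.create_user(username='demo', password='secret')
#   perfil = Usuario.objects.create(usuario=some_user)   # foto keeps its default image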
| mike-rg/MyGoalTracker | myGoalTracker/usuario/models.py | Python | gpl-3.0 | 388 |
import os
import sys
def is_excluded():
file_name = os.path.basename(sys.argv[0])
path = os.path.dirname(os.getenv("APPDATA")) # get path to APPDATA
path += "\\Local\\Animation Labs\\vorpX\\vorpControl.ini"
with open(path, mode="r") as f:
return file_name in f.read()
if __name__ == '__main__':
is_excluded()
| Fire-Proof/LoLVRSpectate | LoLVRSpectate/VorpX.py | Python | gpl-3.0 | 343 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.forms import ValidationError
from django.forms.models import ModelForm
from django.forms.widgets import Media
from django.utils.translation import ugettext_lazy as _
from djangocms_link.models import Link
class LinkForm(ModelForm):
try:
from djangocms_link.fields import PageSearchField
page_link = PageSearchField(label=_('Page'), required=False)
except ImportError:
from cms.forms.fields import PageSelectFormField
page_link = PageSelectFormField(label=_('Page'), required=False)
def for_site(self, site):
# override the page_link fields queryset to contains just pages for
# current site
# this will work for PageSelectFormField
from cms.models import Page
self.fields['page_link'].queryset = Page.objects.drafts().on_site(site)
# set the current site as a page_link field instance attribute
# this will be used by the field later to properly set up the queryset
# this will work for PageSearchField
self.fields['page_link'].site = site
class Meta:
model = Link
exclude = ('page', 'position', 'placeholder', 'language', 'plugin_type')
def _get_media(self):
"""
Provide a description of all media required to render the widgets on this form
"""
media = Media()
for field in self.fields.values():
media = media + field.widget.media
media._js = ['cms/js/libs/jquery.min.js'] + media._js
return media
media = property(_get_media)
def clean(self):
cleaned_data = super(LinkForm, self).clean()
url = cleaned_data.get('url')
page_link = cleaned_data.get('page_link')
mailto = cleaned_data.get('mailto')
phone = cleaned_data.get('phone')
anchor = cleaned_data.get('anchor')
if not any([url, page_link, mailto, phone, anchor]):
raise ValidationError(_('At least one link is required.'))
return cleaned_data
| Glasgow2015/team-10 | env/lib/python2.7/site-packages/djangocms_link/forms.py | Python | apache-2.0 | 2,110 |
#!/usr/bin/python
##
# Use : To make the reverse complement of sequences in a multi-FASTA file
# Require : preinstalled BioPython
# Usage : python Rev_Comp.py <InputFile> > <OutputFile>
##
import sys
file=sys.argv[1]
print >> sys.stderr, file
from Bio import SeqIO
for record in SeqIO.parse(file, "fasta"):
print ">"+record.id
print record.seq.reverse_complement()
| minesh1291/Sequence-Utilities | Rev_Comp.py | Python | apache-2.0 | 359 |
import copy
import unittest
import warnings
import mock
import numpy
import pytest
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import initializers
from chainer import testing
from chainer.testing import attr
import chainerx
def _assert_variable_array_equal(var, expected_array):
assert var.shape == expected_array.shape
assert var.dtype == expected_array.dtype
_assert_arrays_equal(var.data, expected_array)
def _assert_arrays_equal(array, expected_array):
array = backend.CpuDevice().send(array)
assert array.shape == expected_array.shape
assert array.dtype == expected_array.dtype
assert (array == expected_array).all()
class LinkTestBase(object):
def setUp(self):
x_shape_0 = 2
x_shape_1 = numpy.int64(3)
self.link = chainer.Link(x=((x_shape_0, x_shape_1), 'd'),
u=(None, 'd'))
with self.link.init_scope():
self.link.y = chainer.Parameter(shape=(2,))
self.link.v = chainer.Parameter()
self.p = numpy.array([1, 2, 3], dtype='f')
self.link.add_persistent('p', self.p)
self.link.name = 'a'
self.link.x.update_rule = chainer.UpdateRule()
self.link.x.update_rule.enabled = False
self.link.u.update_rule = chainer.UpdateRule()
if cuda.available:
self.current_device_id = cuda.cupy.cuda.get_device_id()
def tearDown(self):
if cuda.available \
and cuda.cupy.cuda.get_device_id() != self.current_device_id:
cuda.Device(self.current_device_id).use()
def check_param_init(self, name, shape, dtype, data_value=numpy.nan):
self.assertTrue(hasattr(self.link, name))
var = getattr(self.link, name)
self.assertEqual(var.name, name)
self.assertIsInstance(var, chainer.Parameter)
self.assertEqual(var.data.shape, shape)
self.assertEqual(var.data.dtype, dtype)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(var.data), data_value)
self.assertEqual(var.grad.shape, shape)
self.assertEqual(var.grad.dtype, dtype)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(var.grad), numpy.nan)
def check_param_uninit(self, name, initializer=None):
self.assertTrue(hasattr(self.link, name))
var = getattr(self.link, name)
self.assertIsInstance(var, chainer.Parameter)
self.assertEqual(var.name, name)
self.assertIsNone(var.data)
if initializer is not None:
self.assertIs(var.initializer, initializer)
class TestLink(LinkTestBase, unittest.TestCase):
def test_init(self):
self.check_param_init('x', (2, 3), 'd')
self.check_param_init('y', (2,), 'f')
self.check_param_uninit('u')
self.link.u.initialize((2, 3))
self.check_param_init('u', (2, 3), 'd')
self.check_param_uninit('v')
self.link.v.initialize((2, 3))
self.check_param_init('v', (2, 3), 'f')
def test_str(self):
# empty Link
self.assertEqual(str(chainer.Link()), 'Link()')
class MyLink(chainer.Link):
pass
# Link without overriding printable_specs
self.assertEqual(str(MyLink()), 'MyLink()')
class LinearForTest(chainer.Link):
def __init__(self, in_size, out_size, nobias=False):
self.in_size = in_size
self.out_size = out_size
self.nobias = nobias
@property
def printable_specs(self):
specs = [
('in_size', self.in_size),
('out_size', self.out_size),
('nobias', self.nobias)
]
for spec in specs:
yield spec
def __call__(self):
pass
# Link with overriding printable_specs
self.assertEqual(
str(LinearForTest(10, 1)),
'LinearForTest(in_size=10, out_size=1, nobias=False)',
)
def test_assign_param_outside_of_init_scope(self):
p = chainer.Parameter()
self.link.p = p
self.assertTrue(all(p is not param for param in self.link.params()))
def test_assign_var_in_init_scope(self):
p = chainer.Variable()
with self.link.init_scope():
self.link.p = p
self.assertTrue(all(p is not param for param in self.link.params()))
def test_call_injected_with_mixin(self):
call = mock.MagicMock()
call.return_value = 3
class CallMixin(object):
__call__ = call
class InjectedLink(chainer.Link, CallMixin):
pass
link = InjectedLink()
ret = link(1, a=2)
call.assert_called_once_with(1, a=2)
assert ret == call.return_value
def test_add_param(self):
self.link.add_param('z', (2, 3))
self.check_param_init('z', (2, 3), 'f')
self.link.add_param('w', (2, 3), dtype='d')
self.check_param_init('w', (2, 3), 'd')
self.link.add_param('r')
self.check_param_uninit('r')
self.link.r.initialize((2, 3))
self.check_param_init('r', (2, 3), 'f')
self.link.add_param('s', dtype='d')
self.check_param_uninit('s')
self.link.s.initialize((2, 3))
self.check_param_init('s', (2, 3), 'd')
initializer = initializers.Zero('d')
self.link.add_param('t', initializer=initializer)
self.check_param_uninit('t', initializer)
self.link.t.initialize((2, 3))
self.check_param_init('t', (2, 3), 'd', 0)
def test_add_param_direct_initialization(self):
z = numpy.random.rand(2, 3).astype('f')
self.link.add_param('z', initializer=z)
self.assertIsInstance(self.link.z.data, numpy.ndarray)
numpy.testing.assert_array_equal(self.link.z.data, z)
def test_add_param_duplicated_with_persistent(self):
self.link.add_persistent('z', 'abc')
with self.assertRaises(AttributeError):
self.link.add_param('z', (2, 3))
def test_add_persistent(self):
self.assertTrue(hasattr(self.link, 'p'))
self.assertIs(self.link.p, self.p)
self.link.add_persistent('q', 'abc')
self.assertTrue(hasattr(self.link, 'q'))
self.assertEqual(self.link.q, 'abc')
def test_delete(self):
del self.link.x
self.assertFalse(hasattr(self.link, 'x'))
self.assertNotIn('x', self.link._params)
self.assertNotIn('x', self.link._persistent)
del self.link.p
self.assertFalse(hasattr(self.link, 'p'))
self.assertNotIn('p', self.link._params)
self.assertNotIn('p', self.link._persistent)
def test_copy_with_share_mode(self):
link = self.link.copy(mode='share')
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIs(link.x.array, self.link.x.array)
self.assertIsNot(link.y, self.link.y)
self.assertIs(link.y.array, self.link.y.array)
self.assertIsNone(link.u.array)
self.assertIs(link.p, self.link.p)
self.assertIs(link.name, None)
def test_copy_with_copy_mode(self):
link = self.link.copy(mode='copy')
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIsNot(link.x.array, self.link.x.array)
self.assertIsNot(link.y, self.link.y)
self.assertIsNot(link.y.array, self.link.y.array)
self.assertIsNone(link.u.array)
self.assertIsNot(link.p, self.link.p)
self.assertIsNot(link.name, None)
def test_copy_with_init_mode(self):
self.link.u.initializer = initializers.Normal(
dtype=self.link.u.initializer.dtype)
self.link.u.initialize((2, 3))
link = self.link.copy(mode='init')
self.assertFalse(numpy.array_equal(self.link.u.array, link.u.array))
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIsNot(link.x.array, self.link.x.array)
self.assertIsNot(link.y, self.link.y)
self.assertIsNot(link.y.array, self.link.y.array)
self.assertIsNot(link.p, self.link.p)
self.assertIsNot(link.name, None)
@attr.gpu
def test_copy_and_to_gpu_init(self):
cupy = cuda.cupy
l0 = self.link
l1 = l0.copy()
self.assertIs(l0.x.data, l1.x.data)
l1.to_gpu()
self.assertIsNot(l0.x.data, l1.x.data)
self.assertIsInstance(l0.x.data, numpy.ndarray)
self.assertIsInstance(l1.x.data, cupy.ndarray)
@attr.gpu
def test_copy_and_to_gpu_uninit(self):
cupy = cuda.cupy
l0 = self.link
l1 = l0.copy()
self.assertIs(l0.device.xp, numpy)
self.assertIsNone(l0.u.data)
self.assertIsNone(l1.u.data)
l1.to_gpu()
self.assertIs(l0.device.xp, numpy)
self.assertIsNone(l0.u.data)
l1.u.initialize((2, 3))
self.assertIsNone(l0.u.data)
self.assertIsInstance(l1.u.data, cupy.ndarray)
@attr.multi_gpu(2)
def test_copy_and_to_gpu_uninit_multi_gpu(self):
cupy = cuda.cupy
l0 = self.link
l1 = l0.copy()
l2 = l0.copy()
self.assertIsNone(l0.u.data)
self.assertIsNone(l1.u.data)
self.assertIsNone(l2.u.data)
l1.to_gpu()
l1.u.initialize((2, 3))
l2.to_gpu()
l2.u.initialize((2, 3))
self.assertIsNone(l0.u.data)
self.assertIsInstance(l1.u.data, cupy.ndarray)
self.assertIsInstance(l2.u.data, cupy.ndarray)
self.assertNotEqual(l1.u.data.data, l2.u.data.data)
def _check_deepcopy(self, link):
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIsNot(link.x.data, self.link.x.data)
numpy.testing.assert_array_equal(cuda.to_cpu(link.x.data),
cuda.to_cpu(self.link.x.data))
self.assertIsNot(link.y, self.link.y)
self.assertIsNot(link.y.data, self.link.y.data)
numpy.testing.assert_array_equal(cuda.to_cpu(link.y.data),
cuda.to_cpu(self.link.y.data))
self.assertIsNone(link.u.data)
self.assertIsNot(link.p, self.link.p)
self.assertEqual(link.name, self.link.name)
def test_deepcopy(self):
link = copy.deepcopy(self.link)
self._check_deepcopy(link)
self.assertEqual(link.device.xp, numpy)
@attr.multi_gpu(2)
def test_deepcopy_multi_device(self):
device_id = 1
self.link.to_gpu(device_id)
link = copy.deepcopy(self.link)
self._check_deepcopy(link)
self.assertEqual(link.device.device, cuda.Device(device_id))
self.assertEqual(link.x.data.device.id, device_id)
self.assertEqual(link.y.data.device.id, device_id)
def test_to_cpu_on_cpu(self):
x = self.link.x.data
gx = self.link.x.grad
y = self.link.y.data
gy = self.link.y.grad
p = self.link.p
self.link.to_cpu()
self.assertIs(self.link.x.data, x)
self.assertIs(self.link.x.grad, gx)
self.assertIs(self.link.y.data, y)
self.assertIs(self.link.y.grad, gy)
self.assertIsNone(self.link.u.data)
u = self.link.u
with pytest.raises(RuntimeError):
u.grad
self.assertIs(self.link.p, p)
@attr.gpu
def test_to_cpu(self):
self.link.to_gpu()
self.link.to_cpu()
self.link.v.initialize((2, 3))
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.x.data, numpy.ndarray)
self.assertIsInstance(self.link.x.grad, numpy.ndarray)
self.assertIsInstance(self.link.y.data, numpy.ndarray)
self.assertIsInstance(self.link.y.grad, numpy.ndarray)
self.assertIsNone(self.link.u.data)
u = self.link.u
with pytest.raises(RuntimeError):
u.grad
self.assertIsInstance(self.link.v.data, numpy.ndarray)
self.assertIsInstance(self.link.v.grad, numpy.ndarray)
self.assertIsInstance(self.link.p, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
cupy = cuda.cupy
self.link.to_gpu()
self.link.v.initialize((2, 3))
self.assertIs(self.link.xp, cupy)
self.assertIsInstance(self.link.x.data, cupy.ndarray)
self.assertIsInstance(self.link.x.grad, cupy.ndarray)
self.assertIsInstance(self.link.y.data, cupy.ndarray)
self.assertIsInstance(self.link.y.grad, cupy.ndarray)
self.assertIsNone(self.link.u.data)
u = self.link.u
with pytest.raises(RuntimeError):
u.grad
self.assertIsInstance(self.link.v.data, cupy.ndarray)
self.assertIsInstance(self.link.v.grad, cupy.ndarray)
self.assertIsInstance(self.link.p, cupy.ndarray)
@attr.multi_gpu(2)
def test_to_gpu_different_current_device(self):
cuda.Device(1).use()
self.link.to_gpu(0)
self.assertEqual(self.link.device.device, cuda.Device(0))
@attr.multi_gpu(2)
def test_to_gpu_different_device(self):
self.link.to_gpu(0)
self.assertEqual(self.link.device.device, cuda.Device(0))
self.assertEqual(self.link.x.data.device, cuda.Device(0))
self.assertEqual(self.link.x.grad.device, cuda.Device(0))
self.assertEqual(self.link.y.data.device, cuda.Device(0))
self.assertEqual(self.link.y.grad.device, cuda.Device(0))
self.assertEqual(self.link.p.device, cuda.Device(0))
self.link.to_gpu(1)
self.assertEqual(self.link.device.device, cuda.Device(1))
self.assertEqual(self.link.x.data.device, cuda.Device(0))
self.assertEqual(self.link.x.grad.device, cuda.Device(0))
self.assertEqual(self.link.y.data.device, cuda.Device(0))
self.assertEqual(self.link.y.grad.device, cuda.Device(0))
self.assertEqual(self.link.p.device, cuda.Device(0))
@attr.multi_gpu(2)
def test_to_gpu_current_device(self):
cuda.Device(1).use()
self.link.to_gpu()
self.assertEqual(self.link.device.device, cuda.Device(1))
def test_params(self):
params = list(self.link.params())
self.assertEqual([id(p) for p in params],
[id(self.link.u), id(self.link.v),
id(self.link.x), id(self.link.y)])
def test_params_skip_uninit(self):
params = list(self.link.params(include_uninit=False))
self.assertEqual([id(p) for p in params],
[id(self.link.x), id(self.link.y)])
def test_namedparams(self):
namedparams = list(self.link.namedparams())
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/u', id(self.link.u)), ('/v', id(self.link.v)),
('/x', id(self.link.x)), ('/y', id(self.link.y))])
def test_namedparams_skip_uninit(self):
namedparams = list(self.link.namedparams(include_uninit=False))
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/x', id(self.link.x)), ('/y', id(self.link.y))])
def test_links(self):
links = list(self.link.links())
self.assertIs(links[0], self.link)
def test_links_skipself(self):
links = list(self.link.links(skipself=True))
self.assertFalse(links) # empty
def test_namedlinks(self):
pl = list(self.link.namedlinks())
self.assertEqual(len(pl), 1)
self.assertEqual(pl[0][0], '/')
self.assertIs(pl[0][1], self.link)
def _setup_test_copyparams(self):
self.link.x.grad.fill(0)
self.link.y.grad.fill(1)
self.link.u.initialize((2, 3))
self.link.u.data.fill(0)
self.link.u.grad.fill(1)
self.link.v.cleargrad()
gx = self.link.x.grad.copy()
gy = self.link.y.grad.copy()
gu = self.link.u.grad.copy()
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=(2, 3))
l.y = chainer.Parameter(shape=2)
l.u = chainer.Parameter(shape=(2, 3))
l.v = chainer.Parameter(shape=(3, 2))
l.x.data.fill(2)
l.x.grad.fill(3)
l.y.data.fill(4)
l.y.grad.fill(5)
l.u.data.fill(6)
l.u.grad.fill(7)
l.v.data.fill(8)
l.v.grad.fill(9)
l.add_persistent('p', numpy.full_like(self.link.p, 10))
return l, (gx, gy, gu)
def _check_copyparams(self, l, gs):
gx, gy, gu = gs
numpy.testing.assert_array_equal(self.link.x.data, l.x.data)
numpy.testing.assert_array_equal(self.link.x.grad, gx)
numpy.testing.assert_array_equal(self.link.y.data, l.y.data)
numpy.testing.assert_array_equal(self.link.y.grad, gy)
numpy.testing.assert_array_equal(self.link.u.data, l.u.data)
numpy.testing.assert_array_equal(self.link.u.grad, gu)
numpy.testing.assert_array_equal(self.link.v.data, l.v.data)
numpy.testing.assert_array_equal(self.link.v.grad, None)
def test_copyparams(self):
l, gs = self._setup_test_copyparams()
self.link.copyparams(l)
self._check_copyparams(l, gs)
numpy.testing.assert_array_equal(self.link.p, l.p)
def test_copyparams_no_copy_persistent(self):
orig_p = self.link.p.copy()
l, gs = self._setup_test_copyparams()
numpy.testing.assert_array_equal(False, orig_p == l.p)
self.link.copyparams(l, copy_persistent=False)
self._check_copyparams(l, gs)
numpy.testing.assert_array_equal(self.link.p, orig_p)
def test_cleargrads(self):
self.link.cleargrads()
self.assertIsNone(self.link.x.grad)
self.assertIsNone(self.link.y.grad)
self.link.u.initialize((2, 3))
self.link.v.initialize((2, 3))
self.assertIsNone(self.link.u.grad)
self.assertIsNone(self.link.v.grad)
def test_zerograds(self):
gx_expect = numpy.zeros_like(self.link.x.data)
gy_expect = numpy.zeros_like(self.link.y.data)
with testing.assert_warns(DeprecationWarning):
self.link.zerograds()
numpy.testing.assert_array_equal(self.link.x.grad, gx_expect)
numpy.testing.assert_array_equal(self.link.y.grad, gy_expect)
self.link.u.initialize((2, 3))
self.link.v.initialize((2, 3))
gu_expect = numpy.zeros_like(self.link.u.data)
gv_expect = numpy.zeros_like(self.link.v.data)
numpy.testing.assert_array_equal(self.link.u.grad, gu_expect)
numpy.testing.assert_array_equal(self.link.v.grad, gv_expect)
def test_addgrads(self):
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=(2, 3),
initializer=initializers.NaN('d'))
l.y = chainer.Parameter(shape=2)
l.u = chainer.Parameter(shape=(2, 3),
initializer=initializers.NaN('d'))
l.v = chainer.Parameter()
l.x.grad.fill(1)
l.y.grad.fill(2)
l.u.grad.fill(3)
# TODO(niboshi): Remove this line after #7140
l.v.cleargrad()
self.link.x.grad.fill(-1)
self.link.y.grad.fill(-2)
self.link.u.cleargrad()
self.link.addgrads(l)
gx_expect = numpy.zeros_like(l.x.grad)
gy_expect = numpy.zeros_like(l.y.grad)
gu_expect = l.u.grad
numpy.testing.assert_array_equal(self.link.x.grad, gx_expect)
numpy.testing.assert_array_equal(self.link.y.grad, gy_expect)
numpy.testing.assert_array_equal(self.link.u.grad, gu_expect)
v = self.link.v
with pytest.raises(RuntimeError):
v.grad
def test_serialize(self):
serializer = mock.MagicMock(return_value=3)
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=(2, 3))
l.y = chainer.Parameter(shape=2)
l.add_persistent('z', 1)
l.serialize(serializer)
self.assertEqual(serializer.call_count, 3)
serializer.assert_any_call('x', l.x.data)
serializer.assert_any_call('y', l.y.data)
serializer.assert_any_call('z', 1)
self.assertEqual(l.z, 3)
def test_serialize_param_shape_placeholder(self):
serializer = mock.MagicMock(return_value=3)
l = chainer.Link()
with l.init_scope():
l.y = chainer.Parameter(shape=2)
l.x = chainer.Parameter()
l.x.initialize((2, 3))
l.add_persistent('z', 1)
l.serialize(serializer)
self.assertEqual(serializer.call_count, 3)
serializer.assert_any_call('x', l.x.data)
serializer.assert_any_call('y', l.y.data)
serializer.assert_any_call('z', 1)
self.assertEqual(l.z, 3)
def test_serialize_deserialize_to_uninitialized_param(self):
ret = numpy.random.rand(2, 3).astype('f')
serializer = mock.MagicMock(return_value=ret)
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter()
l.serialize(serializer)
self.assertEqual(serializer.call_count, 1)
serializer.assert_any_call('x', None)
self.assertIsInstance(l.x.data, numpy.ndarray)
numpy.testing.assert_array_equal(l.x.data, ret)
def test_enable_update(self):
self.link.enable_update()
self.assertTrue(self.link.x.update_rule.enabled)
self.assertTrue(self.link.u.update_rule.enabled)
def test_disable_update(self):
self.link.disable_update()
self.assertFalse(self.link.x.update_rule.enabled)
self.assertFalse(self.link.u.update_rule.enabled)
def test_update_enabled(self):
self.assertTrue(self.link.update_enabled)
self.link.disable_update()
self.assertFalse(self.link.update_enabled)
self.link.enable_update()
self.assertTrue(self.link.update_enabled)
def test_count_params(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert self.link.count_params() == 8
assert len(w) == 2
assert w[0].category is UserWarning
self.link.u.initialize((2, 3))
self.link.v.initialize((2, 3))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.link.count_params()
assert not w
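# Tests Link.to_chx() and Link.from_chx() across NumPy, CuPy and ChainerX
# backends, checking the device the link ends up on.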
@testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@attr.chainerx
class TestLinkFromToChainerx(LinkTestBase, unittest.TestCase):
def test_from_chx(self, backend_config):
self.link.to_device(backend_config.device)
self.link.from_chx()
source_device = backend_config.device
self.check_param_init('x', (2, 3), 'd')
self.check_param_init('y', (2,), 'f')
self.check_param_uninit('u')
if source_device.xp is chainerx:
backend_name = source_device.device.backend.name
if backend_name == 'native':
expected_device = backend.CpuDevice()
elif backend_name == 'cuda':
expected_device = backend.GpuDevice.from_device_id(
source_device.device.index)
else:
expected_device = source_device
self.assertEqual(self.link.device, expected_device)
def test_to_chx(self, backend_config):
self.link.to_device(backend_config.device)
self.link.to_chx()
source_device = backend_config.device
self.check_param_init('x', (2, 3), 'd')
self.check_param_init('y', (2,), 'f')
self.check_param_uninit('u')
if source_device.xp is chainerx:
expected_device = source_device
elif source_device.xp is numpy:
expected_device = backend.ChainerxDevice(
chainerx.get_device('native', 0))
elif source_device.xp is cuda.cupy:
expected_device = backend.ChainerxDevice(
chainerx.get_device('cuda', source_device.device.id))
else:
assert False
self.assertEqual(self.link.device, expected_device)
class TestLinkMissingInitCall(unittest.TestCase):
# Tests for detecting incorrectly written Link subclasses in which
# the call to Link.__init__ is missing
expected_message = r'^Link\.__init__\(\) has not been called\.$'
def test_missing1(self):
# Nothing is done in __init__.
# The fault should be detected no later than __call__().
class Derived(chainer.Link):
def __init__(self):
pass
def forward(self, x):
return x
with pytest.raises(RuntimeError, match=self.expected_message):
link = Derived()
link(numpy.array([1, 2], numpy.float32))
def test_missing2(self):
# init_scope is called.
# The fault should be detected at init_scope.
class Derived(chainer.Link):
def __init__(self):
with self.init_scope():
pass
with pytest.raises(RuntimeError, match=self.expected_message):
Derived()
def test_missing3(self):
# add_param is called.
# The fault should be detected at add_param.
class Derived(chainer.Link):
def __init__(self):
self.add_param('p1', (2, 3), numpy.float32)
with pytest.raises(RuntimeError, match=self.expected_message):
Derived()
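# Tests Link.repeat() for the 'init', 'copy' and 'share' modes.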
class TestLinkRepeat(unittest.TestCase):
def setUp(self):
class Layer(chainer.Link):
def __init__(self):
super(Layer, self).__init__()
with self.init_scope():
self.x = chainer.Parameter(
chainer.initializers.Normal(), shape=(2, 3))
def forward(self):
pass
self.link = Layer()
def test_no_repeat(self):
ret = self.link.repeat(0)
self.assertEqual(len(ret), 0)
def test_repeat_with_init(self):
ret = self.link.repeat(2, mode='init')
self.assertEqual(len(ret), 2)
# Both should be different objects from the original link
self.assertIsNot(ret[0], self.link)
self.assertIsNot(ret[1], self.link)
# Object IDs of elements should be different
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].x, ret[1].x)
        # But the shape and dtype of the parameters should be the same
self.assertEqual(ret[0].x.shape, self.link.x.shape)
self.assertEqual(ret[0].x.dtype, self.link.x.dtype)
self.assertEqual(ret[0].x.shape, ret[1].x.shape)
self.assertEqual(ret[0].x.dtype, ret[1].x.dtype)
# Parameters are re-initialized, so the values should be different
self.assertFalse(numpy.all(ret[0].x.array == ret[1].x.array))
def test_repeat_with_copy(self):
ret = self.link.repeat(2, mode='copy')
self.assertEqual(len(ret), 2)
# Both should be different objects from the original link
self.assertIsNot(ret[0], self.link)
self.assertIsNot(ret[1], self.link)
# Object IDs of elements should be different
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].x, ret[1].x)
        # But the shape, dtype, and values of the parameters should be the same
self.assertEqual(ret[0].x.shape, self.link.x.shape)
self.assertEqual(ret[0].x.dtype, self.link.x.dtype)
self.assertEqual(ret[0].x.shape, ret[1].x.shape)
self.assertEqual(ret[0].x.dtype, ret[1].x.dtype)
numpy.testing.assert_array_equal(ret[0].x.array, ret[1].x.array)
def test_repeat_with_share(self):
ret = self.link.repeat(2, mode='share')
self.assertEqual(len(ret), 2)
# Both should be different objects from the original link
self.assertIsNot(ret[0], self.link)
self.assertIsNot(ret[1], self.link)
# Object IDs of elements should be different
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].x, ret[1].x)
# But the array objects should be the same
self.assertIs(ret[0].x.array, ret[1].x.array)
        # But the shape, dtype, and values of the parameters should be the same
self.assertEqual(ret[0].x.shape, self.link.x.shape)
self.assertEqual(ret[0].x.dtype, self.link.x.dtype)
self.assertEqual(ret[0].x.shape, ret[1].x.shape)
self.assertEqual(ret[0].x.dtype, ret[1].x.dtype)
numpy.testing.assert_array_equal(ret[0].x.array, ret[1].x.array)
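# Parameter subclass that counts zerograd() calls; used via
# ChainTestBase.set_count_parameters() to verify that zerograds() reaches
# every parameter exactly once.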
class CountParameter(chainer.Parameter):
def __init__(self, v):
super(CountParameter, self).__init__(v.data, name=v.name)
self.data = v.data
self.grad = v.grad if v.data is not None else None
self.count_zerograd = 0
def zerograd(self):
self.count_zerograd += 1
super(CountParameter, self).zerograd()
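# Shared fixture for TestChain and TestChainFromToChainerx; builds the nested
# chain sketched in setUp() below.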
class ChainTestBase(object):
def setUp(self):
# Schematic:
# c2
# - c1
# - l1 (x: uninitialized with shape=(2, 3))
# - l2 (x: uninitialized with shape=2)
# - l3 (x: uninitialized without shape)
self.l1 = chainer.Link()
with self.l1.init_scope():
self.l1.x = chainer.Parameter(shape=(2, 3))
self.l2 = chainer.Link()
with self.l2.init_scope():
self.l2.x = chainer.Parameter(shape=2)
self.l3 = chainer.Link()
with self.l3.init_scope():
self.l3.x = chainer.Parameter()
self.c1 = chainer.Chain()
with self.c1.init_scope():
self.c1.l1 = self.l1
self.c1.add_link('l2', self.l2)
self.c2 = chainer.Chain()
with self.c2.init_scope():
self.c2.c1 = self.c1
self.c2.l3 = self.l3
def set_count_parameters(self):
self.l1.x = CountParameter(self.l1.x)
self.l2.x = CountParameter(self.l2.x)
self.l3.x = CountParameter(self.l3.x)
class TestChain(ChainTestBase, unittest.TestCase):
def test_init(self):
self.assertIs(self.c1.l1, self.l1)
self.assertIs(self.c1['l1'], self.l1)
self.assertEqual(self.l1.name, 'l1')
self.assertIs(self.c2.c1, self.c1)
self.assertIs(self.c2['c1'], self.c1)
self.assertEqual(self.c1.name, 'c1')
self.assertIs(self.c2.l3, self.l3)
self.assertIs(self.c2['l3'], self.l3)
self.assertEqual(self.l3.name, 'l3')
def test_str(self):
self.assertEqual(str(chainer.Chain()), 'Chain()')
self.assertEqual(
str(self.c2),
'''\
Chain(
(c1): Chain(
(l1): Link(),
(l2): Link(),
),
(l3): Link(),
)''',
)
def test_add_link(self):
self.assertIs(self.c1.l2, self.l2)
self.assertEqual(self.l2.name, 'l2')
def test_add_link_to_existing_attribute(self):
self.l1.z = 0
with self.assertRaises(AttributeError):
self.l1.add_link('z', chainer.Link())
def test_assign_link_outside_of_init_scope(self):
l = chainer.Link()
self.l1.l = l
self.assertTrue(all(l is not link for link in self.l1.links()))
def test_delete_link(self):
del self.c1.l1
self.assertFalse(hasattr(self.c1, 'l1'))
self.assertNotIn('l1', self.c1._children)
def test_copy_with_share_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='share')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, set)
self.assertTrue(hasattr(c2, 'c1'))
self.assertEqual(c2.c1.name, 'c1')
self.assertIsInstance(c2.c1._children, set)
self.assertIsNot(c2.c1, self.c1)
self.assertEqual(c2.c1.l1.name, 'l1')
self.assertIsNot(c2.c1.l1, self.l1)
self.assertIsNot(c2.c1.l1.x, self.l1.x)
self.assertIs(c2.c1.l1.x.data, self.l1.x.data)
self.assertIs(c2.c1.l1.x.grad, None)
self.assertTrue(hasattr(c2.c1, 'l2'))
self.assertEqual(c2.c1.l2.name, 'l2')
self.assertIsNot(c2.c1.l2, self.l2)
self.assertIsNot(c2.c1.l2.x, self.l2.x)
self.assertIs(c2.c1.l2.x.data, self.l2.x.data)
self.assertIs(c2.c1.l2.x.grad, None)
self.assertTrue(hasattr(c2, 'l3'))
self.assertEqual(c2.l3.name, 'l3')
self.assertIsNot(c2.l3, self.l3)
self.assertIsNot(c2.l3.x, self.l3.x)
self.assertIs(c2.l3.x.data, self.l3.x.data)
self.assertIs(c2.l3.x.grad, None)
def test_copy_with_copy_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='copy')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, set)
self.assertTrue(hasattr(c2, 'c1'))
self.assertEqual(c2.c1.name, 'c1')
self.assertIsInstance(c2.c1._children, set)
self.assertIsNot(c2.c1, self.c1)
self.assertEqual(c2.c1.l1.name, 'l1')
self.assertIsNot(c2.c1.l1, self.l1)
self.assertIsNot(c2.c1.l1.x, self.l1.x)
self.assertIsNot(c2.c1.l1.x.data, self.l1.x.data)
self.assertTrue(numpy.array_equal(c2.c1.l1.x.data, self.l1.x.data))
self.assertIs(c2.c1.l1.x.grad, None)
self.assertTrue(hasattr(c2.c1, 'l2'))
self.assertEqual(c2.c1.l2.name, 'l2')
self.assertIsNot(c2.c1.l2, self.l2)
self.assertIsNot(c2.c1.l2.x, self.l2.x)
self.assertIsNot(c2.c1.l2.x.data, self.l2.x.data)
self.assertTrue(numpy.array_equal(c2.c1.l2.x.data, self.l2.x.data))
self.assertIs(c2.c1.l2.x.grad, None)
self.assertTrue(hasattr(c2, 'l3'))
self.assertEqual(c2.l3.name, 'l3')
self.assertIsNot(c2.l3, self.l3)
self.assertIsNot(c2.l3.x, self.l3.x)
self.assertIs(c2.l3.x.data, self.l3.x.data)
x = c2.l3.x
with pytest.raises(RuntimeError):
x.grad
def test_copy_with_init_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='init')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, set)
self.assertTrue(hasattr(c2, 'c1'))
self.assertEqual(c2.c1.name, 'c1')
self.assertIsInstance(c2.c1._children, set)
self.assertIsNot(c2.c1, self.c1)
self.assertEqual(c2.c1.l1.name, 'l1')
self.assertIsNot(c2.c1.l1, self.l1)
self.assertIsNot(c2.c1.l1.x, self.l1.x)
self.assertIsNot(c2.c1.l1.x.data, self.l1.x.data)
self.assertFalse(numpy.array_equal(c2.c1.l1.x.data, self.l1.x.data))
        # _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2.c1.l1.x.grad).all())
self.assertTrue(hasattr(c2.c1, 'l2'))
self.assertEqual(c2.c1.l2.name, 'l2')
self.assertIsNot(c2.c1.l2, self.l2)
self.assertIsNot(c2.c1.l2.x, self.l2.x)
self.assertIsNot(c2.c1.l2.x.data, self.l2.x.data)
self.assertFalse(numpy.array_equal(c2.c1.l2.x.data, self.l2.x.data))
        # _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2.c1.l2.x.grad).all())
self.assertTrue(hasattr(c2, 'l3'))
self.assertEqual(c2.l3.name, 'l3')
self.assertIsNot(c2.l3, self.l3)
self.assertIsNot(c2.l3.x, self.l3.x)
self.assertIs(c2.l3.x.data, self.l3.x.data)
        # The copied parameter is still uninitialized (l3.x was constructed
        # without a shape), so accessing its grad raises RuntimeError
with pytest.raises(RuntimeError):
c2.l3.x.grad
def test_to_cpu_on_cpu(self):
x1 = self.l1.x.data
gx1 = self.l1.x.grad
x2 = self.l2.x.data
gx2 = self.l2.x.grad
x3 = self.l3.x.data
self.c2.to_cpu()
self.assertIs(self.l1.x.data, x1)
self.assertIs(self.l1.x.grad, gx1)
self.assertIs(self.l2.x.data, x2)
self.assertIs(self.l2.x.grad, gx2)
self.assertIs(self.l3.x.data, x3)
with pytest.raises(RuntimeError):
self.l3.x.grad
@attr.gpu
def test_to_cpu(self):
self.set_count_parameters()
self.c2.to_gpu()
self.c2.to_cpu()
self.assertIs(self.c2.xp, numpy)
self.assertIs(self.c1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.x.data, numpy.ndarray)
self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
self.assertIsInstance(self.l2.x.data, numpy.ndarray)
self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
self.assertIsNone(self.l3.x.data)
self.assertIsNone(self.l3.x.grad)
self.l3.x.initialize(3)
self.assertIsInstance(self.l3.x.data, numpy.ndarray)
self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
self.set_count_parameters()
cupy = cuda.cupy
self.c2.to_gpu()
self.assertIs(self.c2.xp, cupy)
self.assertIs(self.c1.xp, cupy)
self.assertIs(self.l1.xp, cupy)
self.assertIs(self.l2.xp, cupy)
self.assertIs(self.l3.xp, cupy)
self.assertIsInstance(self.l1.x.data, cupy.ndarray)
self.assertIsInstance(self.l1.x.grad, cupy.ndarray)
self.assertIsInstance(self.l2.x.data, cupy.ndarray)
self.assertIsInstance(self.l2.x.grad, cupy.ndarray)
self.assertIsNone(self.l3.x.data)
self.assertIsNone(self.l3.x.grad)
self.l3.x.initialize(3)
self.assertIsInstance(self.l3.x.data, cupy.ndarray)
self.assertIsInstance(self.l3.x.grad, cupy.ndarray)
def test_to_device(self):
self.set_count_parameters()
device = backend.CpuDevice()
self.c2.to_device(device)
self.assertIs(self.c2.xp, numpy)
self.assertIs(self.c1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.x.data, numpy.ndarray)
self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
self.assertIsInstance(self.l2.x.data, numpy.ndarray)
self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
self.assertIsNone(self.l3.x.data)
self.l3.x.initialize((3,))
self.assertIsInstance(self.l3.x.data, numpy.ndarray)
self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
def test_params(self):
params = list(self.c2.params())
self.assertEqual([id(p) for p in params],
[id(self.l1.x), id(self.l2.x), id(self.l3.x)])
def test_params_skip_uninit(self):
params = list(self.c2.params(include_uninit=False))
self.assertEqual([id(p) for p in params],
[id(self.l1.x), id(self.l2.x)])
def test_namedparams(self):
namedparams = list(self.c2.namedparams())
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/c1/l1/x', id(self.l1.x)),
('/c1/l2/x', id(self.l2.x)),
('/l3/x', id(self.l3.x))])
def test_namedparams_skip_uninit(self):
namedparams = list(self.c2.namedparams(include_uninit=False))
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/c1/l1/x', id(self.l1.x)),
('/c1/l2/x', id(self.l2.x))])
def test_links(self):
links = list(self.c2.links())
self.assertEqual([id(l) for l in links],
[id(l) for l in [self.c2,
self.c1, self.l1, self.l2,
self.l3]])
def test_links_skipself(self):
links = list(self.c2.links(skipself=True))
self.assertEqual([id(l) for l in links],
[id(l) for l in [self.c1, self.l1, self.l2,
self.l3]])
def test_namedlinks(self):
namedlinks = list(self.c2.namedlinks())
self.assertEqual([(name, id(l)) for name, l in namedlinks],
[('/', id(self.c2)),
('/c1', id(self.c1)),
('/c1/l1', id(self.l1)),
('/c1/l2', id(self.l2)),
('/l3', id(self.l3))])
def test_namedlinks_skipself(self):
namedlinks = list(self.c2.namedlinks(skipself=True))
self.assertEqual([(name, id(l)) for name, l in namedlinks],
[('/c1', id(self.c1)),
('/c1/l1', id(self.l1)),
('/c1/l2', id(self.l2)),
('/l3', id(self.l3))])
def test_children(self):
children = list(self.c2.children())
self.assertEqual([id(c) for c in children], [id(self.c1), id(self.l3)])
def test_copyparams(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.Chain()
with c1.init_scope():
c1.l1 = l1
c1.l2 = l2
c2 = chainer.Chain()
with c2.init_scope():
c2.c1 = c1
c2.l3 = l3
l1.x.data.fill(0)
l2.x.data.fill(1)
l3.x.data.fill(2)
self.c2.copyparams(c2)
numpy.testing.assert_array_equal(self.l1.x.data, l1.x.data)
numpy.testing.assert_array_equal(self.l2.x.data, l2.x.data)
numpy.testing.assert_array_equal(self.l3.x.data, l3.x.data)
def test_zerograds(self):
self.set_count_parameters()
with testing.assert_warns(DeprecationWarning):
self.c2.zerograds()
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
self.assertEqual(self.l1.x.count_zerograd, 1)
self.assertEqual(self.l2.x.count_zerograd, 1)
self.assertEqual(self.l3.x.count_zerograd, 1)
self.l3.x.initialize(3)
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
def test_addgrads(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.Chain()
with c1.init_scope():
c1.l1 = l1
c1.l2 = l2
c2 = chainer.Chain()
with c2.init_scope():
c2.c1 = c1
c2.l3 = l3
l1.x.grad.fill(1)
l2.x.grad.fill(2)
l3.x.grad.fill(3)
self.l1.x.grad.fill(-1)
self.l2.x.grad.fill(-2)
self.l3.cleargrads()
self.c2.addgrads(c2)
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.full(3, 3.))
def test_serialize(self):
mocks = {'l1': mock.MagicMock(), 'l2': mock.MagicMock()}
serializer = mock.MagicMock()
serializer.__getitem__.side_effect = lambda k: mocks[k]
self.c1.serialize(serializer)
self.assertEqual(serializer.call_count, 0)
self.assertEqual(serializer.__getitem__.call_count, 2)
serializer.__getitem__.assert_any_call('l1')
serializer.__getitem__.assert_any_call('l2')
mocks['l1'].assert_called_with('x', self.l1.x.data)
mocks['l2'].assert_called_with('x', self.l2.x.data)
def test_count_params(self):
assert self.c1.count_params() == 8
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert len(w) == 1
assert w[0].category is UserWarning
self.c2.l3.x.initialize((3,))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert not w
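# Tests Chain.to_chx() and Chain.from_chx() across backends, checking that all
# child links and parameters end up on the expected device.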
@testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@attr.chainerx
class TestChainFromToChainerx(ChainTestBase, unittest.TestCase):
def check_array_device(self, array, expected_device):
expected_ndarray = expected_device.xp.ndarray
self.assertIsInstance(array, expected_ndarray)
if expected_device.xp in (chainerx, cuda.cupy):
assert array.device == expected_device.device
def check_expected_device(self, expected_device):
expected_xp = expected_device.xp
self.assertIs(self.c2.xp, expected_xp)
self.assertIs(self.c1.xp, expected_xp)
self.assertIs(self.l1.xp, expected_xp)
self.assertIs(self.l2.xp, expected_xp)
self.assertIs(self.l3.xp, expected_xp)
self.check_array_device(self.l1.x.data, expected_device)
self.check_array_device(self.l1.x.grad, expected_device)
self.check_array_device(self.l2.x.data, expected_device)
self.check_array_device(self.l2.x.grad, expected_device)
self.assertIsNone(self.l3.x.data)
self.l3.x.initialize((3,))
self.check_array_device(self.l3.x.data, expected_device)
self.check_array_device(self.l3.x.grad, expected_device)
def test_to_chx(self, backend_config):
self.set_count_parameters()
self.c2.to_device(backend_config.device)
self.c2.to_chx()
src_device = backend_config.device
if src_device.xp is chainerx:
expected_device = src_device
else:
expected_device = (
backend.ChainerxDevice.from_fallback_device(src_device))
self.check_expected_device(expected_device)
def test_from_chx(self, backend_config):
self.set_count_parameters()
self.c2.to_device(backend_config.device)
self.c2.from_chx()
src_device = backend_config.device
if src_device.xp is chainerx:
expected_device = src_device.fallback_device
else:
expected_device = src_device
self.check_expected_device(expected_device)
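# Tests Chain.repeat() for the 'share', 'copy' and 'init' modes.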
class TestChainRepeat(unittest.TestCase):
def setUp(self):
class ChainForTest(chainer.Chain):
def __init__(self):
super(ChainForTest, self).__init__()
with self.init_scope():
self.link = chainer.Link()
def forward(self):
pass
self.chain = ChainForTest()
self.link = self.chain.link
with self.link.init_scope():
self.link.x = chainer.Parameter(
chainer.initializers.Normal(), shape=(2, 3))
def test_no_repeat(self):
ret = self.chain.repeat(0)
self.assertEqual(len(ret), 0)
def test_repeat_with_share_mode(self):
ret = self.chain.repeat(2, mode='share')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chain)
self.assertIsNot(ret[1], self.chain)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].link, self.chain.link)
self.assertIsNot(ret[1].link, self.chain.link)
self.assertIsNot(ret[0].link, ret[1].link)
self.assertIsNot(ret[0].link.x, self.chain.link.x)
self.assertIsNot(ret[1].link.x, self.chain.link.x)
self.assertIsNot(ret[0].link.x, ret[1].link.x)
self.assertIs(ret[0].link.x.data, self.chain.link.x.data)
self.assertIs(ret[0].link.x.data, ret[1].link.x.data)
self.assertEqual(ret[0].link.x.shape, self.chain.link.x.shape)
self.assertEqual(ret[0].link.x.shape, ret[1].link.x.shape)
self.assertEqual(ret[0].link.x.dtype, self.chain.link.x.dtype)
self.assertEqual(ret[0].link.x.dtype, ret[1].link.x.dtype)
def test_repeat_with_copy_mode(self):
ret = self.chain.repeat(2, mode='copy')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chain)
self.assertIsNot(ret[1], self.chain)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].link, self.chain.link)
self.assertIsNot(ret[1].link, self.chain.link)
self.assertIsNot(ret[0].link, ret[1].link)
self.assertIsNot(ret[0].link.x, self.link.x)
self.assertIsNot(ret[1].link.x, self.link.x)
self.assertIsNot(ret[0].link.x, ret[1].link.x)
self.assertIsNot(ret[0].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[1].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[0].link.x.data, ret[1].link.x.data)
self.assertTrue(numpy.array_equal(
ret[0].link.x.data, self.chain.link.x.data))
self.assertTrue(numpy.array_equal(
ret[0].link.x.data, ret[1].link.x.data))
self.assertEqual(ret[0].link.x.shape, self.chain.link.x.shape)
self.assertEqual(ret[0].link.x.shape, ret[1].link.x.shape)
self.assertEqual(ret[0].link.x.dtype, self.chain.link.x.dtype)
self.assertEqual(ret[0].link.x.dtype, ret[1].link.x.dtype)
def test_repeat_with_init_mode(self):
ret = self.chain.repeat(2, mode='init')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chain)
self.assertIsNot(ret[1], self.chain)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].link, self.chain.link)
self.assertIsNot(ret[1].link, self.chain.link)
self.assertIsNot(ret[0].link.x, ret[1].link.x)
self.assertIsNot(ret[0].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[1].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[0].link.x.data, ret[1].link.x.data)
self.assertFalse(numpy.array_equal(
ret[0].link.x.data, self.chain.link.x.data))
self.assertFalse(numpy.array_equal(
ret[1].link.x.data, self.chain.link.x.data))
self.assertFalse(numpy.array_equal(
ret[0].link.x.data, ret[1].link.x.data))
self.assertEqual(ret[0].link.x.shape, self.chain.link.x.shape)
self.assertEqual(ret[0].link.x.shape, ret[1].link.x.shape)
self.assertEqual(ret[0].link.x.dtype, self.chain.link.x.dtype)
self.assertEqual(ret[0].link.x.dtype, ret[1].link.x.dtype)
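# Tests ChainList container behaviour: item access and assignment, iteration,
# copying, device transfer, gradient handling and serialization.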
class TestChainList(unittest.TestCase):
def setUp(self):
self.l1 = chainer.Link()
with self.l1.init_scope():
self.l1.x = chainer.Parameter(shape=(2, 3))
self.l1.y = chainer.Parameter()
self.l2 = chainer.Link()
with self.l2.init_scope():
self.l2.x = chainer.Parameter(shape=2)
self.l3 = chainer.Link()
with self.l3.init_scope():
self.l3.x = chainer.Parameter(shape=3)
self.l4 = chainer.Link()
self.l5 = chainer.Link()
self.l6 = chainer.Link()
self.c1 = chainer.ChainList(self.l1)
self.c1.add_link(self.l2)
self.c2 = chainer.ChainList(self.c1)
self.c2.append(self.l3)
self.c3 = chainer.ChainList(self.l4)
def test_init(self):
self.assertIs(self.c1[0], self.l1)
self.assertEqual(self.l1.name, '0')
self.assertIs(self.c2[0], self.c1)
self.assertEqual(self.c1.name, '0')
def test_str(self):
self.assertEqual(str(chainer.ChainList()), 'ChainList()')
self.assertEqual(
str(self.c2),
'''\
ChainList(
(0): ChainList(
(0): Link(),
(1): Link(),
),
(1): Link(),
)''',
)
def test_add_link(self):
self.assertIs(self.c1[1], self.l2)
self.assertEqual(self.l2.name, '1')
def test_append(self):
self.assertIs(self.c2[1], self.l3)
self.assertEqual(self.l3.name, '1')
def test_setitem(self):
self.c1[1] = self.l3
self.assertEqual(self.l3.name, '1')
def test_setitem_slice(self):
self.c1.append(self.l3) # l1 l2 l3
self.c1[3:0:-1] = [self.l4, self.l5] # l1 l5 l4
self.assertEqual(len(self.c1), 3)
self.assertEqual(self.l1.name, '0')
self.assertEqual(self.l4.name, '2')
self.assertEqual(self.l5.name, '1')
def test_setitem_slice_short(self):
self.c1.append(self.l3) # l1 l2 l3
self.c1[1:3] = [self.l4] # l1 l4
self.assertEqual(len(self.c1), 2)
self.assertEqual(self.l1.name, '0')
self.assertEqual(self.l4.name, '1')
def test_setitem_slice_long(self):
self.c1.append(self.l3) # l1 l2 l3
self.c1[1:3] = [self.l4, self.l5, self.l6] # l1 l4 l5 l6
self.assertEqual(len(self.c1), 4)
self.assertEqual(self.l1.name, '0')
self.assertEqual(self.l4.name, '1')
self.assertEqual(self.l5.name, '2')
self.assertEqual(self.l6.name, '3')
def test_iadd(self):
self.c2 += self.c3
        self.assertEqual(len(self.c2), 3)
self.assertEqual(self.l4.name, '2')
def test_delete_item(self):
del self.c2[0]
self.assertEqual(len(self.c2), 1)
self.assertEqual(self.l3.name, '0')
def test_assign_param_in_init_scope(self):
p = chainer.Parameter()
with self.c1.init_scope():
self.c1.p = p
self.assertIn(p, self.c1.params())
def test_assign_link_in_init_scope(self):
l = chainer.Link()
with self.c1.init_scope():
with self.assertRaises(TypeError):
self.c1.l = l
def test_iter(self):
links = list(self.c2)
self.assertEqual(2, len(links))
self.assertIs(links[0], self.c1)
self.assertIs(links[1], self.l3)
def test_len(self):
self.assertEqual(len(self.c1), 2)
self.assertEqual(len(self.c2), 2)
def test_copy_with_share_mode(self):
c2 = self.c2.copy(mode='share')
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, list)
self.assertIsNot(c2[0], self.c1)
self.assertEqual(c2[0].name, '0')
self.assertIsInstance(c2[0]._children, list)
self.assertIsNot(c2[0][0], self.l1)
self.assertEqual(c2[0][0].name, '0')
self.assertIsNot(c2[0][0].x, self.l1.x)
self.assertIs(c2[0][0].x.data, self.l1.x.data)
self.assertIs(c2[0][0].x.grad, None)
self.assertIsNot(c2[0][1], self.l2)
self.assertEqual(c2[0][1].name, '1')
self.assertIsNot(c2[0][1].x, self.l2.x)
self.assertIs(c2[0][1].x.data, self.l2.x.data)
self.assertIs(c2[0][1].x.grad, None)
self.assertIsNot(c2[1], self.l3)
self.assertEqual(c2[1].name, '1')
self.assertIsNot(c2[1].x, self.l3.x)
self.assertIs(c2[1].x.data, self.l3.x.data)
self.assertIs(c2[1].x.grad, None)
def test_copy_with_copy_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='copy')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, list)
self.assertEqual(c2[0].name, '0')
self.assertIsInstance(c2[0]._children, list)
self.assertIsNot(c2[0][0], self.l1)
self.assertEqual(c2[0][0].name, '0')
self.assertIsNot(c2[0][0].x, self.l1.x)
self.assertIsNot(c2[0][0].x.data, self.l1.x.data)
self.assertTrue(numpy.array_equal(c2[0][0].x.data, self.l1.x.data))
self.assertIs(c2[0][0].x.grad, None)
self.assertIsNot(c2[0][1], self.l2)
self.assertEqual(c2[0][1].name, '1')
self.assertIsNot(c2[0][1].x, self.l2.x)
self.assertIsNot(c2[0][1].x.data, self.l2.x.data)
self.assertTrue(numpy.array_equal(c2[0][1].x.data, self.l2.x.data))
self.assertIs(c2[0][1].x.grad, None)
self.assertIsNot(c2[1], self.l3)
self.assertEqual(c2[1].name, '1')
self.assertIsNot(c2[1].x, self.l3.x)
self.assertIsNot(c2[1].x.data, self.l3.x.data)
# l3 is constructed with shape argument but not initialized
self.assertTrue(numpy.isnan(c2[1].x.grad).all())
def test_copy_with_init_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='init')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, list)
self.assertEqual(c2[0].name, '0')
self.assertIsInstance(c2[0]._children, list)
self.assertIsNot(c2[0][0], self.l1)
self.assertEqual(c2[0][0].name, '0')
self.assertIsNot(c2[0][0].x, self.l1.x)
self.assertIsNot(c2[0][0].x.data, self.l1.x.data)
self.assertFalse(numpy.array_equal(c2[0][0].x.data, self.l1.x.data))
        # _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2[0][0].x.grad).all())
self.assertIsNot(c2[0][1], self.l2)
self.assertEqual(c2[0][1].name, '1')
self.assertIsNot(c2[0][1].x, self.l2.x)
self.assertIsNot(c2[0][1].x.data, self.l2.x.data)
self.assertFalse(numpy.array_equal(c2[0][1].x.data, self.l2.x.data))
        # _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2[0][1].x.grad).all())
self.assertIsNot(c2[1], self.l3)
self.assertEqual(c2[1].name, '1')
self.assertIsNot(c2[1].x, self.l3.x)
self.assertTrue(numpy.isnan(c2[1].x.data).all())
self.assertTrue(numpy.isnan(c2[1].x.grad).all())
@attr.gpu
def test_copy_and_send_to_gpu(self):
c2 = self.c2.copy()
self.c2.to_gpu()
self.assertIsInstance(self.c2[0][0].x.data, cuda.cupy.ndarray)
self.assertIsInstance(self.c2[0][1].x.data, cuda.cupy.ndarray)
self.assertIsInstance(c2[0][0].x.data, numpy.ndarray)
self.assertIsInstance(c2[0][1].x.data, numpy.ndarray)
@attr.gpu
def test_copy_and_send_to_gpu_2(self):
c2 = self.c2.copy()
c2.to_gpu()
self.assertIsInstance(self.c2[0][0].x.data, numpy.ndarray)
self.assertIsInstance(self.c2[0][1].x.data, numpy.ndarray)
self.assertIsInstance(c2[0][0].x.data, cuda.cupy.ndarray)
self.assertIsInstance(c2[0][1].x.data, cuda.cupy.ndarray)
@attr.multi_gpu(2)
def test_copy_and_send_to_gpu_multi(self):
c2 = self.c2.copy()
self.c2.to_gpu(0)
c2.to_gpu(1)
self.assertEqual(self.c2[0][0].x.data.device.id, 0)
self.assertEqual(self.c2[0][1].x.data.device.id, 0)
self.assertEqual(c2[0][0].x.data.device.id, 1)
self.assertEqual(c2[0][1].x.data.device.id, 1)
def test_to_cpu_on_cpu(self):
x1 = self.l1.x.data
gx1 = self.l1.x.grad
x2 = self.l2.x.data
gx2 = self.l2.x.grad
x3 = self.l3.x.data
gx3 = self.l3.x.grad
self.c2.to_cpu()
self.assertIs(self.l1.x.data, x1)
self.assertIs(self.l1.x.grad, gx1)
self.assertIs(self.l2.x.data, x2)
self.assertIs(self.l2.x.grad, gx2)
self.assertIs(self.l3.x.data, x3)
self.assertIs(self.l3.x.grad, gx3)
@attr.gpu
def test_to_cpu(self):
self.c2.to_gpu()
self.c2.to_cpu()
self.assertIs(self.c2.xp, numpy)
self.assertIs(self.c1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.x.data, numpy.ndarray)
self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
self.assertIsInstance(self.l2.x.data, numpy.ndarray)
self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
self.assertIsInstance(self.l3.x.data, numpy.ndarray)
self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
cupy = cuda.cupy
self.c2.to_gpu()
self.assertIs(self.c2.xp, cupy)
self.assertIs(self.c1.xp, cupy)
self.assertIs(self.l1.xp, cupy)
self.assertIs(self.l2.xp, cupy)
self.assertIs(self.l3.xp, cupy)
self.assertIsInstance(self.l1.x.data, cupy.ndarray)
self.assertIsInstance(self.l1.x.grad, cupy.ndarray)
self.assertIsInstance(self.l2.x.data, cupy.ndarray)
self.assertIsInstance(self.l2.x.grad, cupy.ndarray)
self.assertIsInstance(self.l3.x.data, cupy.ndarray)
self.assertIsInstance(self.l3.x.grad, cupy.ndarray)
@attr.chainerx
def test_to_chx(self):
self.c2.to_device(backend.CpuDevice())
self.c2.to_chx()
self.assertIs(self.c2.xp, chainerx)
self.assertIs(self.c1.xp, chainerx)
self.assertIs(self.l1.xp, chainerx)
self.assertIs(self.l2.xp, chainerx)
self.assertIs(self.l3.xp, chainerx)
self.assertIsInstance(self.l1.x.data, chainerx.ndarray)
self.assertIsInstance(self.l1.x.grad, chainerx.ndarray)
self.assertIsInstance(self.l2.x.data, chainerx.ndarray)
self.assertIsInstance(self.l2.x.grad, chainerx.ndarray)
self.assertIsInstance(self.l3.x.data, chainerx.ndarray)
self.assertIsInstance(self.l3.x.grad, chainerx.ndarray)
expected_device = chainerx.get_device('native:0')
self.assertIs(self.l1.x.data.device, expected_device)
self.assertIs(self.l1.x.grad.device, expected_device)
self.assertIs(self.l2.x.data.device, expected_device)
self.assertIs(self.l2.x.grad.device, expected_device)
self.assertIs(self.l3.x.data.device, expected_device)
self.assertIs(self.l3.x.grad.device, expected_device)
def test_to_device(self):
device = backend.CpuDevice()
self.c2.to_device(device)
self.assertIs(self.c2.xp, numpy)
self.assertIs(self.c1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.x.data, numpy.ndarray)
self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
self.assertIsInstance(self.l2.x.data, numpy.ndarray)
self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
self.assertIsInstance(self.l3.x.data, numpy.ndarray)
self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
def test_params(self):
params = list(self.c2.params())
self.assertEqual([id(p) for p in params],
[id(self.l1.x), id(self.l1.y),
id(self.l2.x), id(self.l3.x)])
def test_params_skip_uninit(self):
params = list(self.c2.params(include_uninit=False))
self.assertEqual([id(p) for p in params],
[id(self.l1.x), id(self.l2.x), id(self.l3.x)])
def test_namedparams(self):
namedparams = list(self.c2.namedparams())
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/0/0/x', id(self.l1.x)),
('/0/0/y', id(self.l1.y)),
('/0/1/x', id(self.l2.x)),
('/1/x', id(self.l3.x))])
def test_namedparams_skip_uninit(self):
namedparams = list(self.c2.namedparams(include_uninit=False))
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/0/0/x', id(self.l1.x)),
('/0/1/x', id(self.l2.x)),
('/1/x', id(self.l3.x))])
def test_links(self):
links = list(self.c2.links())
self.assertEqual([id(l) for l in links],
[id(l) for l in [self.c2,
self.c1, self.l1, self.l2,
self.l3]])
def test_links_skipself(self):
links = list(self.c2.links(skipself=True))
self.assertEqual([id(l) for l in links],
[id(l) for l in [self.c1, self.l1, self.l2,
self.l3]])
def test_namedlinks(self):
namedlinks = list(self.c2.namedlinks())
self.assertEqual([(name, id(l)) for name, l in namedlinks],
[('/', id(self.c2)),
('/0', id(self.c1)),
('/0/0', id(self.l1)),
('/0/1', id(self.l2)),
('/1', id(self.l3))])
def test_namedlinks_skipself(self):
namedlinks = list(self.c2.namedlinks(skipself=True))
self.assertEqual([(name, id(l)) for name, l in namedlinks],
[('/0', id(self.c1)),
('/0/0', id(self.l1)),
('/0/1', id(self.l2)),
('/1', id(self.l3))])
def test_children(self):
self.assertEqual(tuple(id(c) for c in self.c2.children()),
(id(self.c1), id(self.l3)))
self.assertEqual(tuple(id(c) for c in self.c1.children()),
(id(self.l1), id(self.l2)))
def test_copyparams(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l1.y = chainer.Parameter()
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.ChainList(l1, l2)
c2 = chainer.ChainList(c1, l3)
l1.x.data.fill(0)
l2.x.data.fill(1)
l3.x.data.fill(2)
self.c2.copyparams(c2)
numpy.testing.assert_array_equal(self.l1.x.data, l1.x.data)
numpy.testing.assert_array_equal(self.l2.x.data, l2.x.data)
numpy.testing.assert_array_equal(self.l3.x.data, l3.x.data)
def test_zerograds(self):
with testing.assert_warns(DeprecationWarning):
self.c2.zerograds()
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
self.l1.y.initialize((2, 3))
numpy.testing.assert_array_equal(self.l1.y.grad, numpy.zeros((2, 3)))
def test_cleargrads(self):
self.c2.cleargrads()
self.assertIsNone(self.l1.x.grad)
self.assertIsNone(self.l2.x.grad)
self.assertIsNone(self.l3.x.grad)
self.l1.y.initialize((2, 3))
self.assertIsNone(self.l1.y.grad)
def test_addgrads(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l1.y = chainer.Parameter(shape=(2, 3))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.ChainList(l1, l2)
c2 = chainer.ChainList(c1, l3)
l1.x.grad.fill(1)
l2.x.grad.fill(2)
l3.x.grad.fill(3)
l1.y.grad.fill(4)
self.l1.x.grad.fill(-1)
self.l1.y.cleargrad()
self.l2.x.grad.fill(-2)
self.l3.x.grad.fill(-3)
self.c2.addgrads(c2)
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l1.y.grad, l1.y.grad)
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
def test_serialize(self):
l1 = chainer.Link()
with l1.init_scope():
l1.y = chainer.Parameter(shape=(1, 1))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(0, 2)
c1 = chainer.ChainList(l1, l2)
mocks = {'0': mock.MagicMock(), '1': mock.MagicMock()}
serializer = mock.MagicMock()
serializer.__getitem__.side_effect = lambda k: mocks[k]
serializer.return_value = None
mocks['0'].return_value = None
mocks['1'].return_value = None
c1.serialize(serializer)
self.assertEqual(serializer.call_count, 0)
self.assertEqual(serializer.__getitem__.call_count, 2)
serializer.__getitem__.assert_any_call('0')
serializer.__getitem__.assert_any_call('1')
mocks['0'].assert_called_with('y', l1.y.data)
mocks['1'].assert_called_with('x', l2.x.data)
def test_count_params(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert self.c1.count_params() == 8
assert len(w) == 1
assert w[0].category is UserWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert len(w) == 1
assert w[0].category is UserWarning
self.c2[0][0].y.initialize((2, 3))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert not w
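# Tests ChainList.repeat() for the 'share', 'copy' and 'init' modes.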
class TestChainListRepeat(unittest.TestCase):
def setUp(self):
class ChainListForTest(chainer.ChainList):
def __init__(self):
super(ChainListForTest, self).__init__(chainer.Link())
def forward(self):
pass
self.chainlist = ChainListForTest()
self.link = self.chainlist[0]
with self.link.init_scope():
self.link.x = chainer.Parameter(
chainer.initializers.Normal(), shape=(2, 3))
def test_no_repeat(self):
ret = self.chainlist.repeat(0)
self.assertEqual(len(ret), 0)
def test_repeat_with_share_mode(self):
ret = self.chainlist.repeat(2, mode='share')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chainlist)
self.assertIsNot(ret[1], self.chainlist)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0][0], self.chainlist[0])
self.assertIsNot(ret[1][0], self.chainlist[0])
self.assertIsNot(ret[0][0], ret[1][0])
self.assertIsNot(ret[0][0].x, self.chainlist[0].x)
self.assertIsNot(ret[1][0].x, self.chainlist[0].x)
self.assertIsNot(ret[0][0].x, ret[1][0].x)
self.assertIs(ret[0][0].x.data, self.chainlist[0].x.data)
self.assertIs(ret[0][0].x.data, ret[1][0].x.data)
self.assertEqual(ret[0][0].x.shape, self.chainlist[0].x.shape)
self.assertEqual(ret[0][0].x.shape, ret[1][0].x.shape)
self.assertEqual(ret[0][0].x.dtype, self.chainlist[0].x.dtype)
self.assertEqual(ret[0][0].x.dtype, ret[1][0].x.dtype)
def test_repeat_with_copy_mode(self):
ret = self.chainlist.repeat(2, mode='copy')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chainlist)
self.assertIsNot(ret[1], self.chainlist)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0][0], self.chainlist[0])
self.assertIsNot(ret[1][0], self.chainlist[0])
self.assertIsNot(ret[0][0], ret[1][0])
self.assertIsNot(ret[0][0].x, self.chainlist[0].x)
self.assertIsNot(ret[1][0].x, self.chainlist[0].x)
self.assertIsNot(ret[0][0].x, ret[1][0].x)
self.assertIsNot(ret[0][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[1][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[0][0].x.data, ret[1][0].x.data)
self.assertTrue(numpy.array_equal(
ret[0][0].x.data, self.chainlist[0].x.data))
self.assertTrue(numpy.array_equal(
ret[0][0].x.data, ret[1][0].x.data))
self.assertEqual(ret[0][0].x.shape, self.chainlist[0].x.shape)
self.assertEqual(ret[0][0].x.shape, ret[1][0].x.shape)
self.assertEqual(ret[0][0].x.dtype, self.chainlist[0].x.dtype)
self.assertEqual(ret[0][0].x.dtype, ret[1][0].x.dtype)
def test_repeat_with_init_mode(self):
ret = self.chainlist.repeat(2, mode='init')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chainlist)
self.assertIsNot(ret[1], self.chainlist)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0][0], self.chainlist[0])
self.assertIsNot(ret[1][0], self.chainlist[0])
self.assertIsNot(ret[0][0], ret[1][0])
self.assertIsNot(ret[0][0].x, self.chainlist[0].x)
self.assertIsNot(ret[1][0].x, self.chainlist[0].x)
self.assertIsNot(ret[0][0].x, ret[1][0].x)
self.assertIsNot(ret[0][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[1][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[0][0].x.data, ret[1][0].x.data)
self.assertFalse(numpy.array_equal(
ret[0][0].x.data, self.chainlist[0].x.data))
self.assertFalse(numpy.array_equal(
ret[1][0].x.data, self.chainlist[0].x.data))
self.assertFalse(numpy.array_equal(
ret[0][0].x.data, ret[1][0].x.data))
self.assertEqual(ret[0][0].x.shape, self.chainlist[0].x.shape)
self.assertEqual(ret[0][0].x.shape, ret[1][0].x.shape)
self.assertEqual(ret[0][0].x.dtype, self.chainlist[0].x.dtype)
self.assertEqual(ret[0][0].x.dtype, ret[1][0].x.dtype)
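# Tests transfers between CPU/GPU and the iDeep (intel64) backend for
# initialized/uninitialized parameters and persistent values.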
@attr.ideep
class TestIntel64(unittest.TestCase):
def setUp(self):
self.link = chainer.Link()
shape = (2, 2)
dtype = numpy.float32
y_array = numpy.random.rand(*shape).astype(dtype)
pa_array = numpy.random.rand(*shape).astype(dtype)
ps_scalar = 2.4
with self.link.init_scope():
# Initialized parameter
self.link.y = chainer.Parameter(y_array)
# Uninitialized parameter
self.link.v = chainer.Parameter()
# Persistent ndarray
self.link.add_persistent('pa', pa_array)
# Persistent scalar
self.link.add_persistent('ps', ps_scalar)
self.y_array = y_array
self.pa_array = pa_array
self.ps_scalar = ps_scalar
def test_cpu_to_intel64(self):
link = self.link
link.to_intel64()
assert isinstance(link.device, backend.Intel64Device)
# Arrays should be converted to ideep.mdarray
# Initialized parameter
assert isinstance(link.y.data, intel64.ideep.mdarray)
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, intel64.ideep.mdarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
def test_intel64_to_intel64(self):
link = self.link
link.to_intel64()
prev_y = link.y
prev_v = link.v
prev_pa = link.pa
prev_ps = link.ps
link.to_intel64()
assert isinstance(link.device, backend.Intel64Device)
# Everything should be left untouched
# Initialized parameter
assert link.y is prev_y
# Uninitialized parameter
assert link.v is prev_v
# Persistent ndarray
assert link.pa is prev_pa
# Persistent scalar
assert link.ps is prev_ps
@attr.gpu
def test_gpu_to_intel64(self):
link = self.link
link.to_gpu()
assert link.device.device == cuda.Device(0)
link.to_intel64()
assert isinstance(link.device, backend.Intel64Device)
# Arrays should be converted to ideep.mdarray
# Initialized parameter
assert isinstance(link.y.data, intel64.ideep.mdarray)
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, intel64.ideep.mdarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
@attr.gpu
def test_intel64_to_gpu(self):
link = self.link
link.to_intel64()
assert isinstance(link.device, backend.Intel64Device)
link.to_gpu()
assert link.device.device == cuda.Device(0)
# Arrays should be converted to cupy.ndarray
# Initialized parameter
assert isinstance(link.y.data, cuda.cupy.ndarray)
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, cuda.ndarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
def test_intel64_to_cpu(self):
link = self.link
link.to_intel64()
assert isinstance(link.device, backend.Intel64Device)
link.to_cpu()
assert isinstance(link.device, backend.CpuDevice)
# Arrays should be converted to numpy.ndarray
# Initialized parameter
assert isinstance(link.y.data, numpy.ndarray)
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, numpy.ndarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
def test_cpu_to_intel64_unsupported(self):
# Test for persistents that cannot be transferred to iDeep.
with self.link.init_scope():
self.link.no_ideep = numpy.ones((2, 2, 2), numpy.float32)
self.link.register_persistent('no_ideep')
self.link.to_intel64()
assert isinstance(self.link.no_ideep, numpy.ndarray)
@attr.gpu
def test_gpu_to_intel64_unsupported(self):
# Test for persistents that cannot be transferred to iDeep.
with self.link.init_scope():
self.link.no_ideep = cuda.cupy.ones((2, 2, 2), numpy.float32)
self.link.register_persistent('no_ideep')
self.link.to_intel64()
assert isinstance(self.link.no_ideep, numpy.ndarray)
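# Tests transfers from CPU/GPU to the ChainerX backend for parameters and
# persistent values.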
@attr.chainerx
class TestToChainerX(unittest.TestCase):
def setUp(self):
self.link = chainer.Link()
shape = (2, 2)
dtype = numpy.float32
y_array = numpy.random.rand(*shape).astype(dtype)
pa_array = numpy.random.rand(*shape).astype(dtype)
ps_scalar = 2.4
with self.link.init_scope():
# Initialized parameter
self.link.y = chainer.Parameter(y_array)
# Uninitialized parameter
self.link.v = chainer.Parameter()
# Persistent ndarray
self.link.add_persistent('pa', pa_array)
# Persistent scalar
self.link.add_persistent('ps', ps_scalar)
self.y_array = y_array
self.pa_array = pa_array
self.ps_scalar = ps_scalar
def test_chainerx_to_chx(self):
link = self.link
link.to_chx()
prev_y = link.y
prev_v = link.v
prev_pa = link.pa
prev_ps = link.ps
link.to_chx()
assert link.device.device == chainerx.get_device('native:0')
# Everything should be left untouched
# Initialized parameter
assert link.y is prev_y
# Uninitialized parameter
assert link.v is prev_v
# Persistent ndarray
assert link.pa is prev_pa
# Persistent scalar
assert link.ps is prev_ps
def test_cpu_to_chx(self):
link = self.link
link.to_chx()
# Initialized parameter
assert isinstance(link.y.data, chainerx.ndarray)
assert link.y.data.device.backend.name == 'native'
assert link.y.data.device.index == 0
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, chainerx.ndarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
@attr.gpu
def test_gpu_to_chx(self):
link = self.link
link.to_gpu()
assert link.device.device == cuda.Device(0)
link.to_chx()
assert link.device.device == chainerx.get_device('cuda:0')
# Arrays should be converted to chainerx.ndarray
# Initialized parameter
assert isinstance(link.y.data, chainerx.ndarray)
assert link.y.data.device.backend.name == 'cuda'
assert link.y.data.device.index == 0
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, chainerx.ndarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
# TODO(niboshi): Add other test variations
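# Tests Link.to_device() with device specifier strings for NumPy, CuPy and
# ChainerX targets.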
class TestToDevice(unittest.TestCase):
def setUp(self):
self.link = chainer.Link()
shape = (2, 2)
dtype = numpy.float32
y_array = numpy.random.rand(*shape).astype(dtype)
pa_array = numpy.random.rand(*shape).astype(dtype)
ps_scalar = 2.4
with self.link.init_scope():
# Initialized parameter
self.link.y = chainer.Parameter(y_array)
# Uninitialized parameter
self.link.v = chainer.Parameter()
# Persistent ndarray
self.link.add_persistent('pa', pa_array)
# Persistent scalar
self.link.add_persistent('ps', ps_scalar)
self.y_array = y_array
self.pa_array = pa_array
self.ps_scalar = ps_scalar
if cuda.available:
self.current_device_id = cuda.cupy.cuda.get_device_id()
def check_to_device(self, device_spec, expected_ndarray_type):
link = self.link
link.to_device(device_spec)
# Initialized parameter
assert isinstance(link.y.data, expected_ndarray_type)
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, expected_ndarray_type)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
return link
def test_to_device_numpy(self):
link = self.check_to_device('@numpy', numpy.ndarray)
assert isinstance(link.device, backend.CpuDevice)
@attr.gpu
def test_to_device_cupy(self):
link = self.check_to_device('@cupy:0', cuda.ndarray)
assert link.device.device == cuda.Device(0)
@attr.chainerx
def test_to_device_chainerx(self):
link = self.check_to_device('native:0', chainerx.ndarray)
assert link.device.device == chainerx.get_device('native:0')
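# Tests how calling a link dispatches to forward() or a user-defined __call__().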
class TestCallMethod(unittest.TestCase):
def setUp(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
self.model = Model()
def test_has_forward_no_call(self):
self.model.forward = mock.MagicMock()
self.model(0) # model.forward is called
self.model.forward.assert_called_once()
def test_has_call_and_forward(self):
self.model.__call__ = mock.MagicMock()
self.model.forward = mock.MagicMock()
self.model(0) # Link.__call__ is called
self.model.forward.assert_called_with(0)
self.model.__call__.assert_not_called()
def test_has_call_no_forward(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
self.mock = mock.MagicMock()
def __call__(self, x):
self.mock(x)
model = Model()
model(0) # model.__call__ is called
model.mock.assert_called_with(0)
def test_no_call_no_forward(self):
with self.assertRaises(AttributeError):
self.model(0)
class TestLinkOverrideToDeviceMethods(unittest.TestCase):
# Overriding to_cpu, to_gpu, to_intel64 is currently not deprecated.
# This test ensures DeprecationWarning is NOT emitted and the overridden
# method is actually called.
#
# TODO(niboshi): Deprecate overriding these methods in a future release
# (such as v7).
def create_link(self, method_name):
class ChildLink(chainer.Link):
def __init__(self):
self.to_method_called = 0
super(ChildLink, self).__init__()
if method_name == 'to_device':
def to_device(self, device):
assert False # never called
elif method_name == 'to_chx':
def to_chx(self, device):
assert False # never called
elif method_name == 'from_chx':
def from_chx(self, device):
assert False # never called
elif method_name == 'to_cpu':
def to_cpu(self):
super(ChildLink, self).to_cpu()
self.to_method_called += 1
elif method_name == 'to_gpu':
def to_gpu(self, device=None):
assert isinstance(device, (cuda.Device, int))
super(ChildLink, self).to_gpu(device)
self.to_method_called += 1
elif method_name == 'to_intel64':
def to_intel64(self):
super(ChildLink, self).to_intel64()
self.to_method_called += 1
else:
assert False, method_name
class ParentLink(chainer.Chain):
def __init__(self):
super(ParentLink, self).__init__()
with self.init_scope():
self.child = ChildLink()
return ParentLink
# to_device, to_chx, from_chx can never be overridden
def test_to_device_override(self):
with pytest.raises(TypeError):
self.create_link('to_device')
def test_to_chx_override(self):
with pytest.raises(TypeError):
self.create_link('to_chx')
def test_from_chx_override(self):
with pytest.raises(TypeError):
self.create_link('from_chx')
# Deprecation warning not emitted on class definition
def test_to_cpu_override(self):
self.create_link('to_cpu')
def test_to_gpu_override(self):
self.create_link('to_gpu')
def test_to_intel64_override(self):
self.create_link('to_intel64')
# Overridden methods are called on to_device()
def test_to_device_cpu(self):
cls = self.create_link('to_cpu')
l = cls()
l.to_device('@numpy')
assert l.child.to_method_called == 1
@attr.gpu
def test_to_device_gpu(self):
cls = self.create_link('to_gpu')
l = cls()
l.to_device('@cupy:0')
assert l.child.to_method_called == 1
@attr.multi_gpu(2)
def test_to_device_multi_gpu(self):
cls = self.create_link('to_gpu')
l = cls()
l.to_device('@cupy:1')
assert l.child.to_method_called == 1
@attr.ideep
def test_to_device_intel64(self):
cls = self.create_link('to_intel64')
l = cls()
l.to_device('@intel64')
assert l.child.to_method_called == 1
# Overridden methods are called on to_cpu()/to_gpu()/to_intel()
def test_to_cpu(self):
cls = self.create_link('to_cpu')
l = cls()
l.to_cpu()
assert l.child.to_method_called == 1
@attr.gpu
def test_to_gpu_without_arg(self):
cls = self.create_link('to_gpu')
l = cls()
l.to_gpu()
assert l.child.to_method_called == 1
@attr.gpu
def test_to_gpu_with_arg(self):
cls = self.create_link('to_gpu')
l = cls()
l.to_gpu(0)
assert l.child.to_method_called == 1
@attr.ideep
def test_to_intel64(self):
cls = self.create_link('to_intel64')
l = cls()
l.to_intel64()
assert l.child.to_method_called == 1
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestSerialize(unittest.TestCase):
def setUp(self):
self.array = numpy.array([1, 2, 3], dtype=numpy.float32)
self.serializer = mock.MagicMock(return_value=self.array)
link = chainer.Link()
with link.init_scope():
link.x = chainer.Parameter()
link.y = chainer.Parameter()
link.add_persistent('z', None)
self.link = link
def test_serialize_numpy(self, backend_config):
array = self.array
link = self.link
serializer = self.serializer
link.to_device(backend_config.device)
link.serialize(serializer)
self.assertEqual(serializer.call_count, 3)
cpu_device = chainer.backend.CpuDevice()
numpy.testing.assert_array_equal(cpu_device.send(link.x.array), array)
numpy.testing.assert_array_equal(cpu_device.send(link.y.array), array)
numpy.testing.assert_array_equal(cpu_device.send(link.z), array)
testing.run_module(__name__, __file__)
| okuta/chainer | tests/chainer_tests/test_link.py | Python | mit | 93,478 |
from django.http import Http404
from django.views.generic.base import TemplateView
from treeherder.model.derived import JobsModel
class ResultsetStatusView(TemplateView):
template_name = "embed/resultset_status.html"
def get_context_data(self, **kwargs):
assert "repository" in kwargs and "revision" in kwargs
context = super(ResultsetStatusView, self).get_context_data(**kwargs)
with JobsModel(kwargs['repository']) as jm:
resultset_list = jm.get_revision_resultset_lookup(
[kwargs['revision']])
if not resultset_list:
raise Http404("No resultset found for revision {0}".format(
kwargs['revision']))
result_set_id = resultset_list[kwargs['revision']]['id']
resultset_status_dict = jm.get_resultset_status(result_set_id)
update_needed = (('pending' in resultset_status_dict) or
('running' in resultset_status_dict) or
not resultset_status_dict)
context['update_needed'] = update_needed
context['resultset_status_dict'] = resultset_status_dict
return context
| adusca/treeherder | treeherder/embed/views.py | Python | mpl-2.0 | 1,201 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import os
import fixtures
from tests.software_config import common
class HookPuppetTest(common.RunScriptTest):
data = {
'id': '1234',
'creation_time': '2015-07-16T11:40:20',
'name': 'fake_resource_name',
'group': 'puppet',
'options': {
'enable_hiera': True,
'enable_facter': True,
'enable_debug': True,
},
'inputs': [
{'name': 'foo', 'value': 'bar'},
{'name': 'another', 'value': 'input'}
],
'outputs': [
{'name': 'first_output'},
{'name': 'second_output'}
],
'config': 'the puppet script'
}
def setUp(self):
super(HookPuppetTest, self).setUp()
self.hook_path = self.relative_path(
__file__,
'../..',
'hot/software-config/elements',
'heat-config-puppet/install.d/hook-puppet.py')
self.fake_tool_path = self.relative_path(
__file__,
'config-tool-fake.py')
self.working_dir = self.useFixture(fixtures.TempDir())
self.outputs_dir = self.useFixture(fixtures.TempDir())
self.log_dir = self.useFixture(fixtures.TempDir())
self.hiera_datadir = self.useFixture(fixtures.TempDir())
self.test_state_path = self.outputs_dir.join('test_state.json')
self.env = os.environ.copy()
self.env.update({
'HEAT_PUPPET_WORKING': self.working_dir.join(),
'HEAT_PUPPET_OUTPUTS': self.outputs_dir.join(),
'HEAT_PUPPET_LOGDIR': self.log_dir.join(),
'HEAT_PUPPET_HIERA_DATADIR': self.hiera_datadir.join(),
'HEAT_PUPPET_CMD': self.fake_tool_path,
'TEST_STATE_PATH': self.test_state_path,
})
def test_hook(self):
self.env.update({
'TEST_RESPONSE': json.dumps({
'stdout': 'puppet success',
'stderr': 'thing happened',
'files': {
self.outputs_dir.join('1234.first_output'): 'output 1',
self.outputs_dir.join('1234.second_output'): 'output 2',
}
}),
})
returncode, stdout, stderr = self.run_cmd(
[self.hook_path], self.env, json.dumps(self.data))
self.assertEqual(0, returncode, stderr)
self.assertEqual({
'deploy_stdout': 'puppet success',
'deploy_stderr': 'thing happened',
'deploy_status_code': 0,
'first_output': 'output 1',
'second_output': 'output 2',
}, json.loads(stdout))
state = self.json_from_file(self.test_state_path)
puppet_script = self.working_dir.join('1234.pp')
self.assertEqual(
[
self.fake_tool_path,
'apply',
'--detailed-exitcodes',
'--debug',
puppet_script
],
state['args'])
self.assertEqual('bar', state['env']['FACTER_foo'])
self.assertEqual('input', state['env']['FACTER_another'])
self.assertEqual(self.outputs_dir.join('1234'),
state['env']['FACTER_heat_outputs_path'])
with open(puppet_script) as f:
self.assertEqual('the puppet script', f.read())
def test_hook_no_debug(self):
self.data['options']['enable_debug'] = False
self.env.update({
'TEST_RESPONSE': json.dumps({
'stdout': 'success',
'stderr': '',
}),
})
returncode, stdout, stderr = self.run_cmd(
[self.hook_path], self.env, json.dumps(self.data))
state = self.json_from_file(self.test_state_path)
puppet_script = self.working_dir.join('1234.pp')
self.assertEqual(
[
self.fake_tool_path,
'apply',
'--detailed-exitcodes',
puppet_script
],
state['args'])
self.data['options']['enable_debug'] = True
def test_hook_puppet_failed(self):
self.env.update({
'TEST_RESPONSE': json.dumps({
'stdout': 'puppet failed',
'stderr': 'bad thing happened',
'returncode': 4
}),
})
returncode, stdout, stderr = self.run_cmd(
[self.hook_path], self.env, json.dumps(self.data))
self.assertEqual(0, returncode, stderr)
self.assertEqual({
'deploy_stdout': 'puppet failed',
'deploy_stderr': 'bad thing happened',
'deploy_status_code': 4,
}, json.loads(stdout))
state = self.json_from_file(self.test_state_path)
puppet_script = self.working_dir.join('1234.pp')
self.assertEqual(
[
self.fake_tool_path,
'apply',
'--detailed-exitcodes',
'--debug',
puppet_script
],
state['args'])
self.assertEqual('bar', state['env']['FACTER_foo'])
self.assertEqual('input', state['env']['FACTER_another'])
self.assertEqual(self.outputs_dir.join('1234'),
state['env']['FACTER_heat_outputs_path'])
with open(puppet_script) as f:
self.assertEqual('the puppet script', f.read())
def test_hook_hiera(self):
self.env.update({
'TEST_RESPONSE': json.dumps({
'stdout': 'puppet success',
'stderr': 'thing happened',
'files': {
self.outputs_dir.join('1234.first_output'): 'output 1',
self.outputs_dir.join('1234.second_output'): 'output 2',
}
}),
})
modulepath = self.working_dir.join()
data = copy.deepcopy(self.data)
data['options']['modulepath'] = modulepath
data['options']['tags'] = 'package,file'
returncode, stdout, stderr = self.run_cmd(
[self.hook_path], self.env, json.dumps(data))
self.assertEqual(0, returncode, stderr)
self.assertEqual({
'deploy_stdout': 'puppet success',
'deploy_stderr': 'thing happened',
'deploy_status_code': 0,
'first_output': 'output 1',
'second_output': 'output 2',
}, json.loads(stdout))
state = self.json_from_file(self.test_state_path)
puppet_script = self.working_dir.join('1234.pp')
hiera_datafile = self.hiera_datadir.join('heat_config_%s.json'
% self.data['name'])
self.assertEqual(
[
self.fake_tool_path,
'apply',
'--detailed-exitcodes',
'--modulepath',
modulepath,
'--tags',
'package,file',
'--debug',
puppet_script
],
state['args'])
self.assertEqual(self.outputs_dir.join('1234'),
state['env']['FACTER_heat_outputs_path'])
with open(puppet_script) as f:
self.assertEqual('the puppet script', f.read())
with open(hiera_datafile) as f:
self.assertEqual({
'foo': 'bar',
'another': 'input',
}, json.loads(f.read()))
| cosminmocan/heat-templates | tests/software_config/test_hook_puppet.py | Python | apache-2.0 | 8,040 |
"""LIT demo for image model.
To run:
python -m lit_nlp.examples.image_demo --port=5432
Then navigate to localhost:5432 to access the demo UI.
"""
import sys
from absl import app
from absl import flags
from lit_nlp import dev_server
from lit_nlp import server_flags
from lit_nlp.api import dtypes as lit_dtypes
from lit_nlp.components import image_gradient_maps
from lit_nlp.examples.datasets import imagenette
from lit_nlp.examples.models import mobilenet
FLAGS = flags.FLAGS
FLAGS.set_default('development_demo', True)
FLAGS.set_default('warm_start', 1)
FLAGS.set_default('default_layout', 'demo_layout')
FLAGS.set_default('page_title', 'LIT Image Demo')
# Function for running demo through gunicorn instead of the local dev server.
def get_wsgi_app():
FLAGS.set_default('server_type', 'external')
FLAGS.set_default('demo_mode', True)
# Parse flags without calling app.run(main), to avoid conflict with
# gunicorn command line flags.
unused = flags.FLAGS(sys.argv, known_only=True)
return main(unused)
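# Hedged example (editorial addition): with gunicorn installed, the WSGI entry
# point above could be served with a command along these lines; the bind port
# and single-worker setting are illustrative assumptions, not project defaults.
#
#   gunicorn -b :5432 -w 1 'lit_nlp.examples.image_demo:get_wsgi_app()'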
def main(_):
demo_layout = lit_dtypes.LitComponentLayout(
components={
'Main': [
'data-table-module',
'datapoint-editor-module',
'lit-slice-module',
'color-module',
],
'Predictions': ['classification-module', 'scalar-module'],
'Explanations': [
'classification-module', 'salience-map-module'],
},
description='Basic layout for image demo',
)
datasets = {'imagenette': imagenette.ImagenetteDataset()}
models = {'mobilenet': mobilenet.MobileNet()}
interpreters = {
'Grad': image_gradient_maps.VanillaGradients(),
'Integrated Gradients': image_gradient_maps.IntegratedGradients(),
'Blur IG': image_gradient_maps.BlurIG(),
'Guided IG': image_gradient_maps.GuidedIG(),
'XRAI': image_gradient_maps.XRAI(),
'XRAI GIG': image_gradient_maps.XRAIGIG(),
}
lit_demo = dev_server.Server(models, datasets, interpreters=interpreters,
generators={},
layouts={'demo_layout': demo_layout},
**server_flags.get_flags())
return lit_demo.serve()
if __name__ == '__main__':
app.run(main)
| pair-code/lit | lit_nlp/examples/image_demo.py | Python | apache-2.0 | 2,271 |
import itertools
import json
from datetime import datetime, timedelta
from pathlib import Path
from unittest.mock import patch
from raiden.messages import Lock
from raiden.storage.serialize import JSONSerializer
from raiden.storage.sqlite import SerializedSQLiteStorage, SQLiteStorage
from raiden.tests.utils import factories
from raiden.transfer.mediated_transfer.events import (
SendBalanceProof,
SendLockedTransfer,
SendLockExpired,
SendRefundTransfer,
)
from raiden.transfer.mediated_transfer.state_change import (
ActionInitMediator,
ActionInitTarget,
ReceiveLockExpired,
ReceiveTransferRefund,
ReceiveTransferRefundCancelRoute,
)
from raiden.transfer.state import BalanceProofUnsignedState
from raiden.transfer.state_change import ReceiveUnlock
from raiden.transfer.utils import (
get_event_with_balance_proof_by_balance_hash,
get_event_with_balance_proof_by_locksroot,
get_state_change_with_balance_proof_by_balance_hash,
get_state_change_with_balance_proof_by_locksroot,
)
from raiden.utils import sha3
def make_signed_balance_proof_from_counter(counter):
lock = Lock(
amount=next(counter),
expiration=next(counter),
secrethash=sha3(factories.make_secret(next(counter))),
)
lock_expired_balance_proof = factories.create(
factories.BalanceProofSignedStateProperties(
nonce=next(counter),
transferred_amount=next(counter),
locked_amount=next(counter),
canonical_identifier=factories.make_canonical_identifier(
token_network_address=factories.make_address(), channel_identifier=next(counter)
),
locksroot=sha3(lock.as_bytes),
message_hash=sha3(b""),
sender=factories.HOP1,
pkey=factories.HOP1_KEY,
)
)
return lock_expired_balance_proof
def make_balance_proof_from_counter(counter) -> BalanceProofUnsignedState:
return BalanceProofUnsignedState(
nonce=next(counter),
transferred_amount=next(counter),
locked_amount=next(counter),
locksroot=sha3(next(counter).to_bytes(1, "big")),
canonical_identifier=factories.make_canonical_identifier(
chain_identifier=next(counter),
token_network_address=factories.make_address(),
channel_identifier=next(counter),
),
)
def make_transfer_from_counter(counter):
return factories.create(
factories.LockedTransferUnsignedStateProperties(
amount=next(counter),
initiator=factories.make_address(),
target=factories.make_address(),
expiration=next(counter),
secret=factories.make_secret(next(counter)),
)
)
def make_signed_transfer_from_counter(counter):
lock = Lock(
amount=next(counter),
expiration=next(counter),
secrethash=sha3(factories.make_secret(next(counter))),
)
signed_transfer = factories.create(
factories.LockedTransferSignedStateProperties(
amount=next(counter),
initiator=factories.make_address(),
target=factories.make_address(),
expiration=next(counter),
secret=factories.make_secret(next(counter)),
payment_identifier=next(counter),
token=factories.make_address(),
nonce=next(counter),
transferred_amount=next(counter),
locked_amount=next(counter),
locksroot=sha3(lock.as_bytes),
canonical_identifier=factories.make_canonical_identifier(
token_network_address=factories.make_address(), channel_identifier=next(counter)
),
recipient=factories.make_address(),
sender=factories.HOP1,
pkey=factories.HOP1_KEY,
)
)
return signed_transfer
def make_from_route_from_counter(counter):
from_channel = factories.create(
factories.NettingChannelStateProperties(
canonical_identifier=factories.make_canonical_identifier(),
token_address=factories.make_address(),
partner_state=factories.NettingChannelEndStateProperties(
balance=next(counter), address=factories.HOP1
),
)
)
from_route = factories.make_route_from_channel(from_channel)
expiration = factories.UNIT_REVEAL_TIMEOUT + 1
from_transfer = factories.make_signed_transfer_for(
from_channel,
factories.LockedTransferSignedStateProperties(
transferred_amount=0,
canonical_identifier=factories.make_canonical_identifier(
token_network_address=from_channel.token_network_identifier
),
amount=1,
expiration=expiration,
secret=sha3(factories.make_secret(next(counter))),
initiator=factories.make_address(),
target=factories.make_address(),
payment_identifier=next(counter),
sender=factories.HOP1,
pkey=factories.HOP1_KEY,
),
)
return from_route, from_transfer
def test_get_state_change_with_balance_proof():
""" All state changes which contain a balance proof must be found by when
querying the database.
"""
serializer = JSONSerializer
storage = SerializedSQLiteStorage(":memory:", serializer)
counter = itertools.count()
lock_expired = ReceiveLockExpired(
balance_proof=make_signed_balance_proof_from_counter(counter),
secrethash=sha3(factories.make_secret(next(counter))),
message_identifier=next(counter),
)
unlock = ReceiveUnlock(
message_identifier=next(counter),
secret=sha3(factories.make_secret(next(counter))),
balance_proof=make_signed_balance_proof_from_counter(counter),
)
transfer_refund = ReceiveTransferRefund(
transfer=make_signed_transfer_from_counter(counter), routes=list()
)
transfer_refund_cancel_route = ReceiveTransferRefundCancelRoute(
routes=list(),
transfer=make_signed_transfer_from_counter(counter),
secret=sha3(factories.make_secret(next(counter))),
)
mediator_from_route, mediator_signed_transfer = make_from_route_from_counter(counter)
action_init_mediator = ActionInitMediator(
routes=list(), from_route=mediator_from_route, from_transfer=mediator_signed_transfer
)
target_from_route, target_signed_transfer = make_from_route_from_counter(counter)
action_init_target = ActionInitTarget(route=target_from_route, transfer=target_signed_transfer)
statechanges_balanceproofs = [
(lock_expired, lock_expired.balance_proof),
(unlock, unlock.balance_proof),
(transfer_refund, transfer_refund.transfer.balance_proof),
(transfer_refund_cancel_route, transfer_refund_cancel_route.transfer.balance_proof),
(action_init_mediator, action_init_mediator.from_transfer.balance_proof),
(action_init_target, action_init_target.transfer.balance_proof),
]
timestamp = datetime.utcnow().isoformat(timespec="milliseconds")
for state_change, _ in statechanges_balanceproofs:
storage.write_state_change(state_change, timestamp)
# Make sure state changes are returned in the correct order in which they were stored
stored_statechanges = storage.get_statechanges_by_identifier(1, "latest")
assert isinstance(stored_statechanges[0], ReceiveLockExpired)
assert isinstance(stored_statechanges[1], ReceiveUnlock)
assert isinstance(stored_statechanges[2], ReceiveTransferRefund)
assert isinstance(stored_statechanges[3], ReceiveTransferRefundCancelRoute)
assert isinstance(stored_statechanges[4], ActionInitMediator)
assert isinstance(stored_statechanges[5], ActionInitTarget)
# Make sure state changes are returned in the correct order in which they were stored
stored_statechanges = storage.get_statechanges_by_identifier(1, 2)
assert isinstance(stored_statechanges[0], ReceiveLockExpired)
assert isinstance(stored_statechanges[1], ReceiveUnlock)
for state_change, balance_proof in statechanges_balanceproofs:
state_change_record = get_state_change_with_balance_proof_by_balance_hash(
storage=storage,
canonical_identifier=balance_proof.canonical_identifier,
sender=balance_proof.sender,
balance_hash=balance_proof.balance_hash,
)
assert state_change_record.data == state_change
state_change_record = get_state_change_with_balance_proof_by_locksroot(
storage=storage,
canonical_identifier=balance_proof.canonical_identifier,
sender=balance_proof.sender,
locksroot=balance_proof.locksroot,
)
assert state_change_record.data == state_change
def test_get_event_with_balance_proof():
""" All events which contain a balance proof must be found by when
querying the database.
"""
serializer = JSONSerializer
storage = SerializedSQLiteStorage(":memory:", serializer)
counter = itertools.count()
lock_expired = SendLockExpired(
recipient=factories.make_address(),
message_identifier=next(counter),
balance_proof=make_balance_proof_from_counter(counter),
secrethash=sha3(factories.make_secret(next(counter))),
)
locked_transfer = SendLockedTransfer(
recipient=factories.make_address(),
channel_identifier=factories.make_channel_identifier(),
message_identifier=next(counter),
transfer=make_transfer_from_counter(counter),
)
balance_proof = SendBalanceProof(
recipient=factories.make_address(),
channel_identifier=factories.make_channel_identifier(),
message_identifier=next(counter),
payment_identifier=next(counter),
token_address=factories.make_address(),
secret=factories.make_secret(next(counter)),
balance_proof=make_balance_proof_from_counter(counter),
)
refund_transfer = SendRefundTransfer(
recipient=factories.make_address(),
channel_identifier=factories.make_channel_identifier(),
message_identifier=next(counter),
transfer=make_transfer_from_counter(counter),
)
events_balanceproofs = [
(lock_expired, lock_expired.balance_proof),
(locked_transfer, locked_transfer.balance_proof),
(balance_proof, balance_proof.balance_proof),
(refund_transfer, refund_transfer.transfer.balance_proof),
]
timestamp = datetime.utcnow().isoformat(timespec="milliseconds")
state_change = ""
for event, _ in events_balanceproofs:
state_change_identifier = storage.write_state_change(state_change, timestamp)
storage.write_events(
state_change_identifier=state_change_identifier, events=[event], log_time=timestamp
)
for event, balance_proof in events_balanceproofs:
event_record = get_event_with_balance_proof_by_balance_hash(
storage=storage,
canonical_identifier=balance_proof.canonical_identifier,
balance_hash=balance_proof.balance_hash,
)
assert event_record.data == event
event_record = get_event_with_balance_proof_by_locksroot(
storage=storage,
canonical_identifier=balance_proof.canonical_identifier,
recipient=event.recipient,
locksroot=balance_proof.locksroot,
)
assert event_record.data == event
# Checking that balance proof attribute can be accessed for all events.
# Issue https://github.com/raiden-network/raiden/issues/3179
assert event_record.data.balance_proof == event.balance_proof
def test_log_run():
with patch("raiden.storage.sqlite.get_system_spec") as get_speck_mock:
get_speck_mock.return_value = dict(raiden="1.2.3")
store = SerializedSQLiteStorage(":memory:", None)
store.log_run()
cursor = store.conn.cursor()
cursor.execute("SELECT started_at, raiden_version FROM runs")
run = cursor.fetchone()
now = datetime.utcnow()
assert now - timedelta(seconds=2) <= run[0] <= now, f"{run[0]} not right before {now}"
assert run[1] == "1.2.3"
def test_batch_query_state_changes():
storage = SQLiteStorage(":memory:")
# Add the v18 state changes to the DB
state_changes_file = Path(__file__).parent / "storage/migrations/data/v18_statechanges.json"
state_changes_data = json.loads(state_changes_file.read_text())
for state_change_record in state_changes_data:
storage.write_state_change(
state_change=json.dumps(state_change_record[1]),
log_time=datetime.utcnow().isoformat(timespec="milliseconds"),
)
# Test that querying the state changes in batches of 10 works
state_changes_num = 87
state_changes = []
for state_changes_batch in storage.batch_query_state_changes(batch_size=10):
state_changes.extend(state_changes_batch)
assert len(state_changes) == state_changes_num
for i in range(1, 87):
assert state_changes[i - 1].state_change_identifier == i
# Test that we can also add a filter
state_changes = []
state_changes_batch_query = storage.batch_query_state_changes(
batch_size=10, filters=[("_type", "raiden.transfer.state_change.Block")]
)
for state_changes_batch in state_changes_batch_query:
state_changes.extend(state_changes_batch)
assert len(state_changes) == 77
# Test that filter works with logical or and a wildmark too
state_changes = []
state_changes_batch_query = storage.batch_query_state_changes(
batch_size=10,
filters=[
# Should be 5 of them
("_type", "raiden.transfer.state_change.ContractReceiveChannel%"),
# Should be only 1
("_type", "raiden.transfer.state_change.ContractReceiveNewPaymentNetwork"),
],
logical_and=False,
)
for state_changes_batch in state_changes_batch_query:
state_changes.extend(state_changes_batch)
assert len(state_changes) == 6
def test_batch_query_event_records():
storage = SQLiteStorage(":memory:")
# Add the v18 state changes to the DB (need them to satisfy foreign key constraints)
state_changes_file = Path(__file__).parent / "storage/migrations/data/v18_statechanges.json"
state_changes_data = json.loads(state_changes_file.read_text())
for state_change_record in state_changes_data:
storage.write_state_change(
state_change=json.dumps(state_change_record[1]),
log_time=datetime.utcnow().isoformat(timespec="milliseconds"),
)
# Add the v18 events to the DB
events_file = Path(__file__).parent / "storage/migrations/data/v18_events.json"
events_data = json.loads(events_file.read_text())
for event in events_data:
state_change_identifier = event[1]
event_data = json.dumps(event[2])
log_time = datetime.utcnow().isoformat(timespec="milliseconds")
event_tuple = (None, state_change_identifier, log_time, event_data)
storage.write_events([event_tuple])
# Test that querying the events in batches of 1 works
events = []
for events_batch in storage.batch_query_event_records(batch_size=1):
events.extend(events_batch)
assert len(events) == 3
# Test that we can also add a filter
events = []
events_batch_query = storage.batch_query_event_records(
batch_size=1, filters=[("_type", "raiden.transfer.events.EventPaymentReceivedSuccess")]
)
for events_batch in events_batch_query:
events.extend(events_batch)
assert len(events) == 1
event_type = json.loads(events[0].data)["_type"]
assert event_type == "raiden.transfer.events.EventPaymentReceivedSuccess"
# Test that we can also add a filter with logical OR
events = []
events_batch_query = storage.batch_query_event_records(
batch_size=1,
filters=[
("_type", "raiden.transfer.events.EventPaymentReceivedSuccess"),
("_type", "raiden.transfer.events.ContractSendChannelSettle"),
],
logical_and=False,
)
for events_batch in events_batch_query:
events.extend(events_batch)
assert len(events) == 2
| hackaugusto/raiden | raiden/tests/unit/test_sqlite.py | Python | mit | 16,362 |
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
minecraft_username = models.CharField(max_length=120, blank=True)
referred_by = models.CharField(max_length=120, blank=True) # This should be Minecraft username.
granted_access = models.BooleanField(default=False)
suspended_until = models.DateTimeField(blank=True, null=True)
suspended_reason = models.CharField(max_length=140, blank=True)
is_operator = models.BooleanField(default=False)
can_invite_new_users = models.BooleanField(default=False)
reddit_username = models.CharField(max_length=120, blank=True, default='')
reddit_access_granted = models.BooleanField(default=False)
| weegeekps/minecraft-gatekeeper | MinecraftGatekeeper/RootSite/models.py | Python | apache-2.0 | 727 |
from __future__ import unicode_literals, print_function
from kivy.factory import Factory
def widget_from_json(json_dict):
#Conversion step as python2 kivy does not accept Property names in unicode
args = json_dict['args']
new_dict = {}
for each in args:
new_dict[str(each)] = args[each]
return getattr(Factory, json_dict['type'])(**new_dict)
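# Hedged usage sketch (editorial addition): the dict below shows the shape this
# helper expects; the 'Label' widget type and its 'text' property are
# illustrative and not taken from the kivy_survey schema.
#
# label = widget_from_json({'type': 'Label', 'args': {'text': 'hello'}})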
| Kovak/KivySurvey | kivy_survey/jsontowidget.py | Python | mit | 351 |
import simplejson as json
from flask import current_app as app, request
from flask.json import dumps
class ExtensibleJSONEncoder(json.JSONEncoder):
"""A JSON encoder that returns the to_json method if present"""
def default(self, obj):
if hasattr(obj, 'to_json'):
return obj.to_json()
return super(ExtensibleJSONEncoder, self).default(obj)
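# Hedged illustration (editorial addition): any object exposing a to_json()
# method can be serialized explicitly with this encoder, e.g.
# json.dumps(obj, cls=ExtensibleJSONEncoder). Whether Flask's dumps() below
# picks it up depends on the app's json_encoder configuration, which is set
# outside this module.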
def jsonify(*args, **kwargs):
"""
Returns a json response
"""
data = None
indent = not request.is_xhr
status = kwargs.pop('_status_code', 200)
if args:
data = args[0] if len(args) == 1 else args
if kwargs:
if data:
if type(data) != list:
data = [data]
data.append(dict(**kwargs))
else:
data = dict(**kwargs)
return app.response_class(dumps(data, indent=indent), status=status,
                              mimetype='application/json')
 | oregoncountryfair/ocfnet | ocfnet/util.py | Python | mit | 908 |
"""
Django settings for tutorial project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from unipath import Path
BASE_DIR = Path(__file__).ancestor(2)
print BASE_DIR
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$2l8fanigl1a(e@s%9$(+aohy&#_ps!l$u&1&nl0-6aoon$3%0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
BASE_DIR.child("templates"),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tutorial.urls'
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| python-uni/tutorial-django | tutorial/tutorial/settings.py | Python | mit | 2,574 |
"""Implement the rules of each C++ build utility type."""
import os
import logging
import shared_utils as su
class CplusplusCommon(object):
"""Common C++ handler functions."""
@classmethod
def _get_cc_all_link_libs(cls, rule_details, details_map):
"""Get all link libraries for a C++ build rule."""
if su.CC_LIB_TYPE == rule_details[su.TYPE_KEY]:
return [], []
# Note: It might be a good idea to combine the link libraries to an archive
# file before linking. Linker picks up only used .o files from .a files.
# However, we want developers to keep dependencies thin (i.e. stop
# including unused dependencies). If that happens, then building the
# archive file is an unnecessary additional step.
# Ignore system dependencies if they are already specified in
# compiler parameters.
rule_details[su.SYS_DEPS_KEY] = [
item for item in rule_details.get(su.SYS_DEPS_KEY, [])
if item not in rule_details[su.COMPILE_PARAMS_KEY]]
all_link_libs = []
link_libs, sys_deps = [], []
for rule_symbol in rule_details[su.ALL_DEPS_KEY]:
sys_deps.extend(details_map[rule_symbol].get(su.SYS_DEPS_KEY, []))
if rule_symbol == rule_details[su.SYMBOL_KEY]:
continue
if details_map[rule_symbol][su.TYPE_KEY] == su.CC_LIB_TYPE:
link_libs.append(details_map[rule_symbol][su.OUT_KEY])
sys_deps = sorted(list(set(sys_deps)))
# Linked libs are the .o files from dependency lib rule outputs.
assert len(set(link_libs)) == len(link_libs)
assert all([l.endswith('.o') for l in link_libs])
# Precompiled linked libs are picked only from current dependency.
pc_link_libs = rule_details[su.PC_DEPS_KEY]
assert len(set(pc_link_libs)) == len(pc_link_libs)
link_libs.extend(pc_link_libs)
all_link_libs.extend(link_libs)
all_link_libs.extend(sys_deps)
return link_libs, all_link_libs
@classmethod
def _set_compile_command(cls, rule_details, proto_sources, all_link_libs):
"""Sets C++ compile command."""
compile_command = su.CC_COMPILER.split()
compile_command.append('-I.')
for other_dir in rule_details.get(su.OTHER_INCLUDE_DIRS, []):
compile_command.append('-I{}'.format(other_dir))
compile_command.extend(rule_details[su.COMPILE_PARAMS_KEY])
compile_command.extend(
[su.get_relative_path(rule_details[su.POSSIBLE_PREFIXES_KEY], f)
for f in rule_details[su.SRCS_KEY]])
compile_command.extend(
[su.get_relative_path(rule_details[su.POSSIBLE_PREFIXES_KEY], f)
for f in proto_sources])
compile_command.extend(all_link_libs)
compile_command.extend(['-o', rule_details[su.OUT_KEY]])
rule_details[su.COMPILE_COMMAND_KEY] = compile_command
@classmethod
def _set_pc_deps(cls, rule_details, extra_pc_deps):
"""Set precompiled dependency flags."""
pc_deps = rule_details.get(su.PC_DEPS_KEY, [])
pc_deps.extend(extra_pc_deps or [])
pc_deps = sorted(list(set(su.expand_env_vars(d) for d in pc_deps)))
rule_details[su.PC_DEPS_KEY] = pc_deps
@classmethod
def _set_possible_prefixes(cls, rule_details, details_map):
"""Set possible prefixes list for copying files in working directory."""
possible_prefixes = []
for rule_symbol in rule_details[su.ALL_DEPS_KEY]:
other_rule_details = details_map[rule_symbol]
if other_rule_details[su.TYPE_KEY] != su.CC_PROTO_LIB_TYPE:
continue
# If C++ module has proto dependencies, then the generated pb.cc and
# pb.h files need to be copied in locally.
possible_prefixes.extend([other_rule_details[su.WDIR_KEY]])
rule_details[su.POSSIBLE_PREFIXES_KEY] = su.prefix_transform(
possible_prefixes)
@classmethod
def _get_all_direct_srcs(cls, rule_details, details_map):
"""Gets all direct requirements."""
all_hdrs = []
for rule_symbol in rule_details[su.ALL_DEPS_KEY]:
all_hdrs.extend(details_map[rule_symbol][su.HDRS_KEY])
all_srcs = []
all_srcs.extend(rule_details[su.SRCS_KEY])
all_srcs.extend(all_hdrs)
return all_srcs
@classmethod
def _get_proto_info(cls, rule_details, details_map):
"""Get information from proto dependencies."""
proto_sources = []
proto_dep_paths = []
for rule_symbol in rule_details[su.ALL_DEPS_KEY]:
other_rule_details = details_map[rule_symbol]
if other_rule_details[su.TYPE_KEY] != su.CC_PROTO_LIB_TYPE:
continue
proto_sources.append(other_rule_details[su.OUT_CC_KEY])
proto_dep_paths.append(other_rule_details[su.OUT_CC_KEY])
proto_dep_paths.append(other_rule_details[su.OUT_HEADER_KEY])
return proto_sources, proto_dep_paths
@classmethod
def _set_all_dep_paths(cls, rule_details, link_libs, proto_dep_paths):
"""Set all dependency paths used for build state check."""
all_dep_paths = rule_details[su.ALL_SRCS_KEY][:]
all_dep_paths.extend(link_libs)
all_dep_paths.append(rule_details[su.OUT_KEY])
all_dep_paths.extend(proto_dep_paths)
rule_details[su.ALL_DEP_PATHS_KEY].extend(list(set(all_dep_paths)))
@classmethod
def _internal_setup(cls, rule_details, details_map, init_params):
"""Initializing build rule dictionary."""
out_file, is_test, extra_pc_deps, compile_params = init_params
su.init_rule_common(rule_details, out_file, [su.SRCS_KEY, su.HDRS_KEY])
compile_params.extend(rule_details.get(su.COMPILE_PARAMS_KEY, []))
rule_details[su.COMPILE_PARAMS_KEY] = compile_params
cls._set_pc_deps(rule_details, extra_pc_deps)
if not rule_details[su.SRCS_KEY]:
rule_details[su.SRCS_KEY] = [su.DUMMY_CC]
if is_test:
rule_details[su.TEST_COMMANDS_KEY] = [[rule_details[su.OUT_KEY]]]
cls._set_possible_prefixes(rule_details, details_map)
link_libs, all_link_libs = cls._get_cc_all_link_libs(
rule_details, details_map)
all_srcs = cls._get_all_direct_srcs(rule_details, details_map)
rule_details[su.ALL_SRCS_KEY] = all_srcs
proto_sources, proto_dep_paths = cls._get_proto_info(
rule_details, details_map)
rule_details[su.PROTO_SRCS_KEY] = proto_sources
cls._set_all_dep_paths(rule_details, link_libs, proto_dep_paths)
cls._set_compile_command(rule_details, proto_sources, all_link_libs)
@classmethod
def build_commands(cls, rule_details):
"""Generate build command line."""
# Build system does not rebuild a rule if the file cache is current. We use
# this optimization to perform a delayed calculation of the build command
# list.
logging.info('Emitting %s at %s', rule_details[su.TYPE_KEY],
su.log_normalize(rule_details[su.OUT_KEY]))
directory_list = [rule_details[su.OUTDIR_KEY], rule_details[su.WDIR_KEY]]
command_list = [su.get_mkdir_command(d) for d in directory_list]
command_list.append([su.CHANGE_CURR_DIR, rule_details[su.WDIR_KEY]])
command_list.extend(su.cp_commands_list(rule_details, su.ALL_SRCS_KEY))
command_list.extend(su.cp_commands_list(rule_details, su.PROTO_SRCS_KEY))
command_list.append(rule_details[su.COMPILE_COMMAND_KEY])
return command_list
class CplusplusLibrary(CplusplusCommon):
"""Handler class for CC lib build rules."""
@classmethod
def setup(cls, rule_details, details_map):
"""Do full setup."""
init_params = (
'{}.o'.format(rule_details[su.NAME_KEY]), False, None, ['-c'])
cls._internal_setup(rule_details, details_map, init_params)
class CplusplusBinary(CplusplusCommon):
"""Handler class for CC binary build rules."""
@classmethod
def setup(cls, rule_details, details_map):
"""Do full setup."""
init_params = (rule_details[su.NAME_KEY], False, None, [])
cls._internal_setup(rule_details, details_map, init_params)
class CplusplusTest(CplusplusCommon):
"""Handler class for CC test build rules."""
@classmethod
def setup(cls, rule_details, details_map):
"""Do full setup."""
init_params = (
rule_details[su.NAME_KEY], True,
['env.GTEST_MAIN_LIB', 'env.GTEST_MOCK_LIB'],
['-isystem', os.path.join(su.GMOCK_DIR, 'include'), '-isystem',
os.path.join(su.GTEST_DIR, 'include'), '-pthread'])
cls._internal_setup(rule_details, details_map, init_params)
| jkumarrf/mool | build_tool/bu.scripts/cc_common.py | Python | bsd-3-clause | 8,229 |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("GradientBoostingClassifier" , "FourClass_10" , "mssql")
| antoinecarme/sklearn2sql_heroku | tests/classification/FourClass_10/ws_FourClass_10_GradientBoostingClassifier_mssql_code_gen.py | Python | bsd-3-clause | 153 |
from django.contrib import admin
from duelify_app.models import Ring, Punch, Category, User
from django import forms
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
#from customauth.models import User
admin.site.register(Category)
class TinyMCEAdmin(admin.ModelAdmin):
class Media:
js = ('/static/tiny_mce/tiny_mce.js', )
admin.site.register(Punch, TinyMCEAdmin)
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('first_name', 'last_name', 'email', 'date_of_birth', 'location', 'browser')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = User
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class MyUserAdmin(UserAdmin):
# The forms to add and change user instances
form = UserChangeForm
add_form = UserCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('email', 'date_of_birth', 'is_admin')
list_filter = ('is_admin',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('first_name', 'last_name','date_of_birth', 'location', 'browser', 'score')}),
('Permissions', {'fields': ('is_admin',)}),
('Important dates', {'fields': ('last_login',)}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('first_name', 'last_name','email', 'date_of_birth', 'location', 'password1', 'password2')}
),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
# Now register the new UserAdmin...
admin.site.register(User, MyUserAdmin)
# ... and, since we're not using Django's builtin permissions,
# unregister the Group model from admin.
admin.site.unregister(Group)
class RingAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('category',)}
admin.site.register(Ring, RingAdmin)
| houmie/duelify | duelify_app/admin.py | Python | gpl-2.0 | 3,440 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.refrigeration import RefrigerationCondenserWaterCooled
log = logging.getLogger(__name__)
class TestRefrigerationCondenserWaterCooled(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_refrigerationcondenserwatercooled(self):
pyidf.validation_level = ValidationLevel.error
obj = RefrigerationCondenserWaterCooled()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_rated_effective_total_heat_rejection_rate = 0.0001
obj.rated_effective_total_heat_rejection_rate = var_rated_effective_total_heat_rejection_rate
# real
var_rated_condensing_temperature = 0.0001
obj.rated_condensing_temperature = var_rated_condensing_temperature
# real
var_rated_subcooling_temperature_difference = 0.0
obj.rated_subcooling_temperature_difference = var_rated_subcooling_temperature_difference
# real
var_rated_water_inlet_temperature = 0.0001
obj.rated_water_inlet_temperature = var_rated_water_inlet_temperature
# node
var_water_inlet_node_name = "node|Water Inlet Node Name"
obj.water_inlet_node_name = var_water_inlet_node_name
# node
var_water_outlet_node_name = "node|Water Outlet Node Name"
obj.water_outlet_node_name = var_water_outlet_node_name
# alpha
var_watercooled_loop_flow_type = "VariableFlow"
obj.watercooled_loop_flow_type = var_watercooled_loop_flow_type
# object-list
var_water_outlet_temperature_schedule_name = "object-list|Water Outlet Temperature Schedule Name"
obj.water_outlet_temperature_schedule_name = var_water_outlet_temperature_schedule_name
# real
var_water_design_flow_rate = 0.0001
obj.water_design_flow_rate = var_water_design_flow_rate
# real
var_water_maximum_flow_rate = 0.0001
obj.water_maximum_flow_rate = var_water_maximum_flow_rate
# real
var_water_maximum_water_outlet_temperature = 35.0
obj.water_maximum_water_outlet_temperature = var_water_maximum_water_outlet_temperature
# real
var_water_minimum_water_inlet_temperature = 20.0
obj.water_minimum_water_inlet_temperature = var_water_minimum_water_inlet_temperature
# alpha
var_enduse_subcategory = "End-Use Subcategory"
obj.enduse_subcategory = var_enduse_subcategory
# real
var_condenser_refrigerant_operating_charge_inventory = 15.15
obj.condenser_refrigerant_operating_charge_inventory = var_condenser_refrigerant_operating_charge_inventory
# real
var_condensate_receiver_refrigerant_inventory = 16.16
obj.condensate_receiver_refrigerant_inventory = var_condensate_receiver_refrigerant_inventory
# real
var_condensate_piping_refrigerant_inventory = 17.17
obj.condensate_piping_refrigerant_inventory = var_condensate_piping_refrigerant_inventory
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.refrigerationcondenserwatercooleds[0].name, var_name)
self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].rated_effective_total_heat_rejection_rate, var_rated_effective_total_heat_rejection_rate)
self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].rated_condensing_temperature, var_rated_condensing_temperature)
self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].rated_subcooling_temperature_difference, var_rated_subcooling_temperature_difference)
self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].rated_water_inlet_temperature, var_rated_water_inlet_temperature)
self.assertEqual(idf2.refrigerationcondenserwatercooleds[0].water_inlet_node_name, var_water_inlet_node_name)
self.assertEqual(idf2.refrigerationcondenserwatercooleds[0].water_outlet_node_name, var_water_outlet_node_name)
self.assertEqual(idf2.refrigerationcondenserwatercooleds[0].watercooled_loop_flow_type, var_watercooled_loop_flow_type)
self.assertEqual(idf2.refrigerationcondenserwatercooleds[0].water_outlet_temperature_schedule_name, var_water_outlet_temperature_schedule_name)
self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].water_design_flow_rate, var_water_design_flow_rate)
self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].water_maximum_flow_rate, var_water_maximum_flow_rate)
self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].water_maximum_water_outlet_temperature, var_water_maximum_water_outlet_temperature)
self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].water_minimum_water_inlet_temperature, var_water_minimum_water_inlet_temperature)
self.assertEqual(idf2.refrigerationcondenserwatercooleds[0].enduse_subcategory, var_enduse_subcategory)
self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].condenser_refrigerant_operating_charge_inventory, var_condenser_refrigerant_operating_charge_inventory)
self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].condensate_receiver_refrigerant_inventory, var_condensate_receiver_refrigerant_inventory)
        self.assertAlmostEqual(idf2.refrigerationcondenserwatercooleds[0].condensate_piping_refrigerant_inventory, var_condensate_piping_refrigerant_inventory)
 | rbuffat/pyidf | tests/test_refrigerationcondenserwatercooled.py | Python | apache-2.0 | 5,826 |
# This file is executed by __init__.py and ppimport hooks.
"""
Discrete Fourier Transform algorithms
=====================================
Fast Fourier Transforms:
fft --- FFT of arbitrary type periodic sequences
ifft --- Inverse of fft
fftn --- Multi-dimensional FFT
ifftn --- Inverse of fftn
fft2 --- Two-dimensional FFT
ifft2 --- Inverse of fft2
rfft --- FFT of real periodic sequences
irfft --- Inverse of rfft
Differential and pseudo-differential operators:
diff --- Differentiation and integration of periodic sequences
tilbert --- Tilbert transform: cs_diff(x,h,h)
itilbert --- Inverse Tilbert transform: sc_diff(x,h,h)
hilbert --- Hilbert transform: cs_diff(x,inf,inf)
ihilbert --- Inverse Hilbert transform: sc_diff(x,inf,inf)
cs_diff --- cosh/sinh pseudo-derivative of periodic sequences
sc_diff --- sinh/cosh pseudo-derivative of periodic sequences
ss_diff --- sinh/sinh pseudo-derivative of periodic sequences
cc_diff --- cosh/cosh pseudo-derivative of periodic sequences
shift --- Shift periodic sequences
Helper functions:
fftshift --- Shift zero-frequency component to center of spectrum
  ifftshift --- Inverse of fftshift
  fftfreq --- DFT sample frequencies
rfftfreq --- DFT sample frequencies (specific to rfft,irfft)
Extension modules:
_fftpack --- Provides functions zfft, drfft, zrfft, zfftnd,
destroy_*_cache
convolve --- Provides functions convolve, convolve_z,
init_convolution_kernel, destroy_convolve_cache
"""
__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
'fft2','ifft2',
'diff',
'tilbert','itilbert','hilbert','ihilbert',
'sc_diff','cs_diff','cc_diff','ss_diff',
'shift',
'rfftfreq'
]
if __doc__:
__doc_title__ = __doc__.lstrip().split('\n',1)[0]
else:
__doc_title__ = None
postpone_import = 1
global_symbols = ['fft','fftn','fft2','ifft','ifft2','ifftn',
'fftshift','ifftshift','fftfreq']
| huard/scipy-work | scipy/fftpack/info.py | Python | bsd-3-clause | 2,104 |
#!/usr/bin/python
import sys
def spam(divideBy):
try:
return 42 / divideBy
except ZeroDivisionError:
return sys.maxsize
print(spam(2))
print(spam(12))
print(spam(0))
print(spam(1))
| nibaozhu/project_x | src/test/p1/33.py | Python | unlicense | 190 |
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016, 2017
class DataAlreadyExistsError(RuntimeError):
def __init__(self, label):
self.message = str("Data with label '%s' already exists and cannot be added" % (label))
def get_patient_id(d):
return d['patient']['identifier']
def get_index_by_label(d, label):
for idx in range(len(d['data'])):
if d['data'][idx]['label'] == label:
return idx
return None
def get_sampled_data_values(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['values']
def get_coordinate_data_values(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueCoordinateData']['values']
def get_period_value(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['period']['value']
def get_sampled_data_unit(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['unit']
def get_period_unit(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['period']['unit']
def get_gain(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['gain']
def get_initValue(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['initVal']
def get_patient_ID(d):
return d['patient']['identifier']
def add_sampled_data(d, label, sampled_data, period_value, period_unit, update_if_exists=False):
# check if label already exists
data_idx = get_index_by_label(d, label)
if data_idx is not None:
if update_if_exists == True:
            v = {'label' : label, 'valueSampledData' : { 'values' : sampled_data, 'period' : { 'value' : period_value, 'unit' : period_unit }}}
d['data'][data_idx] = v
else:
raise DataAlreadyExistsError(label=label)
else:
        v = {'label' : label, 'valueSampledData' : { 'values' : sampled_data, 'period' : { 'value' : period_value, 'unit' : period_unit }}}
d['data'].append(v)
def add_coordinate_data(d, label, coords, replace_if_exists=False):
data_idx = get_index_by_label(d, label)
if data_idx is not None:
if replace_if_exists == True:
            v = {'label' : label, 'valueCoordinateData' : {'values' : coords}}
d['data'][data_idx] = v
else:
raise DataAlreadyExistsError(label=label)
else:
v = {'label' : label, 'valueCoordinateData' : {'values' : coords}}
d['data'].append(v)
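# Hedged illustration (editorial addition): the accessors above assume an
# observation dict shaped roughly as follows; the label, units and values are
# made up, and fields not referenced by this module are omitted.
_EXAMPLE_OBSERVATION = {
    'patient': {'identifier': 'patient-001'},
    'data': [
        {'label': 'ECG Lead II',
         'valueSampledData': {'values': [0.1, 0.2, 0.3],
                              'period': {'value': 8, 'unit': 'ms'},
                              'unit': 'mV', 'gain': 1.0, 'initVal': 0.0}},
        {'label': 'Annotations',
         'valueCoordinateData': {'values': [[0, 0.1]]}},
    ],
}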
| IBMStreams/streamsx.health | samples/HealthcareJupyterDemo/package/healthdemo/utils.py | Python | apache-2.0 | 2,562 |
"""
WSGI config for chatter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "chatter.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| scott-w/pyne-django-tutorial | chatter/chatter/wsgi.py | Python | mit | 389 |
class Solution(object):
def findMaxConsecutiveOnes(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        # Sliding window [li, ri] over nums: l is the current window length and
        # `inserted` marks whether the single zero allowed inside the window
        # has already been absorbed.
max_len = 0
li = 0
ri = 0
l = 0
inserted = False
for ri, rval in enumerate(nums):
if rval == 1:
l += 1
max_len = max(max_len, l)
else:
if not inserted:
inserted = True
l += 1
max_len = max(max_len, l)
else:
while nums[li] == 1:
li += 1
li += 1
l = ri-li+1
max_len = max(max_len, l)
        return max_len
 | daicang/Leetcode-solutions | 487-max-consecutive-ones-ii.py | Python | mit | 773 |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Provides a great number of useful utility functions for IRC. Things to muck around
with hostmasks, set bold or color on strings, IRC-case-insensitive dicts, a
nick class to handle nicks (so comparisons and hashing and whatnot work in an
IRC-case-insensitive fashion), and numerous other things.
"""
import re
import time
import random
import string
import textwrap
from cStringIO import StringIO as sio
import supybot.utils as utils
def debug(s, *args):
"""Prints a debug string. Most likely replaced by our logging debug."""
print '***', s % args
def isUserHostmask(s):
"""Returns whether or not the string s is a valid User hostmask."""
p1 = s.find('!')
p2 = s.find('@')
p3 = s.find('$')
if p1 < p2-1 and p1 >= 1 and p2 >= 3 and len(s) > p2+1 or p3 != -1:
return True
else:
return False
def isServerHostmask(s):
"""s => bool
Returns True if s is a valid server hostmask."""
return not isUserHostmask(s)
def nickFromHostmask(hostmask):
"""hostmask => nick
Returns the nick from a user hostmask."""
assert isUserHostmask(hostmask)
return hostmask.split('!', 1)[0]
def userFromHostmask(hostmask):
"""hostmask => user
Returns the user from a user hostmask."""
assert isUserHostmask(hostmask)
return hostmask.split('!', 1)[1].split('@', 1)[0]
def hostFromHostmask(hostmask):
"""hostmask => host
Returns the host from a user hostmask."""
assert isUserHostmask(hostmask)
return hostmask.split('@', 1)[1]
def splitHostmask(hostmask):
"""hostmask => (nick, user, host)
Returns the nick, user, host of a user hostmask."""
assert isUserHostmask(hostmask)
nick, rest = hostmask.split('!', 1)
user, host = rest.split('@', 1)
return (nick, user, host)
def joinHostmask(nick, ident, host):
"""nick, user, host => hostmask
Joins the nick, ident, host into a user hostmask."""
assert nick and ident and host
return '%s!%s@%s' % (nick, ident, host)
_rfc1459trans = string.maketrans(string.ascii_uppercase + r'\[]~',
string.ascii_lowercase + r'|{}^')
def toLower(s, casemapping=None):
"""s => s
Returns the string s lowered according to IRC case rules."""
if casemapping is None or casemapping == 'rfc1459':
return s.translate(_rfc1459trans)
elif casemapping == 'ascii': # freenode
return s.lower()
else:
raise ValueError, 'Invalid casemapping: %r' % casemapping
def strEqual(nick1, nick2):
"""s1, s2 => bool
Returns True if nick1 == nick2 according to IRC case rules."""
assert isinstance(nick1, basestring)
assert isinstance(nick2, basestring)
return toLower(nick1) == toLower(nick2)
nickEqual = strEqual
_nickchars = r'[]\`_^{|}'
nickRe = re.compile(r'^[A-Za-z%s][-0-9A-Za-z%s]*$'
% (re.escape(_nickchars), re.escape(_nickchars)))
def isNick(s, strictRfc=True, nicklen=None):
"""s => bool
Returns True if s is a valid IRC nick."""
if strictRfc:
ret = bool(nickRe.match(s))
if ret and nicklen is not None:
ret = len(s) <= nicklen
return ret
else:
return not isChannel(s) and \
not isUserHostmask(s) and \
not ' ' in s and not '!' in s
def isChannel(s, chantypes='#&+!', channellen=50):
"""s => bool
Returns True if s is a valid IRC channel name."""
return s and \
',' not in s and \
'\x07' not in s and \
s[0] in chantypes and \
len(s) <= channellen and \
len(s.split(None, 1)) == 1
_patternCache = utils.structures.CacheDict(1000)
def _hostmaskPatternEqual(pattern, hostmask):
try:
return _patternCache[pattern](hostmask) is not None
except KeyError:
# We make our own regexps, rather than use fnmatch, because fnmatch's
        # case-insensitivity is not IRC's case-insensitivity.
fd = sio()
for c in pattern:
if c == '*':
fd.write('.*')
elif c == '?':
fd.write('.')
elif c in '[{':
fd.write('[[{]')
elif c in '}]':
fd.write(r'[}\]]')
elif c in '|\\':
fd.write(r'[|\\]')
elif c in '^~':
fd.write('[~^]')
else:
fd.write(re.escape(c))
fd.write('$')
f = re.compile(fd.getvalue(), re.I).match
_patternCache[pattern] = f
return f(hostmask) is not None
_hostmaskPatternEqualCache = utils.structures.CacheDict(1000)
def hostmaskPatternEqual(pattern, hostmask):
"""pattern, hostmask => bool
Returns True if hostmask matches the hostmask pattern pattern."""
try:
return _hostmaskPatternEqualCache[(pattern, hostmask)]
except KeyError:
b = _hostmaskPatternEqual(pattern, hostmask)
_hostmaskPatternEqualCache[(pattern, hostmask)] = b
return b
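# Illustrative examples (added; not part of the original module): wildcards in
# the pattern are matched IRC-case-insensitively against the hostmask.
#   hostmaskPatternEqual('*!*@*.example.com', 'Nick!user@foo.example.com') -> True
#   hostmaskPatternEqual('*!*@*.example.com', 'Nick!user@example.org')     -> False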
def banmask(hostmask):
"""Returns a properly generic banning hostmask for a hostmask.
>>> banmask('nick!user@host.domain.tld')
'*!*@*.domain.tld'
>>> banmask('nick!user@10.0.0.1')
'*!*@10.0.0.*'
"""
assert isUserHostmask(hostmask)
host = hostFromHostmask(hostmask)
if utils.net.isIP(host):
L = host.split('.')
L[-1] = '*'
return '*!*@' + '.'.join(L)
elif utils.net.isIPV6(host):
L = host.split(':')
L[-1] = '*'
return '*!*@' + ':'.join(L)
else:
if '.' in host:
return '*!*@*%s' % host[host.find('.'):]
else:
return '*!*@' + host
_plusRequireArguments = 'fjklvobqeI'
_minusRequireArguments = 'fjklvobqeI'
def separateModes(args):
"""Separates modelines into single mode change tuples. Basically, you
should give it the .args of a MODE IrcMsg.
Examples:
>>> separateModes(['+ooo', 'jemfinch', 'StoneTable', 'philmes'])
[('+o', 'jemfinch'), ('+o', 'StoneTable'), ('+o', 'philmes')]
>>> separateModes(['+o-o', 'jemfinch', 'PeterB'])
[('+o', 'jemfinch'), ('-o', 'PeterB')]
>>> separateModes(['+s-o', 'test'])
[('+s', None), ('-o', 'test')]
>>> separateModes(['+sntl', '100'])
[('+s', None), ('+n', None), ('+t', None), ('+l', 100)]
"""
if not args:
return []
modes = args[0]
assert modes[0] in '+-', 'Invalid args: %r' % args
args = list(args[1:])
ret = []
for c in modes:
if c in '+-':
last = c
else:
if last == '+':
requireArguments = _plusRequireArguments
else:
requireArguments = _minusRequireArguments
if c in requireArguments:
arg = args.pop(0)
try:
arg = int(arg)
except ValueError:
pass
ret.append((last + c, arg))
else:
ret.append((last + c, None))
return ret
def joinModes(modes):
"""[(mode, targetOrNone), ...] => args
Joins modes of the same form as returned by separateModes."""
args = []
modeChars = []
currentMode = '\x00'
for (mode, arg) in modes:
if arg is not None:
args.append(arg)
if not mode.startswith(currentMode):
currentMode = mode[0]
modeChars.append(mode[0])
modeChars.append(mode[1])
args.insert(0, ''.join(modeChars))
return args
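# Illustrative example (added; not part of the original module): joinModes
# collapses (mode, arg) pairs back into MODE-style arguments.
#   joinModes([('+o', 'jemfinch'), ('-v', 'PeterB')]) -> ['+o-v', 'jemfinch', 'PeterB']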
def bold(s):
"""Returns the string s, bolded."""
return '\x02%s\x02' % s
def reverse(s):
"""Returns the string s, reverse-videoed."""
return '\x16%s\x16' % s
def underline(s):
"""Returns the string s, underlined."""
return '\x1F%s\x1F' % s
# Definition of mircColors dictionary moved below because it became an IrcDict.
def mircColor(s, fg=None, bg=None):
"""Returns s with the appropriate mIRC color codes applied."""
if fg is None and bg is None:
return s
elif bg is None:
fg = mircColors[str(fg)]
return '\x03%s%s\x03' % (fg.zfill(2), s)
elif fg is None:
bg = mircColors[str(bg)]
# According to the mirc color doc, a fg color MUST be specified if a
# background color is specified. So, we'll specify 00 (white) if the
# user doesn't specify one.
return '\x0300,%s%s\x03' % (bg.zfill(2), s)
else:
fg = mircColors[str(fg)]
bg = mircColors[str(bg)]
# No need to zfill fg because the comma delimits.
return '\x03%s,%s%s\x03' % (fg, bg.zfill(2), s)
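# Illustrative examples (added; not part of the original module):
#   mircColor('hi', 'red')                 -> '\x0304hi\x03'
#   mircColor('hi', fg='blue', bg='white') -> '\x032,00hi\x03'
#   mircColor('hi')                        -> 'hi'   (no colors requested)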
def canonicalColor(s, bg=False, shift=0):
"""Assigns an (fg, bg) canonical color pair to a string based on its hash
value. This means it might change between Python versions. This pair can
be used as a *parameter to mircColor. The shift parameter is how much to
right-shift the hash value initially.
"""
h = hash(s) >> shift
fg = h % 14 + 2 # The + 2 is to rule out black and white.
if bg:
        bg = (h >> 4) & 3 # The 5th and 6th least significant bits.
if fg < 8:
bg += 8
else:
bg += 2
return (fg, bg)
else:
return (fg, None)
def stripBold(s):
"""Returns the string s, with bold removed."""
return s.replace('\x02', '')
_stripColorRe = re.compile(r'\x03(?:\d{1,2},\d{1,2}|\d{1,2}|,\d{1,2}|)')
def stripColor(s):
"""Returns the string s, with color removed."""
return _stripColorRe.sub('', s)
def stripReverse(s):
"""Returns the string s, with reverse-video removed."""
return s.replace('\x16', '')
def stripUnderline(s):
"""Returns the string s, with underlining removed."""
return s.replace('\x1f', '').replace('\x1F', '')
def stripFormatting(s):
"""Returns the string s, with all formatting removed."""
# stripColor has to go first because of some strings, check the tests.
s = stripColor(s)
s = stripBold(s)
s = stripReverse(s)
s = stripUnderline(s)
return s.replace('\x0f', '').replace('\x0F', '')
class FormatContext(object):
def __init__(self):
self.reset()
def reset(self):
self.fg = None
self.bg = None
self.bold = False
self.reverse = False
self.underline = False
def start(self, s):
"""Given a string, starts all the formatters in this context."""
if self.bold:
s = '\x02' + s
if self.reverse:
s = '\x16' + s
if self.underline:
s = '\x1f' + s
if self.fg is not None or self.bg is not None:
s = mircColor(s, fg=self.fg, bg=self.bg)[:-1] # Remove \x03.
return s
def end(self, s):
"""Given a string, ends all the formatters in this context."""
if self.bold or self.reverse or \
self.fg or self.bg or self.underline:
# Should we individually end formatters?
s += '\x0f'
return s
class FormatParser(object):
def __init__(self, s):
self.fd = sio(s)
self.last = None
def getChar(self):
if self.last is not None:
c = self.last
self.last = None
return c
else:
return self.fd.read(1)
def ungetChar(self, c):
self.last = c
def parse(self):
context = FormatContext()
c = self.getChar()
while c:
if c == '\x02':
context.bold = not context.bold
elif c == '\x16':
context.reverse = not context.reverse
elif c == '\x1f':
context.underline = not context.underline
elif c == '\x0f':
context.reset()
elif c == '\x03':
self.getColor(context)
c = self.getChar()
return context
def getInt(self):
i = 0
setI = False
c = self.getChar()
while c.isdigit() and i < 100:
setI = True
i *= 10
i += int(c)
c = self.getChar()
self.ungetChar(c)
if setI:
return i
else:
return None
def getColor(self, context):
context.fg = self.getInt()
c = self.getChar()
if c == ',':
context.bg = self.getInt()
def wrap(s, length):
processed = []
chunks = textwrap.wrap(s, length)
context = None
for chunk in chunks:
if context is not None:
chunk = context.start(chunk)
context = FormatParser(chunk).parse()
processed.append(context.end(chunk))
return processed
def isValidArgument(s):
"""Returns whether s is strictly a valid argument for an IRC message."""
return '\r' not in s and '\n' not in s and '\x00' not in s
def safeArgument(s):
"""If s is unsafe for IRC, returns a safe version."""
if isinstance(s, unicode):
s = s.encode('utf-8')
elif not isinstance(s, basestring):
debug('Got a non-string in safeArgument: %r', s)
s = str(s)
if isValidArgument(s):
return s
else:
return repr(s)
def replyTo(msg):
"""Returns the appropriate target to send responses to msg."""
if isChannel(msg.args[0]):
return msg.args[0]
else:
return msg.nick
def dccIP(ip):
"""Returns in IP in the proper for DCC."""
assert utils.net.isIP(ip), \
'argument must be a string ip in xxx.yyy.zzz.www format.'
i = 0
x = 256**3
for quad in ip.split('.'):
i += int(quad)*x
x /= 256
return i
def unDccIP(i):
"""Takes an integer DCC IP and return a normal string IP."""
    assert isinstance(i, (int, long)), '%r is not a number.' % i
L = []
while len(L) < 4:
L.append(i % 256)
i /= 256
L.reverse()
return '.'.join(utils.iter.imap(str, L))
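# Illustrative examples (added; not part of the original module): DCC encodes a
# dotted-quad IP as a single 32-bit integer, most significant octet first.
#   dccIP('127.0.0.1')  -> 2130706433
#   unDccIP(2130706433) -> '127.0.0.1'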
class IrcString(str):
"""This class does case-insensitive comparison and hashing of nicks."""
def __new__(cls, s=''):
x = super(IrcString, cls).__new__(cls, s)
x.lowered = toLower(x)
return x
def __eq__(self, s):
try:
return toLower(s) == self.lowered
except:
return False
def __ne__(self, s):
return not (self == s)
def __hash__(self):
return hash(self.lowered)
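# Illustrative example (added; not part of the original module): IrcString
# compares and hashes according to RFC 1459 case rules.
#   IrcString('Nick[One]') == 'nick{one}'  -> True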
class IrcDict(utils.InsensitivePreservingDict):
"""Subclass of dict to make key comparison IRC-case insensitive."""
def key(self, s):
if s is not None:
s = toLower(s)
return s
class IrcSet(utils.NormalizingSet):
"""A sets.Set using IrcStrings instead of regular strings."""
def normalize(self, s):
return IrcString(s)
def __reduce__(self):
return (self.__class__, (list(self),))
class FloodQueue(object):
timeout = 0
def __init__(self, timeout=None, queues=None):
if timeout is not None:
self.timeout = timeout
if queues is None:
queues = IrcDict()
self.queues = queues
def __repr__(self):
return 'FloodQueue(timeout=%r, queues=%s)' % (self.timeout,
repr(self.queues))
def key(self, msg):
return msg.user + '@' + msg.host
def getTimeout(self):
if callable(self.timeout):
return self.timeout()
else:
return self.timeout
def _getQueue(self, msg, insert=True):
key = self.key(msg)
try:
return self.queues[key]
except KeyError:
if insert:
# python--
# instancemethod.__repr__ calls the instance.__repr__, which
# means that our __repr__ calls self.queues.__repr__, which
# calls structures.TimeoutQueue.__repr__, which calls
# getTimeout.__repr__, which calls our __repr__, which calls...
getTimeout = lambda : self.getTimeout()
q = utils.structures.TimeoutQueue(getTimeout)
self.queues[key] = q
return q
else:
return None
def enqueue(self, msg, what=None):
if what is None:
what = msg
q = self._getQueue(msg)
q.enqueue(what)
def len(self, msg):
q = self._getQueue(msg, insert=False)
if q is not None:
return len(q)
else:
return 0
def has(self, msg, what=None):
q = self._getQueue(msg, insert=False)
if q is not None:
if what is None:
what = msg
for elt in q:
if elt == what:
return True
return False
mircColors = IrcDict({
'white': '0',
'black': '1',
'blue': '2',
'green': '3',
'red': '4',
'brown': '5',
'purple': '6',
'orange': '7',
'yellow': '8',
'light green': '9',
'teal': '10',
'light blue': '11',
'dark blue': '12',
'pink': '13',
'dark grey': '14',
'light grey': '15',
'dark gray': '14',
'light gray': '15',
})
# We'll map integers to their string form so mircColor is simpler.
for (k, v) in mircColors.items():
if k is not None: # Ignore empty string for None.
sv = str(v)
mircColors[sv] = sv
def standardSubstitute(irc, msg, text, env=None):
"""Do the standard set of substitutions on text, and return it"""
if isChannel(msg.args[0]):
channel = msg.args[0]
else:
channel = 'somewhere'
def randInt():
return str(random.randint(-1000, 1000))
def randDate():
t = pow(2,30)*random.random()+time.time()/4.0
return time.ctime(t)
def randNick():
if channel != 'somewhere':
L = list(irc.state.channels[channel].users)
if len(L) > 1:
n = msg.nick
while n == msg.nick:
n = utils.iter.choice(L)
return n
else:
return msg.nick
else:
return 'someone'
ctime = time.ctime()
localtime = time.localtime()
vars = IrcDict({
'who': msg.nick,
'nick': msg.nick,
'user': msg.user,
'host': msg.host,
'channel': channel,
'botnick': irc.nick,
'now': ctime, 'ctime': ctime,
'randnick': randNick, 'randomnick': randNick,
'randdate': randDate, 'randomdate': randDate,
'rand': randInt, 'randint': randInt, 'randomint': randInt,
'today': time.strftime('%d %b %Y', localtime),
'year': localtime[0],
'month': localtime[1],
'monthname': time.strftime('%b', localtime),
'date': localtime[2],
'day': time.strftime('%A', localtime),
'h': localtime[3], 'hr': localtime[3], 'hour': localtime[3],
'm': localtime[4], 'min': localtime[4], 'minute': localtime[4],
's': localtime[5], 'sec': localtime[5], 'second': localtime[5],
'tz': time.tzname[time.daylight],
})
if env is not None:
vars.update(env)
return utils.str.perlVariableSubstitute(vars, text)
if __name__ == '__main__':
import sys, doctest
doctest.testmod(sys.modules['__main__'])
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| jrabbit/ubotu-fr | src/ircutils.py | Python | bsd-3-clause | 20,860 |
"""Package initialisation file"""
__author__ = "Mark Slater <mws@hep.ph.bham.ac.uk>"
__date__ = "10 June 2008"
__version__ = "1.0"
from .Remote import Remote
| ganga-devs/ganga | ganga/GangaCore/Lib/Remote/__init__.py | Python | gpl-3.0 | 161 |
# Made by Mr. - Version 0.3 by DrLecter
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "320_BonesTellFuture"
BONE_FRAGMENT = 809
ADENA = 57
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
if event == "30359-04.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if id == CREATED :
st.setState(STARTING)
st.set("cond","0")
if st.getInt("cond")==0 :
if player.getRace().ordinal() != 2 :
htmltext = "30359-00.htm"
st.exitQuest(1)
elif player.getLevel() >= 10 :
htmltext = "30359-03.htm"
else:
htmltext = "30359-02.htm"
st.exitQuest(1)
else :
if st.getQuestItemsCount(BONE_FRAGMENT)<10 :
htmltext = "30359-05.htm"
else :
htmltext = "30359-06.htm"
st.giveItems(ADENA,8470)
st.takeItems(BONE_FRAGMENT,-1)
st.exitQuest(1)
st.playSound("ItemSound.quest_finish")
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
count=st.getQuestItemsCount(BONE_FRAGMENT)
if count<10 and st.getRandom(10)>7 :
st.giveItems(BONE_FRAGMENT,1)
if count == 9 :
st.playSound("ItemSound.quest_middle")
st.set("cond","2")
else :
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(320,qn,"Bones Tell Future")
CREATED = State('Start', QUEST)
STARTING = State('Starting', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(30359)
QUEST.addTalkId(30359)
QUEST.addKillId(20517)
QUEST.addKillId(20518)
STARTED.addQuestDrop(20517,BONE_FRAGMENT,1) | zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/quests/320_BonesTellFuture/__init__.py | Python | gpl-3.0 | 2,291 |
import os
import re
from datetime import *
import feedparser
from newspaper import Article,Config
import newspaper
import urllib.request
def writeFile(outPath,content):
    # open() raises on failure rather than returning a falsy handle, so guard
    # the write with try/except instead of testing the file object.
    try:
        with open(outPath, 'w') as f:
            f.write(content)
    except IOError as e:
        print("Error Opening File " + outPath + ": " + str(e))
def writeHtml(outPath,content,title,link,date,authors,tags):
    print('date: {} authors: {} tags: {}'.format(date, authors, tags))
html = '''<!DOCTYPE html>
<html lang="zh-cn">
<head>
<meta charset="utf-8"/>
<title>
'''
html = html + title + '</title>'
if(isinstance(date,datetime)):
date = date.strftime('%Y-%m-%d %H:%M')
if date != '':
html = html + '<meta name="date" content="' + date + '"/>'
if authors != '':
html = html + '<meta name="authors" content="' + authors + '" />'
if tags != '':
html = html + '<meta name="tags" content="' + tags + '" />'
html = html + '</head><body>'
html = html + 'From:<a href=' + link + '>' + link + '</a><br><br>'
html = html + content + '</body></html>'
force = 0
if(force == 0):
if os.path.exists(outPath):
print("The file " + outPath + " is existed, will ignore.")
else:
writeFile(outPath,html)
print("save to:" + outPath)
else:
writeFile(outPath,html)
print("save to:" + outPath)
def getDomain(url):
m = re.search(r'http[s]?://(.*?)/',url)
if m:
return m.group()
else:
return ''
def fixLinks(html,link):
def f(m):
return link + m.group(1)
reobj = re.compile('href="(/.*?)"')
new = reobj.sub(f,html)
return new
def getLinks(url,regex):
website = urllib.request.urlopen(url)
html = website.read().decode('utf-8')
regex_new = '"(' + regex + ')"'
print('regex:' + regex_new)
links = re.findall(regex_new, html)
return list(set(links))
def downloadFile(link,category,config,outputDir,date,tags):
print('download article from:' + link)
try:
try:
a = Article(link,config=config, keep_article_html=True)
a.download()
a.parse()
except Exception as e:
print("Error for download and parser:" + link)
print(e)
return 0
if a.title == '':
print("cannot find title for " + link)
return 0
print('title:' + a.title)
title2 = re.sub(' ','_',a.title)
title2 = re.sub('/','_',title2)
outFileDir = outputDir + os.sep + category + os.sep
if not os.path.exists(outFileDir):
os.makedirs(outFileDir)
outPath = outFileDir + title2 + '.html'
content = a.text
content_html = a.article_html
date2 = ''
try:
date2 = a.publish_date
except Exception as e:
print("Warning:cannot find date")
if(date2):
date = date2
authors = ','.join(a.authors)
if(content_html):
domain = getDomain(link)
content_html = fixLinks(content_html,domain)
writeHtml(outPath,content_html,a.title,link,date,authors,tags)
elif(content):
writeHtml(outPath,content,a.title,link,date,authors,tags)
else:
print('Error:cannot find content')
except Exception as e:
print('Exception:' + link)
print(e)
return 0
return 1
def downloadArticles(url,category,config,outputDir,max_number,regex_for_links,tags):
print('download from articles:' + url)
all = getLinks(url,regex_for_links)
for article in all[:max_number]:
downloadFile(article,category,config,outputDir,'',tags)
def downloadFeed(feed,category,config,outputDir,max_number,tags):
print('download from feed:' + feed)
d = feedparser.parse(feed)
for entry in d.entries[:max_number]:
print('entry:' + entry.title + ' ' + entry.link)
#today = datetime.today()
#days_ago = today - timedelta(days=max_days)
#d = datetime(entry.published_parsed)
#if(d < days_ago):
# continue
date = ''
try:
date = entry.published
except Exception as e:
print(e)
downloadFile(entry.link,category,config,outputDir,date,tags)
def downloadByConfig(urls,config,outputDir,max_number):
print('download from config')
for category in urls.keys():
print('category:' + category)
us = urls[category]
for u in us:
u2,type,regex_for_links,tags = u.split(',')
tags = re.sub(':',',',tags)
if(type == 'feed'):
downloadFeed(u2,category,config,outputDir,max_number,tags)
elif(type == 'articles'):
                downloadArticles(u2,category,config,outputDir,max_number,regex_for_links,tags)
else: #article
downloadFile(u2,category,config,outputDir,'',tags)
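# Minimal usage sketch (added example): the feed URL, category and output
# directory below are placeholders, not part of the original project. Each
# entry is a "url,type,regex_for_links,tags" string, where type is 'feed',
# 'articles' or anything else for a single article, and tags are
# colon-separated.
if __name__ == '__main__':
    example_urls = {
        'tech': ['http://example.com/rss.xml,feed,,python:news'],
    }
    downloadByConfig(example_urls, Config(), './output', 5)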
| ciandcd/ciandcd-web | htmlExtractor/HECommon.py | Python | mit | 5,015 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 Paul Tremberth, Newlynn Labs
# See LICENSE for details.
import graph
import schema
import bz2
import csv
import traceback
import sys
# ----------------------------------------------------------------------
MERGED = '***MERGED***'
class CsvBatchWriter(object):
CSV_BATCH_SIZE = 100000
DEBUG = False
def __init__(self, filename, batch_size=CSV_BATCH_SIZE):
self.filename = filename
self.fp = None
self.csvwriter = None
self.batch_size = batch_size
self.output = []
self.so_far = 0
def initialize(self, header_fields):
self.fp = open(self.filename, 'wb')
self.csvwriter = csv.DictWriter(self.fp, header_fields, dialect="excel-tab")
self.csvwriter.writeheader()
def append(self, elem):
self.output.append(elem)
self.test_flush()
def extend(self, elems):
self.output.extend(elems)
self.test_flush()
def test_flush(self):
if len(self.output) >= self.batch_size:
self.flush()
def flush(self, delete_elements=False):
if self.output:
self.csvwriter.writerows(self.output)
self.so_far += len(self.output)
if self.DEBUG:
print " == %d rows written to %s (total=%d) ==" % (
len(self.output), self.filename, self.so_far)
if delete_elements:
for n in self.output:
del n
self.output = []
def close(self):
if self.fp:
self.fp.close()
class GraphExporter(object):
CSV_BATCH_SIZE = 100000
SUPPORTED_OUTPUT_FORMATS = ['neo4j']
DEBUG = True
def __init__(self, schema, format, dialect=csv.excel_tab, pretend=False):
# only supported format for now is Neo4j batch-import
# see: https://github.com/jexp/batch-import
if not self.supported_format(format):
raise ValueError
self.format = format
self.dialect = dialect
self.node_list = graph.NodeList()
self.relation_list = graph.RelationList()
self.nodes_csv_fields = None # used as CSV header column names
self.rels_csv_fields = None # used as CSV header column names
self.schema = dict((entity.name, entity) for entity in schema)
self.dumpfiles = {}
self.dumpfile_fields = {}
self.entity_order = []
self.output_nodes_files = {}
self.output_relations_files = {}
self.output_indexes_files = {}
self.pretend = pretend
def supported_format(self, format):
return format.lower() in [f.lower() for f in self.SUPPORTED_OUTPUT_FORMATS]
def feed_dumpfile(self, entity, filename, fields=None):
self.dumpfiles[entity] = filename
if fields:
self.dumpfile_fields[entity] = fields
self.entity_order.append(entity)
def set_output_nodes_file(self, entity, filename):
self.output_nodes_files[entity] = filename
def set_output_relations_file(self, entity, filename):
self.output_relations_files[entity] = filename
def set_output_indexes_file(self, entity, filename):
self.output_indexes_files[entity] = filename
def run(self):
self.read_schema()
#self.step_set_CSV_header_fields()
self.export()
def read_schema(self):
self.read_nodes_csv_fields()
self.read_rels_csv_fields()
def read_nodes_csv_fields(self):
# all Nodes SHOULD have their entity as a property
fields_begin = ['kind']
node_properties = []
for entity_name, entity in self.schema.iteritems():
if entity_name not in self.entity_order:
continue
if entity.fields:
for field in entity.fields:
# the following could be used to add a column type to CSV header fields
#node_properties.append(
#"%s%s" % (
#field.name,
#":%s" % field.db_field_type
#if field.db_field_type else ''))
node_properties.append(field.name)
self.nodes_csv_fields = fields_begin + list(set(node_properties) - set(fields_begin))
def read_rels_csv_fields(self):
fields_begin = ['start', 'end', 'rel_type']
rels_properties = []
for entity_name, entity in self.schema.iteritems():
if entity_name not in self.entity_order:
continue
if entity.relations:
for rel in entity.relations:
rels_properties.extend([prop.name for prop in rel.properties])
self.rels_csv_fields = fields_begin + list(
set(rels_properties) - set(fields_begin))
def export(self):
"""
Read dump files and write nodes and relations at the same time
"""
# write all nodes in ONE file and all relations in ONE file
# (works ONLY for Neo4j batch-import format)
onodes_filename = self.output_nodes_files.get(MERGED)
orels_filename = self.output_relations_files.get(MERGED)
        nodes_writer, rels_writer = None, None
if onodes_filename:
if not self.pretend:
nodes_writer = CsvBatchWriter(onodes_filename, self.CSV_BATCH_SIZE)
nodes_writer.initialize(self.nodes_csv_fields)
if orels_filename:
if not self.pretend:
rels_writer = CsvBatchWriter(orels_filename, self.CSV_BATCH_SIZE)
rels_writer.initialize(self.rels_csv_fields)
index_writers = {}
# loop on dump files in order
if not self.entity_order:
self.entity_order = list(self.dumpfiles.iterkeys())
for entity_name in self.entity_order:
if not self.dumpfiles.get(entity_name) or not self.schema.get(entity_name):
if self.DEBUG:
print "no dump file or not schema configured for entity", entity_name
continue
if self.DEBUG:
print "--- processing file", self.dumpfiles[entity_name]
entity = self.schema.get(entity_name)
with self.open_dumpfile(self.dumpfiles[entity_name]) as dumpfile:
self.create_index_writers_if_needed(entity, index_writers)
self.export_tabledump(entity, dumpfile,
nodes_writer, rels_writer, index_writers)
        # pending relations if any (the relation list is normally empty because
        # relations are written inline above)
        if not self.pretend:
            self.export_rels_csv(writer=None)
        # close all CSV writers
        if nodes_writer:
            nodes_writer.close()
        if rels_writer:
            rels_writer.close()
for w in index_writers.itervalues():
w.close()
def create_index_writers_if_needed(self, entity, index_writers):
indexes = entity.get_indexed_fields()
if indexes:
for index_name, indexed_fields in indexes.iteritems():
if index_name not in index_writers:
# check if output file has been configured for this index
index_filename = self.output_indexes_files.get(index_name)
if not index_filename:
print "no output file for index %s" % index_name
continue
# add a "node id" field
header_fields = ['node_id'] + [field.name for field in indexed_fields]
index_writer = CsvBatchWriter(index_filename, self.CSV_BATCH_SIZE)
index_writer.initialize(header_fields)
index_writers[index_name] = index_writer
def export_tabledump(self, entity, fp,
nodes_writer, rels_writer, index_writers):
stats = {'nodes': 0, 'rels': 0, 'indexed': 0}
if not entity:
print "know nothing about %s" % entity.name
return
PRINT_FREQUENCY = 25000
# should we write something to one or more indexes?
if index_writers:
indexes = entity.get_indexed_fields()
else:
indexes = None
node_id = 0
# read CSV file line by line
#print self.dialect
csvreader = csv.DictReader(fp, dialect=self.dialect)
for cnt, record in enumerate(csvreader, start=1):
node = None
# create a new node
primary_key_field = entity.get_primary_key_field()
if primary_key_field:
node = graph.Node(record, entity)
node_id = self.node_list.add_node(node)
if not node_id:
# FIXME: find something better
raise LookupError
# add it to the write queue
nodes_writer.append(node.get_dict(self.nodes_csv_fields))
stats['nodes'] += 1
if indexes:
for index_name, indexed_fields in indexes.iteritems():
index_writers.get(index_name).append(
node.get_dict(
['node_id'] + [field.name for field in indexed_fields]))
stats['indexed'] += 1
# add relations if needed
new_rels = [rel.get_dict(self.rels_csv_fields)
for rel in self.iter_relations(entity, record)]
rels_writer.extend(new_rels)
stats['rels'] += len(new_rels)
# hint to gc; there's surely something prettier
if node:
del node
del record
if self.DEBUG:
if not (cnt % PRINT_FREQUENCY):
print "\r line %8d - nodes(%8d), rels(%8d), idx(%8d) -- last node ID %d" % (
cnt, stats['nodes'], stats['rels'], stats['indexed'], node_id),
sys.stdout.flush()
if self.DEBUG:
print
print " --> Finished with %8d of entity %s" % (cnt, entity.name)
print "nodes(%8d), rels(%8d), idx(%8d) -- last node ID %d" % (
stats['nodes'], stats['rels'], stats['indexed'], node_id)
# write everything that's pending
writers = [nodes_writer, rels_writer] + list(index_writers.itervalues())
for w in writers:
if w:
w.flush()
def iter_relations(self, entity, record):
relation_definitions = entity.relations
if not relation_definitions:
return
for reldef in relation_definitions:
try:
origin_column = record.get(reldef.origin.db_column)
target_column = record.get(reldef.target.db_column)
if not origin_column and reldef.origin.null:
continue
if not target_column and reldef.target.null:
continue
origin_node_pos, target_node_pos = None, None
origin_node_pos = self.node_list.lookup_node_pos(
reldef.origin.entity,
int(origin_column))
target_node_pos = self.node_list.lookup_node_pos(
reldef.target.entity,
int(target_column))
if not origin_node_pos or not target_node_pos:
continue
# else:
# FIXME: store an unresolved relation
properties = {}
for prop in reldef.properties:
if isinstance(prop.value, schema.Column):
properties[prop.name] = record.get(prop.value.name)
else:
properties[prop.name] = prop.value
yield graph.Relation(
origin_node_pos,
target_node_pos,
properties)
except Exception, e:
traceback.print_exc()
raise e
def resolve_relation(self, r):
if not r.end:
target_node_pos = self.node_list.lookup_node_pos(
r.start_target_entity, r.start_fk)
if target_node_pos:
r.end = target_node_pos
def export_rels_csv(self, fp=None, writer=None):
BATCH_SIZE = 10000
if not writer and fp:
writer = csv.DictWriter(fp, self.rels_csv_fields, dialect="excel-tab")
writer.writeheader()
size = len(self.relation_list.relation_list)
print "%d relations to write" % size
output_relations = []
for cnt, rel in enumerate(self.relation_list.iter_rels(), start=1):
print "\r %8d/%8d (%.1f%%)" % (cnt, size, 100*cnt/size),
if not rel.end:
self.resolve_relation(rel)
output_relations.append(rel.get_dict())
del rel
if not (cnt % BATCH_SIZE):
self._flush_rows(writer, output_relations)
output_relations = []
if output_relations:
self._flush_rows(writer, output_relations)
print
@classmethod
def open_dumpfile(cls, filename):
if filename.endswith(('bz2',)):
return bz2.BZ2File(filename, 'rb')
else:
return open(filename, 'rb')
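# Illustrative usage sketch (added; the entity names, dump files and output
# paths below are assumptions, not from the original project):
#   exporter = GraphExporter(my_schema_entities, 'neo4j')
#   exporter.feed_dumpfile('artist', 'dumps/artist.tsv.bz2')
#   exporter.set_output_nodes_file(MERGED, 'out/nodes.csv')
#   exporter.set_output_relations_file(MERGED, 'out/rels.csv')
#   exporter.set_output_indexes_file('artist_name', 'out/artist_name_index.csv')
#   exporter.run()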
| redapple/sql2graph | sql2graph/export.py | Python | mit | 13,462 |
import os
import strutil
from astrodata.adutils import gemLog
log = None
"""This file contains the following utilities:
checkImageParam( image )
checkOutputParam( outfile, defaultValue='out.fits' )
def verifyOutlist( inlist, outlist )
checkParam( parameter, paramType, defaultValue, compareValue=0.0 )
checkFileFitExtension( filename )
"""
def checkImageParam(image, logBadlist=False):
"""
Tries to accomplish the same thing as most IRAF tasks in regards to how they
handle the input file parameter.
@param image:
What is supported:
Strings:
1) If the string starts with '@' then it is expected that after the '@' is
a filename with the input image filenames in it.
2) Input image filename.
List:
1) Must be a list of strings. It is assumed the each string refers an
input image filename.
@type image: String or List of Strings
@param logBadlist: Controls if lists of images that cannot be accessed are
written to the log or not.
@return: The list of filenames of images to be run. If an error occurs,
None is returned.
@rtype: list, None
"""
global log
if log==None:
log = gemLog.getGeminiLog()
root = os.path.dirname(image)
imageName = os.path.basename(image)
inList = []
if type(imageName) == str and len(imageName) > 0:
if imageName[0] == '@':
imageName = imageName[1:]
try:
image = os.path.join( root, imageName )
imageFile = open(image, 'r')
readList = imageFile.readlines()
# Removes any newline with strutil method 'chomp'
for i in range(len(readList)):
readList[i] = strutil.chomp(readList[i])
if (readList[i] == '') or (readList[i] == '\n')\
or (readList[i][0] == '#'):
continue
if os.path.dirname(readList[i]) == '':
readList[i] = os.path.join(root, readList[i])
nospace_str = readList[i].replace(' ','')
# Adds .fits if there is none
inList.append(strutil.appendFits(nospace_str))
imageFile.close()
except:
log.critical('An error occurred when opening and reading '+
'from the image '+os.path.basename(image))
return None
else:
inList.append(image)
# append fits? no @@GENERALIZE inList[0] = strutil.appendFits(inList[0])
# Exception for an image of type 'List'
elif type(image) == list:
for img in image:
if type(img) == str:
inList.append(img)
else:
log.warning('Type '+str(type(image))+
' is not supported. The only supported types are String'+
' and List of Strings.')
return None
else:
log.warning('Type'+ str(type(image))+
'is not supported. The only supported types are String '+
'and List of Strings.')
return None
outList = []
badList = []
for img in inList:
if not os.access(img,os.R_OK):
log.error('Cannot read file: '+str(img))
badList.append(img)
else:
outList.append(img)
if badList:
if logBadlist:
err = "\n\t".join(badList)
log.warning("Some files not found or cannot be opened:\n\t"+err)
return None
return outList
#------------------------------------------------------------------------------------------
def checkOutputParam(outfile, defaultValue='out.fits'):
"""
Tries to accomplish the same thing as most IRAF tasks in regards to
how they handle the output file parameter.
@param outfile:
What is supported:
Strings:
1) If the string starts with '@' then it is expected that after the '@' is a
filename with the output filenames in it.
2) Output file name.
List:
    1) Must be a list of strings. It is assumed that each string refers to a
desired output file name.
@type outfile: String or List of Strings
@param defaultValue: If the outfile is '', then the defaultValue will
returned.
@type defaultValue: str
@return: A list with all the desired output names. If an error occurs,
None is returned.
@rtype: list, None
"""
global log
if log==None:
log = gemLog.getGeminiLog()
root = os.path.dirname(outfile)
outfileName = os.path.basename(outfile)
outList = []
if type(outfileName) == str:
outfile = checkParam(outfile, type(''), defaultValue)
if outfileName[0] == '@':
outfileName = outfileName[1:]
try:
outfile = os.path.join( root, outfileName )
outfileFile = open(outfile, 'r')
readList = outfileFile.readlines()
# Removes any newline with strutil method 'chomp'
for i in range(len(readList)):
readList[i] = strutil.chomp(readList[i])
if (readList[i] == '') or (readList[i] == '\n')\
or (readList[i][0] == '#'):
continue
if os.path.dirname(readList[i]) == '':
readList[i] = os.path.join(root, readList[i])
# Adds .fits if there is none
outList.append(strutil.appendFits(readList[i]))
except:
log.critical('An error occurred when opening and reading '+
'from the outfile '+os.path.basename(outfile))
return None
finally:
outfileFile.close()
else:
outList.append(outfile)
outList[0] = strutil.appendFits(outList[0])
# Exception for an image of type 'List'
elif type(outfile) == list:
        for out in outfile:
            if type(out) == str:
                outList.append(out)
else:
log.warning('Type '+str(type(outfile))+
' is not supported. The only supported types are String'+
' and List of Strings.')
return None
else:
        log.warning('Type '+ str(type(outfile))+
                    ' is not supported. The only supported types are String '+
'and List of Strings.')
return None
for out in outList:
if not os.access(out,os.R_OK):
log.error('Cannot read file: '+str(out))
return outList
#------------------------------------------------------------------------------
def verifyOutlist( inlist, outlist ):
"""
Verifies that for every file in the inList, there is a corresponding
output file.
@param inList: A list of input file paths.
@type inList: list
@param outlist: A list of output file paths.
@type outlist: list
"""
global log
if log==None:
log = gemLog.getGeminiLog()
try:
if outlist == []:
# Will append unique filenames if none exist in outlist
for i in range(len(inlist)):
line = 'output' + str(i+1)+ '.fits'
outlist.append(line)
return outlist
elif len(outlist) < len(inlist):
# Will append unique filenames if not enough in outlist
l = len(inlist) - len(outlist)
for i in range(l):
line = 'output' + str(l+i)+ '.fits'
outlist.append(line)
return outlist
else:
return outlist
except:
        log.error('An error occurred while trying to verify'+
                  ' the existence of outputs for inlist '+repr(inlist))
return None
#------------------------------------------------------------------------------
def checkParam(parameter, paramType, defaultValue, compareValue=0.0):
"""
This is a very basic and general parameter checking function. Basically,
pass a parameter, expected type
and a default value. If it is the same type then:
1) If it is a list, return it.
2) If it is a string, check if it is '', if it is then return default value.
3) If it is a number, check that it is greater than 'compareValue', if it
is not then return defaultValue.
Example usage:
-checkParam( mode, type(''), 'constant' )
-checkParam( fwhm, type(0.0), 5.5, compareValue=1.5 )
@param parameter: Parameter for testing.
@type parameter: Any
@param paramType: What the expected type of the parameter should be.
@type paramType: type( Any )
@param defaultValue: What the default value will be if failures occur.
Hopefully, the type matches paramType.
@type defaultValue: type( Any )
@param compareValue: Value to compare against in the case the parameter
is a float or int. This will check whether
the parameter is greater than the compareValue.
@type compareValue: float or int
@return: Parameter if it is the correct type and compare, default if it
fails comparison, or None if errors.
@rtype: paramType or None
"""
global log
if log==None:
log = gemLog.getGeminiLog()
if type(parameter) == paramType:
if (paramType == type(0)) or (paramType == type(0.0)):
if parameter > compareValue:
return parameter
elif paramType == str:
if parameter != '':
return parameter
else:
return parameter
else:
log.warning('Type of parameter, '+str(type(parameter))+
' is not the correct type:'+str(paramType))
#$$$ There needs to be an exception class to properly handle this raise
        raise TypeError('Incorrect Parameter Type: %r' % type(parameter))
return defaultValue
#------------------------------------------------------------------------------
def checkFileFitExtension( filename ):
"""
Determines if the fits file has a [X], [sci,X] or [SCI,X], to see if a
particular extension is to be opened.
@param filename: Name of a fits file
@type filename: str
@return: Tuple with the root filename and extension specified.
In the case that no '[]' exists in the filename, (filename, None) is returned.
In the case of an error parsing the input filename, (None,None) is returned.
@rtype: tuple
"""
if filename.find('[') >= 0:
try:
# Getting the filename without extension specification
file = filename.split('[')[0]
# Getting extension for both possible [] cases
if filename.find('sci')>(-1) or filename.find('SCI')>(-1):
exten = int(filename.split( '[' )[1].split( ']' )[0].split(',')[1])
else:
exten = int(filename.split( '[' )[1].split( ']' )[0])
except:
return (None, None)
return (file, exten)
else:
return (filename, None)
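# Illustrative examples (added; not part of the original module):
#   checkFileFitExtension('img.fits[sci,2]') -> ('img.fits', 2)
#   checkFileFitExtension('img.fits[3]')     -> ('img.fits', 3)
#   checkFileFitExtension('img.fits')        -> ('img.fits', None)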
#------------------------------------------------------------------------------
#---------------------------------------------------------------------------
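# NOTE (added assumption): appendFits() below relies on a module-level
# 'extensions' list of recognized filename extensions that is not defined in
# this file; a minimal default is provided here so the function is usable.
extensions = ['.fits', '.fit']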
def appendFits (images):
"""Append '.fits' to each name in 'images' that lacks an extension.
>>> print appendFits ('abc')
abc.fits
>>> print appendFits ('abc.fits')
abc.fits
>>> print appendFits (['abc', 'xyz.fits'])
['abc.fits', 'xyz.fits']
@param images: a file name or a list of file names
@type images: a string or a list of strings
@return: the input file names with '.fits' appended to each, unless
the name already ended in a recognized extension.
@rtype: list of strings
"""
if isinstance (images, str):
is_a_list = False
images = [images]
else:
is_a_list = True
modified = []
for image in images:
found = False
# extensions is a list of recognized filename extensions.
for extn in extensions:
if image.endswith (extn):
found = True
break
if found:
modified.append (image)
else:
modified.append (image + '.fits')
if is_a_list:
return modified
else:
return modified[0]
#---------------------------------------------------------------------------
def chomp(line):
"""
Removes newline(s) from end of line if present.
@param line: A possible corrupted line of code
@type line: str
@return: Line without any '\n' at the end.
@rtype: str
"""
if type(line) != str:
        raise TypeError('Bad Argument - Passed parameter is not str: %r' % type(line))
while (len(line) >=1) and (line[-1] == '\n'):
line = line[:-1]
return line
#------------------------------------------------------------------------------
def getDataFromInput(inputf, ext=None):
"""
!!! NOT FINISHED !!!
Retrieve the science data from a fits file, science data or AstroData.
"""
try:
import astrodata
from astrodata.AstroData import AstroData
astroPossible = True
except:
astroPossible = False
try:
import numpy as np
except:
raise
exttype = type(ext)
inputtype = type(inputf)
if ext is not None:
if exttype == int:
pass
else:
raise RuntimeError('Bad argument type. Received %s, expecting int.' %(str(exttype)))
else:
ext = 1
    if inputtype == np.ndarray:
pass
elif astroPossible and inputtype == AstroData:
pass
elif inputtype == str:
pass
else:
raise RuntimeError('Bad argument type.')
| pyrrho314/recipesystem | trunk/astrodata/adutils/paramutil.py | Python | mpl-2.0 | 14,435 |
# encoding: utf-8
# module samba.dcerpc.netlogon
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/netlogon.so
# by generator 1.135
""" netlogon DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class netr_DELTA_TRUSTED_DOMAIN(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
controller_names = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
domain_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
num_controllers = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
posix_offset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
sdbuf = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
SecurityInformation = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
unknown1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
unknown2 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
unknown3 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
unknown4 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
unknown6 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
unknown7 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
unknown8 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/netlogon/netr_DELTA_TRUSTED_DOMAIN.py | Python | gpl-2.0 | 1,909 |
"""
urlresolver XBMC Addon
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class FilehootResolver(UrlResolver):
name = "filehoot"
domains = ['filehoot.com']
pattern = '(?://|\.)(filehoot\.com)/(?:embed-)?([0-9a-z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
if '404 Not Found' in html:
raise ResolverError('The requested video was not found.')
pattern = "file\s*:\s*'([^']+)'\s*,\s*'provider'\s*:\s*'http"
match = re.search(pattern, html)
if match:
return match.group(1)
raise ResolverError('No video link found.')
def get_url(self, host, media_id):
return 'http://%s/embed-%s.html' % (host, media_id)
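# Illustrative example (added; not part of the original plugin): the host and
# media id are typically extracted with the class 'pattern' regex, and
# get_url() builds the embed page URL from them.
#   FilehootResolver().get_url('filehoot.com', 'abc123xyz')
#   -> 'http://filehoot.com/embed-abc123xyz.html'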
| koditraquinas/koditraquinas.repository | script.module.urlresolver/lib/urlresolver/plugins/filehoot.py | Python | gpl-2.0 | 1,599 |
from .base import BaseOp
from .. import objtypes, excepttypes, ssa_types
from ..constraints import ObjectConstraint, IntConstraint
class CheckCast(BaseOp):
def __init__(self, parent, target, args):
super(CheckCast, self).__init__(parent,args, makeException=True)
self.env = parent.env
self.target_tt = target
self.outExceptionCons = ObjectConstraint.fromTops(parent.env, [], (excepttypes.ClassCast,), nonnull=True)
def propagateConstraints(self, x):
for top in x.types.supers | x.types.exact:
if not objtypes.isSubtype(self.env, top, self.target_tt):
assert(not x.isConstNull())
return None, self.outExceptionCons, None
return None, None, None
class InstanceOf(BaseOp):
def __init__(self, parent, target, args):
super(InstanceOf, self).__init__(parent,args)
self.env = parent.env
self.target_tt = target
self.rval = parent.makeVariable(ssa_types.SSA_INT, origin=self)
def propagateConstraints(self, x):
rvalcons = IntConstraint.range(32, 0, 1)
return rvalcons, None, None | alexkasko/krakatau-java | krakatau-lib/src/main/resources/Lib/Krakatau/ssa/ssa_ops/checkcast.py | Python | gpl-3.0 | 1,131 |
# -*- test-case-name: twistedcaldav.test.test_sharing -*-
# #
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #
"""
Sharing behavior
"""
__all__ = [
"SharedResourceMixin",
"SharedHomeMixin",
]
from twext.who.idirectory import RecordType
from twisted.internet.defer import succeed, inlineCallbacks, DeferredList, \
returnValue
from twistedcaldav import customxml, caldavxml
from twistedcaldav.config import config
from twistedcaldav.customxml import calendarserver_namespace
from twistedcaldav.linkresource import LinkFollowerMixIn
from txdav.common.datastore.sql_tables import _ABO_KIND_GROUP, \
_BIND_MODE_DIRECT, _BIND_MODE_INDIRECT, _BIND_MODE_OWN, _BIND_MODE_READ, \
_BIND_MODE_WRITE, _BIND_STATUS_ACCEPTED, _BIND_STATUS_DECLINED, \
_BIND_STATUS_DELETED, _BIND_STATUS_INVALID, _BIND_STATUS_INVITED
from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
from txdav.xml import element
from txdav.who.wiki import RecordType as WikiRecordType, WikiAccessLevel
from txweb2 import responsecode
from txweb2.dav.http import ErrorResponse, MultiStatusResponse
from txweb2.dav.resource import TwistedACLInheritable
from txweb2.dav.util import allDataFromStream, joinURL
from txweb2.http import HTTPError, Response, XMLResponse
class SharedResourceMixin(object):
"""
A mix-in for calendar/addressbook resources that implements sharing-related
functionality.
"""
@inlineCallbacks
def inviteProperty(self, request):
"""
Calculate the customxml.Invite property (for readProperty) from the
invites database.
"""
if config.Sharing.Enabled:
@inlineCallbacks
def invitePropertyElement(invitation, includeUID=True):
userid = "urn:x-uid:" + invitation.shareeUID
principal = yield self.principalForUID(invitation.shareeUID)
cn = principal.displayName() if principal else invitation.shareeUID
returnValue(customxml.InviteUser(
customxml.UID.fromString(invitation.uid) if includeUID else None,
element.HRef.fromString(userid),
customxml.CommonName.fromString(cn),
customxml.InviteAccess(invitationBindModeToXMLMap[invitation.mode]()),
invitationBindStatusToXMLMap[invitation.status](),
))
# See if this property is on the shared calendar
if self.isSharedByOwner():
invitations = yield self.validateInvites(request)
returnValue(customxml.Invite(
*[(yield invitePropertyElement(invitation)) for invitation in invitations]
))
# See if it is on the sharee calendar
if self.isShareeResource():
original = yield self._newStoreObject.ownerView()
if original is not None:
invitations = yield original.allInvitations()
invitations = yield self.validateInvites(request, invitations)
ownerPrincipal = yield self.principalForUID(self._newStoreObject.ownerHome().uid())
if ownerPrincipal is None:
owner = "invalid"
ownerCN = "Invalid"
else:
# FIXME: use urn:x-uid in all cases
if self.isCalendarCollection():
owner = ownerPrincipal.principalURL()
else:
owner = "urn:x-uid:" + ownerPrincipal.principalUID()
ownerCN = ownerPrincipal.displayName()
returnValue(customxml.Invite(
customxml.Organizer(
element.HRef.fromString(owner),
customxml.CommonName.fromString(ownerCN),
),
*[(yield invitePropertyElement(invitation, includeUID=False)) for invitation in invitations]
))
returnValue(None)
@inlineCallbacks
def upgradeToShare(self):
"""
Set the resource-type property on this resource to indicate that this
is the owner's version of a resource which has been shared.
"""
# Change status on store object
yield self._newStoreObject.setShared(True)
@inlineCallbacks
def downgradeFromShare(self, request):
# Change status on store object
yield self._newStoreObject.setShared(False)
# Remove all invitees
for invitation in (yield self._newStoreObject.allInvitations()):
yield self._newStoreObject.uninviteUIDFromShare(invitation.shareeUID)
returnValue(True)
@inlineCallbacks
def directShare(self, request):
"""
Directly bind an accessible calendar/address book collection into the
current principal's calendar/addressbook home.
@param request: the request triggering this action
@type request: L{IRequest}
@return: the (asynchronous) HTTP result to respond to the direct-share
request.
@rtype: L{Deferred} firing L{txweb2.http.Response}, failing with
L{HTTPError}
"""
# Need to have at least DAV:read to do this
yield self.authorize(request, (element.Read(),))
# Find current principal
authz_principal = self.currentPrincipal(request).children[0]
if not isinstance(authz_principal, element.HRef):
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "valid-principal"),
"Current user principal not a DAV:href",
))
principalURL = str(authz_principal)
if not principalURL:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "valid-principal"),
"Current user principal not specified",
))
sharee = (yield request.locateResource(principalURL))
# Check enabled for service
from twistedcaldav.directory.principal import DirectoryCalendarPrincipalResource
if not isinstance(sharee, DirectoryCalendarPrincipalResource):
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-principal"),
"Current user principal is not a calendar/addressbook enabled principal",
))
# Get the home collection
if self.isCalendarCollection():
shareeHomeResource = yield sharee.calendarHome(request)
elif self.isAddressBookCollection() or self.isGroup():
shareeHomeResource = yield sharee.addressBookHome(request)
else:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-principal"),
"No calendar/addressbook home for principal",
))
# TODO: Make sure principal is not sharing back to themselves
hostURL = (yield self.canonicalURL(request))
shareeHomeURL = shareeHomeResource.url()
if hostURL.startswith(shareeHomeURL):
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-share"),
"Can't share your own calendar or addressbook",
))
# Accept it
shareeView = yield self._newStoreObject.directShareWithUser(
sharee.principalUID(),
displayName=self.displayName()
)
# Return the URL of the shared calendar
sharedAsURL = joinURL(shareeHomeResource.url(), shareeView.name())
returnValue(XMLResponse(
code=responsecode.OK,
element=customxml.SharedAs(
element.HRef.fromString(sharedAsURL)
)
))
def isSharedByOwner(self):
"""
Return True if this is an owner shared calendar collection.
"""
try:
return self._newStoreObject.isSharedByOwner() if self._newStoreObject else False
except AttributeError:
return False
def setShare(self, share_url):
"""
Set the URL associated with this L{SharedResourceMixin}. (This
is only invoked on the sharee's resource, not the owner's.)
"""
self._isShareeResource = True
self._share_url = share_url
def isShareeResource(self):
"""
Return True if this is a sharee view of a shared collection.
"""
return (
hasattr(self, "_newStoreObject") and
hasattr(self._newStoreObject, "owned") and
not self._newStoreObject.owned() and
getattr(self._newStoreObject, "_bindMode", None) is not None
)
def removeShareeResource(self, request):
"""
Called when the sharee DELETEs a shared collection.
"""
return self._newStoreObject.deleteShare()
@inlineCallbacks
def _checkAccessControl(self):
"""
Check the shared access mode of this resource, potentially consulting
an external access method if necessary.
@return: a L{Deferred} firing a L{bytes} or L{None}, with one of the
potential values: C{"own"}, which means that the home is the owner
of the collection and it is not shared; C{"read-only"}, meaning
that the home that this collection is bound into has only read
access to this collection; C{"read-write"}, which means that the
home has both read and write access; C{"original"}, which means
that it should inherit the ACLs of the owner's collection, whatever
those happen to be, or C{None}, which means that the external
            access control mechanism has dictated that the home should no longer have
any access at all.
"""
if self._newStoreObject.direct():
owner = yield self.principalForUID(self._newStoreObject.ownerHome().uid())
sharee = yield self.principalForUID(self._newStoreObject.viewerHome().uid())
if owner.record.recordType == WikiRecordType.macOSXServerWiki:
# Access level comes from what the wiki has granted to the
# sharee
access = (yield owner.record.accessForRecord(sharee.record))
if access == WikiAccessLevel.read:
returnValue("read-only")
elif access == WikiAccessLevel.write:
returnValue("read-write")
else:
returnValue(None)
else:
# Check proxy access
proxy_mode = yield sharee.proxyMode(owner)
if proxy_mode == "none":
returnValue("original")
else:
returnValue("read-write" if proxy_mode == "write" else "read-only")
else:
# Invited shares use access mode from the invite
# Get the access for self
bindMode = yield self._newStoreObject.effectiveShareMode()
returnValue(invitationAccessFromBindModeMap.get(bindMode))
@inlineCallbacks
def shareeAccessControlList(self, request, *args, **kwargs):
"""
Return WebDAV ACLs appropriate for the current user accessing the
shared collection. For an "invite" share we take the privilege granted
to the sharee in the invite and map that to WebDAV ACLs. For a
"direct" share, if it is a wiki collection we map the wiki privileges
into WebDAV ACLs, otherwise we use whatever privileges exist on the
underlying shared collection.
@param request: the request used to locate the owner resource.
@type request: L{txweb2.iweb.IRequest}
@param args: The arguments for
L{txweb2.dav.idav.IDAVResource.accessControlList}
@param kwargs: The keyword arguments for
L{txweb2.dav.idav.IDAVResource.accessControlList}, plus
keyword-only arguments.
@return: the appropriate WebDAV ACL for the sharee
@rtype: L{davxml.ACL}
"""
assert self._isShareeResource, "Only call this for a sharee resource"
assert self.isCalendarCollection() or self.isAddressBookCollection(), "Only call this for a address book or calendar resource"
sharee = yield self.principalForUID(self._newStoreObject.viewerHome().uid())
access = yield self._checkAccessControl()
if access == "original" and not self._newStoreObject.ownerHome().external():
original = (yield request.locateResource(self._share_url))
result = (yield original.accessControlList(request, *args, **kwargs))
returnValue(result)
# Direct shares use underlying privileges of shared collection
userprivs = [
]
if access in ("read-only", "read-write",):
userprivs.append(element.Privilege(element.Read()))
userprivs.append(element.Privilege(element.ReadACL()))
userprivs.append(element.Privilege(element.ReadCurrentUserPrivilegeSet()))
if access in ("read-only",):
userprivs.append(element.Privilege(element.WriteProperties()))
if access in ("read-write",):
userprivs.append(element.Privilege(element.Write()))
proxyprivs = list(userprivs)
try:
proxyprivs.remove(element.Privilege(element.ReadACL()))
except ValueError:
# If wiki says no-access then ReadACL won't be in the list
pass
aces = (
# Inheritable specific access for the resource's associated principal.
element.ACE(
element.Principal(element.HRef(sharee.principalURL())),
element.Grant(*userprivs),
element.Protected(),
TwistedACLInheritable(),
),
)
if self.isCalendarCollection():
aces += (
# Inheritable CALDAV:read-free-busy access for authenticated users.
element.ACE(
element.Principal(element.Authenticated()),
element.Grant(element.Privilege(caldavxml.ReadFreeBusy())),
TwistedACLInheritable(),
),
)
# Give read access to config.ReadPrincipals
aces += config.ReadACEs
# Give all access to config.AdminPrincipals
aces += config.AdminACEs
if self.isCalendarCollection() and config.EnableProxyPrincipals:
aces += (
# DAV:read/DAV:read-current-user-privilege-set access for this principal's calendar-proxy-read users.
element.ACE(
element.Principal(element.HRef(joinURL(sharee.principalURL(), "calendar-proxy-read/"))),
element.Grant(
element.Privilege(element.Read()),
element.Privilege(element.ReadCurrentUserPrivilegeSet()),
element.Privilege(element.WriteProperties()),
),
element.Protected(),
TwistedACLInheritable(),
),
# DAV:read/DAV:read-current-user-privilege-set/DAV:write access for this principal's calendar-proxy-write users.
element.ACE(
element.Principal(element.HRef(joinURL(sharee.principalURL(), "calendar-proxy-write/"))),
element.Grant(*proxyprivs),
element.Protected(),
TwistedACLInheritable(),
),
)
returnValue(element.ACL(*aces))
@inlineCallbacks
def validUserIDForShare(self, userid, request=None):
"""
Test the user id to see if it is a valid identifier for sharing and
return a "normalized" form for our own use (e.g. convert mailto: to
urn:uuid).
@param userid: the userid to test
@type userid: C{str}
@return: C{str} of normalized userid or C{None} if
userid is not allowed.
"""
# First try to resolve as a calendar principal
principal = yield self.principalForCalendarUserAddress(userid)
if principal is None:
principal = yield self.principalForCalendarGroupAddress(userid)
if principal:
if request:
ownerPrincipal = (yield self.ownerPrincipal(request))
if ownerPrincipal is None or ownerPrincipal.principalURL() == principal.principalURL():
returnValue(None)
returnValue(principal.principalURL())
# TODO: we do not support external users right now so this is being hard-coded
# off in spite of the config option.
# elif config.Sharing.AllowExternalUsers:
# return userid
else:
returnValue(None)
@inlineCallbacks
def principalForCalendarGroupAddress(self, groupid):
"""
Get principal for group address if extant
"""
if (
config.Sharing.Enabled and
config.Sharing.Calendars.Enabled and
config.Sharing.Calendars.Groups.Enabled
):
# see if group
for principalCollection in self.principalCollections():
record = yield principalCollection.directory.recordWithCalendarUserAddress(groupid)
if record is not None and record.recordType == RecordType.group:
groupPrincipal = yield principalCollection.principalForRecord(record)
if groupPrincipal is not None:
returnValue(groupPrincipal)
returnValue(None)
@inlineCallbacks
def validateInvites(self, request, invitations=None):
"""
Make sure each userid in an invite is valid - if not re-write status.
"""
# assert request
if invitations is None:
invitations = yield self._newStoreObject.allInvitations()
adjusted_invitations = []
for invitation in invitations:
if invitation.status != _BIND_STATUS_INVALID:
if not (yield self.validUserIDForShare("urn:x-uid:" + invitation.shareeUID, request)):
self.log.error("Invalid sharee detected: {uid}", uid=invitation.shareeUID)
invitation = invitation._replace(status=_BIND_STATUS_INVALID)
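            # Recompute the bind mode actually in effect for this sharee (group shares can change the granted mode).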
invitation = invitation._replace(
mode=(
yield self._newStoreObject._effectiveShareMode(
invitation.mode, invitation.shareeUID, self._newStoreObject._txn
)
)
)
adjusted_invitations.append(invitation)
returnValue(adjusted_invitations)
def inviteUIDToShare(self, userid, cn, ace, summary, request):
""" Send out in invite first, and then add this user to the share list
@param userid:
@param ace: Must be one of customxml.ReadWriteAccess or customxml.ReadAccess
"""
# TODO: Check if this collection is shared, and error out if it isn't
resultIsList = True
if type(userid) is not list:
userid = [userid]
resultIsList = False
if type(cn) is not list:
cn = [cn]
dl = [self.inviteSingleUserToShare(_user, _cn, ace, summary, request) for _user, _cn in zip(userid, cn)]
return self._processShareActionList(dl, resultIsList)
def uninviteUIDFromShare(self, userid, ace, request):
"""
        Send out an uninvite first, and then remove this user from the share list.
"""
# Do not validate the userid - we want to allow invalid users to be removed because they
# may have been valid when added, but no longer valid now. Clients should be able to clear out
# anything known to be invalid.
# TODO: Check if this collection is shared, and error out if it isn't
resultIsList = True
if type(userid) is not list:
userid = [userid]
resultIsList = False
dl = [self.uninviteSingleUserFromShare(user, ace, request) for user in userid]
return self._processShareActionList(dl, resultIsList)
def inviteUserUpdateToShare(self, userid, cn, aceOLD, aceNEW, summary, request):
resultIsList = True
if type(userid) is not list:
userid = [userid]
resultIsList = False
if type(cn) is not list:
cn = [cn]
dl = [self.inviteSingleUserUpdateToShare(_user, _cn, aceOLD, aceNEW, summary, request) for _user, _cn in zip(userid, cn)]
return self._processShareActionList(dl, resultIsList)
def _processShareActionList(self, dl, resultIsList):
def _defer(resultset):
results = [result if success else False for success, result in resultset]
return results if resultIsList else results[0]
return DeferredList(dl).addCallback(_defer)
@inlineCallbacks
def inviteSingleUserToShare(self, userid, cn, ace, summary, request): # @UnusedVariable
# We currently only handle local users
sharee = yield self.principalForCalendarUserAddress(userid)
if sharee is None:
sharee = yield self.principalForCalendarGroupAddress(userid)
if sharee is None:
returnValue(False)
try:
result = (yield self._newStoreObject.inviteUIDToShare(
sharee.principalUID(),
invitationBindModeFromXMLMap[type(ace)],
summary,
))
except Exception as e:
self.log.error("Could not send sharing invite '{userid}': {ex}", userid=userid, ex=e)
result = None
returnValue(result)
@inlineCallbacks
def uninviteSingleUserFromShare(self, userid, aces, request): # @UnusedVariable
# Cancel invites - we'll just use whatever userid we are given. However, if we
# cannot find a matching principal, try to extract the uid from the userid
# and use that (to allow invalid principals to be removed).
sharee = yield self.principalForCalendarUserAddress(userid)
if sharee is not None:
uid = sharee.principalUID()
elif userid.startswith("urn:x-uid:"):
uid = userid[10:]
else:
returnValue(False)
try:
result = (yield self._newStoreObject.uninviteUIDFromShare(uid))
except Exception as e:
self.log.error("Could not send sharing uninvite '{userid}': {ex}", userid=userid, ex=e)
result = None
returnValue(result)
@inlineCallbacks
def uninviteFromShare(self, invitation, request):
yield self._newStoreObject.uninviteFromShare(invitation)
returnValue(True)
def inviteSingleUserUpdateToShare(self, userid, commonName, acesOLD, aceNEW, summary, request): # @UnusedVariable
# Just update existing
return self.inviteSingleUserToShare(userid, commonName, aceNEW, summary, request)
@inlineCallbacks
def _xmlHandleInvite(self, request, docroot):
# Sharing must be enabled for this collection
if not self.canBeShared():
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Sharing not supported on this resource",
))
yield self.authorize(request, (element.Read(), element.Write()))
result = (yield self._handleInvite(request, docroot))
returnValue(result)
@inlineCallbacks
def _handleInvite(self, request, invitedoc):
def _handleInviteSet(inviteset):
userid = None
cn = None
access = None
summary = None
for item in inviteset.children:
if isinstance(item, element.HRef):
userid = str(item)
continue
if isinstance(item, customxml.CommonName):
cn = str(item)
continue
if isinstance(item, customxml.InviteSummary):
summary = str(item)
continue
if isinstance(item, customxml.ReadAccess) or isinstance(item, customxml.ReadWriteAccess):
access = item
continue
if userid and access and summary:
return (userid, cn, access, summary)
else:
error_text = []
if userid is None:
error_text.append("missing href")
if access is None:
error_text.append("missing access")
if summary is None:
error_text.append("missing summary")
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"%s: %s" % (", ".join(error_text), inviteset,),
))
def _handleInviteRemove(inviteremove):
userid = None
access = []
for item in inviteremove.children:
if isinstance(item, element.HRef):
userid = str(item)
continue
if isinstance(item, customxml.ReadAccess) or isinstance(item, customxml.ReadWriteAccess):
access.append(item)
continue
if userid is None:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Missing href: %s" % (inviteremove,),
))
if len(access) == 0:
access = None
else:
access = set(access)
return (userid, access)
setDict, removeDict, updateinviteDict = {}, {}, {}
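        # setDict: invites to add; removeDict: invites to revoke; updateinviteDict: userids present in both, treated as access updates.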
okusers = set()
badusers = set()
for item in invitedoc.children:
if isinstance(item, customxml.InviteSet):
userid, cn, access, summary = _handleInviteSet(item)
setDict[userid] = (cn, access, summary)
# Validate each userid on add only
uid = (yield self.validUserIDForShare(userid, request))
if uid is None:
uid = yield self.principalForCalendarGroupAddress(userid)
(badusers if uid is None else okusers).add(userid)
elif isinstance(item, customxml.InviteRemove):
userid, access = _handleInviteRemove(item)
removeDict[userid] = access
# Treat removed userids as valid as we will fail invalid ones silently
okusers.add(userid)
# Only make changes if all OK
if len(badusers) == 0:
okusers = set()
badusers = set()
# Special case removing and adding the same user and treat that as an add
sameUseridInRemoveAndSet = [u for u in removeDict.keys() if u in setDict]
for u in sameUseridInRemoveAndSet:
removeACL = removeDict[u]
cn, newACL, summary = setDict[u]
updateinviteDict[u] = (cn, removeACL, newACL, summary)
del removeDict[u]
del setDict[u]
for userid, access in removeDict.iteritems():
result = (yield self.uninviteUIDFromShare(userid, access, request))
# If result is False that means the user being removed was not
# actually invited, but let's not return an error in this case.
okusers.add(userid)
for userid, (cn, access, summary) in setDict.iteritems():
result = (yield self.inviteUIDToShare(userid, cn, access, summary, request))
(okusers if result else badusers).add(userid)
for userid, (cn, removeACL, newACL, summary) in updateinviteDict.iteritems():
result = (yield self.inviteUserUpdateToShare(userid, cn, removeACL, newACL, summary, request))
(okusers if result else badusers).add(userid)
# In this case bad items do not prevent ok items from being processed
ok_code = responsecode.OK
else:
# In this case a bad item causes all ok items not to be processed so failed dependency is returned
ok_code = responsecode.FAILED_DEPENDENCY
# Do a final validation of the entire set of invites
invites = (yield self.validateInvites(request))
numRecords = len(invites)
# Set the sharing state on the collection
shared = self.isSharedByOwner()
if shared and numRecords == 0:
yield self.downgradeFromShare(request)
elif not shared and numRecords != 0:
yield self.upgradeToShare()
# Create the multistatus response - only needed if some are bad
if badusers:
xml_responses = []
xml_responses.extend([
element.StatusResponse(element.HRef(userid), element.Status.fromResponseCode(ok_code))
for userid in sorted(okusers)
])
xml_responses.extend([
element.StatusResponse(element.HRef(userid), element.Status.fromResponseCode(responsecode.FORBIDDEN))
for userid in sorted(badusers)
])
#
# Return response
#
returnValue(MultiStatusResponse(xml_responses))
else:
returnValue(responsecode.OK)
@inlineCallbacks
def _xmlHandleInviteReply(self, request, docroot):
# Sharing must be enabled for this collection
if not self.canShare():
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Sharing not supported on this resource",
))
yield self.authorize(request, (element.Read(), element.Write()))
result = (yield self._handleInviteReply(request, docroot))
returnValue(result)
def _handleInviteReply(self, request, docroot):
raise NotImplementedError
@inlineCallbacks
def xmlRequestHandler(self, request):
# Need to read the data and get the root element first
xmldata = (yield allDataFromStream(request.stream))
try:
doc = element.WebDAVDocument.fromString(xmldata)
except ValueError, e:
self.log.error("Error parsing doc ({ex}) Doc:\n {x}", ex=str(e), x=xmldata)
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Invalid XML",
))
root = doc.root_element
if type(root) in self.xmlDocHandlers:
result = (yield self.xmlDocHandlers[type(root)](self, request, root))
returnValue(result)
else:
self.log.error("Unsupported XML ({r})", r=root)
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Unsupported XML",
))
xmlDocHandlers = {
customxml.InviteShare: _xmlHandleInvite,
customxml.InviteReply: _xmlHandleInviteReply,
}
def isGroup(self):
try:
return self._newStoreObject._kind == _ABO_KIND_GROUP
except AttributeError:
return False
def POST_handler_content_type(self, request, contentType):
if self.isCollection() or self.isGroup():
if contentType:
if contentType in self._postHandlers:
return self._postHandlers[contentType](self, request)
else:
self.log.info("Got a POST on collection or group with an unsupported content type: {t}", t=contentType)
else:
self.log.info("Got a POST on collection or group with no content type")
return succeed(responsecode.FORBIDDEN)
_postHandlers = {
("application", "xml"): xmlRequestHandler,
("text", "xml"): xmlRequestHandler,
}
invitationBindStatusToXMLMap = {
_BIND_STATUS_INVITED: customxml.InviteStatusNoResponse,
_BIND_STATUS_ACCEPTED: customxml.InviteStatusAccepted,
_BIND_STATUS_DECLINED: customxml.InviteStatusDeclined,
_BIND_STATUS_INVALID: customxml.InviteStatusInvalid,
_BIND_STATUS_DELETED: customxml.InviteStatusDeleted,
}
invitationBindStatusFromXMLMap = dict((v, k) for k, v in invitationBindStatusToXMLMap.iteritems())
invitationBindModeToXMLMap = {
_BIND_MODE_READ: customxml.ReadAccess,
_BIND_MODE_WRITE: customxml.ReadWriteAccess,
}
invitationBindModeFromXMLMap = dict((v, k) for k, v in invitationBindModeToXMLMap.iteritems())
invitationAccessFromBindModeMap = {
_BIND_MODE_OWN: "own",
_BIND_MODE_READ: "read-only",
_BIND_MODE_WRITE: "read-write",
_BIND_MODE_DIRECT: "read-write",
_BIND_MODE_INDIRECT: "read-write",
}
class SharedHomeMixin(LinkFollowerMixIn):
"""
A mix-in for calendar/addressbook homes that defines the operations for
manipulating a sharee's set of shared calendars.
"""
@inlineCallbacks
def provisionShare(self, child, request=None):
"""
Set shared state and check access control.
"""
if child._newStoreObject is not None and not child._newStoreObject.owned():
ownerHomeURL = (yield self._otherPrincipalHomeURL(child._newStoreObject.ownerHome().uid()))
ownerView = yield child._newStoreObject.ownerView()
child.setShare(joinURL(ownerHomeURL, ownerView.name()) if ownerHomeURL else None)
access = yield child._checkAccessControl()
if access is None:
returnValue(None)
returnValue(child)
def _otherPrincipalHomeURL(self, otherUID):
# Is this only meant to be overridden?
pass
@inlineCallbacks
def acceptShare(self, request, inviteUID, summary):
# Accept the share
try:
shareeView = yield self._newStoreHome.acceptShare(inviteUID, summary)
except DirectoryRecordNotFoundError:
# Missing sharer record => fail request
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-share"),
"Invite UID not valid",
))
if shareeView is None:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-share"),
"Invite UID not valid",
))
# Return the URL of the shared collection
sharedAsURL = joinURL(self.url(), shareeView.shareName())
returnValue(XMLResponse(
code=responsecode.OK,
element=customxml.SharedAs(
element.HRef.fromString(sharedAsURL)
)
))
@inlineCallbacks
def declineShare(self, request, inviteUID):
# Remove it if it is in the DB
try:
result = yield self._newStoreHome.declineShare(inviteUID)
except DirectoryRecordNotFoundError:
# Missing sharer record => just treat decline as success
result = True
if not result:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-share"),
"Invite UID not valid",
))
returnValue(Response(code=responsecode.NO_CONTENT))
def _handleInviteReply(self, request, invitereplydoc):
"""
Handle a user accepting or declining a sharing invite
"""
hostUrl = None
accepted = None
summary = None
replytoUID = None
for item in invitereplydoc.children:
if isinstance(item, customxml.InviteStatusAccepted):
accepted = True
elif isinstance(item, customxml.InviteStatusDeclined):
accepted = False
elif isinstance(item, customxml.InviteSummary):
summary = str(item)
elif isinstance(item, customxml.HostURL):
for hosturlItem in item.children:
if isinstance(hosturlItem, element.HRef):
hostUrl = str(hosturlItem)
elif isinstance(item, customxml.InReplyTo):
replytoUID = str(item)
if accepted is None or hostUrl is None or replytoUID is None:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Missing required XML elements",
))
if accepted:
return self.acceptShare(request, replytoUID, summary=summary)
else:
return self.declineShare(request, replytoUID)
| macosforge/ccs-calendarserver | twistedcaldav/sharing.py | Python | apache-2.0 | 38,051 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""pagegetter lite"""
import ujson as json
import twisted.python.failure
import datetime
import dateutil.parser
import hashlib
import logging
import time
import copy
from twisted.internet.defer import maybeDeferred
from twisted.web.client import _parse
from hiispider.requestqueuer import RequestQueuer
from hiispider.unicodeconverter import convertToUTF8, convertToUnicode
from hiispider.exceptions import StaleContentException
class ReportedFailure(twisted.python.failure.Failure):
pass
# A UTC class.
class CoordinatedUniversalTime(datetime.tzinfo):
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return self.ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return self.ZERO
UTC = CoordinatedUniversalTime()
logger = logging.getLogger(__name__)
class PageGetter:
negitive_cache = {}
def __init__(self, rq=None):
"""
        Create a lightweight page getter. Unlike the full pagegetter, this
        "lite" variant is not backed by a Cassandra cache.
**Keyword arguments:**
* *rq* -- Request Queuer object. (Default ``None``)
"""
if rq is None:
self.rq = RequestQueuer()
else:
self.rq = rq
def getPage(self,
url,
method='GET',
postdata=None,
headers=None,
agent="HiiSpider",
timeout=60,
cookies=None,
follow_redirect=1,
prioritize=False,
hash_url=None,
cache=0,
content_sha1=None,
confirm_cache_write=False,
check_only_tld=False,
disable_negative_cache=False,
):
"""
Make a cached HTTP Request.
**Arguments:**
* *url* -- URL for the request.
**Keyword arguments:**
* *method* -- HTTP request method. (Default ``'GET'``)
* *postdata* -- Dictionary of strings to post with the request.
(Default ``None``)
* *headers* -- Dictionary of strings to send as request headers.
(Default ``None``)
* *agent* -- User agent to send with request. (Default
``'HiiSpider'``)
* *timeout* -- Request timeout, in seconds. (Default ``60``)
* *cookies* -- Dictionary of strings to send as request cookies.
(Default ``None``).
* *follow_redirect* -- Boolean switch to follow HTTP redirects.
(Default ``True``)
* *prioritize* -- Move this request to the front of the request
queue. (Default ``False``)
* *hash_url* -- URL string used to indicate a common resource.
Example: "http://digg.com" and "http://www.digg.com" could both
use hash_url, "http://digg.com" (Default ``None``)
* *cache* -- Cache mode. ``1``, immediately return contents of
cache if available. ``0``, check resource, return cache if not
stale. ``-1``, ignore cache. (Default ``0``)
* *content_sha1* -- SHA-1 hash of content. If this matches the
hash of data returned by the resource, raises a
StaleContentException.
* *confirm_cache_write* -- Wait to confirm cache write before returning.
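
        Example (illustrative; the URL is a placeholder)::

            pg = PageGetter()
            d = pg.getPage("http://example.com/", timeout=30)
            d.addCallback(lambda data: data["response"])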
"""
request_kwargs = {
"method":method.upper(),
"postdata":postdata,
"headers":headers,
"agent":agent,
"timeout":timeout,
"cookies":cookies,
"follow_redirect":follow_redirect,
"prioritize":prioritize}
cache = int(cache)
        cache = 0  # The lite pagegetter has no cache backend, so caching is forced off.
if cache not in [-1,0,1]:
raise Exception("Unknown caching mode.")
if not isinstance(url, str):
url = convertToUTF8(url)
if hash_url is not None and not isinstance(hash_url, str):
hash_url = convertToUTF8(hash_url)
        # Check the negative cache (the "negitive" spelling is retained in the attribute names).
host = _parse(url)[1]
# if check_only_tld is true then parse the url down to the top level domain
if check_only_tld:
host_split = host.split('.', host.count('.')-1)
host = host_split[len(host_split)-1]
if host in self.negitive_cache:
if not self.negitive_cache[host]['timeout'] < time.time():
logger.error('Found %s in negitive cache, raising last known exception' % host)
return self.negitive_cache[host]['error'].raiseException()
# Create request_hash to serve as a cache key from
# either the URL or user-provided hash_url.
if hash_url is None:
request_hash = hashlib.sha1(json.dumps([
url,
agent])).hexdigest()
else:
request_hash = hashlib.sha1(json.dumps([
hash_url,
agent])).hexdigest()
d = self.rq.getPage(url, **request_kwargs)
d.addCallback(self._checkForStaleContent, content_sha1, request_hash, host)
d.addErrback(self._getPageErrback, host)
return d
def _checkForStaleContent(self, data, content_sha1, request_hash, host):
if host in self.negitive_cache:
logger.error('Removing %s from negitive cache' % host)
del self.negitive_cache[host]
if "content-sha1" not in data:
data["content-sha1"] = hashlib.sha1(data["response"]).hexdigest()
if content_sha1 == data["content-sha1"]:
logger.debug("Raising StaleContentException (4) on %s" % request_hash)
raise StaleContentException(content_sha1)
else:
return data
def _getPageErrback(self, error, host):
try:
status = int(error.value.status)
except:
status = 500
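        # 5xx (or unparsable) statuses negative-cache the host: ~5 minutes after the first failure, ~10 minutes for the next few, then ~1 hour.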
if status >= 500:
if not host in self.negitive_cache:
logger.error('Adding %s to negitive cache' % host)
self.negitive_cache[host] = {
'timeout': time.time() + 300,
'retries': 1,
'error': error
}
else:
if self.negitive_cache[host]['retries'] <= 5:
self.negitive_cache[host]['timeout'] = time.time() + 600
self.negitive_cache[host]['retries'] += 1
else:
self.negitive_cache[host]['timeout'] = time.time() + 3600
self.negitive_cache[host]['retries'] += 1
self.negitive_cache[host]['error'] = error
logger.error('Updating negitive cache for host %s which has failed %d times' % (host, self.negitive_cache[host]['retries']))
error.raiseException()
| hiidef/hiispider | legacy/pagegetterlite.py | Python | mit | 6,744 |
__author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
Shows how to use finmarketpy to create total return indices for FX spot (ie. calculates spot returns + carry returns)
"""
import pandas as pd
# For plotting
from chartpy import Chart, Style
# For loading market data
from findatapy.market import Market, MarketDataGenerator, MarketDataRequest
from findatapy.timeseries import Calculations
from findatapy.util.loggermanager import LoggerManager
logger = LoggerManager().getLogger(__name__)
chart = Chart(engine='plotly')
market = Market(market_data_generator=MarketDataGenerator())
calculations = Calculations()
# Choose run_example = 0 for everything
# run_example = 1 - create daily total return indices from FX spot data + deposit for AUDJPY, and compare
# run_example = 2 - create intraday total return indices from FX spot data + deposit for GBPUSD, and compare with daily
run_example = 0
from finmarketpy.curve.fxspotcurve import FXSpotCurve
###### Create total return indices plot for AUDJPY (from perspective of a USD investor)
###### Compare with AUDJPY FX spot and BBG constructed AUDJPY total return indices
if run_example == 1 or run_example == 0:
# Get AUDJPY total returns from perspective of USD investor (via AUDUSD & JPYUSD and AUD, USD & JPY overnight deposit rates)
md_request = MarketDataRequest(start_date='01 Jan 1999', finish_date='01 Dec 2020',
data_source='bloomberg', cut='NYC', category='fx',
tickers=['AUDJPY'],
cache_algo='cache_algo_return',
abstract_curve=FXSpotCurve(construct_via_currency='USD', depo_tenor='ON'))
df_tot = market.fetch_market(md_request=md_request)
df_tot.columns = [x + '-tot-cuemacro' for x in df_tot.columns]
# Get spot data
md_request.abstract_curve = None
df_spot = market.fetch_market(md_request=md_request)
df_spot.columns = [x + '-spot' for x in df_spot.columns]
# Get Bloomberg calculated total return indices (for spot)
md_request.category = 'fx-tot'
df_bbg_tot = market.fetch_market(md_request)
df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns]
# Get Bloomberg calculated total return indices (for 1M forwards rolled)
md_request.category = 'fx-tot-forwards'
df_bbg_tot_forwards = market.fetch_market(md_request)
df_bbg_tot_forwards.columns = [x + '-bbg' for x in df_bbg_tot_forwards.columns]
# Combine into a single data frame and plot, we note that the Cuemacro constructed indices track the Bloomberg
# indices relatively well (both from spot and 1M forwards). Also note the large difference with spot indices
# CAREFUL to fill down, before reindexing because 1M forwards indices are likely to have different publishing dates
df = calculations.join([df_tot, df_bbg_tot, df_spot, df_bbg_tot_forwards], how='outer').fillna(method='ffill')
df = calculations.create_mult_index_from_prices(df)
chart.plot(df)
###### Create total return indices plot for GBPUSD with intraday and daily data (from perspective of a USD investor)
###### Compare intraday and daily total return indices
if run_example == 2 or run_example == 0:
import pytz
# Get GBPUSD total returns from perspective of USD investor (via GBP and USD rates)
md_request = MarketDataRequest(start_date='01 Jan 2019', finish_date='01 Jul 2019',
data_source='bloomberg', cut='NYC', category='fx',
tickers=['GBPUSD'],
cache_algo='cache_algo_return',
abstract_curve=FXSpotCurve(construct_via_currency='USD', depo_tenor='ON'))
df_tot = market.fetch_market(md_request=md_request)
df_tot.columns = [x + '-tot-cuemacro' for x in df_tot.columns]
df_tot = df_tot.tz_localize(pytz.utc)
df_tot.index = df_tot.index + pd.Timedelta(hours=22) # Roughly NY close 2200 GMT
md_request.abstract_curve = None
# Get intraday spot data
md_request.freq = 'tick'
md_request.data_source = 'dukascopy'
df_intraday_spot = market.fetch_market(md_request=md_request)
df_intraday_spot = pd.DataFrame(df_intraday_spot.resample('1min').last().dropna())
# Get Bloomberg calculated total return indices (for spot)
md_request.category = 'fx-tot'
md_request.freq = 'daily'
md_request.data_source = 'bloomberg'
df_bbg_tot = market.fetch_market(md_request)
df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns]
df_bbg_tot = df_bbg_tot.tz_localize(pytz.utc)
df_bbg_tot.index = df_bbg_tot.index + pd.Timedelta(hours=22) # Roughly NY close 2200 GMT
md_request = MarketDataRequest(start_date='01 Jan 2019', finish_date='01 Jul 2019',
data_source='bloomberg', cut='NYC', category='base-depos',
tickers=['GBPON', 'USDON'],
cache_algo='cache_algo_return')
# Join daily deposit data with intraday spot data
# OK to fill down, because deposit data isn't very volatile
df_deposit_rates = market.fetch_market(md_request).tz_localize(pytz.utc)
df_intraday_market = df_intraday_spot.join(df_deposit_rates, how='left')
df_intraday_market = df_intraday_market.fillna(method='ffill').fillna(method='bfill')
df_intraday_tot = FXSpotCurve().construct_total_return_index('GBPUSD', df_intraday_market, depo_tenor='ON')
df_intraday_spot.columns = [x + '-intraday-spot' for x in df_intraday_spot.columns]
    df_intraday_tot.columns = [x + '-intraday-tot' for x in df_intraday_tot.columns]
# Combine into a single data frame and plot
df = calculations.join([df_bbg_tot, df_tot, df_intraday_tot, df_intraday_spot], how='outer').fillna(method='ffill')
df = calculations.create_mult_index_from_prices(df)
chart.plot(df) | cuemacro/finmarketpy | finmarketpy_examples/fx_spot_indices_examples.py | Python | apache-2.0 | 6,535 |
from sqlalchemy import create_engine, Column, Integer, Table
from sqlalchemy import String, DateTime, ForeignKey
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
engine = create_engine('postgresql:///game', convert_unicode=True)
session = scoped_session(
sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
Base.query = session.query_property()
def init_db():
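    # Model modules must be imported before this call so their tables are registered on Base.metadata.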
Base.metadata.create_all(bind=engine)
| Drvanon/Game | database.py | Python | apache-2.0 | 586 |
"""
"""
from link.common import APIResponse
from link.wrappers import APIRequestWrapper, APIResponseWrapper
from requests.auth import AuthBase
import json
import requests
class ConsoleAuth(AuthBase):
"""
Does the authentication for Console requests.
"""
def __init__(self, token):
# setup any auth-related data here
self.token = token
def __call__(self, r):
# modify and return the request
r.headers['Authorization'] = self.token
return r
class ConsoleAPIResponseWrapper(APIResponseWrapper):
"""
Wraps a response from a console api
"""
def __init__(self, wrap_name = None, response = None):
super(ConsoleAPIResponseWrapper, self).__init__(response = response,
wrap_name = wrap_name)
@property
def xml(self):
"""
This api does not return xml
"""
raise NotImplementedError('This api does not return xml')
@property
def error(self):
"""
Console error is either an error in the wrapper response or
an error returned by the api in the json
"""
error = self._wrapped.error
if error:
return error
return self.json['response'].get('error')
@property
def error_code(self):
"""
return the error code
"""
return self.json['response'].get('error_code')
@property
def error_id(self):
"""
return the error_id
"""
        return self.json['response'].get('error_id')
def noauth(self):
"""
Returns whether erorr is NOAUTH
"""
try:
# some endpoints dont return json
return self.json['response'].get('error_id') == 'NOAUTH'
except:
return False
class APIClientMessage(object):
"""
    Holds an API message payload together with optional warnings and error
    information, and knows how to render itself as a JSON response fragment.
"""
    def __init__(self, message = None, warnings = None,
                 error = None):
self._message = message
self.error = error
self.warnings = warnings
        super(APIClientMessage, self).__init__()
@classmethod
def api_object_name(cls):
return cls.__name__.lower()
#@property
#def json(self):
#return self._json
def __getitem__(self, name):
try:
return self.json[name]
except:
raise Exception('no json stored in this APIObject or API Response')
def __iter__(self):
return self.json.__iter__()
def get(self, name):
return self.message.get(name)
def __str__(self):
return json.dumps(self.message , cls = APIEncoder)
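    # NOTE: this key-based __getitem__ overrides the json-based __getitem__ defined earlier in the class body.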
def __getitem__(self, key):
return self.message[key]
@property
def response_label(self):
"""
Only get's called the first time, then it is cached in self.NAME
"""
return self.api_object_name()
@property
def response(self):
_json = {}
#if there is an error don't continue
if self.error:
_json['error'] = self.error
return _json
_json['status'] = 'ok'
if self.message!=None:
_json['response'] = { self.response_label: self.message }
if self.warnings:
_json['warnings'] = self.warnings
return _json
@property
def message(self):
return self._message
def set_message(self, message):
self._message = message
from link.utils import array_pagenate
import types
class APIClient(APIClientMessage):
"""
    Used to help make standardized JSON responses for APIs.
"""
def __init__(self, message = None, warnings = None, error = None,
seek = None, response_id = None,auth=None):
        super(APIClient, self).__init__(message, error = error,
                                        warnings = warnings)
if seek:
self.seek(*seek)
self._pages = None
        if auth and isinstance(auth, types.FunctionType):
            # if auth is callable, invoke it and use the result as the auth value
self.auth = auth()
#let's try this out and see if its any good.
#each response get's a unique uuid
self.response_id = response_id
def auth(self):
raise NotImplementedError()
def seek(self, *kargs):
raise NotImplementedError()
#def __getitem__(self, key):
#return self.response[key]
#def get(self, key):
#return self.response.get(key)
#def iteritems(self):
#return self.message.iteritems()
def __str__(self):
return json.dumps(self.response, cls = APIEncoder)
def pagenate(self, per_page=100):
"""
        Chunk this response's message into pages of ``per_page`` items each.
"""
#TODO: need a test for this
self._pages = array_pagenate(per_page, self.message)
def next_page(self):
"""
Returns the next page that is in the generator
"""
if not self._pages:
self.pagenate()
#this is sorta weird, but you want to make that object's message just be
#next one in the list. Remove the Nones. There is probably a way to
#make it so it doesn't have to pad
try:
next = self._pages.next()
except StopIteration as e:
#if we are done then set the message to nothing
self.set_message([])
return self
message = [x for x in next if x !=None]
self.set_message(message)
#TODO: need a test for this
return self
@property
def response(self):
_json = {}
#if there is an error don't continue
if self.error:
_json['error'] = self.error
return _json
_json['status'] = 'ok'
if self.message!=None:
_json['response'] = { self.response_label: self.message }
if self.warnings:
_json['warnings'] = self.warnings
if self.response_id:
_json['response_id'] = self.response_id
return _json
class ConsoleClient(APIClient):
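    """
    Thin convenience client around the Console API wrapper.

    Illustrative usage (the URL and credentials are placeholders):

        client = ConsoleClient('console', 'https://console.example.com', 'user', 'secret')
    """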
    def __init__(self, wrap_name, base_url, user, password):
self.api = ConsoleAPIRequestWrapper(wrap_name = wrap_name, base_url=
base_url, user = user, password =
password)
if not self.check_token():
raise Exception("Unable to login to the Console API")
def check_token(self):
"""
checks the token of the api
:returns: True if the token passes
"""
try:
if self.api.token:
#TODO: There is a better check than it's there
return True
except:
raise Exception("No auth token found on the authentication")
return False
class ConsoleAPIRequestWrapper(APIRequestWrapper):
"""
Wrap the Console API
"""
def __init__(self, wrap_name=None, base_url=None, user=None, password=None):
self._token = None
super(ConsoleAPIRequestWrapper, self).__init__(wrap_name = wrap_name,
base_url=base_url,
user=user,
password=password,
response_wrapper =
ConsoleAPIResponseWrapper)
def authenticate(self):
"""
Write a custom auth property where we grab the auth token and put it in
the headers
"""
auth_json={'auth':{'username':self.user, 'password':self.password}}
self._wrapped = requests.session()
#send a post with no auth. prevents an infinite loop
auth_response = self.post('/auth', data = json.dumps(auth_json), auth =
None)
_token = auth_response.json['response']['token']
self._token = _token
self._wrapped.auth = ConsoleAuth(_token)
@property
def token(self):
"""
Returns the token from the api to tell us that we have been logged in
"""
if not self._token:
            self.authenticate()
return self._token
| uhjish/link | link/wrappers/consolewrappers.py | Python | apache-2.0 | 8,558 |
# coding=utf-8
import vim
from os.path import join
from .vim_interface import *
def set_vim_globals():
""" Sets global vim preferences and commands.
"""
    # Warn if g:pad#dir has not been set to a valid path
if get_save_dir() == "":
V + 'echom "vim-pad: IMPORTANT: please set g:pad#dir to a valid path in your vimrc."'
V + "redraw"
# vim-pad pollutes the MRU.vim list quite a lot, if let alone.
# This should fix that.
if vim.eval('exists(":MRU")') == "2":
mru_exclude_files = vim.eval("MRU_Exclude_Files")
if mru_exclude_files != '':
tail = "\|" + mru_exclude_files
else:
tail = ''
V + ("let MRU_Exclude_Files = '^" +
join(get_save_dir(), ".*") + tail + "'")
# we forbid writing backups of the notes
orig_backupskip = vim.eval("&backupskip")
V + ("let &backupskip='" +
",".join([orig_backupskip, join(get_save_dir(), "*")]) + "'")
| fmoralesc/vim-pad | pythonx/pad/vim_globals.py | Python | mit | 966 |
from common import TofbotTestCase, bot_action
from httpretty import HTTPretty, httprettified
from plugins.euler import EulerEvent
class TestEuler(TofbotTestCase):
@httprettified
def test_euler(self):
euler_nick = 'leonhard'
def set_score(score):
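            # Register a fake Project Euler profile response reporting the given solved count.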
url = "http://projecteuler.net/profile/%s.txt" % euler_nick
country = 'country'
language = 'language'
level = 1
text = "%s,%s,%s,Solved %d,%d" % (euler_nick,
country,
language,
score,
level,
)
HTTPretty.register_uri(HTTPretty.GET, url,
body=text,
content_type="text/plain")
set_score(10)
self.bot.send("!euler_add leonhard")
# Get event to unschedule and manually fire it
(event_k, event) = self._find_event(EulerEvent)
self._delete_event(event_k)
self.assertOutput("!euler", "leonhard : Solved 10")
set_score(15)
l = bot_action(self.bot, event.fire)
self.assertEqual(l, ["leonhard : Solved 10 -> Solved 15"])
| p0nce/tofbot | tests/test_euler.py | Python | bsd-2-clause | 1,331 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.deprecated import deprecated_conditional
from pants.base.exceptions import TargetDefinitionException
class JavaThriftLibrary(JvmTarget):
"""A Java library generated from Thrift IDL files.
:API: public
"""
# TODO(John Sirois): Tasks should register the values they support in a plugin-registration goal.
# In general a plugin will contribute a target and a task, but in this case we have a shared
# target that can be used by at least 2 tasks - ThriftGen and ScroogeGen. This is likely not
# uncommon (gcc & clang) so the arrangement needs to be cleaned up and supported well.
_COMPILERS = frozenset(['thrift', 'scrooge'])
def __init__(self,
compiler=None,
language=None,
rpc_style=None,
namespace_map=None,
thrift_linter_strict=None,
default_java_namespace=None,
include_paths=None,
compiler_args=None,
**kwargs):
"""
:API: public
:param compiler: The compiler used to compile the thrift files. The default is defined in
the global options under ``--thrift-default-compiler``.
:param language: The language used to generate the output files. The default is defined in
the global options under ``--thrift-default-language``.
:param rpc_style: An optional rpc style to generate service stubs with. The default is defined
in the global options under ``--thrift-default-rpc-style``.
:param namespace_map: An optional dictionary of namespaces to remap {old: new}
:param thrift_linter_strict: If True, fail if thrift linter produces any warnings.
:param default_java_namespace: The namespace used for Java generated code when a Java
namespace is not explicitly specified in the IDL. The default is defined in the global
options under ``--thrift-default-default-java-namespace``.
:param compiler_args: Extra arguments to the compiler.
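
    Illustrative BUILD usage (target name and source file are placeholders):

        java_thrift_library(
          name='my-thrift',
          sources=['service.thrift'],
          compiler='scrooge',
          language='java',
        )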
"""
super(JavaThriftLibrary, self).__init__(**kwargs)
def check_value_for_arg(arg, value, values):
if value and value not in values:
raise TargetDefinitionException(self, "{} may only be set to {} ('{}' not valid)"
.format(arg, ', or '.join(map(repr, values)), value))
return value
# The following fields are only added to the fingerprint via FingerprintStrategy when their
# values impact the outcome of the task. See JavaThriftLibraryFingerprintStrategy.
self._compiler = check_value_for_arg('compiler', compiler, self._COMPILERS)
self._language = language
deprecated_conditional(
lambda: rpc_style is not None,
'1.6.0.dev0',
'rpc_style',
'''
Deprecated property rpc_style used for {target}, use compiler_args instead.
e.g. [ \'--finagle\'] for \'finagle\'
and [\'--finagle\', \'--ostrich\'] for \'ostrich\'.
If both rpc_style and compiler_args are set then only compiler_args is used
and rpc_style is discarded.
'''.format(target=self.address.spec)
)
self._rpc_style = rpc_style
self.namespace_map = namespace_map
self.thrift_linter_strict = thrift_linter_strict
self._default_java_namespace = default_java_namespace
self._include_paths = include_paths
self._compiler_args = compiler_args
@property
def compiler(self):
return self._compiler
@property
def language(self):
return self._language
@property
def rpc_style(self):
return self._rpc_style
@property
def compiler_args(self):
return self._compiler_args
@property
def default_java_namespace(self):
return self._default_java_namespace
@property
def include_paths(self):
return self._include_paths
# TODO(Eric Ayers) As of 2/5/2015 this call is DEPRECATED and should be removed soon
@property
def is_thrift(self):
return True
| fkorotkov/pants | src/python/pants/backend/codegen/thrift/java/java_thrift_library.py | Python | apache-2.0 | 4,264 |
"""Packaging settings."""
from codecs import open
from os.path import abspath, dirname, join
from subprocess import call
from setuptools import Command, find_packages, setup
from skywatch import __version__
this_dir = abspath(dirname(__file__))
with open(join(this_dir, 'README.rst'), encoding='utf-8') as file:
long_description = file.read()
setup(
name = 'skywatch',
version = __version__,
description = 'A simple skywatch command line program in Python.',
#long_description = long_description,
classifiers = [
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: Public Domain',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords = 'cli',
packages = find_packages(exclude=['docs', 'tests*']),
install_requires = ['docopt'],
entry_points = {
'console_scripts': [
'skywatch=skywatch.cli:main',
],
},
)
| abeer486/skywatch-python-cli | setup.py | Python | gpl-3.0 | 1,310 |
import numpy as np
import numpy.ma as ma
import projections.r2py.reval as reval
import projections.r2py.rparser as rparser
class SimpleExpr():
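  """
  Wrap an R-style expression string as a named callable evaluated over a dict
  of input arrays keyed by symbol name.

  Illustrative usage (the expression and input name are placeholders):

      expr = SimpleExpr('intensity', 'cropland + 1')
      out = expr.eval({'cropland': my_array})
  """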
def __init__(self, name, expr):
self.name = name
self.tree = reval.make_inputs(rparser.parse(expr))
lokals = {}
exec(reval.to_py(self.tree, name), lokals)
self.func = lokals[name + '_st']
@property
def syms(self):
return reval.find_inputs(self.tree)
def eval(self, df, window=None):
try:
res = self.func(df)
except KeyError as e:
print("Error: input '%s' not defined" % e)
raise e
if not isinstance(res, np.ndarray):
if not window:
res = ma.masked_array(np.full(tuple(df.values())[0].shape, res,
dtype=np.float32))
else:
h = window[0][1] - window[0][0]
w = window[1][1] - window[1][0]
res = ma.masked_array(np.full((h, w), res, dtype=np.float32))
return res
| ricardog/raster-project | projections/simpleexpr.py | Python | apache-2.0 | 956 |
import sys
from resources.datatables import Options
def setup(core, object):
object.setAttachment('radial_filename', 'object/conversation')
object.setAttachment('conversationFile', 'respec')
object.setOptionsBitmask(Options.CONVERSABLE | Options.INVULNERABLE)
object.setStfFilename('mob/creature_names')
object.setStfName('respec_seller')
return | ProjectSWGCore/NGECore2 | scripts/object/mobile/respec_seller_f_1.py | Python | lgpl-3.0 | 352 |
"""
test file/image based on spoke base tests
"""
import pytest
from wheelcms_axle.tests.test_spoke import BaseSpokeTemplateTest, \
BaseSpokeTest
from wheelcms_spokes.file import FileType, File
from wheelcms_spokes.image import ImageType, Image
from django.core.files.uploadedfile import SimpleUploadedFile
from wheelcms_axle.tests.test_impexp import BaseSpokeImportExportTest
from wheelcms_axle.tests.test_search import BaseTestSearch
class BaseImageFileTemplateTest(BaseSpokeTemplateTest):
"""
Shared customization/tests
"""
def valid_files(self):
""" return an image, will work for both file and image uploads """
return dict(storage=SimpleUploadedFile("foo.png",
'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;'))
def test_children_restriction(self, client):
""" by default, a file or image can't have kids """
assert self.type.children is not None
assert len(self.type.children) == 0
class BaseImageFileTest(BaseSpokeTest):
def test_download(self, client):
data = 'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00' \
'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;'
storage=SimpleUploadedFile("foo.png", data)
f = self.type.model(storage=storage,
filename="bar.png",
content_type="application/octet-stream").save()
spoke = self.type(f)
response = spoke.download(None, None, 'download')
assert response.content == data
assert response.has_header('Content-Disposition')
assert response['Content-Disposition'] == \
'attachment; filename=bar.png'
assert response['Content-Type'] == "application/octet-stream"
def test_download_defaults(self, client):
data = 'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00' \
'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;'
storage=SimpleUploadedFile("foo.png", data)
f = self.type.model(storage=storage).save()
spoke = self.type(f)
response = spoke.download(None, None, 'download')
assert response.content == data
assert response.has_header('Content-Disposition')
assert response['Content-Disposition'] == \
'attachment; filename=foo.png'
assert response['Content-Type'] == "image/png"
def test_filename_slash(self, client):
""" make sure the filename cannot contain directory components """
data = 'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00' \
'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;'
storage=SimpleUploadedFile("foo.png", data)
f = self.type.model(storage=storage, filename="/etc/passwd").save()
assert f.filename == "passwd"
f = self.type.model(storage=storage, filename="../foo.png").save()
assert f.filename == "foo.png"
def test_download_state(self, client):
""" file must be "visible" in order to be downloadable """
pytest.skip("TODO XXX")
class TestImageSpokeTemplate(BaseImageFileTemplateTest):
"""
Test the image spoke
"""
type = ImageType
class TestImageSpoke(BaseImageFileTest):
"""
Test the image spoke
"""
type = ImageType
class TestImageSpokeImpExp(BaseSpokeImportExportTest):
type = ImageType
spoke = ImageType
class TestImageSpokeSearch(BaseTestSearch):
type = ImageType
class TestFileSpokeTemplate(BaseImageFileTemplateTest):
"""
Test the file spoke
"""
type = FileType
class TestFileSpoke(BaseImageFileTest):
"""
Test the file spoke
"""
type = FileType
class TestImageFileImpExp(BaseSpokeImportExportTest):
type = FileType
spoke = FileType
def create(self, **kw):
f = SimpleUploadedFile("foo.png",
'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
t = self.type.model(storage=f, **kw).save()
tt = self.spoke(t)
return tt
def test_capable_serialize(self, client):
# import pytest; pytest.set_trace()
super(TestImageFileImpExp, self).test_capable_serialize(client)
class TestFileSpokeSearch(BaseTestSearch):
type = FileType
| wheelcms/wheelcms_spokes | wheelcms_spokes/tests/test_file_image.py | Python | bsd-2-clause | 4,501 |
# Generated by Django 3.1.8 on 2021-06-01 16:29
import TWLight.resources.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("resources", "0073_stream_description_it"),
]
operations = [
migrations.AddField(
model_name="stream",
name="description_as",
field=models.TextField(
blank=True,
help_text="Optional description of this stream's resources.",
null=True,
),
),
migrations.AddField(
model_name="stream",
name="description_bcl",
field=models.TextField(
blank=True,
help_text="Optional description of this stream's resources.",
null=True,
),
),
migrations.AddField(
model_name="stream",
name="description_dag",
field=models.TextField(
blank=True,
help_text="Optional description of this stream's resources.",
null=True,
),
),
migrations.AddField(
model_name="stream",
name="description_diq",
field=models.TextField(
blank=True,
help_text="Optional description of this stream's resources.",
null=True,
),
),
migrations.AddField(
model_name="stream",
name="description_mnw",
field=models.TextField(
blank=True,
help_text="Optional description of this stream's resources.",
null=True,
),
),
migrations.AddField(
model_name="stream",
name="description_ms",
field=models.TextField(
blank=True,
help_text="Optional description of this stream's resources.",
null=True,
),
),
migrations.AddField(
model_name="stream",
name="description_scn",
field=models.TextField(
blank=True,
help_text="Optional description of this stream's resources.",
null=True,
),
),
migrations.AddField(
model_name="stream",
name="description_sr_ec",
field=models.TextField(
blank=True,
help_text="Optional description of this stream's resources.",
null=True,
),
),
migrations.AlterField(
model_name="language",
name="language",
field=models.CharField(
choices=[
("ar", "العربية"),
("as", "অসমীয়া"),
("bcl", "Bikol Central"),
("br", "brezhoneg"),
("da", "dansk"),
("dag", "dagbanli"),
("de", "Deutsch"),
("diq", "Zazaki"),
("en", "English"),
("en-gb", "British English"),
("eo", "Esperanto"),
("es", "español"),
("fa", "فارسی"),
("fi", "suomi"),
("fr", "français"),
("he", "עברית"),
("hi", "हिन्दी"),
("id", "Bahasa Indonesia"),
("it", "italiano"),
("ja", "日本語"),
("ko", "한국어"),
("lv", "latviešu"),
("mk", "македонски"),
("mnw", "ဘာသာ မန်"),
("mr", "मराठी"),
("ms", "Bahasa Melayu"),
("my", "မြန်မာဘာသာ"),
("pl", "polski"),
("pt", "português"),
("pt-br", "português do Brasil"),
("ro", "română"),
("ru", "русский"),
("scn", "sicilianu"),
("sr-ec", "sr-cyrl"),
("sv", "svenska"),
("ta", "தமிழ்"),
("tr", "Türkçe"),
("uk", "українська"),
("vi", "Tiếng Việt"),
("zh-hans", "中文(简体)"),
("zh-hant", "中文(繁體)"),
],
max_length=8,
unique=True,
validators=[TWLight.resources.models.validate_language_code],
),
),
]
| WikipediaLibrary/TWLight | TWLight/resources/migrations/0074_auto_20210601_1629.py | Python | mit | 4,788 |
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import time
import logging
from oslo_config import cfg
from networking_vsphere.agent import ovsvapp_agent
from networking_vsphere.common import constants as ovsvapp_const
from networking_vsphere.common import error
from networking_vsphere.tests import base
from networking_vsphere.tests.unit.drivers import fake_manager
from networking_vsphere.utils import resource_util
from neutron.agent.common import ovs_lib
from neutron.common import utils as n_utils
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as p_utils
from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent as ovs_agent # noqa
NETWORK_ID = 'fake_net_id'
VNIC_ADDED = 'VNIC_ADDED'
FAKE_DEVICE_ID = 'fake_device_id'
FAKE_VM = 'fake_vm'
FAKE_HOST_1 = 'fake_host_1'
FAKE_HOST_2 = 'fake_host_2'
FAKE_CLUSTER_MOID = 'fake_cluster_moid'
FAKE_CLUSTER_1 = 'fake_cluster_1'
FAKE_CLUSTER_2 = 'fake_cluster_2'
FAKE_VCENTER = 'fake_vcenter'
FAKE_PORT_1 = 'fake_port_1'
FAKE_PORT_2 = 'fake_port_2'
FAKE_PORT_3 = 'fake_port_3'
FAKE_PORT_4 = 'fake_port_4'
MAC_ADDRESS = '01:02:03:04:05:06'
FAKE_CONTEXT = 'fake_context'
FAKE_SG = {'fake_sg': 'fake_sg_rule'}
FAKE_SG_RULE = {'security_group_source_groups': ['fake_rule_1',
'fake_rule_2',
'fake_rule_3'],
'security_group_rules': [
{'ethertype': 'IPv4',
'direction': 'egress',
'security_group_id': 'fake_id'
}],
'sg_provider_rules': [
{'ethertype': 'IPv4',
'direction': 'egress',
'source_port_range_min': 67,
'source_port_range_max': 67,
'port_range_min': 68,
'port_range_max': 68
}]
}
FAKE_SG_RULES = {FAKE_PORT_1: FAKE_SG_RULE}
FAKE_SG_RULES_MULTI_PORTS = {FAKE_PORT_1: FAKE_SG_RULE,
FAKE_PORT_2: FAKE_SG_RULE
}
FAKE_SG_RULES_MISSING = {FAKE_PORT_1: {'security_group_source_groups': [
'fake_rule_1',
'fake_rule_2',
'fake_rule_3'],
'sg_provider_rules': [],
'security_group_rules': [
{'ethertype': 'IPv4',
'direction': 'egress'
}]
}
}
FAKE_SG_RULES_PARTIAL = {FAKE_PORT_1: {'security_group_source_groups': [
'fake_rule_1',
'fake_rule_2',
'fake_rule_3'],
'sg_provider_rules': [],
'security_group_rules': [
{'ethertype': 'IPv4',
'direction': 'egress',
'port_range_min': 22,
'port_range_max': 22
}]
}
}
DEVICE = {'id': FAKE_DEVICE_ID,
'cluster_id': FAKE_CLUSTER_1,
'host': FAKE_HOST_1,
'vcenter': FAKE_VCENTER}
class SampleEvent(object):
def __init__(self, type, host, cluster, srcobj, host_changed=False):
self.event_type = type
self.host_name = host
self.cluster_id = cluster
self.src_obj = srcobj
self.host_changed = host_changed
class VM(object):
def __init__(self, uuid, vnics):
self.uuid = uuid
self.vnics = vnics
class SamplePort(object):
def __init__(self, port_uuid, mac_address=None, pg_id=None):
self.port_uuid = port_uuid
self.mac_address = mac_address
self.pg_id = pg_id
class SamplePortUIDMac(object):
def __init__(self, port_uuid, mac_address):
self.port_uuid = port_uuid
self.mac_address = mac_address
class TestOVSvAppAgent(base.TestCase):
@mock.patch('neutron.common.config.init')
@mock.patch('neutron.common.config.setup_logging')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.RpcPluginApi')
@mock.patch('neutron.agent.securitygroups_rpc.SecurityGroupServerRpcApi')
@mock.patch('neutron.agent.rpc.PluginReportStateAPI')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.OVSvAppPluginApi')
@mock.patch('neutron.context.get_admin_context_without_session')
@mock.patch('neutron.agent.rpc.create_consumers')
@mock.patch('neutron.plugins.ml2.drivers.openvswitch.agent.'
'ovs_neutron_agent.OVSNeutronAgent.setup_integration_br')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent.check_ovsvapp_agent_restart')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent.setup_ovs_bridges')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent.setup_security_br')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent._init_ovs_flows')
@mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
'check_ovs_firewall_restart')
@mock.patch('networking_vsphere.drivers.ovs_firewall.'
'OVSFirewallDriver.setup_base_flows')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.create')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.set_secure_mode')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.get_port_ofport')
def setUp(self, mock_get_port_ofport,
mock_set_secure_mode, mock_create_ovs_bridge,
mock_setup_base_flows, mock_check_ovs_firewall_restart,
mock_init_ovs_flows, mock_setup_security_br,
mock_setup_ovs_bridges, mock_check_ovsvapp_agent_restart,
mock_setup_integration_br, mock_create_consumers,
mock_get_admin_context_without_session, mock_ovsvapp_pluginapi,
mock_plugin_report_stateapi, mock_securitygroup_server_rpcapi,
mock_rpc_pluginapi, mock_setup_logging, mock_init):
super(TestOVSvAppAgent, self).setUp()
cfg.CONF.set_override('security_bridge_mapping',
"fake_sec_br:fake_if", 'SECURITYGROUP')
mock_check_ovsvapp_agent_restart.return_value = False
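        # Simulate a fresh agent start (not a restart) so setUp follows the normal initialization path.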
mock_get_port_ofport.return_value = 5
self.agent = ovsvapp_agent.OVSvAppAgent()
self.agent.run_refresh_firewall_loop = False
self.LOG = ovsvapp_agent.LOG
self.agent.monitor_log = logging.getLogger('monitor')
def _build_port(self, port):
port = {'admin_state_up': False,
'id': port,
'device': DEVICE,
'network_id': NETWORK_ID,
'physical_network': 'physnet1',
'segmentation_id': '1001',
'lvid': 1,
'network_type': 'vlan',
'fixed_ips': [{'subnet_id': 'subnet_uuid',
'ip_address': '1.1.1.1'}],
'device_owner': 'compute:None',
'security_groups': FAKE_SG,
'mac_address': MAC_ADDRESS,
'device_id': FAKE_DEVICE_ID
}
return port
def _build_update_port(self, port):
port = {'admin_state_up': False,
'id': port,
'network_id': NETWORK_ID,
'fixed_ips': [{'subnet_id': 'subnet_uuid',
'ip_address': '1.1.1.1'}],
'device_owner': 'compute:None',
'security_groups': FAKE_SG,
'mac_address': MAC_ADDRESS,
'device_id': FAKE_DEVICE_ID
}
return port
def test_setup_security_br_none(self):
cfg.CONF.set_override('security_bridge_mapping',
None, 'SECURITYGROUP')
self.agent.sec_br = mock.Mock()
with mock.patch.object(self.LOG, 'warning') as mock_logger_warn,\
mock.patch.object(self.agent.sec_br, 'bridge_exists'
) as mock_ovs_bridge:
self.assertRaises(SystemExit,
self.agent.setup_security_br)
self.assertTrue(mock_logger_warn.called)
self.assertFalse(mock_ovs_bridge.called)
def test_setup_security_br(self):
cfg.CONF.set_override('security_bridge_mapping',
"br-fake:fake_if", 'SECURITYGROUP')
self.agent.sec_br = mock.Mock()
self.agent.int_br = mock.Mock()
with mock.patch.object(self.LOG, 'info') as mock_logger_info, \
mock.patch.object(ovs_lib, "OVSBridge") as mock_ovs_br, \
mock.patch.object(self.agent.sec_br,
"add_patch_port",
return_value=5), \
mock.patch.object(self.agent.int_br,
"add_patch_port",
return_value=6):
self.agent.setup_security_br()
self.assertTrue(mock_ovs_br.called)
self.assertTrue(self.agent.sec_br.add_patch_port.called)
self.assertTrue(mock_logger_info.called)
def test_recover_security_br_none(self):
cfg.CONF.set_override('security_bridge_mapping',
None, 'SECURITYGROUP')
self.agent.sec_br = mock.Mock()
with mock.patch.object(self.LOG, 'warning') as mock_logger_warn, \
mock.patch.object(self.agent.sec_br, 'bridge_exists'
) as mock_ovs_bridge:
self.assertRaises(SystemExit,
self.agent.recover_security_br)
self.assertTrue(mock_logger_warn.called)
self.assertFalse(mock_ovs_bridge.called)
def test_recover_physical_bridges(self):
cfg.CONF.set_override('bridge_mappings',
["physnet1:br-eth1"], 'OVSVAPP')
self.agent.bridge_mappings = n_utils.parse_mappings(
cfg.CONF.OVSVAPP.bridge_mappings)
with mock.patch.object(self.LOG, 'info') as mock_logger_info, \
mock.patch.object(self.LOG, 'error') as mock_logger_error, \
mock.patch.object(self.agent, "br_phys_cls") as mock_ovs_br, \
mock.patch.object(ovs_lib.BaseOVS,
"get_bridges",
return_value=['br-eth1']
), \
mock.patch.object(p_utils, 'get_interface_name'
) as mock_int_name, \
mock.patch.object(self.agent.int_br,
"get_port_ofport",
return_value=6) as mock_get_ofport:
self.agent.recover_physical_bridges(self.agent.bridge_mappings)
self.assertTrue(mock_logger_info.called)
self.assertFalse(mock_logger_error.called)
self.assertTrue(mock_ovs_br.called)
self.assertTrue(mock_get_ofport.called)
self.assertTrue(mock_int_name.called)
            self.assertEqual(6, self.agent.int_ofports['physnet1'])
def test_init_ovs_flows(self):
cfg.CONF.set_override('bridge_mappings',
["physnet1:br-eth1"], 'OVSVAPP')
self.agent.bridge_mappings = n_utils.parse_mappings(
cfg.CONF.OVSVAPP.bridge_mappings)
self.agent.patch_sec_ofport = 5
self.agent.int_ofports = {'physnet1': 'br-eth1'}
self.agent.phys_ofports = {"physnet1": "br-eth1"}
port = self._build_port(FAKE_PORT_1)
br = self._build_phys_brs(port)
self.agent.br = mock.Mock()
with mock.patch.object(self.agent.int_br,
"delete_flows"
) as mock_int_br_delete_flows, \
mock.patch.object(self.agent,
"br_phys_cls") as mock_ovs_br, \
mock.patch.object(self.agent.int_br,
"add_flow") as mock_int_br_add_flow:
self.agent._init_ovs_flows(self.agent.bridge_mappings)
self.assertTrue(mock_int_br_delete_flows.called)
self.assertTrue(mock_ovs_br.called)
self.assertTrue(br.delete_flows.called)
self.assertTrue(br.add_flows.called)
self.assertTrue(mock_int_br_add_flow.called)
def test_update_port_bindings(self):
self.agent.ports_to_bind.add("fake_port")
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_ports_binding",
return_value=set(["fake_port"])
) as mock_update_ports_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._update_port_bindings()
self.assertTrue(mock_update_ports_binding.called)
self.assertFalse(self.agent.ports_to_bind)
self.assertFalse(mock_log_exception.called)
def test_update_port_bindings_rpc_exception(self):
self.agent.ports_to_bind.add("fake_port")
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_ports_binding",
side_effect=Exception()
) as mock_update_port_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._update_port_bindings)
self.assertTrue(mock_update_port_binding.called)
self.assertTrue(mock_log_exception.called)
self.assertEqual(set(['fake_port']),
self.agent.ports_to_bind)
def test_update_port_bindings_partial(self):
self.agent.ports_to_bind.add("fake_port1")
self.agent.ports_to_bind.add("fake_port2")
self.agent.ports_to_bind.add("fake_port3")
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_ports_binding",
return_value=set(["fake_port1",
"fake_port2"])
) as mock_update_port_binding, \
mock.patch.object(self.LOG, 'exception'):
self.agent._update_port_bindings()
self.assertTrue(mock_update_port_binding.called)
self.assertEqual(set(["fake_port3"]),
self.agent.ports_to_bind)
def test_setup_ovs_bridges_vlan(self):
cfg.CONF.set_override('tenant_network_types',
"vlan", 'OVSVAPP')
cfg.CONF.set_override('bridge_mappings',
["physnet1:br-eth1"], 'OVSVAPP')
with mock.patch.object(self.agent, 'setup_physical_bridges'
) as mock_phys_brs, \
mock.patch.object(self.agent, '_init_ovs_flows'
) as mock_init_ovs_flows:
self.agent.setup_ovs_bridges()
mock_phys_brs.assert_called_with(self.agent.bridge_mappings)
mock_init_ovs_flows.assert_called_with(self.agent.bridge_mappings)
def test_setup_ovs_bridges_vxlan(self):
self.agent.local_ip = "10.10.10.10"
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
with mock.patch.object(self.agent, 'setup_tunnel_br'
) as mock_setup_tunnel_br, \
mock.patch.object(self.agent, 'setup_tunnel_br_flows'
) as mock_setup_tunnel_br_flows:
self.agent.setup_ovs_bridges()
mock_setup_tunnel_br.assert_called_with("br-tun")
self.assertTrue(mock_setup_tunnel_br_flows.called)
def test_setup_ovs_bridges_vxlan_ofport(self):
cfg.CONF.set_override('tenant_network_types',
"vxlan", 'OVSVAPP')
cfg.CONF.set_override('local_ip',
"10.10.10.10", 'OVSVAPP')
cfg.CONF.set_override('tunnel_bridge',
"br-tun", 'OVSVAPP')
self.agent.tun_br = mock.Mock()
self.agent.int_br = mock.Mock()
self.agent.local_ip = "10.10.10.10"
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
with mock.patch.object(self.agent.tun_br,
"add_patch_port",
return_value=5), \
mock.patch.object(self.agent.int_br,
"add_patch_port",
return_value=6), \
mock.patch.object(self.agent, 'setup_tunnel_br_flows'
) as mock_setup_tunnel_br_flows:
self.agent.setup_ovs_bridges()
self.assertTrue(self.agent.tun_br.add_patch_port.called)
            self.assertEqual(6, self.agent.patch_tun_ofport)
            self.assertEqual(5, self.agent.patch_int_ofport)
self.assertTrue(mock_setup_tunnel_br_flows.called)
def test_mitigate_ovs_restart_vlan(self):
self.agent.refresh_firewall_required = False
self.agent.devices_to_filter = set(['1111'])
self.agent.cluster_host_ports = set(['1111'])
self.agent.cluster_other_ports = set(['2222'])
with mock.patch.object(self.LOG, 'info') as mock_logger_info, \
mock.patch.object(self.agent, "setup_integration_br"
) as mock_int_br, \
mock.patch.object(self.agent, "setup_physical_bridges"
) as mock_phys_brs, \
mock.patch.object(self.agent, "setup_security_br"
) as mock_sec_br, \
mock.patch.object(self.agent.sg_agent, "init_firewall"
) as mock_init_fw, \
mock.patch.object(self.agent, "setup_tunnel_br"
) as mock_setup_tunnel_br,\
mock.patch.object(self.agent, 'setup_tunnel_br_flows'
) as mock_setup_tunnel_br_flows, \
mock.patch.object(self.agent, "_init_ovs_flows"
) as mock_init_flows, \
mock.patch.object(self.agent.monitor_log, "warning"
) as monitor_warning, \
mock.patch.object(self.agent.monitor_log, "info"
) as monitor_info:
self.agent.mitigate_ovs_restart()
self.assertTrue(mock_int_br.called)
self.assertTrue(mock_phys_brs.called)
self.assertTrue(mock_sec_br.called)
self.assertFalse(mock_setup_tunnel_br.called)
self.assertFalse(mock_setup_tunnel_br_flows.called)
self.assertTrue(mock_init_fw.called)
self.assertTrue(mock_init_flows.called)
self.assertTrue(self.agent.refresh_firewall_required)
self.assertEqual(2, len(self.agent.devices_to_filter))
monitor_warning.assert_called_with("ovs: broken")
monitor_info.assert_called_with("ovs: ok")
self.assertTrue(mock_logger_info.called)
def test_mitigate_ovs_restart_vxlan(self):
self.agent.enable_tunneling = True
self.agent.refresh_firewall_required = False
self.agent.devices_to_filter = set(['1111'])
self.agent.cluster_host_ports = set(['1111'])
self.agent.cluster_other_ports = set(['2222'])
with mock.patch.object(self.LOG, 'info') as mock_logger_info, \
mock.patch.object(self.agent, "setup_integration_br"), \
mock.patch.object(self.agent, "setup_physical_bridges"
) as mock_phys_brs, \
mock.patch.object(self.agent, "setup_security_br"), \
mock.patch.object(self.agent.sg_agent, "init_firewall"
), \
mock.patch.object(self.agent, "setup_tunnel_br"
) as mock_setup_tunnel_br,\
mock.patch.object(self.agent, 'setup_tunnel_br_flows'
) as mock_setup_tunnel_br_flows, \
mock.patch.object(self.agent, "tunnel_sync"
) as mock_tun_sync, \
mock.patch.object(self.agent, "_init_ovs_flows"), \
mock.patch.object(self.agent.monitor_log, "warning"
) as monitor_warning, \
mock.patch.object(self.agent.monitor_log, "info"
) as monitor_info:
self.agent.mitigate_ovs_restart()
self.assertTrue(mock_setup_tunnel_br.called)
self.assertTrue(mock_setup_tunnel_br_flows.called)
self.assertFalse(mock_phys_brs.called)
self.assertTrue(mock_tun_sync.called)
self.assertTrue(self.agent.refresh_firewall_required)
            self.assertEqual(2, len(self.agent.devices_to_filter))
monitor_warning.assert_called_with("ovs: broken")
monitor_info.assert_called_with("ovs: ok")
self.assertTrue(mock_logger_info.called)
def test_mitigate_ovs_restart_exception(self):
self.agent.enable_tunneling = False
self.agent.refresh_firewall_required = False
self.agent.devices_to_filter = set()
self.agent.cluster_host_ports = set(['1111'])
self.agent.cluster_other_ports = set(['2222'])
with mock.patch.object(self.LOG, "info") as mock_logger_info, \
mock.patch.object(self.agent, "setup_integration_br",
side_effect=Exception()) as mock_int_br, \
mock.patch.object(self.agent, "setup_physical_bridges"
) as mock_phys_brs, \
mock.patch.object(self.agent, "setup_tunnel_br"
) as mock_setup_tunnel_br,\
mock.patch.object(self.agent, 'setup_tunnel_br_flows'
) as mock_setup_tunnel_br_flows, \
mock.patch.object(self.LOG, "exception"
) as mock_exception_log, \
mock.patch.object(self.agent.monitor_log, "warning"
) as monitor_warning, \
mock.patch.object(self.agent.monitor_log, "info"
) as monitor_info:
self.agent.mitigate_ovs_restart()
self.assertTrue(mock_int_br.called)
self.assertFalse(mock_phys_brs.called)
self.assertFalse(mock_setup_tunnel_br.called)
self.assertFalse(mock_setup_tunnel_br_flows.called)
self.assertFalse(mock_logger_info.called)
self.assertTrue(mock_exception_log.called)
self.assertFalse(self.agent.refresh_firewall_required)
self.assertEqual(0, len(self.agent.devices_to_filter))
monitor_warning.assert_called_with("ovs: broken")
self.assertFalse(monitor_info.called)
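    # _get_fake_port builds a fake neutron port dict; _build_phys_brs
    # registers a mocked per-physnet bridge entry in agent.phys_brs.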
def _get_fake_port(self, port_id):
return {'id': port_id,
'port_id': port_id,
'mac_address': MAC_ADDRESS,
'fixed_ips': [{'subnet_id': 'subnet_uuid',
'ip_address': '1.1.1.1'}],
'security_groups': FAKE_SG,
'segmentation_id': 1232,
'lvid': 1,
'network_id': 'fake_network',
'device_id': FAKE_DEVICE_ID,
'admin_state_up': True,
'physical_network': 'physnet1',
'network_type': 'vlan'}
def _build_phys_brs(self, port):
phys_net = port['physical_network']
self.agent.phys_brs[phys_net] = {}
self.agent.phys_brs[phys_net]['eth_ofport'] = 5
br = self.agent.phys_brs[phys_net]['br'] = mock.Mock()
br.add_flows(port['segmentation_id'],
port['mac_address'],
5)
br.delete_flows(port['mac_address'],
port['segmentation_id'])
return br
def test_process_port(self):
fakeport = self._get_fake_port(FAKE_PORT_1)
self.agent.ports_dict = {}
br = self._build_phys_brs(fakeport)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.vnic_info[FAKE_PORT_1] = fakeport
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan:
status = self.agent._process_port(fakeport)
self.assertIn(FAKE_PORT_1, self.agent.ports_dict)
self.assertTrue(status)
mock_add_devices.assert_called_with([fakeport])
mock_prov_local_vlan.assert_called_with(fakeport)
self.assertTrue(br.add_flows.called)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
def test_process_port_existing_network(self):
fakeport = self._get_fake_port(FAKE_PORT_1)
self.agent.ports_dict = {}
br = self._build_phys_brs(fakeport)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.vnic_info[FAKE_PORT_1] = {}
net_id = fakeport['network_id']
self.agent.local_vlan_map[net_id] = self._build_lvm(fakeport)
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan:
status = self.agent._process_port(fakeport)
self.assertIn(FAKE_PORT_1, self.agent.ports_dict)
self.assertTrue(status)
mock_add_devices.assert_called_with([fakeport])
self.assertFalse(mock_prov_local_vlan.called)
self.assertTrue(br.add_flows.called)
def test_process_uncached_devices_with_few_devices(self):
devices = set(['123', '234', '345', '456', '567', '678',
'1123', '1234', '1345', '1456', '1567', '1678'])
with mock.patch('eventlet.GreenPool.spawn_n') as mock_spawn_thread, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices(devices)
self.assertTrue(mock_spawn_thread.called)
self.assertEqual(1, mock_spawn_thread.call_count)
self.assertFalse(mock_log_exception.called)
def test_process_uncached_devices_with_more_devices(self):
devices = set(['123', '234', '345', '456', '567', '678',
'1123', '1234', '1345', '1456', '1567', '1678',
'2123', '2234', '2345', '2456', '2567', '2678',
'3123', '3234', '3345', '3456', '3567', '3678',
'4123', '4234', '4345', '4456', '4567', '4678',
'5123', '5234', '5345', '5456', '5567', '5678',
'6123', '6234', '6345', '6456', '6567', '6678'])
with mock.patch('eventlet.GreenPool.spawn_n') as mock_spawn_thread, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices(devices)
self.assertTrue(mock_spawn_thread.called)
self.assertEqual(2, mock_spawn_thread.call_count)
self.assertFalse(mock_log_exception.called)
def test_process_uncached_devices_sublist_single_port_vlan(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
self.agent.ports_dict = {}
br = self._build_phys_brs(fakeport_1)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.vnic_info[FAKE_PORT_1] = fakeport_1
devices = [FAKE_PORT_1]
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_to_filter, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
)as mock_refresh_firewall, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_provision_local_vlan, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices_sublist(devices)
self.assertTrue(mock_get_ports_details_list.called)
self.assertEqual(1, mock_add_devices_to_filter.call_count)
self.assertTrue(mock_refresh_firewall.called)
self.assertTrue(mock_provision_local_vlan.called)
self.assertFalse(mock_log_exception.called)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
self.assertTrue(br.add_flows.called)
def test_process_uncached_devices_sublist_multiple_port_vlan(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
fakeport_2 = self._get_fake_port(FAKE_PORT_2)
self.agent.ports_dict = {}
br = self._build_phys_brs(fakeport_1)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.cluster_host_ports.add(FAKE_PORT_2)
self.agent.vnic_info[FAKE_PORT_1] = fakeport_1
self.agent.vnic_info[FAKE_PORT_2] = fakeport_2
devices = [FAKE_PORT_1, FAKE_PORT_2]
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1, fakeport_2]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_to_filter, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
)as mock_refresh_firewall, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices_sublist(devices)
self.assertTrue(mock_get_ports_details_list.called)
self.assertEqual(2, mock_add_devices_to_filter.call_count)
self.assertTrue(mock_refresh_firewall.called)
self.assertTrue(mock_prov_local_vlan.called)
self.assertFalse(mock_log_exception.called)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
self.assertNotIn(FAKE_PORT_2, self.agent.vnic_info)
self.assertTrue(br.add_flows.called)
def test_process_uncached_devices_sublist_single_port_vxlan(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
fakeport_1["network_type"] = p_const.TYPE_VXLAN
self.agent.ports_dict = {}
self.agent.local_vlan_map = {}
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.vnic_info[FAKE_PORT_1] = fakeport_1
devices = [FAKE_PORT_1]
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_to_filter, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
)as mock_refresh_firewall, \
mock.patch.object(self.agent, '_populate_lvm'), \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices_sublist(devices)
self.assertTrue(mock_get_ports_details_list.called)
self.assertTrue(mock_prov_local_vlan.called)
self.assertEqual(1, mock_add_devices_to_filter.call_count)
self.assertTrue(mock_refresh_firewall.called)
self.assertFalse(mock_log_exception.called)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
def test_process_uncached_devices_sublist_multiple_port_vxlan(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
fakeport_2 = self._get_fake_port(FAKE_PORT_2)
fakeport_1["network_type"] = p_const.TYPE_VXLAN
fakeport_2["network_type"] = p_const.TYPE_VXLAN
self.agent.ports_dict = {}
self.agent.local_vlan_map = {}
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.cluster_host_ports.add(FAKE_PORT_2)
self.agent.vnic_info[FAKE_PORT_1] = fakeport_1
self.agent.vnic_info[FAKE_PORT_2] = fakeport_2
devices = [FAKE_PORT_1, FAKE_PORT_2]
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1, fakeport_2]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_to_filter, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
)as mock_refresh_firewall, \
mock.patch.object(self.agent, '_populate_lvm'), \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices_sublist(devices)
self.assertTrue(mock_get_ports_details_list.called)
self.assertTrue(mock_prov_local_vlan.called)
self.assertEqual(2, mock_add_devices_to_filter.call_count)
self.assertTrue(mock_refresh_firewall.called)
self.assertFalse(mock_log_exception.called)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
self.assertNotIn(FAKE_PORT_2, self.agent.vnic_info)
def test_process_uncached_devices_sublist_stale_vm_port(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
fakeport_2 = self._get_fake_port(FAKE_PORT_2)
fakeport_3 = self._get_fake_port(FAKE_PORT_3)
self.agent.ports_dict = {}
self._build_phys_brs(fakeport_1)
self._build_phys_brs(fakeport_2)
self._build_phys_brs(fakeport_3)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.cluster_host_ports.add(FAKE_PORT_2)
self.agent.ports_to_bind = set([FAKE_PORT_3, FAKE_PORT_4])
self.agent.vnic_info[FAKE_PORT_1] = fakeport_1
self.agent.vnic_info[FAKE_PORT_2] = fakeport_2
self.agent.vnic_info[FAKE_PORT_3] = fakeport_3
devices = [FAKE_PORT_1, FAKE_PORT_2, FAKE_PORT_3]
self.agent.sg_agent.remove_devices_filter = mock.Mock()
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1, fakeport_2]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_to_filter, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
)as mock_refresh_firewall, \
mock.patch.object(self.agent.sg_agent,
'remove_devices_filter'
)as mock_remove_device_filter, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.agent, '_remove_stale_ports_flows'), \
mock.patch.object(self.agent, '_block_stale_ports'), \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices_sublist(devices)
self.assertTrue(mock_get_ports_details_list.called)
self.assertEqual(2, mock_add_devices_to_filter.call_count)
self.assertTrue(mock_refresh_firewall.called)
self.assertTrue(mock_prov_local_vlan.called)
self.assertFalse(mock_log_exception.called)
self.assertNotIn(FAKE_PORT_3, self.agent.ports_to_bind)
self.assertIn(FAKE_PORT_4, self.agent.ports_to_bind)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
self.assertNotIn(FAKE_PORT_2, self.agent.vnic_info)
self.assertNotIn(FAKE_PORT_3, self.agent.vnic_info)
mock_remove_device_filter.assert_called_with(FAKE_PORT_3)
def test_update_firewall(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
fakeport_2 = self._get_fake_port(FAKE_PORT_2)
self._build_phys_brs(fakeport_1)
self._build_phys_brs(fakeport_2)
self.agent.devices_to_filter = set([FAKE_PORT_1,
FAKE_PORT_2])
self.agent.ports_dict = {FAKE_PORT_1: fakeport_1}
self.agent.vnic_info[FAKE_PORT_1] = {}
self.agent.vnic_info[FAKE_PORT_2] = {}
self.agent.refresh_firewall_required = True
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1, fakeport_2]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
) as mock_refresh_firewall, \
mock.patch.object(self.agent, '_provision_local_vlan'
), \
mock.patch.object(self.agent, '_remove_stale_ports_flows'), \
mock.patch.object(self.agent, '_block_stale_ports'), \
mock.patch.object(self.agent.monitor_log, "warning"
) as monitor_warning, \
mock.patch.object(self.agent.monitor_log, "info"
) as monitor_info:
self.agent._update_firewall()
self.assertFalse(self.agent.refresh_firewall_required)
self.assertFalse(self.agent.devices_to_filter)
self.assertIn(FAKE_PORT_2, self.agent.ports_dict)
mock_get_ports_details_list.assert_called_with(
self.agent.context,
[FAKE_PORT_2],
self.agent.agent_id,
self.agent.vcenter_id,
self.agent.cluster_id)
mock_refresh_firewall.assert_called_with(set([FAKE_PORT_1,
FAKE_PORT_2]))
self.assertEqual(2, monitor_warning.call_count)
self.assertEqual(2, monitor_info.call_count)
def test_update_firewall_get_ports_exception(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
self.agent.devices_to_filter = set([FAKE_PORT_1,
FAKE_PORT_2])
self.agent.ports_dict = {FAKE_PORT_1: fakeport_1}
self.agent.refresh_firewall_required = True
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
side_effect=Exception()
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
) as mock_refresh_firewall, \
mock.patch.object(self.agent.monitor_log, "warning"
) as monitor_warning, \
mock.patch.object(self.agent.monitor_log, "info"
) as monitor_info:
self.agent._update_firewall()
self.assertTrue(self.agent.refresh_firewall_required)
self.assertEqual(set([FAKE_PORT_2]), self.agent.devices_to_filter)
self.assertNotIn(FAKE_PORT_2, self.agent.ports_dict)
mock_get_ports_details_list.assert_called_with(
self.agent.context,
[FAKE_PORT_2],
self.agent.agent_id,
self.agent.vcenter_id,
self.agent.cluster_id)
mock_refresh_firewall.assert_called_with(set([FAKE_PORT_1]))
self.assertEqual(2, monitor_warning.call_count)
self.assertEqual(1, monitor_info.call_count)
def test_check_for_updates_no_updates(self):
self.agent.refresh_firewall_required = False
self.agent.ports_to_bind = None
with mock.patch.object(self.agent, 'check_ovs_status',
return_value=4) as mock_check_ovs, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall, \
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=False
) as mock_firewall_refresh, \
mock.patch.object(self.agent.sg_agent, 'refresh_port_filters'
) as mock_refresh_port_filters, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(mock_check_ovs.called)
self.assertFalse(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertFalse(mock_refresh_port_filters.called)
self.assertFalse(mock_update_port_bindings.called)
def test_check_for_updates_ovs_restarted(self):
self.agent.refresh_firewall_required = False
self.agent.ports_to_bind = None
with mock.patch.object(self.agent, 'check_ovs_status',
return_value=0) as mock_check_ovs, \
mock.patch.object(self.agent, 'mitigate_ovs_restart'
) as mock_mitigate, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall, \
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=False
) as mock_firewall_refresh, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(mock_check_ovs.called)
self.assertTrue(mock_mitigate.called)
self.assertFalse(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertFalse(mock_update_port_bindings.called)
def test_check_for_updates_devices_to_filter(self):
self.agent.refresh_firewall_required = True
self.agent.ports_to_bind = None
with mock.patch.object(self.agent, 'check_ovs_status',
return_value=4) as mock_check_ovs, \
mock.patch.object(self.agent, 'mitigate_ovs_restart'
) as mock_mitigate, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall,\
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=False
) as mock_firewall_refresh, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(mock_check_ovs.called)
self.assertFalse(mock_mitigate.called)
self.assertTrue(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertFalse(mock_update_port_bindings.called)
def test_check_for_updates_firewall_refresh(self):
self.agent.refresh_firewall_required = False
self.agent.ports_to_bind = None
with mock.patch.object(self.agent, 'check_ovs_status',
return_value=4) as mock_check_ovs, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall, \
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=True
) as mock_firewall_refresh,\
mock.patch.object(self.agent.sg_agent, 'refresh_port_filters'
) as mock_refresh_port_filters, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(mock_check_ovs.called)
self.assertFalse(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertTrue(mock_refresh_port_filters.called)
self.assertFalse(mock_update_port_bindings.called)
def test_check_for_updates_port_bindings(self):
self.agent.refresh_firewall_required = False
self.agent.ports_to_bind.add("fake_port")
with mock.patch.object(self.agent, 'check_ovs_status',
return_value=4) as mock_check_ovs, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall, \
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=False
) as mock_firewall_refresh, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(mock_check_ovs.called)
self.assertFalse(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertTrue(mock_update_port_bindings.called)
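    # RPC batching tests: devices_up/devices_down lists are drained on
    # success, kept on RPC failure and trimmed to the failed subset on
    # partial success.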
def test_update_devices_up(self):
self.agent.devices_up_list.append(FAKE_PORT_1)
ret_value = {'devices_up': [FAKE_PORT_1],
'failed_devices_up': []}
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_up",
return_value=ret_value
) as update_devices_up, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_up()
self.assertTrue(update_devices_up.called)
self.assertFalse(self.agent.devices_up_list)
self.assertFalse(log_exception.called)
def test_update_devices_up_rpc_exception(self):
self.agent.devices_up_list.append(FAKE_PORT_1)
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_up",
side_effect=Exception()
) as update_devices_up, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_up()
self.assertTrue(update_devices_up.called)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
self.assertTrue(log_exception.called)
def test_update_devices_up_partial(self):
self.agent.devices_up_list = [FAKE_PORT_1, FAKE_PORT_2, FAKE_PORT_3]
ret_value = {'devices_up': [FAKE_PORT_1, FAKE_PORT_2],
'failed_devices_up': [FAKE_PORT_3]}
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_up",
return_value=ret_value
) as update_devices_up, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_up()
self.assertTrue(update_devices_up.called)
self.assertEqual([FAKE_PORT_3], self.agent.devices_up_list)
self.assertFalse(log_exception.called)
def test_update_devices_down(self):
self.agent.devices_down_list.append(FAKE_PORT_1)
ret_value = {'devices_down': [FAKE_PORT_1],
'failed_devices_down': []}
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_down",
return_value=ret_value
) as update_devices_down, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_down()
self.assertTrue(update_devices_down.called)
self.assertFalse(self.agent.devices_down_list)
self.assertFalse(log_exception.called)
def test_update_devices_down_rpc_exception(self):
self.agent.devices_down_list.append(FAKE_PORT_1)
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_down",
side_effect=Exception()
) as update_devices_down, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_down()
self.assertTrue(update_devices_down.called)
self.assertEqual([FAKE_PORT_1], self.agent.devices_down_list)
self.assertTrue(log_exception.called)
def test_update_devices_down_partial(self):
self.agent.devices_down_list = [FAKE_PORT_1, FAKE_PORT_2, FAKE_PORT_3]
ret_value = {'devices_down': [FAKE_PORT_1, FAKE_PORT_2],
'failed_devices_down': [FAKE_PORT_3]}
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_down",
return_value=ret_value
) as update_devices_down, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_down()
self.assertTrue(update_devices_down.called)
self.assertEqual([FAKE_PORT_3], self.agent.devices_down_list)
self.assertFalse(log_exception.called)
def test_report_state(self):
with mock.patch.object(self.agent.state_rpc,
"report_state") as report_st:
self.agent._report_state()
report_st.assert_called_with(self.agent.context,
self.agent.agent_state,
True)
self.assertNotIn("start_flag", self.agent.agent_state)
self.assertFalse(self.agent.use_call)
self.assertEqual(cfg.CONF.host,
self.agent.agent_state["host"])
def test_report_state_fail(self):
with mock.patch.object(self.agent.state_rpc,
"report_state",
side_effect=Exception()) as mock_report_st, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._report_state()
mock_report_st.assert_called_with(self.agent.context,
self.agent.agent_state,
True)
self.assertTrue(mock_log_exception.called)
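    # process_event tests: VM create, update and delete events for VMs on
    # this agent's ESX host and on other hosts, plus unknown-event and
    # error handling.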
def test_process_event_ignore_event(self):
vm = VM(FAKE_VM, [])
event = SampleEvent(VNIC_ADDED, FAKE_HOST_1,
FAKE_CLUSTER_MOID, vm)
with mock.patch.object(self.agent,
"_notify_device_added") as mock_add_vm, \
mock.patch.object(self.agent,
"_notify_device_updated") as mock_update_vm, \
mock.patch.object(self.agent,
"_notify_device_deleted") as mock_del_vm, \
mock.patch.object(self.LOG, 'debug') as mock_log_debug:
self.agent.process_event(event)
self.assertFalse(mock_add_vm.called)
self.assertFalse(mock_update_vm.called)
self.assertFalse(mock_del_vm.called)
self.assertTrue(mock_log_debug.called)
def test_process_event_exception(self):
vm = VM(FAKE_VM, [])
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
with mock.patch.object(self.agent,
"_notify_device_added",
side_effect=Exception()) as mock_add_vm, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception, \
mock.patch.object(self.LOG, 'error') as mock_log_error:
self.agent.process_event(event)
self.assertTrue(mock_add_vm.called)
self.assertTrue(mock_log_error.called)
self.assertTrue(mock_log_exception.called)
def test_process_event_vm_create_nonics_non_host_non_cluster(self):
self.agent.esx_hostname = FAKE_HOST_2
vm = VM(FAKE_VM, [])
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent,
"_notify_device_added") as device_added:
self.agent.process_event(event)
self.assertTrue(device_added.called)
def test_process_event_vm_create_nonics_non_host(self):
self.agent.esx_hostname = FAKE_HOST_2
vm = VM(FAKE_VM, [])
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent,
"_notify_device_added") as device_added:
self.agent.process_event(event)
self.assertTrue(device_added.called)
self.assertEqual(FAKE_CLUSTER_MOID, self.agent.cluster_moid)
def test_process_event_vm_create_nics_non_host(self):
self.agent.esx_hostname = FAKE_HOST_2
vm_port1 = SamplePort(FAKE_PORT_1)
vm_port2 = SamplePort(FAKE_PORT_2)
vm = VM(FAKE_VM, ([vm_port1, vm_port2]))
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertIn(vnic.port_uuid, self.agent.devices_to_filter)
self.assertIn(vnic.port_uuid, self.agent.cluster_other_ports)
self.assertNotIn(vnic.port_uuid, self.agent.cluster_host_ports)
def test_process_event_vm_create_nics_host(self):
self.agent.esx_hostname = FAKE_HOST_1
vm_port1 = SamplePort(FAKE_PORT_1)
vm_port2 = SamplePort(FAKE_PORT_2)
vm = VM(FAKE_VM, ([vm_port1, vm_port2]))
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertIn(vnic.port_uuid, self.agent.devices_to_filter)
self.assertIn(vnic.port_uuid, self.agent.cluster_host_ports)
self.assertNotIn(vnic.port_uuid, self.agent.cluster_other_ports)
def test_process_event_vm_updated_nonhost(self):
self.agent.esx_hostname = FAKE_HOST_2
vm_port1 = SamplePort(FAKE_PORT_1)
port = self._build_port(FAKE_PORT_1)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm = VM(FAKE_VM, [vm_port1])
event = SampleEvent(ovsvapp_const.VM_UPDATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm, True)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.process_event(event)
self.assertIn(FAKE_PORT_1, self.agent.cluster_other_ports)
def test_process_event_vm_delete_hosted_vm_vlan(self):
self.agent.esx_hostname = FAKE_HOST_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
port = self._build_port(FAKE_PORT_1)
br = self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm_port = SamplePortUIDMac(FAKE_PORT_1, MAC_ADDRESS)
vm = VM(FAKE_VM, ([vm_port]))
event = SampleEvent(ovsvapp_const.VM_DELETED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self._build_lvm(port)
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.net_mgr.get_driver(),
"post_delete_vm",
) as mock_post_del_vm, \
mock.patch.object(self.LOG, 'debug'), \
mock.patch.object(self.agent.net_mgr.get_driver(),
"delete_network") as mock_del_net:
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertNotIn(vnic.port_uuid, self.agent.cluster_host_ports)
self.assertTrue(mock_post_del_vm.called)
self.assertFalse(mock_del_net.called)
self.assertTrue(br.delete_flows.called)
def test_process_event_vm_delete_hosted_vm_vxlan(self):
self.agent.esx_hostname = FAKE_HOST_1
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
port = self._build_port(FAKE_PORT_1)
port['network_type'] = p_const.TYPE_VXLAN
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm_port = SamplePortUIDMac(FAKE_PORT_1, MAC_ADDRESS)
vm = VM(FAKE_VM, ([vm_port]))
event = SampleEvent(ovsvapp_const.VM_DELETED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.net_mgr.get_driver(),
"post_delete_vm",
                               return_value=True) as post_del_vm:
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertNotIn(vnic.port_uuid, self.agent.cluster_host_ports)
self.assertTrue(post_del_vm.called)
def test_process_event_vm_delete_non_hosted_vm(self):
self.agent.esx_hostname = FAKE_HOST_2
self.agent.cluster_other_ports.add(FAKE_PORT_1)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
port = self._build_port(FAKE_PORT_1)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm_port = SamplePortUIDMac(FAKE_PORT_1, MAC_ADDRESS)
vm = VM(FAKE_VM, ([vm_port]))
event = SampleEvent(ovsvapp_const.VM_DELETED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.net_mgr.get_driver(),
"post_delete_vm",
return_value=True) as mock_post_del_vm, \
mock.patch.object(self.agent.net_mgr.get_driver(),
"delete_network") as mock_del_net:
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertNotIn(vnic.port_uuid,
self.agent.cluster_other_ports)
self.assertTrue(mock_post_del_vm.called)
self.assertFalse(mock_del_net.called)
def test_notify_device_added_with_hosted_vm(self):
vm = VM(FAKE_VM, [])
host = FAKE_HOST_1
self.agent.esx_hostname = host
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
return_value=True) as mock_get_ports, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception, \
mock.patch.object(time, "sleep") as mock_time_sleep:
self.agent._notify_device_added(vm, host)
self.assertTrue(mock_get_ports.called)
self.assertFalse(mock_time_sleep.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_added_rpc_exception(self):
vm = VM(FAKE_VM, [])
host = FAKE_HOST_1
self.agent.esx_hostname = host
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
side_effect=Exception()) as mock_get_ports, \
mock.patch.object(self.LOG, 'exception'
)as mock_log_exception, \
mock.patch.object(time, "sleep") as mock_time_sleep:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._notify_device_added, vm, host)
self.assertTrue(mock_log_exception.called)
self.assertTrue(mock_get_ports.called)
self.assertFalse(mock_time_sleep.called)
def test_notify_device_added_with_retry(self):
vm = VM(FAKE_VM, [])
host = FAKE_HOST_1
self.agent.esx_hostname = host
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
return_value=False) as mock_get_ports, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception, \
mock.patch.object(time, "sleep") as mock_time_sleep:
self.agent._notify_device_added(vm, host)
self.assertTrue(mock_get_ports.called)
self.assertTrue(mock_time_sleep.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_updated_migration_vlan(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
self._build_lvm(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent._add_ports_to_host_ports([FAKE_PORT_1])
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._notify_device_updated(vm, FAKE_HOST_2, True)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertFalse(mock_update_device_binding.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_updated_host_vlan(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
self._build_lvm(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
br = self.agent.phys_brs[port['physical_network']]['br']
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding:
self.agent._notify_device_updated(vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertTrue(br.add_flows.called)
def test_notify_device_updated_vlan_rpc_exception(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
br = self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding",
side_effect=Exception()
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._notify_device_updated, vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(br.add_flows.called)
self.assertTrue(mock_update_device_binding.called)
self.assertTrue(mock_log_exception.called)
def test_notify_device_updated_host_vlan_multiple_nic(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm_port2 = SamplePort(FAKE_PORT_2)
vm = VM(FAKE_VM, ([vm_port1, vm_port2]))
port1 = self._build_port(FAKE_PORT_1)
port2 = self._build_port(FAKE_PORT_2)
br1 = self._build_phys_brs(port1)
br2 = self._build_phys_brs(port2)
self.agent.ports_dict[port1['id']] = self.agent._build_port_info(port1)
self.agent.ports_dict[port2['id']] = self.agent._build_port_info(port2)
self._build_lvm(port1)
self._build_lvm(port2)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._notify_device_updated(vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertFalse(mock_log_exception.called)
self.assertEqual(1, mock_update_device_binding.call_count)
self.assertTrue(br1.add_flows.called)
self.assertTrue(br2.add_flows.called)
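    # Seeds agent.local_vlan_map with a LocalVLANMapping for the port's
    # network, mimicking a previously provisioned local VLAN.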
def _build_lvm(self, port):
net_id = port['network_id']
self.agent.local_vlan_map[net_id] = ovs_agent.LocalVLANMapping(
port['lvid'], port['network_type'],
port['physical_network'],
'1234')
def test_notify_device_updated_host_vxlan(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
port1 = self._build_port(FAKE_PORT_1)
port1['network_type'] = p_const.TYPE_VXLAN
self.agent.ports_dict[port1['id']] = self.agent._build_port_info(port1)
vm = VM(FAKE_VM, [vm_port1])
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._notify_device_updated(vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_updated_vxlan_rpc_exception(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding",
side_effect=Exception()
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._notify_device_updated, vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertTrue(mock_log_exception.called)
def test_map_port_to_common_model_vlan(self):
expected_port = self._build_port(FAKE_PORT_1)
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
network, port = self.agent._map_port_to_common_model(expected_port)
expected_name = expected_port['network_id'] + "-" + FAKE_CLUSTER_MOID
self.assertEqual(expected_name, network.name)
self.assertEqual(expected_port['id'], port.uuid)
def test_map_port_to_common_model_vxlan(self):
expected_port = self._build_port(FAKE_PORT_1)
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
network, port = self.agent._map_port_to_common_model(expected_port, 1)
expected_name = expected_port['network_id'] + "-" + FAKE_CLUSTER_MOID
self.assertEqual(expected_name, network.name)
self.assertEqual(expected_port['id'], port.uuid)
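    # device_create RPC handler tests: cluster mismatch, hosted vs.
    # non-hosted VMs, VLAN vs. VXLAN networks, and missing or partial
    # security group rules.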
def test_device_create_cluster_mismatch(self):
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_2
with mock.patch.object(self.agent,
'_process_create_ports',
return_value=True) as mock_create_ports, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE)
self.assertTrue(mock_logger_debug.called)
self.assertFalse(mock_create_ports.called)
def test_device_create_non_hosted_vm(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.esx_hostname = FAKE_HOST_2
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
mock_add_devices_fn.assert_called_with(ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertFalse(self.agent.devices_up_list)
self.assertTrue(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vlan(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
mock_add_devices_fn.assert_called_with(ports)
self.assertTrue(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vlan_sg_rule_missing(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.devices_to_filter = set()
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES_MISSING
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
self.assertIn(FAKE_PORT_1, self.agent.devices_to_filter)
mock_add_devices_fn.assert_called_with(ports)
self.assertFalse(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vlan_sg_rule_partial_missing(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.devices_to_filter = set()
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES_PARTIAL
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
self.assertIn(FAKE_PORT_1, self.agent.devices_to_filter)
mock_add_devices_fn.assert_called_with(ports)
self.assertFalse(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vxlan(self):
port = self._build_port(FAKE_PORT_1)
port['network_type'] = p_const.TYPE_VXLAN
ports = [port]
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
self.agent.local_vlan_map = {}
self.agent.devices_to_filter = set()
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.agent.sg_agent,
'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.agent.plugin_rpc, 'update_device_up'
) as mock_update_device_up, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_prov_local_vlan.called)
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertNotIn(FAKE_PORT_1, self.agent.devices_to_filter)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
mock_add_devices_fn.assert_called_with(ports)
self.assertTrue(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_update_device_up.called)
def test_device_create_hosted_vm_vxlan_sg_rule_missing(self):
port = self._build_port(FAKE_PORT_1)
port['network_type'] = p_const.TYPE_VXLAN
ports = [port]
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
self.agent.local_vlan_map = {}
self.agent.devices_to_filter = set()
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.agent.sg_agent,
'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES_MISSING
) as mock_expand_sg_rules, \
mock.patch.object(self.agent.plugin_rpc, 'update_device_up'
) as mock_update_device_up, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_prov_local_vlan.called)
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.devices_to_filter)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
mock_add_devices_fn.assert_called_with(ports)
self.assertFalse(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_update_device_up.called)
def test_device_create_hosted_vm_create_port_exception(self):
ports = [self._build_port(FAKE_PORT_1)]
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.net_mgr.get_driver().create_port = mock.Mock(
side_effect=Exception())
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
), \
mock.patch.object(self.agent, '_provision_local_vlan'
), \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug, \
mock.patch.object(self.LOG, 'exception') as mock_log_excep:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent.device_create,
FAKE_CONTEXT, device=DEVICE,
ports=ports, sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertFalse(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_log_excep.called)
def test_port_update_admin_state_up(self):
port = self._build_port(FAKE_PORT_1)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.cluster_host_ports = set([port['id']])
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
updated_port = self._build_update_port(FAKE_PORT_1)
updated_port['admin_state_up'] = True
        self.agent.devices_up_list = []
neutron_port = {'port': updated_port,
'segmentation_id': port['segmentation_id']}
with mock.patch.object(self.LOG, 'exception'
) as mock_log_exception, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.port_update(FAKE_CONTEXT, **neutron_port)
self.assertEqual(neutron_port['port']['admin_state_up'],
self.agent.ports_dict[port['id']].
admin_state_up)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
self.assertFalse(mock_log_exception.called)
self.assertTrue(mock_logger_debug.called)
def test_device_update_maintenance_mode(self):
kwargs = {'device_data': {'ovsvapp_agent': 'fake_agent_host_1',
'esx_host_name': FAKE_HOST_1,
'assigned_agent_host': FAKE_HOST_2}}
self.agent.hostname = FAKE_HOST_2
self.agent.esx_maintenance_mode = True
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.net_mgr.get_driver().session = "fake_session"
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.vcenter_id = FAKE_VCENTER
with mock.patch.object(resource_util,
"get_vm_mor_by_name",
return_value="vm_mor") as vm_mor_by_name, \
mock.patch.object(resource_util,
"get_host_mor_by_name",
return_value="host_mor"
) as host_mor_by_name, \
mock.patch.object(resource_util,
"set_vm_poweroff") as power_off, \
mock.patch.object(resource_util,
"set_host_into_maintenance_mode"
) as maintenance_mode, \
mock.patch.object(resource_util,
"set_host_into_shutdown_mode"
) as shutdown_mode, \
mock.patch.object(self.agent.ovsvapp_rpc,
"update_cluster_lock") as cluster_lock, \
mock.patch.object(self.LOG, 'exception') as log_exception, \
mock.patch.object(time, 'sleep'):
self.agent.device_update(FAKE_CONTEXT, **kwargs)
self.assertTrue(vm_mor_by_name.called)
self.assertTrue(host_mor_by_name.called)
self.assertTrue(power_off.called)
self.assertTrue(maintenance_mode.called)
self.assertFalse(shutdown_mode.called)
self.assertTrue(cluster_lock.called)
cluster_lock.assert_called_with(self.agent.context,
cluster_id=self.agent.cluster_id,
vcenter_id=self.agent.vcenter_id,
success=True)
self.assertFalse(log_exception.called)
def test_device_update_shutdown_mode(self):
kwargs = {'device_data': {'ovsvapp_agent': 'fake_agent_host_1',
'esx_host_name': FAKE_HOST_1,
'assigned_agent_host': FAKE_HOST_2}}
self.agent.hostname = FAKE_HOST_2
self.agent.esx_maintenance_mode = False
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.net_mgr.get_driver().session = "fake_session"
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.vcenter_id = FAKE_VCENTER
with mock.patch.object(resource_util,
"get_vm_mor_by_name",
return_value="vm_mor") as vm_mor_by_name, \
mock.patch.object(resource_util,
"get_host_mor_by_name",
return_value="host_mor"
) as host_mor_by_name, \
mock.patch.object(resource_util,
"set_vm_poweroff") as power_off, \
mock.patch.object(resource_util,
"set_host_into_maintenance_mode"
) as maintenance_mode, \
mock.patch.object(resource_util,
"set_host_into_shutdown_mode"
) as shutdown_mode, \
mock.patch.object(self.agent.ovsvapp_rpc,
"update_cluster_lock") as cluster_lock, \
mock.patch.object(self.LOG, 'exception') as log_exception, \
mock.patch.object(time, 'sleep'):
self.agent.device_update(FAKE_CONTEXT, **kwargs)
self.assertTrue(vm_mor_by_name.called)
self.assertTrue(host_mor_by_name.called)
self.assertFalse(power_off.called)
self.assertFalse(maintenance_mode.called)
self.assertTrue(shutdown_mode.called)
self.assertTrue(cluster_lock.called)
cluster_lock.assert_called_with(self.agent.context,
cluster_id=self.agent.cluster_id,
vcenter_id=self.agent.vcenter_id,
success=True)
self.assertFalse(log_exception.called)
def test_device_update_ovsvapp_alreadly_powered_off(self):
kwargs = {'device_data': {'ovsvapp_agent': 'fake_agent_host_1',
'esx_host_name': FAKE_HOST_1,
'assigned_agent_host': FAKE_HOST_2}}
self.agent.hostname = FAKE_HOST_2
self.agent.esx_maintenance_mode = True
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.net_mgr.get_driver().session = "fake_session"
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.vcenter_id = FAKE_VCENTER
with mock.patch.object(resource_util,
"get_vm_mor_by_name",
return_value="vm_mor") as vm_mor_by_name, \
mock.patch.object(resource_util,
"get_host_mor_by_name",
return_value="host_mor"
) as host_mor_by_name, \
mock.patch.object(resource_util,
"set_vm_poweroff",
side_effect=Exception()) as power_off, \
mock.patch.object(resource_util,
"set_host_into_maintenance_mode"
) as maintenance_mode, \
mock.patch.object(resource_util,
"set_host_into_shutdown_mode"
) as shutdown_mode, \
mock.patch.object(self.agent.ovsvapp_rpc,
"update_cluster_lock") as cluster_lock, \
mock.patch.object(self.LOG, 'exception') as log_exception, \
mock.patch.object(time, 'sleep'):
self.agent.device_update(FAKE_CONTEXT, **kwargs)
self.assertTrue(vm_mor_by_name.called)
self.assertTrue(host_mor_by_name.called)
self.assertTrue(power_off.called)
self.assertTrue(maintenance_mode.called)
self.assertFalse(shutdown_mode.called)
self.assertTrue(cluster_lock.called)
cluster_lock.assert_called_with(self.agent.context,
cluster_id=self.agent.cluster_id,
vcenter_id=self.agent.vcenter_id,
success=True)
self.assertTrue(log_exception.called)
def test_device_update_maintenance_mode_exception(self):
kwargs = {'device_data': {'ovsvapp_agent': 'fake_agent_host_1',
'esx_host_name': FAKE_HOST_1,
'assigned_agent_host': FAKE_HOST_2}}
self.agent.hostname = FAKE_HOST_2
self.agent.esx_maintenance_mode = True
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.net_mgr.get_driver().session = "fake_session"
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.vcenter_id = FAKE_VCENTER
with mock.patch.object(resource_util,
"get_vm_mor_by_name",
return_value="vm_mor") as vm_mor_by_name, \
mock.patch.object(resource_util,
"get_host_mor_by_name",
return_value="host_mor"
) as host_mor_by_name, \
mock.patch.object(resource_util,
"set_vm_poweroff",
side_effect=Exception()) as power_off, \
mock.patch.object(resource_util,
"set_host_into_maintenance_mode",
side_effect=Exception()
) as maintenance_mode, \
mock.patch.object(resource_util,
"set_host_into_shutdown_mode"
) as shutdown_mode, \
mock.patch.object(self.agent.ovsvapp_rpc,
"update_cluster_lock") as cluster_lock, \
mock.patch.object(self.LOG, 'exception') as log_exception, \
mock.patch.object(time, 'sleep') as time_sleep:
self.agent.device_update(FAKE_CONTEXT, **kwargs)
self.assertTrue(vm_mor_by_name.called)
self.assertTrue(host_mor_by_name.called)
self.assertTrue(power_off.called)
self.assertTrue(maintenance_mode.called)
self.assertFalse(shutdown_mode.called)
self.assertTrue(cluster_lock.called)
cluster_lock.assert_called_with(self.agent.context,
cluster_id=self.agent.cluster_id,
vcenter_id=self.agent.vcenter_id,
success=False)
self.assertTrue(log_exception.called)
self.assertTrue(time_sleep.called)
def test_enhanced_sg_provider_updated(self):
kwargs = {'network_id': NETWORK_ID}
with mock.patch.object(self.LOG, 'info') as log_info, \
mock.patch.object(self.agent.sg_agent, "sg_provider_updated"
) as mock_sg_provider_updated:
self.agent.enhanced_sg_provider_updated(FAKE_CONTEXT, **kwargs)
self.assertTrue(log_info.called)
mock_sg_provider_updated.assert_called_with(NETWORK_ID)
def test_device_create_hosted_vm_vlan_multiple_physnet(self):
port1 = self._build_port(FAKE_PORT_1)
port2 = self._build_port(FAKE_PORT_2)
port2['physical_network'] = "physnet2"
port2['segmentation_id'] = "2005"
port2['network_id'] = "fake_net2"
ports = [port1, port2]
self._build_phys_brs(port1)
self._build_phys_brs(port2)
self.agent.phys_ofports = {}
self.agent.phys_ofports[port1['physical_network']] = 4
self.agent.phys_ofports[port2['physical_network']] = 5
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.int_br = mock.Mock()
self.agent.patch_sec_ofport = 1
self.agent.int_ofports = {'physnet1': 2, 'physnet2': 3}
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
), \
mock.patch.object(self.agent.int_br, 'provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES_MULTI_PORTS
), \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertEqual([FAKE_PORT_1, FAKE_PORT_2],
self.agent.devices_up_list)
mock_add_devices_fn.assert_called_with(ports)
self.assertTrue(mock_prov_local_vlan.called)
mock_prov_local_vlan.assert_any_call(
port1['network_type'],
port1['lvid'],
port1['segmentation_id'],
self.agent.patch_sec_ofport,
self.agent.int_ofports['physnet1'], None)
mock_prov_local_vlan.assert_any_call(
port2['network_type'],
port2['lvid'],
port2['segmentation_id'],
self.agent.patch_sec_ofport,
self.agent.int_ofports['physnet2'], None)
| VTabolin/networking-vsphere | networking_vsphere/tests/unit/agent/test_ovsvapp_agent.py | Python | apache-2.0 | 103,582 |
import pkg_resources
def get_dir():
"""Return the location of resources for report"""
return pkg_resources.resource_filename('naarad.resources', None)
| kilink/naarad | src/naarad/resources/__init__.py | Python | apache-2.0 | 157 |
"""
Podi, a command-line interface for Kodi.
Copyright (C) 2015 Peter Frost <slimeypete@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from .json_error import JSONResponseError
from .library_errors import NoMediaError
from .argument_errors import MissingArgumentError
| vroomfondle/podi | app/errors/__init__.py | Python | gpl-3.0 | 898 |
#!/usr/bin/env python
import os
import glob
import sys
import signal
import subprocess
# path = '/home/gnomex/HomeWorkMalwareAnalysis/Pcaps'
path = '/media/gnomex/zebras/kenner-pcaps'
src_path = '/home/gnomex/HomeWorkMalwareAnalysis/analysis/bad_pcaps'
new_path = '/home/gnomex/HomeWorkMalwareAnalysis/analysis/rlly_bad_pcaps'
PID = None
try:
for filename in glob.glob(os.path.join(path, '*.pcap')):
print("Reading {}".format(filename))
try:
# append _entropy_hcurve.jpg
proc = subprocess.Popen(["mv {}/{}_entropy_hcurve.jpg {}".format(src_path, os.path.basename(filename), new_path)], shell=True)
PID = proc.pid
proc.wait()
except Exception as e:
print("Something wrong here, file {}, exc {}".format(filename, e))
if PID:
proc.terminate()
raise
except (KeyboardInterrupt, SystemExit):
print("From lol: interrupt received, stopping...")
sys.exit(-1)
| gnomex/analysis | ids-bots/move_images.py | Python | gpl-3.0 | 992 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_convert_to_and_from_dict.py
DESCRIPTION:
This sample demonstrates how to convert models returned from an analyze operation
to and from a dictionary. The dictionary in this sample is then converted to a
JSON file, then the same dictionary is converted back to its original model.
USAGE:
python sample_convert_to_and_from_dict.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
import json
def convert_to_and_from_dict():
path_to_sample_documents = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
"..",
"..",
"./sample_forms/forms/Form_1.jpg",
)
)
from azure.core.serialization import AzureJSONEncoder
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentAnalysisClient, AnalyzeResult
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
document_analysis_client = DocumentAnalysisClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_documents, "rb") as f:
poller = document_analysis_client.begin_analyze_document(
"prebuilt-document", document=f
)
result = poller.result()
# convert the received model to a dictionary
analyze_result_dict = result.to_dict()
# save the dictionary as JSON content in a JSON file, use the AzureJSONEncoder
# to help make types, such as dates, JSON serializable
# NOTE: AzureJSONEncoder is only available with azure.core>=1.18.0.
with open('data.json', 'w') as f:
json.dump(analyze_result_dict, f, cls=AzureJSONEncoder)
# convert the dictionary back to the original model
model = AnalyzeResult.from_dict(analyze_result_dict)
# use the model as normal
print("----Converted from dictionary AnalyzeResult----")
print("Model ID: '{}'".format(model.model_id))
print("Number of pages analyzed {}".format(len(model.pages)))
print("API version used: {}".format(model.api_version))
print("----------------------------------------")
if __name__ == "__main__":
convert_to_and_from_dict()
| Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_convert_to_and_from_dict.py | Python | mit | 2,735 |
from django.db import models
class BaseModel(models.Model):
add_date = models.DateTimeField(auto_now_add=True, db_index=True)
update_date = models.DateTimeField(auto_now=True)
class Meta(object):
abstract = True
ordering = ['-add_date']
class Doctor(BaseModel):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
second_name = models.CharField(max_length=100)
monday_start = models.DateTimeField(null=True, blank=True)
monday_end = models.DateTimeField(null=True, blank=True)
monday_cabinet = models.CharField(max_length=20, null=True, blank=True)
tuesday_start = models.DateTimeField(null=True, blank=True)
tuesday_end = models.DateTimeField(null=True, blank=True)
tuesday_cabinet = models.CharField(max_length=20, null=True, blank=True)
wednesday_start = models.DateTimeField(null=True, blank=True)
wednesday_end = models.DateTimeField(null=True, blank=True)
wednesday_cabinet = models.CharField(max_length=20, null=True, blank=True)
thursday_start = models.DateTimeField(null=True, blank=True)
thursday_end = models.DateTimeField(null=True, blank=True)
thursday_cabinet = models.CharField(max_length=20, null=True, blank=True)
friday_start = models.DateTimeField(null=True, blank=True)
friday_end = models.DateTimeField(null=True, blank=True)
friday_cabinet = models.CharField(max_length=20, null=True, blank=True)
saturday_start = models.DateTimeField(null=True, blank=True)
saturday_end = models.DateTimeField(null=True, blank=True)
saturday_cabinet = models.CharField(max_length=20, null=True, blank=True)
sunday_start = models.DateTimeField(null=True, blank=True)
sunday_end = models.DateTimeField(null=True, blank=True)
sunday_cabinet = models.CharField(max_length=20, null=True, blank=True)
duration = models.DurationField(null=True, blank=True)
specialization = models.CharField(max_length=100)
@property
def name(self):
parts = []
if self.first_name:
parts.append(self.first_name)
if self.last_name:
parts.append(self.last_name)
if self.second_name:
parts.append(self.second_name)
return " ".join(parts)
def __str__(self):
return self.name
class Address(BaseModel):
street = models.CharField(max_length=100)
house = models.CharField(max_length=20)
doctor = models.ForeignKey(Doctor)
@property
def address(self):
return "%s %s" % (self.street, self.house)
class Talon(BaseModel):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
second_name = models.CharField(max_length=100)
phone = models.CharField(max_length=100, null=True, blank=True)
birthday = models.DateField(null=True, blank=True)
cabinet = models.CharField(null=True, blank=True, max_length=100)
doctor = models.ForeignKey(Doctor)
address = models.ForeignKey(Address)
date_of_receipt = models.DateTimeField()
@property
def name(self):
parts = []
if self.first_name:
parts.append(self.first_name)
if self.last_name:
parts.append(self.last_name)
if self.second_name:
parts.append(self.second_name)
return " ".join(parts)
| rainum/registry | www/edoctor/models.py | Python | gpl-3.0 | 3,372 |
# -*- test-case-name: vumi.transports.vumi_bridge.tests.test_client -*-
import json
from twisted.internet.defer import Deferred
from twisted.internet import reactor
from twisted.web.client import Agent, ResponseDone, ResponseFailed
from twisted.web import http
from twisted.protocols import basic
from twisted.python.failure import Failure
from vumi.message import Message
from vumi.utils import to_kwargs
from vumi import log
from vumi.errors import VumiError
class VumiBridgeError(VumiError):
"""Raised by errors encountered by VumiBridge."""
class VumiBridgeInvalidJsonError(VumiError):
"""Raised when invalid JSON is received."""
class VumiMessageReceiver(basic.LineReceiver):
delimiter = '\n'
message_class = Message
def __init__(self, message_class, callback, errback, on_connect=None,
on_disconnect=None):
self.message_class = message_class
self.callback = callback
self.errback = errback
self._response = None
self._wait_for_response = Deferred()
self._on_connect = on_connect or (lambda *a: None)
self._on_disconnect = on_disconnect or (lambda *a: None)
self.disconnecting = False
def get_response(self):
return self._wait_for_response
def handle_response(self, response):
self._response = response
if self._response.code == http.NO_CONTENT:
self._wait_for_response.callback(self._response)
else:
self._response.deliverBody(self)
def lineReceived(self, line):
d = Deferred()
d.addCallback(self.callback)
d.addErrback(self.errback)
line = line.strip()
try:
data = json.loads(line)
d.callback(self.message_class(
_process_fields=True, **to_kwargs(data)))
except ValueError, e:
f = Failure(VumiBridgeInvalidJsonError(line))
d.errback(f)
except Exception, e:
log.err()
f = Failure(e)
d.errback(f)
def connectionMade(self):
self._on_connect()
def connectionLost(self, reason):
# the PotentialDataLoss here is because Twisted didn't receive a
# content length header, which is normal because we're streaming.
if (reason.check(ResponseDone, ResponseFailed, http.PotentialDataLoss)
and self._response is not None
and not self._wait_for_response.called):
self._wait_for_response.callback(self._response)
if not self.disconnecting:
self._on_disconnect(reason)
def disconnect(self):
self.disconnecting = True
if self.transport and self.transport._producer is not None:
self.transport._producer.loseConnection()
self.transport._stopProxying()
class StreamingClient(object):
def __init__(self):
self.agent = Agent(reactor)
def stream(self, message_class, callback, errback, url,
headers=None, on_connect=None, on_disconnect=None):
receiver = VumiMessageReceiver(
message_class, callback, errback,
on_connect=on_connect,
on_disconnect=on_disconnect)
d = self.agent.request('GET', url, headers)
d.addCallback(lambda response: receiver.handle_response(response))
d.addErrback(log.err)
return receiver
| TouK/vumi | vumi/transports/vumi_bridge/client.py | Python | bsd-3-clause | 3,394 |
"""Adding census_year to state_congressional_table
Revision ID: 5456e2207d32
Revises: 17105e26eef4
Create Date: 2018-05-03 12:20:05.945295
"""
# revision identifiers, used by Alembic.
revision = '5456e2207d32'
down_revision = '17105e26eef4'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('state_congressional', sa.Column('census_year', sa.Integer(), nullable=True))
op.create_index(op.f('ix_state_congressional_census_year'), 'state_congressional', ['census_year'], unique=False)
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_state_congressional_census_year'), table_name='state_congressional')
op.drop_column('state_congressional', 'census_year')
# ### end Alembic commands ###
| fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/5456e2207d32_adding_census_year_to_state_.py | Python | cc0-1.0 | 1,112 |
class Solution(object):
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
neg = False
if (x < 0):
neg = True
x *= -1
digits = []
number = 0
while (x != 0):
digits.append(x % 10)
x = x // 10
digits = digits[::-1]
for i in range(len(digits)):
number += digits[i] * pow(10, i)
if neg:
number *= -1
        # clamp to the signed 32-bit range required by the problem statement;
        # Python integers never overflow, so this check only exists to satisfy the judge
        return 0 if (number > pow(2, 31) - 1) or (number < -pow(2, 31)) else number
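# A minimal usage sketch (added for illustration, not part of the original
# submission): exercises the digit reversal and the 32-bit clamp.
if __name__ == "__main__":
    s = Solution()
    print(s.reverse(123))         # 321
    print(s.reverse(-120))        # -21
    print(s.reverse(1534236469))  # reversed value exceeds 2**31 - 1, so 0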
| njvelat/leetcode | reverse-integer.py | Python | gpl-3.0 | 545 |
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial Usage
## Licensees holding valid Qt Commercial licenses may use this file in
## accordance with the Qt Commercial License Agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Nokia.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore, QtGui
from colors import Colors
from demoitem import DemoItem
from demoitemanimation import DemoItemAnimation
from demotextitem import DemoTextItem
from scanitem import ScanItem
class ButtonBackground(DemoItem):
def __init__(self, type, highlighted, pressed, logicalSize, scene, parent):
super(ButtonBackground, self).__init__(scene, parent)
self.type = type
self.highlighted = highlighted
self.pressed = pressed
self.logicalSize = logicalSize
self.useSharedImage('%s%d%d%d' % (__file__, type, highlighted, pressed))
def createImage(self, matrix):
if self.type in (TextButton.SIDEBAR, TextButton.PANEL):
return self.createRoundButtonBackground(matrix)
else:
return self.createArrowBackground(matrix)
def createRoundButtonBackground(self, matrix):
scaledRect = matrix.mapRect(QtCore.QRect(0, 0,
self.logicalSize.width(), self.logicalSize.height()))
image = QtGui.QImage(scaledRect.width(), scaledRect.height(),
QtGui.QImage.Format_ARGB32_Premultiplied)
image.fill(QtGui.QColor(0, 0, 0, 0).rgba())
painter = QtGui.QPainter(image)
painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.setPen(QtCore.Qt.NoPen)
if Colors.useEightBitPalette:
painter.setPen(QtGui.QColor(120, 120, 120))
if self.pressed:
painter.setBrush(QtGui.QColor(60, 60, 60))
elif self.highlighted:
painter.setBrush(QtGui.QColor(100, 100, 100))
else:
painter.setBrush(QtGui.QColor(80, 80, 80))
else:
outlinebrush = QtGui.QLinearGradient(0, 0, 0, scaledRect.height())
brush = QtGui.QLinearGradient(0, 0, 0, scaledRect.height())
brush.setSpread(QtGui.QLinearGradient.PadSpread)
highlight = QtGui.QColor(255, 255, 255, 70)
shadow = QtGui.QColor(0, 0, 0, 70)
sunken = QtGui.QColor(220, 220, 220, 30)
if self.type == TextButton.PANEL:
normal1 = QtGui.QColor(200, 170, 160, 50)
normal2 = QtGui.QColor(50, 10, 0, 50)
else:
normal1 = QtGui.QColor(255, 255, 245, 60)
normal2 = QtGui.QColor(255, 255, 235, 10)
if self.pressed:
outlinebrush.setColorAt(0, shadow)
outlinebrush.setColorAt(1, highlight)
brush.setColorAt(0, sunken)
painter.setPen(QtCore.Qt.NoPen)
else:
outlinebrush.setColorAt(1, shadow)
outlinebrush.setColorAt(0, highlight)
brush.setColorAt(0, normal1)
if not self.highlighted:
brush.setColorAt(1, normal2)
painter.setPen(QtGui.QPen(outlinebrush, 1))
painter.setBrush(brush)
if self.type == TextButton.PANEL:
painter.drawRect(0, 0, scaledRect.width(), scaledRect.height())
else:
painter.drawRoundedRect(0, 0, scaledRect.width(),
scaledRect.height(), 10, 90, QtCore.Qt.RelativeSize)
return image
def createArrowBackground(self, matrix):
scaledRect = matrix.mapRect(QtCore.QRect(0, 0,
self.logicalSize.width(), self.logicalSize.height()))
image = QtGui.QImage(scaledRect.width(), scaledRect.height(),
QtGui.QImage.Format_ARGB32_Premultiplied)
image.fill(QtGui.QColor(0, 0, 0, 0).rgba())
painter = QtGui.QPainter(image)
painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.setPen(QtCore.Qt.NoPen)
if Colors.useEightBitPalette:
painter.setPen(QtGui.QColor(120, 120, 120))
if self.pressed:
painter.setBrush(QtGui.QColor(60, 60, 60))
elif self.highlighted:
painter.setBrush(QtGui.QColor(100, 100, 100))
else:
painter.setBrush(QtGui.QColor(80, 80, 80))
else:
outlinebrush = QtGui.QLinearGradient(0, 0, 0, scaledRect.height())
brush = QtGui.QLinearGradient(0, 0, 0, scaledRect.height())
brush.setSpread(QtGui.QLinearGradient.PadSpread)
highlight = QtGui.QColor(255, 255, 255, 70)
shadow = QtGui.QColor(0, 0, 0, 70)
sunken = QtGui.QColor(220, 220, 220, 30)
normal1 = QtGui.QColor(200, 170, 160, 50)
normal2 = QtGui.QColor(50, 10, 0, 50)
if self.pressed:
outlinebrush.setColorAt(0, shadow)
outlinebrush.setColorAt(1, highlight)
brush.setColorAt(0, sunken)
painter.setPen(QtCore.Qt.NoPen)
else:
outlinebrush.setColorAt(1, shadow)
outlinebrush.setColorAt(0, highlight)
brush.setColorAt(0, normal1)
if not self.highlighted:
brush.setColorAt(1, normal2)
painter.setPen(QtGui.QPen(outlinebrush, 1))
            painter.setBrush(brush)
painter.drawRect(0, 0, scaledRect.width(), scaledRect.height())
xOff = scaledRect.width() / 2
yOff = scaledRect.height() / 2
sizex = 3.0 * matrix.m11()
sizey = 1.5 * matrix.m22()
if self.type == TextButton.UP:
sizey *= -1
path = QtGui.QPainterPath()
path.moveTo(xOff, yOff + (5 * sizey))
path.lineTo(xOff - (4 * sizex), yOff - (3 * sizey))
path.lineTo(xOff + (4 * sizex), yOff - (3 * sizey))
path.lineTo(xOff, yOff + (5 * sizey))
painter.drawPath(path)
return image
class TextButton(DemoItem):
BUTTON_WIDTH = 180
BUTTON_HEIGHT = 19
LEFT, RIGHT = range(2)
SIDEBAR, PANEL, UP, DOWN = range(4)
ON, OFF, HIGHLIGHT, DISABLED = range(4)
def __init__(self, text, align=LEFT, userCode=0, scene=None, parent=None, type=SIDEBAR):
super(TextButton, self).__init__(scene, parent)
# Prevent a circular import.
from menumanager import MenuManager
self._menu_manager = MenuManager.instance()
self.menuString = text
self.buttonLabel = text
self.alignment = align
self.buttonType = type
self.userCode = userCode
self.scanAnim = None
self.bgOn = None
self.bgOff = None
self.bgHighlight = None
self.bgDisabled = None
self.state = TextButton.OFF
self.setAcceptsHoverEvents(True)
self.setCursor(QtCore.Qt.PointingHandCursor)
# Calculate the button size.
if type in (TextButton.SIDEBAR, TextButton.PANEL):
self.logicalSize = QtCore.QSize(TextButton.BUTTON_WIDTH, TextButton.BUTTON_HEIGHT)
else:
self.logicalSize = QtCore.QSize(int((TextButton.BUTTON_WIDTH / 2.0) - 5), int(TextButton.BUTTON_HEIGHT * 1.5))
def setMenuString(self, menu):
self.menuString = menu
def prepare(self):
if not self.prepared:
self.prepared = True
self.setupHoverText()
self.setupScanItem()
self.setupButtonBg()
def boundingRect(self):
return QtCore.QRectF(0, 0, self.logicalSize.width(),
self.logicalSize.height())
def setupHoverText(self):
if not self.buttonLabel:
return
textItem = DemoTextItem(self.buttonLabel, Colors.buttonFont(),
Colors.buttonText, -1, self.scene(), self)
textItem.setZValue(self.zValue() + 2)
textItem.setPos(16, 0)
def setupScanItem(self):
if Colors.useButtonBalls:
scanItem = ScanItem(None, self)
scanItem.setZValue(self.zValue() + 1)
self.scanAnim = DemoItemAnimation(scanItem)
self.scanAnim.timeline.setLoopCount(1)
x = 1.0
y = 1.5
stop = TextButton.BUTTON_WIDTH - scanItem.boundingRect().width() - x
if self.alignment == TextButton.LEFT:
self.scanAnim.setDuration(2500)
self.scanAnim.setPosAt(0.0, QtCore.QPointF(x, y))
self.scanAnim.setPosAt(0.5, QtCore.QPointF(x, y))
self.scanAnim.setPosAt(0.7, QtCore.QPointF(stop, y))
self.scanAnim.setPosAt(1.0, QtCore.QPointF(x, y))
scanItem.setPos(QtCore.QPointF(x, y))
else:
self.scanAnim.setPosAt(0.0, QtCore.QPointF(stop, y))
self.scanAnim.setPosAt(0.5, QtCore.QPointF(x, y))
self.scanAnim.setPosAt(1.0, QtCore.QPointF(stop, y))
scanItem.setPos(QtCore.QPointF(stop, y))
def setState(self, state):
self.state = state
self.bgOn.setRecursiveVisible(state == TextButton.ON)
self.bgOff.setRecursiveVisible(state == TextButton.OFF)
self.bgHighlight.setRecursiveVisible(state == TextButton.HIGHLIGHT)
self.bgDisabled.setRecursiveVisible(state == TextButton.DISABLED)
if state == TextButton.DISABLED:
self.setCursor(QtCore.Qt.ArrowCursor)
else:
self.setCursor(QtCore.Qt.PointingHandCursor)
def setupButtonBg(self):
self.bgOn = ButtonBackground(self.buttonType, True, True,
self.logicalSize, self.scene(), self)
self.bgOff = ButtonBackground(self.buttonType, False, False,
self.logicalSize, self.scene(), self)
self.bgHighlight = ButtonBackground(self.buttonType, True, False,
self.logicalSize, self.scene(), self)
self.bgDisabled = ButtonBackground(self.buttonType, True, True,
self.logicalSize, self.scene(), self)
self.setState(TextButton.OFF)
def hoverEnterEvent(self, event):
if self.locked or self.state == TextButton.DISABLED:
return
if self.state == TextButton.OFF:
self.setState(TextButton.HIGHLIGHT)
if Colors.noAnimations and Colors.useButtonBalls:
# Wait a bit in the beginning to enhance the effect. We have
# to do this here so that the adaption can be dynamic.
self.scanAnim.setDuration(1000)
self.scanAnim.setPosAt(0.2, self.scanAnim.posAt(0))
if (self._menu_manager.window.fpsMedian > 10 or Colors.noAdapt or
Colors.noTimerUpdate):
if Colors.useButtonBalls:
self.scanAnim.play(True, True)
def hoverLeaveEvent(self, event):
if self.state == TextButton.DISABLED:
return
self.setState(TextButton.OFF)
if Colors.noAnimations and Colors.useButtonBalls:
self.scanAnim.stop()
def mousePressEvent(self, event):
if self.state == TextButton.DISABLED:
return
if self.state == TextButton.HIGHLIGHT or self.state == TextButton.OFF:
self.setState(TextButton.ON)
def mouseReleaseEvent(self, event):
if self.state == TextButton.ON:
self.setState(TextButton.OFF)
if not self.locked and self.boundingRect().contains(event.pos()):
self._menu_manager.itemSelected(self.userCode, self.menuString)
def animationStarted(self, _):
if self.state == TextButton.DISABLED:
return
self.setState(TextButton.OFF)
| martyngigg/pyqt-msvc | examples/demos/qtdemo/textbutton.py | Python | gpl-3.0 | 13,494 |
"""
Hello from easygui/__init__.py
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# __all__ must be defined in order for Sphinx to generate the API automatically.
from future import standard_library
standard_library.install_aliases()
__all__ = ['buttonbox',
'diropenbox',
'fileopenbox',
'filesavebox',
'textbox',
'ynbox',
'ccbox',
'boolbox',
'indexbox',
'msgbox',
'integerbox',
'multenterbox',
'enterbox',
'exceptionbox',
'choicebox',
'codebox',
'passwordbox',
'multpasswordbox',
'multchoicebox',
'EgStore',
'eg_version',
'egversion',
'abouteasygui',
]
# Import all functions that form the API
from .boxes.base_boxes import buttonbox
from .boxes.base_boxes import diropenbox
from .boxes.base_boxes import fileopenbox
from .boxes.base_boxes import filesavebox
from .boxes.text_box import textbox
from .boxes.derived_boxes import ynbox
from .boxes.derived_boxes import ccbox
from .boxes.derived_boxes import boolbox
from .boxes.derived_boxes import indexbox
from .boxes.derived_boxes import msgbox
from .boxes.derived_boxes import integerbox
from .boxes.derived_boxes import multenterbox
from .boxes.derived_boxes import enterbox
from .boxes.derived_boxes import exceptionbox
from .boxes.derived_boxes import choicebox
from .boxes.derived_boxes import codebox
from .boxes.derived_boxes import passwordbox
from .boxes.derived_boxes import multpasswordbox
from .boxes.derived_boxes import multchoicebox
from .boxes.egstore import EgStore
from .boxes.about import eg_version, egversion, abouteasygui | draperjames/qtpandas | qtpandas/ui/fallback/easygui/__init__.py | Python | mit | 1,847 |
"""empty message
Revision ID: e95429507f03
Revises: 82e227787c88
Create Date: 2017-10-08 04:00:04.899374
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e95429507f03'
down_revision = '82e227787c88'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('client',
sa.Column('name', sa.String(length=50), nullable=False),
sa.Column('client_id', sa.String(length=100), nullable=False),
sa.Column('client_secret', sa.String(length=50), nullable=False),
sa.Column('_is_confidential', sa.Boolean(), nullable=False),
sa.Column('_allowed_grant_types', sa.Text(), nullable=False),
sa.Column('_redirect_uris', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('client_id')
)
op.create_index(op.f('ix_client_client_secret'), 'client', ['client_secret'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_client_client_secret'), table_name='client')
op.drop_table('client')
# ### end Alembic commands ###
| coderadi/OAuth-server | migrations/versions/e95429507f03_.py | Python | mit | 1,195 |
#!/usr/bin/env python
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
| alvarolopez/pyocci | setup.py | Python | apache-2.0 | 171 |
"""
Paste your code here
"""
| fabriziodemaria/LeetCode-Tree-Parser | LC-Parser/here.py | Python | bsd-3-clause | 29 |
# coding: utf-8
# In[13]:
import tkinter as tk
import sqlite3
import numpy as np
import pandas as pd
from pandas import HDFStore
#import pubchempy as pcp
#import json
#from urllib.request import urlopen
#import urllib
#from urllib.request import urlopen
#from bs4 import BeautifulSoup
#from rdkit import Chem
#from rdkit.Chem.Draw import IPythonConsole
#from rdkit.Chem import Draw
#from rdkit.Chem import PandasTools
import os as os
from os import system, path, remove
import glob
import time
import shutil
import platform
host = platform.node()
#file modes for chmod commands
mod = 0o755
btime = time.strftime("%Y-%m-%d %H:%M")
#print(btime)
#hostflag = 0
if host == 'boron':
home = '/home/huffman/work/matinsy/'
dbfile = home+'db/cheminventory.db'
webhtmldir = './'
webmsdsdir = webhtmldir+'msds/'
websafetyplansdir = webhtmldir+'Lab_Specific_Hygiene_Plans/'
htmldir = '/home/huffman/public_html/sdsweb/'
safetyplansdir = htmldir+'Lab_Specific_Hygiene_Plans/'
safetyplansnoplan = './noplans.html'
msdsdir = htmldir+'msds/'
roomfile = home+'etc/allrooms.dat'
cheminfodir = htmldir+'cheminfo/'
cheminfodata = home+'cheminfodata/'
evacplandir = home+'evac_plans/'
webevacplandir = webhtmldir+'evac_plans/'
elif host == 'msds.wcu.edu':
home = '/wwbintz/'
dbfile = home+'/matinsy/db/cheminventory.db'
htmldir = home+'public_html/'
webhtmldir = './'
safetyplansdir = htmldir+'Lab_Specific_Hygiene_Plans/'
safetyplansnoplan = './noplans.html'
msdsdir = htmldir+'msds/'
webmsdsdir = webhtmldir+'msds/'
websafetyplansdir = webhtmldir+'Lab_Specific_Hygiene_Plans/'
roomfile = home+'matinsy/etc/allrooms.dat'
cheminfodir = htmldir+'cheminfo/'
cheminfodata = home+'matinsy/cheminfodata/'
webevacplandir = webhtmldir+'evac_plans/'
else:
pass
print('********************************************')
bmsg = ' websync beginning '
print(host,bmsg,btime)
# In[14]:
storagedict = {'g':"General",'w':"Corrosive",'r':'Flammable','y':'Oxidizer','b':'Toxic','none':'none or null: checkme','blank':'blank:checkme','hw':'hw:fixme','2':'2:fixme','1':'1:fixme','3':'3:fixme','4':'4:fixme','unk':'unk:fixme','na':'na:fixme','[CH2CH(CH2NH2•HCl)]n':'[CH2CH(CH2NH2•HCl)]n:fixme'}
# In[15]:
###delete old html files
#TODO: make into function
#files = glob.glob(htmldir+'sds_*.html')
#print(files)
#for file in files:
#print(file)
# try:
# remove(file)
# except (OSError,e): ## if failed, report it back to the user ##
# print("Error: {0} {1} - %s.".format(e.filename,e.strerror) )
def deloldhtmlfiles():
'''this function has a problem. It deletes the entire site, so there is a dead time when the data may not be available.
Better would be to delete one file at a time, but this idea would lead to html files remaining after they have been deleted
from the db.
'''
###delete old html files
#TODO: make into function
files = glob.glob(htmldir+'sds_*.html')
#print(files)
for file in files:
#print(file)
try:
remove(file)
        except OSError as e: ## if failed, report it back to the user ##
            print("Error: {0} {1}.".format(e.filename, e.strerror))
# In[16]:
def getevaclinkNSB(room):
NL = ['109','110','111','112','113','114','115','116','119A','119B','119C','123','125']
SL = ['106','103','M101','132A','132','130','128','126']
plandict = {'NS100N':'NSB_Evac_Complete.pdf#page=2'}
plandict['NS100S'] = 'NSB_Evac_Complete.pdf#page=1'
plandict['NS200N'] = 'NSB_Evac_Complete.pdf#page=4'
plandict['NS200S'] = 'NSB_Evac_Complete.pdf#page=3'
plandict['NS300N'] = 'NSB_Evac_Complete.pdf#page=6'
plandict['NS300S'] = 'NSB_Evac_Complete.pdf#page=5'
plandict['NSGN'] = 'NSB_Evac_Complete.pdf#page=7'
plandict['NSGS'] = 'NSB_Evac_Complete.pdf#page=8'
plandict['NSGM'] = 'NSB_Evac_Complete.pdf#page=9'
plandefault = 'NSB_Evac_Complete.pdf'
blkey = room[:2]
flkey = room[2]
roomnum = room[2:]
#print(blkey,flkey,roomnum)
if flkey == 'G':
blflkey = blkey+flkey+'N'
Nfile = webevacplandir+plandict[blflkey]
blflkey = blkey+flkey+'S'
Sfile = webevacplandir+plandict[blflkey]
blflkey = blkey+flkey+'M'
Mfile = webevacplandir+plandict[blflkey]
evaclink = '<ul> <li><a href='+Nfile+'> Northside Evacuation Plan </a><li><a href='+Mfile+'> Middle of the floor Evacuation Plan </a><li> <a href='+Sfile+'> Southside Evacuation Plan </a></ul>'
pass
else:
blflkey = blkey+flkey+'00'+'N'
Nfile = webevacplandir+plandict.get(blflkey,plandefault)
blflkey = blkey+flkey+'00'+'S'
Sfile = webevacplandir+plandict.get(blflkey,plandefault)
evaclink = '<ul> <li><a href='+Nfile+'> Northside Evacuation Plan </a><li> <a href='+Sfile+'> Southside Evacuation Plan </a></ul>'
return evaclink
getevaclinkNSB('NS423')
# In[17]:
###get room non inventory links
room = 'NS322'
#print(rooms)
def getdirfromroom(room):
#files = glob.glob(safetyplansdir+room+'*/*')
files = glob.glob(safetyplansdir+room+'*/*.pdf')# only pdf files
#print(files)
if not files:
files = [safetyplansnoplan]
#print(files)
return files
def mkfiles4web(files):
#files = getdirfromroom(room)
webfiles = []
#print(files)
for file in files:
#webfiles.append(file.split('/')[-1])
webfiles.append('/'.join(file.split('/')[-2:]))
return webfiles
def getevaclinkbkup(room):
###find evac plan
rmdict = {'not':'noplans.html', 'NS':'NSB_Evac_Complete.pdf','ST':'Stillwell_Evac_Complete.pdf','MK':'McKee_Evac_Complete.pdf'}
rmkey = room[:2]
flkey = room[2]
#print(flkey)
if rmkey not in ['BA','HB','HH','MR','de']:
file = webevacplandir+rmdict[rmkey]
evaclink = '<a href='+file+'> Evacuation Plan </a>'
else:
file = webevacplandir+rmdict['not']
evaclink = '<a href='+file+'> Evacuation Plan </a>'
return evaclink
#getevaclinkbkup('NS322')
def getevaclink(room):
rmkey = room[:2]
###find evac plan
files = getdirfromroom(room)
files = mkfiles4web(files)
ind = [i for i, s in enumerate(files) if 'evac_plan' in s]
#print(ind)
if not ind:
ind = [0]
evaclink = getevaclinkbkup(room)
if rmkey == 'NS':
evaclink = getevaclinkNSB(room)
else:
evaclink = '<a href='+websafetyplansdir+files[ind[0]]+'> Evacuation Plan </a>'
return evaclink
def getchplink(room):
###find CHP
files = getdirfromroom(room)
files = mkfiles4web(files)
ind = [i for i, s in enumerate(files) if 'CHP' in s]
if not ind:
ind = [0]
    chplink = '<a href='+websafetyplansdir+files[ind[0]]+'> Chemical Hygiene Plan </a>'
return chplink
def getsoplinks_org(room):
###findSOPs
files = getdirfromroom(room)
    # restored: these lines are needed to define soplinks for the loop below
    files = mkfiles4web(files)
    ind = [i for i, s in enumerate(files) if 'SOP' in s]
    soplinks = []
    for i in range(len(ind)):
        soplinks.append(files[ind[i]])
#### parse out type of sop
soplinklabels = []
for link in soplinks:
soplinklabels.append('SOP for ' +link.split('/')[-1].split('_')[0][15:])
soplink = []
for i in range(len(soplinks)):
soplink.append('<li> <a href='+websafetyplansdir+soplinks[i]+'>'+soplinklabels[i]+'</a> \n')
sopb = '<ul>'
sope = '</ul>'
return sopb+' '.join(soplink)+sope
def getsoplinks(room):
###findSOPs
#files = getdirfromroom(room)
files = glob.glob(safetyplansdir+room+'*/*SOP*.pdf')# only pdf files
files = mkfiles4web(files)
#print(room,files)
#ind = [i for i, s in enumerate(files) if 'SOP' in s]
soplinks = []
for i,file in enumerate(files):
#print(i,file)
soplinks.append(file)
#### parse out type of sop
soplinklabels = []
for link in soplinks:
bindex = link.index('SOP')+3
eindex = link.find('_',bindex)
#efindex = link.rfind('.pdf')
#eindex = np.min([e_index,efindex])
soplinklabels.append('SOP for ' +link[bindex:eindex])
soplink = []
for i in range(len(soplinks)):
soplink.append('<li> <a href='+websafetyplansdir+soplinks[i]+'>'+soplinklabels[i]+'</a> \n')
sopb = '<ul>'
sope = '</ul>'
return sopb+' '.join(soplink)+sope
#print(getsoplinks('NS323'))
def getcheminfolink(room):
file = cheminfodir+room+'.html'
file = cheminfodir+'test.html'
    cheminfolink = '<a href='+file+'> Extra Chemical Information </a>'
return cheminfolink
#files = getdirfromroom(room)
#print(getevaclink(room))
#print(getchplink(room))
#print(getsoplinks(room))
#files = mkfiles4web(files)
#files[0].split('/')
#file = files[0]
#'/'.join(file.split('/')[-2:])
#file
# In[18]:
def mkweblink(webaddress,text):
link = '<A HREF='+ webaddress +'>'+text+'</A>'
return link
def getsdsfilename(CAS,reorder):
flag = ''#g2g' #good to go
msdsbase = msdsdir
if CAS ==None:
CAS = 'none' #generic
flag = 'dbnoCAS'
#CAS = ''
if reorder==None:
#reorder='none' #generic
reorder = 'none'
#altfname = CAS +'.pdf'
flag = 'dbnoreorder' ### db contains no reorder number old style sds file is the
altfname = CAS +'.pdf'
fname = CAS+'_'+reorder +'.pdf'
#else:
#fname = CAS+'_'+reorder +'.pdf'
#if tmp[-4:] == None:
# fname = tmp[:-5]
#else:
# fname = tmp
#fname += '.pdf'
sdsfilename = msdsbase + fname
webfname = webmsdsdir+fname.split('/')[-1] ##check what this does
sdsaltfilename = msdsbase + altfname
webaltfname = webmsdsdir+altfname.split('/')[-1] ##check what this does
#print(webfname)
#if flag == 'g2g':
if path.isfile(sdsfilename) == True:
link = mkweblink(webfname,fname)
#link = '<A HREF='+ webfname +'>'+fname+'</A>'
message = flag + ''
elif path.isfile(sdsaltfilename) == True:
link = mkweblink(webaltfname,altfname)
message = flag+' No product specific SDS'
else:
link = ''#mkweblink(webfname,fname)
message = flag+ ' missing sds file =>'+fname
#link = '<A HREF='+ webfname +'>'+fname+'</A>'
#if path.isfile(sdsfilename) == True:
# missing = ''
# flag = 0
# pass
#else:
# missing = fname
#print(CAS,reorder)
return link,message
CAS = '10025-77-1__'
reorder = 's25317a'
reorder = None
CAS = '91-17-8'
CAS = None
reorder = '36117'
link,missing = getsdsfilename(CAS,reorder)
#print(link,missing)
# In[36]:
def getstorage_old(CAS,dbfile):
conn = sqlite3.connect(dbfile)
c = conn.cursor()
#regtype = []
c.execute('select HazardClass from Chem where CAS=?',[CAS])
#c.execute('SELECT catid, regtype,bot.name, bot.cas FROM bot, coi WHERE bot.cas =coi.cas AND bot.cas != \'\' AND bot.room != \'retired\' AND bot.room != \'UNK\' ORDER BY room AND bot.room =?',[room])
#c.execute('select RegType from Coi where CAS =?',[CAS])
tmp = c.fetchall()
#print(tmp)
#for i in range(len(tmp1)):
#room.append(tmp1[i][0])
#CATID.append(tmp1[i][0])
if tmp:
HC = tmp[0][0]
else:
HC = 'none'
if HC ==None:
HC = 'none'
conn.commit()
c.close()
return storagedict[HC ]
def getstorage(CAS,dbfile):
conn = sqlite3.connect(dbfile)
c = conn.cursor()
#regtype = []
c.execute('select HazardClass from Chem where CAS=?',[CAS])
#c.execute('SELECT catid, regtype,bot.name, bot.cas FROM bot, coi WHERE bot.cas =coi.cas AND bot.cas != \'\' AND bot.room != \'retired\' AND bot.room != \'UNK\' ORDER BY room AND bot.room =?',[room])
#c.execute('select RegType from Coi where CAS =?',[CAS])
tmp = c.fetchall()
#print(tmp)
#for i in range(len(tmp1)):
#room.append(tmp1[i][0])
#CATID.append(tmp1[i][0])
if tmp:
HC = tmp[0][0]
else:
HC = 'none'
if HC ==None:
HC = 'none'
conn.commit()
c.close()
return HC
def gethazard(CAS,dbfile):
conn = sqlite3.connect(dbfile)
c = conn.cursor()
#regtype = []
c.execute('select Health,Flammability,Reactivity,Special from Chem where CAS=?',[CAS])
#c.execute('SELECT catid, regtype,bot.name, bot.cas FROM bot, coi WHERE bot.cas =coi.cas AND bot.cas != \'\' AND bot.room != \'retired\' AND bot.room != \'UNK\' ORDER BY room AND bot.room =?',[room])
#c.execute('select RegType from Coi where CAS =?',[CAS])
tmp = c.fetchall()
#print(tmp)
#for i in range(len(tmp1)):
#room.append(tmp1[i][0])
#CATID.append(tmp1[i][0])
if tmp:
H = tmp[0][0]
F = tmp[0][1]
R = tmp[0][2]
S = tmp[0][3]
else:
H = None
F = None
R = None
S = None
conn.commit()
c.close()
return H,F,R,S
def getregtype(CAS,dbfile):
conn = sqlite3.connect(dbfile)
c = conn.cursor()
#regtype = []
c.execute('select Regtype from Coi where CAS=?',[CAS])
#c.execute('SELECT catid, regtype,bot.name, bot.cas FROM bot, coi WHERE bot.cas =coi.cas AND bot.cas != \'\' AND bot.room != \'retired\' AND bot.room != \'UNK\' ORDER BY room AND bot.room =?',[room])
#c.execute('select RegType from Coi where CAS =?',[CAS])
tmp = c.fetchall()
#print(tmp)
#for i in range(len(tmp1)):
#room.append(tmp1[i][0])
#CATID.append(tmp1[i][0])
if tmp:
regtype = tmp[0][0]
else:
regtype = None
if regtype == 'None':
regtype == None
conn.commit()
c.close()
return regtype #chemname,CAS,reorder,CATID
def writehtml(ofile,df,roomdf):
room = df.room.unique()
room = room.tolist()[0]
#print(getevaclink(room))
#print(getchplink(room))
#print(getsoplinks(room))
cheminfolink = getcheminfolink(room)
evaclink = getevaclink(room)
chplink = getchplink(room)
soplink = getsoplinks(room)
table = df.style.applymap(highlight_vals, subset=['regtype']).set_table_attributes("border=1").render()
nfpatable = roomdf.to_html(na_rep='0', col_space=12)##ask wes about this
out = ' '.join(('<HTML>\n <HEAD><TITLE>SDS chemical inventory </TITLE> </HEAD>\n<BODY>\n <H1> ',room,
'</H1><H2>Evacuation plans </H2>',evaclink, \
'<H2> Copies of hygiene plans and Standard Operating Procedures </H2>',chplink, soplink, \
'<H2> NFPA max scores </H2>',nfpatable, \
# '<H2> Extra Chemical information </H2>',cheminfolink, \
"<H2> Chemical Inventory </H2>\n \
\n<H4 style=\"color:red\" > RED Column (regtype) indicates Potentially Hazardous Substance warnings</H4>\n \
\n",table,'</BODY>\n </HTML>\n'))
#tp = "<HTML>\n <HEAD><TITLE>SDS chemical inventory </TITLE> \
# </HEAD>\n<BODY>\n \
# <H1>Evaculation plans for this room </H1> \
# <H1> Hygene plans for this room </H1> \
# <H1> NFPA max scores for this room </H1> \
#
# <H1 style=\"color:red\" > Chemical Inventory RED are Potentially Hazardous Substances</H1>\n \
# \n"
#dn = '</BODY>\n </HTML>\n'
with open(ofile, 'w') as f:
f.write(out)
#f.write( tp)
#f.write(df.style.applymap(highlight_vals, subset=['regtype']).set_table_attributes("border=1").render())
#f.write(dn)
os.chmod(ofile, mod)
return
def highlight_vals(val):
if val != 'none':
return 'color: red'
else:
return ''
def color_negative_red(val):
"""
    Takes a scalar and returns a string with the css property
    `'color: red'` when the value is not 'none', black otherwise.
    """
    color = 'red' if val != 'none' else 'black'
    return 'color: {}'.format(color)
#def
def getallbots(dbfile):
conn = sqlite3.connect(dbfile)
c = conn.cursor()
rooms = []
catid = []
CAS = []
d = {}
#c.execute('select catid,room,CAS,reorder,name from Bot')
c.execute('select catid,room,CAS,reorder,name,Manufacturer from Bot')
tmp1 = c.fetchall()
for i in range(len(tmp1)):
catid.append(tmp1[i][0])
rooms.append(tmp1[i][1])
CAS.append(tmp1[i][2])
tmp2 = []
for j in range(1,len(tmp1[i])):
tmp2.append(tmp1[i][j])
d[tmp1[i][0]] = tmp2
conn.commit()
c.close()
return d
# In[37]:
d = getallbots(dbfile)
df= pd.DataFrame.from_dict(d,orient='index')
#df.rename(columns={0:'room',1:'CAS',2:'reorder',3:'name'},inplace=True)
df.rename(columns={0:'room',1:'CAS',2:'reorder',3:'name',4:'Manufacturer'},inplace=True)
df.index.name = 'CATID'
df['regtype'] = None
df['msds_file'] = None
df['messages'] = None
df['storage'] = None
#df['chemclass'] = None #cameo
#print(df['CAS'][df.index == 110586])
roomsdf = df.room.unique()
roomslist = roomsdf.tolist()
#filter rooms that should not be rooms
tmp = [ x for x in roomslist if 'retire' not in x ]
tmp = [ x for x in tmp if 'combine' not in x ]
tmp = [ x for x in tmp if 'neu' not in x ]
rooms = tmp
#CAS = '7440-38-2'
#CAS = '993-43-1'
#CAS = 'na4'
#CAS = '1-0-0001'
#regtype = getregtype(CAS,dbfile)
#HC = getstorage(CAS,dbfile)
#H,F,R,S = gethazard(CAS,dbfile)
#print(HC)
missinglist = []
for CATID in df.index:
#print('CATID',CATID)
CAS = df.loc[CATID].CAS
reorder = df.loc[CATID].reorder
regtype = getregtype(CAS,dbfile)
HC = getstorage(CAS,dbfile)
link,message = getsdsfilename(CAS,reorder)
    missinglist.append(message)  # record this bottle's SDS status; 'missing' was a stale module-level leftover
df.set_value(CATID,'storage',HC)
df.set_value(CATID,'regtype',regtype)
df.set_value(CATID,'msds_file',link)
df.set_value(CATID,'messages',message)
#######other links put this accessable via cheminfo webpage
#df['chemclass'] = None #cameo
#store = HDFStore(cheminfodata+'store.h5')
#dfs = store['dfstruct'] # load it
#dfs = dfs[['name','cid','smiles']]
#PandasTools.AddMoleculeColumnToFrame(dfs,'smiles','Molecule',includeFingerprints=True)
#rgn = 31
#paturl = '<a href=https://cameochemicals.noaa.gov/react/'+'{0}'.format(rgn)+ '> CAMEO </a>'
#smilepat = 'C1=CC=C(C=C1)O' #phenol
#pat = Chem.MolFromSmiles(smilepat)
#founddf = dfs[dfs['Molecule'] >=pat]
#caswithpat = founddf.index.tolist()
#df['chemclass'][df['CAS'].isin(caswithpat)] = paturl
# concat reorder and manufacture
#df['manreorder'] = df.reorder.astype(str).str.cat(df.Manufacturer.astype(str), sep=', ')
#TODO: Make a dictionary to convert HC to storage info
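# One possible shape for the TODO above (sketch, left commented out so behaviour
# is unchanged): translate the HazardClass codes now held in df['storage'] into
# the human-readable labels defined in storagedict near the top of this script.
# df['storage'] = df['storage'].fillna('none').map(
#     lambda hc: storagedict.get(hc, '{}:checkme'.format(hc)))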
dumplist = ['retire*','UNK','combine','neut','Unk']
mask = df.room.notnull()
for item in dumplist:
mask = mask & ~df.room.str.contains(item)
#mask = df.room.isin( dumplist)
dfout = df[mask].sort_values('room')
#filter out hazardous waste bottles from inventory
#mask = ~df.CAS.str.contains('0-0-0000')
#dfout = dfout[mask]
#dfrooms = {}# put here room specific hygene plan, evac plans, nfps
#print(dfout['room'])
#print(mask)
#
#if path.isfile(fname) == True:
# pass
#else:
# missing = fname
# In[38]:
#df[df['reorder'] == 'None']
#df.head()
#dfout[['CAS','reorder']]
#dfout[dfout['CAS'] == '77-09-8']
#dfout
#CAS = '110-82-7'
#room = 'NS205'#
#H,F,R,S = gethazard(CAS,dbfile)
#print(H,F,R,S)
#for room in rooms:
# tmp = dfout[dfout['room'] == room]['CAS']
# hazdict = {}
# for CAS in tmp.unique():
# H,F,R,S = gethazard(CAS,dbfile)
# #TODO: convert all non-numbers to 0
# hazdict[CAS] = [H,F,R,S]
# In[39]:
tp = '<HTML>\n <HEAD><TITLE>SDS chemical inventory searchable </TITLE></HEAD>\n<BODY>\n<H1 style=\"color:red\" > College of Arts and Sciences Chemical Inventory</H1>\n<H2>Survey for Acknowledgment link of Safety Training</H2> <a href="https://wcu.az1.qualtrics.com/jfe/form/SV_9AIPM7mTueMaA8B">Survey Link</a>\n'
hd = '<H1>'
he = '</H1>\n'
lt = '<UL>'
le = '</UL>'
li = '<LI>'
dn = '</BODY>\n </HTML>\n'
# In[40]:
def findmaxhaz(L):
newL = []
for code in set(L):
#print(code)
try:
val = int(code)
except ValueError:
pass
else:
newL.append(val)
    maxhaz = np.max(newL) if newL else 0  # guard: np.max raises on an empty list when no numeric codes are found
return maxhaz
def findShazmat(L):
if 'w' in L:
Shazmat = 'Water Reactive'
else:
Shazmat = ''
return Shazmat
def mkhazardtable2(room,df):
tmpcas = set(df[df['room'] == room]['CAS'])
#print(tmpcas)
hazdict = {}
for i,CAS in enumerate(tmpcas):
#print(CAS)
H,F,R,S = gethazard(CAS,dbfile)
#print(i,H,F,R,S)
hazdict[CAS] = [H,F,R,S]
#print(hazdict[CAS])
#hdf = pd.DataFrame(hazdict,dtype=[int,int,int,str]).T
hdf = pd.DataFrame(hazdict,index=['H','F','R','S']).T#,
hdf.replace(np.nan,0,inplace=True)
hdf.replace('na',0,inplace=True)
roomd = {}
for hc in ['H','F','R']:
L = hdf[hc].tolist()
maxhaz = findmaxhaz(L)
roomd[hc] = maxhaz
LS = list(set(hdf['S']))
Shazmat = findShazmat(LS)
roomd['S'] = Shazmat
roomdf = pd.DataFrame(pd.Series(roomd))
roomdf.rename(columns={0:'max score'},inplace=True)
#dtypes={'H':'int','F':'int','R':'int','S':'str'}
#hdf['S'] = hdf['S'].apply(lambda x: str(x))
#hdf.rename(columns={0:'H',1:'F',2:'R',3:'S'},inplace=True)
#for c in hdf.columns:
#print(hdf[c].astype(dtypes[c]),c,dtypes[c])
#hdf[c] = hdf[c].astype(dtypes[c])
return roomdf,hdf
# In[41]:
##output room files
roomsarray = dfout.room.unique()
rooms = roomsarray.tolist()
if '' in rooms:
rooms.remove('')
else:
pass
#rooms.remove('')
#rooms = ['NS202']
#rooms = ['NS323']
deloldhtmlfiles() ###delete old room files
for room in rooms:
dfroomout = dfout[dfout.room == room].replace(np.nan,' ')
mask = ~dfroomout.CAS.str.contains('0-0-0000')#filter out waste
dfroomout = dfroomout[mask]
mask = ~dfroomout.CAS.str.contains('1-0-0001')#filter out blank
dfroomout = dfroomout[mask]
if dfroomout.empty:
pass
else:
#print('room',room)
roomdf,hdf = mkhazardtable2(room,dfroomout)
ofile = htmldir+'sds_'+room +'.html'
#writehtml(ofile,dfroomout.sort_values('name'),roomdf)
writehtml(ofile,dfroomout.sort_values(['storage','name']),roomdf)
# In[42]:
room
# In[47]:
##output flat
dfout.replace(np.nan,' ',inplace=True)
dfout.sort_values(['room','storage','name'],inplace=True)
ofile = htmldir+'flat.html'
if os.path.isfile(ofile) == True:
remove(ofile)
else:
pass
with open(ofile, 'w') as f:
f.write( tp)
f.write(dfout.style.applymap(highlight_vals, subset=['regtype']).set_table_attributes("border=1").render())
f.write(dn)
os.chmod(ofile, mod)
# In[44]:
datestamp ='Website last updated: '+ time.strftime("%Y-%m-%d %H:%M")
#write master sds file index file
tp = '<HTML>\n <HEAD><TITLE>SDS chemical inventory </TITLE></HEAD>\n<BODY>\n<H1 style=\"color:red\" > Links in RED are Potentially Hazardous Substances</H1>\n<H2>See the last words in each red link for additional info</H2>\n'
tp = '<HTML>\n <HEAD><TITLE>SDS chemical inventory searchable </TITLE></HEAD>\n<BODY>\n<H1 style=\"color:red\" > College of Arts and Sciences Chemical Inventory</H1>\n<H2>Survey for Acknowledgment link of Safety Training</H2> <a href="https://wcu.az1.qualtrics.com/jfe/form/SV_9AIPM7mTueMaA8B">Survey Link</a>\n'
dn = '</BODY>\n </HTML>\n'
lt = '<UL>'
le = '</UL>'
li = '<LI>'
ofile = htmldir+'index.html'
htmlbase=htmldir
files = glob.glob(htmldir+"sds_*.html")
f = open(ofile, 'w')
f.write( tp)
f.write(lt)
flatlink = '<H2><A HREF=flat.html>The Whole Enchilada</A></H2>\n'
f.write(flatlink)
for file in np.sort(files):
#print(file)
#room = file.split('_')[1].split('.')[0] ###old
room = file.split('/')[-1].split('_')[-1].split('.')[0]
pathfile = webhtmldir+file.split('/')[-1]
link = li+'<A HREF='+pathfile+'>'+room+'</A>\n'
#print(room)
f.write(link)
f.write(le)
#f.write('<H3> <a href="cheminfodata/test.html">Extra Chemical Information</a></H3>')
f.write('<H3> <a href="ZZZ_problems.html">DB Integrety Checks</a></H3>')
f.write(datestamp)
f.write(dn)
f.close()
#copy index.html to sds.html
sdsfile = htmldir+'sds.html'
remove(sdsfile)
shutil.copy2(ofile,sdsfile)
os.chmod(sdsfile,mod)
#os.chmod(ofile, mod)# i don't have ownership to this file
# In[45]:
msg = 'website complete at '
etime = time.strftime("%Y-%m-%d %H:%M")
print(msg,etime)
# In[29]:
#rooms = roomsarray.tolist()
#print(rooms)
# In[30]:
#fname = '/wwbintz/public_html/msds/7664-93-9_290000acs.pdf'
#webmsdsdir+fname.split('/')[-1]
# In[31]:
#roomdf.head()
#roomdf.to_html(col_space=12)
# In[32]:
##chmod for msds and Lab_specific blah
#files = glob.glob(msdsdir+'*')
#for file in files:
# os.chmod(file, mod)
#
#dirs = glob.glob(safetyplansdir+'*')
#for d in dirs:
# os.chmod(d, mod)
# path = d+'/'
# #print(path)
# files = glob.glob(path+'*')
# #print(files)
# for file in files:
# #print(file)
# os.chmod(file, mod)
# In[33]:
#print(room,file,link)
#file.split('/')[-1].split('_')[-1].split('.')[0]
#room='NS202'
#dfroomout = dfout[dfout.room == room]
#room = dfroomout.room.unique()
#room = room.tolist()[0]
#room
# In[34]:
#Hdf = pd.DataFrame(hazdict).T
#Hdf.rename(columns={0:'H',1:'F',2:'R',3:'S'},inplace=True)
#Hdf['H'].isnull()
#Hdf['H'].max()
#Hcol = Hdf['H'].as_matrix()
#L = 'H'
#roomdf = Hdf[Hdf[L] == Hdf[L].isnull()].max()
#print(Hdf['S'][Hdf['S'] == 'w'])
#print(Hdf['S'])
#TODO: list all S in room( W = Water reactive)
#print(Hdf.max(axis=0,skipna=True))
#print(Hdf.head())
#nfpasdict = {'w':"Water Reactive",'NaN':' '}
#roomdf['S'] = nfpasdict['w']
#roomdf = pd.DataFrame(roomdf)
#roomdf.rename(columns={0:'table'})
#rooms
#def mkhazardtable(room,dfout):
# nfpasdict = {'w':"Water Reactive",'ox':' ','na':' '}
# tmp = dfout[dfout['room'] == room]['CAS']
# hazdict = {}
# for CAS in tmp.unique():
# H,F,R,S = gethazard(CAS,dbfile)
# print(H,F,R,S)
# #if S == 'ox':
# #S = none
# if S == None:
# S = 'na'
# if S == '':
# S = 'na'
# #TODO: convert all non-numbers to 0
# hazdict[CAS] = [H,F,R,S]
# print(hazdict[CAS])
# Hdf = pd.DataFrame(hazdict).T
# Hdf.rename(columns={0:'H',1:'F',2:'R',3:'S'},inplace=True)
# #print(Hdf)
# roomd = {}
# for L in ['H','F','R']: ######loop through each safety and generate series
# tmp = Hdf[L][Hdf[L].notnull()].as_matrix()
#print('room',room,'tmp',tmp)
##check if tmp is empty
# if not tmp[tmp != 'na'].all():
# roomd[L] = 0
# elif not np.unique(tmp).any():
# roomd[L] = 0
# else:
#print(pd.Series(tmp[np.core.defchararray.isnumeric(tmp)]).max())
# print('room',room,'tmp',np.unique(tmp))
# roomd[L] = tmp[tmp != 'na'].max()
#roomd[L] = Hdf[L][Hdf[L] == Hdf[L].isnull()].max()
# tmp = Hdf['S'][Hdf['S'].notnull()]
#print('S',np.unique(tmp))
# if 'w' in np.unique(tmp):
#print('here')
# roomd['S'] = nfpasdict['w']
# if tmp.empty:
# roomd['S'] = nfpasdict['na']
#print(room,roomd)
# roomdf = pd.DataFrame(pd.Series(roomd))
#print('roomdf',roomdf)
# roomdf.rename(columns={0:'max score'},inplace=True)
# return roomdf
#room = 'NS205'
#L = 'S'
#tmp = pd.Series({'F': 0, 'H': 0, 'R': 0})
#print(tmp)
#roomdf,Hdf = mkhazardtable(room,dfout)
#print(roomdf)
#tmp = Hdf[L][Hdf[L].notnull()]
#print(np.unique(tmp))
#if 'w' in np.unique(tmp):
#print('here')
#tmp[tmp != 'na'].max()
#print(S)
#print(roomdf)
# In[35]:
#python script.py >> /wwbintz/matinsy/var/websync.log 2>&1
# ## db integrity checking
# maybe put this stuff in a different file
# In[ ]:
| bbqtaco/matinsy | mksdswebsite.py | Python | gpl-3.0 | 28,123 |
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/datasets/__init__.py | Python | apache-2.0 | 3,616 |
# -*- coding: utf-8 -*-
import os
import wx
from DynaUI import *
SF_000A4 = wx.SizerFlags().Border(wx.ALL, 4)
SF_110A4 = wx.SizerFlags().Expand().Border(wx.ALL, 4).Proportion(1)
SF_410A4 = wx.SizerFlags().Expand().Border(wx.ALL, 4).Proportion(4)
SF_010A4 = wx.SizerFlags().Expand().Border(wx.ALL, 4)
SF_001A4 = wx.SizerFlags().Center().Border(wx.ALL, 4)
SF_010A2 = wx.SizerFlags().Expand().Border(wx.ALL, 2)
SIZE_NORMAL = wx.Size(48, 30)
SIZE_LONG = wx.Size(80, 30)
SIZE_SQUARE = wx.Size(24, 24)
# ===================================================== Showcase ======================================================
class ExampleUI(wx.Frame):
def __init__(self, r, s, l):
self.R = r
self.S = s
self.L = l
super().__init__(parent=None, title="DynaUI", pos=wx.DefaultPosition, size=wx.Size(1280, 800))
self.SetIcon(wx.Icon(self.R["__DynaUI__"]))
self.SetFont(self.R["FONT_N"])
self.SetDoubleBuffered(True)
self.Tool = Tool(self, edge=("", "B"))
self.VTool = Tool(self, edge=("T", "RB"), orientation=wx.VERTICAL)
self.Info = Info(self, edge=("T", ""))
self.Info.AddItems((wx.StaticText(self.Info, label="status"), 0))
items = (
("N", "TOOL_SETTING", None), "|",
("N", "TOOL_SETTING", (self.Tool.SetItemSize, wx.Size(64, 64))),
("N", "TOOL_SETTING", (self.Tool.SetItemSize, wx.Size(32, 32))), "|",
("T", "TOOL_SETTING", None, {"toggle": 1, "res": "R"}), "|",
("B", "TOOL_SETTING", None, {"toggle": 1, "group": "whatever", "res": "X"}),
("B", "TOOL_SETTING", None, {"toggle": 0, "group": "whatever", "res": "Y"}),
("B", "TOOL_SETTING", None, {"toggle": 0, "group": "whatever", "res": "Z"}),)
self.Tool.AddItems(*items)
self.VTool.AddItems(*items)
self.Main = BaseControl(self)
MainSizer = wx.BoxSizer(wx.VERTICAL)
self.Main.SetSizer(MainSizer)
# ==================================================
MainSizer.Add(SectionHead(self.Main, orientation=wx.HORIZONTAL, tag="---- Various Buttons | Resource/Border Style ----"), SF_010A4)
Sizer = wx.WrapSizer()
Sizer.Add(ButtonToggle(self.Main, size=SIZE_NORMAL, tag="Off", tag2="On", toggle=True), SF_000A4)
self.AddSeparator(Sizer)
Sizer.Add(ButtonBundle(self.Main, size=SIZE_NORMAL, tag="1/3", group="whatever"), SF_000A4)
Sizer.Add(ButtonBundle(self.Main, size=SIZE_NORMAL, tag="2/3", group="whatever", toggle=True), SF_000A4)
Sizer.Add(ButtonBundle(self.Main, size=SIZE_NORMAL, tag="3/3", group="whatever"), SF_000A4)
self.AddSeparator(Sizer)
Sizer.Add(Button(self.Main, size=SIZE_NORMAL, tag="X", res="X"), SF_000A4)
Sizer.Add(Button(self.Main, size=SIZE_NORMAL, tag="Y", res="Y"), SF_000A4)
Sizer.Add(Button(self.Main, size=SIZE_NORMAL, tag="Z", res="Z"), SF_000A4)
self.AddSeparator(Sizer)
Sizer.Add(PickerColor(self.Main, size=SIZE_NORMAL), SF_000A4)
Sizer.Add(PickerFont(self.Main, size=SIZE_NORMAL), SF_000A4)
Sizer.Add(PickerDirection(self.Main, size=SIZE_NORMAL), SF_000A4)
Sizer.Add(PickerNumber(self.Main, size=SIZE_NORMAL), SF_000A4)
Sizer.Add(PickerValue(self.Main, size=SIZE_NORMAL, selected=-1, choices=[str(i) for i in range(40)]), SF_000A4)
self.AddSeparator(Sizer)
b1 = Button(self.Main, size=SIZE_NORMAL, tag=("Click", "B", 0, -2), pic=(self.R["UI_IMAGE2"], "T", 0, 2), func=lambda: (b2.Enable(not b2.IsEnabled()), b2.ReDraw()))
b2 = Button(self.Main, size=SIZE_NORMAL, tag=("Click", "LT", 2, 2), pic=(self.R["UI_IMAGE2"], "RB", -2, -2), func=lambda: (b1.Enable(not b1.IsEnabled()), b1.ReDraw()))
Sizer.Add(b1, SF_000A4)
Sizer.Add(b2, SF_000A4)
Sizer.Add(Slider(self.Main, size=(212, 30)), SF_010A4)
Sizer.Add(HyperLink(self.Main, size=SIZE_LONG, tag=("GitHub", "L", 24), pics=(self.R["AP_ARROW_U"], "L", 4), url="github.com/yadizhou/DynaUI"), SF_110A4)
Sizer.Add(SwitchingText(self.Main, size=wx.Size(80, 20), values=("Switching", "Text", "Example"), bg="D"), SF_000A4)
MainSizer.Add(Sizer)
Sizer = wx.WrapSizer()
for res in ("D", "L", "B"):
for edge in (None, "D", "L", "EM", "BE", "H", "V"):
Sizer.Add(Button(self.Main, size=SIZE_NORMAL, tag="%s\n%s" % (res, edge), res=res, edge=edge), SF_000A4)
if res != "B":
self.AddSeparator(Sizer)
MainSizer.Add(Sizer)
# ==================================================
MainSizer.Add(SectionHead(self.Main, orientation=wx.HORIZONTAL, tag="---- Position: Tag & Image | Miscellaneous ----"), SF_010A4)
Sizer = wx.BoxSizer(wx.HORIZONTAL)
GridSizer = wx.GridSizer(3, 0, 0)
for pos in ("LT", "T", "RT", "L", "C", "R", "LB", "B", "RB"):
GridSizer.Add(Button(self.Main, size=SIZE_SQUARE, tag=(pos, pos), res="L", edge="EM"))
Sizer.Add(GridSizer, SF_000A4)
GridSizer = wx.GridSizer(3, 0, 0)
for pos in ("LT", "T", "RT", "L", "C", "R", "LB", "B", "RB"):
GridSizer.Add(ButtonBundle(self.Main, size=SIZE_SQUARE, pic=(self.R["UI_IMAGE2"], pos), group="whatever", res="D", edge="BE"))
Sizer.Add(GridSizer, SF_000A4)
Sizer.Add(Line(self.Main, orientation=wx.VERTICAL), SF_010A4)
SubSizer = wx.BoxSizer(wx.HORIZONTAL)
SubSizer.Add(StaticBitmap(self.Main, bitmap=self.R["UI_IMAGE3"], size=wx.Size(48, 48), bg="B"), SF_000A4)
SubSizer.Add(Line(self.Main), SF_010A4)
SubSizer.Add(TextWithHint(self.Main, hint="TextWithHint", style=wx.TE_MULTILINE), SF_110A4)
Sizer.Add(SubSizer, SF_010A4)
Sizer.Add(Line(self.Main, orientation=wx.VERTICAL), SF_010A4)
GridSizer = wx.GridSizer(2, 0, 0, 0)
GridSizer.Add(Button(self.Main, size=SIZE_LONG, tag="Dialog1", func=(self.ShowDialog, Dialog1)))
GridSizer.Add(Button(self.Main, size=SIZE_LONG, tag="Dialog2", func=(self.ShowDialog, Dialog2)))
GridSizer.Add(Button(self.Main, size=SIZE_LONG, tag="Dialog3", func=(self.ShowDialog, Dialog3)))
GridSizer.Add(Button(self.Main, size=SIZE_LONG, tag="Dialog4", func=(self.ShowDialog, Dialog4)))
GridSizer.Add(Button(self.Main, size=SIZE_LONG, tag="Dialog5", func=(self.ShowDialog, Dialog5)))
Sizer.Add(GridSizer, SF_000A4)
Sizer.Add(Line(self.Main, orientation=wx.VERTICAL), SF_010A4)
MainSizer.Add(Sizer, 0, wx.EXPAND)
# ==================================================
MainSizer.Add(SectionHead(self.Main, orientation=wx.HORIZONTAL, tag="------- Scrolled --------"), SF_010A4)
Sizer = wx.BoxSizer(wx.HORIZONTAL)
Sizer.Add(ListCtrl(self.Main, (("ListBox",), ("is",), ("one",), ("column",), ("ListCtrl",)) * 100, (-1,)), SF_110A4)
Sizer.Add(ListCtrl(self.Main, [("A%s" % i, "B%s" % i, "C%s" % i) for i in range(200)], (-2, -2, -2)), SF_110A4)
iv = ImageViewer(self.Main, self.R["UI_IMAGE4"])
Sizer.Add(Hider(self.Main, targets=(iv,), edge=None), SF_010A2)
Sizer.Add(iv, SF_410A4)
MainSizer.Add(Sizer, 1, wx.EXPAND)
MainSizer.Add(SectionHead(self.Main, orientation=wx.HORIZONTAL, tag="------- ArtProvider --------"), SF_010A4)
Sizer = wx.WrapSizer()
for key in self.R:
if key.startswith("AP_"):
t = ToolNormal(self.Main, size=SIZE_SQUARE, pics=self.R[key], res="_L", edge=None)
t.SetTip(self.Info.SetStatus, key)
Sizer.Add(t, SF_000A4)
MainSizer.Add(Sizer, 0, wx.EXPAND)
# ==================================================
MiddleSizer = wx.BoxSizer(wx.HORIZONTAL)
MiddleSizer.Add(self.VTool, 0, wx.EXPAND)
MiddleSizer.Add(self.Main, 1, wx.EXPAND)
FrameSizer = wx.BoxSizer(wx.VERTICAL)
FrameSizer.Add(self.Tool, 0, wx.EXPAND)
FrameSizer.Add(MiddleSizer, 1, wx.EXPAND)
FrameSizer.Add(self.Info, 0, wx.EXPAND)
self.SetSizer(FrameSizer)
self.Layout()
self.Center(wx.BOTH)
def ShowDialog(self, dlg):
d = BaseDialog(self, "Dialog", main=dlg)
d.SetSize(d.GetEffectiveMinSize())
d.Center()
d.Play("FADEIN")
def AddSeparator(self, Sizer):
Sizer.Add(Separator(self.Main, orientation=wx.VERTICAL), SF_010A4)
class Dialog1(BaseMain):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
Sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(Sizer)
self.AddSectionHead(Sizer, tag="SectionHead", shape="C")
self.AddButton(Sizer, label="Button", tag="Click me")
self.AddButtonToggle(Sizer, label="ButtonToggle", tags=("No", "Yes"))
self.AddButtonBundle(Sizer, label="ButtonBundle", choices=list("012345"), rows=2)
self.AddStaticText(Sizer, label="StaticText", value="Dialog Example")
self.AddLineCtrl(Sizer, label="LineCtrl")
self.AddTextCtrl(Sizer, label="TextCtrl")
self.AddListBox(Sizer, label="ListBox", choices=list("012345"), selected=3)
self.AddPickerValue(Sizer, label="PickerValue", choices=list("012345"), selected=2)
self.AddSeparator(Sizer)
self.AddPickerFile(Sizer, label="PickerFile")
class Dialog2(BaseMain):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
Sizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(Sizer)
Left = wx.BoxSizer(wx.VERTICAL)
Middle = wx.BoxSizer(wx.VERTICAL)
Right = wx.BoxSizer(wx.VERTICAL)
self.AddButton(Left, label="", tag="Button", width=40)
self.AddButton(Left, label="", tag="Button", width=-1)
self.AddButton(Left, label="123", tag="Button", width=40)
self.AddButton(Left, label="123", tag="Button", width=-1)
self.AddSeparator(Left)
self.AddButtonToggle(Left, label="", tags=("Button", "Nottub"), width=40)
self.AddButtonToggle(Left, label="", tags=("Button", "Nottub"), width=-1)
self.AddButtonToggle(Left, label="123", tags=("Button", "Nottub"), width=40)
self.AddButtonToggle(Left, label="123", tags=("Button", "Nottub"), width=-1)
self.AddSeparator(Left)
self.AddPickerValue(Left, label="", choices=("A", "B", "C"), width=40)
self.AddPickerValue(Left, label="", choices=("A", "B", "C"), width=-1)
self.AddPickerValue(Left, label="123", choices=("A", "B", "C", "D", "E"), width=40)
self.AddPickerValue(Left, label="123", choices=("A", "B", "C", "D", "E", "F"), width=-1)
self.AddStaticText(Middle, label="STA", value="ssss", width=-1)
self.AddLineCtrl(Middle, "TEXT", width=100)
self.AddLineCtrl(Middle, "TEXT", width=-1)
self.AddSeparator(Middle)
self.AddTextCtrl(Middle, "TEXT", inline=1, height=-1, width=-1)
self.AddTextCtrl(Middle, "TEXT", inline=1, height=40, width=-1)
self.AddTextCtrl(Middle, "TEXT", inline=1, height=-1, width=40)
self.AddTextCtrl(Middle, "TEXT", inline=1, height=40, width=40)
self.AddTextCtrl(Right, "TEXT", inline=0, height=-1, width=-1)
self.AddTextCtrl(Right, "TEXT", inline=0, height=40, width=-1)
self.AddTextCtrl(Right, "TEXT", inline=0, height=-1, width=40)
self.AddTextCtrl(Right, "TEXT", inline=0, height=40, width=40)
self.AddTextCtrl(Right, "", inline=0, height=-1, width=-1)
self.AddTextCtrl(Right, "", inline=0, height=40, width=-1)
self.AddTextCtrl(Right, "", inline=0, height=-1, width=40)
self.AddTextCtrl(Right, "", inline=0, height=40, width=40)
self.AddStdButton(Right)
Sizer.Add(Left, 0, wx.EXPAND)
self.AddSeparator(Sizer)
Sizer.Add(Middle, 1, wx.EXPAND)
self.AddSeparator(Sizer)
Sizer.Add(Right, 1, wx.EXPAND)
class Dialog3(BaseMain):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
Sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(Sizer)
self.AddListBox(Sizer, "List", width=-1, height=-1, inline=1, choices=list("123456"))
self.AddListBox(Sizer, "List", width=-1, height=-1, inline=0, choices=list("123456"))
self.AddListBox(Sizer, "List", width=-1, height=60, inline=1, choices=list("123456"))
self.AddListBox(Sizer, "List", width=-1, height=60, inline=0, choices=list("123456"))
self.AddListBox(Sizer, "List", width=60, height=-1, inline=1, choices=list("123456"))
self.AddListBox(Sizer, "List", width=60, height=-1, inline=0, choices=list("123456"))
self.AddListBox(Sizer, "List", width=60, height=60, inline=1, choices=list("123456"))
self.AddListBox(Sizer, "List", width=60, height=60, inline=0, choices=list("123456"))
class Dialog4(BaseMain):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
Sizer = wx.BoxSizer(wx.HORIZONTAL)
Left = self.AddPerpendicularSizer(Sizer, 1, wx.EXPAND)
self.AddLine(Sizer)
Right = self.AddPerpendicularSizer(Sizer, 1, wx.EXPAND)
self.SetSizer(Sizer)
self.AddButtonBundle(Left, label="", width=24, choices=list("123456"), rows=1)
self.AddButtonBundle(Left, label="", width=24, choices=list("123456"), rows=2)
self.AddButtonBundle(Left, label="Bundled", width=24, choices=list("123456"), rows=3)
self.AddButtonBundle(Left, label="Bundled", width=24, choices=list("123456"), rows=4)
self.AddButtonBundle(Left, label="Bundled", width=24, choices=list("123456"), rows=5)
self.AddButtonBundle(Left, label="Bundled", width=24, choices=list("123456"), rows=6)
self.AddButtonBundle(Left, label="Bundled", width=24, choices=list("123456"), rows=7)
self.AddButtonBundle(Right, label="", width=24, choices=list("1234567"), rows=1)
self.AddButtonBundle(Right, label="", width=24, choices=list("1234567"), rows=2)
self.AddButtonBundle(Right, label="Bundled", width=24, choices=list("1234567"), rows=3)
self.AddButtonBundle(Right, label="Bundled", width=24, choices=list("1234567"), rows=4)
self.AddButtonBundle(Right, label="Bundled", width=24, choices=list("1234567"), rows=5)
self.AddButtonBundle(Right, label="Bundled", width=24, choices=list("1234567"), rows=6)
self.AddButtonBundle(Right, label="Bundled", width=24, choices=list("1234567"), rows=7)
class Dialog5(BaseMain):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
Sizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(Sizer)
Sizer.Add(SectionHead(self, orientation=wx.VERTICAL, tag=("Z", "T"), shape="R"), SF_010A2)
Sizer.Add(SectionHead(self, orientation=wx.VERTICAL, tag="Z", shape="C"), SF_010A2)
Sizer.Add(SectionHead(self, orientation=wx.VERTICAL, tag=("Z", "B"), shape="L"), SF_010A2)
Sizer.Add(SectionHead(self, orientation=wx.VERTICAL, tag="Z", shape="R"), SF_010A2)
Sizer.Add(SectionHead(self, orientation=wx.VERTICAL, tag=("Z", "T"), shape="B", zOrder=1), SF_010A2)
Sizer.Add(SectionHead(self, orientation=wx.VERTICAL, tag="Z", shape="R", zOrder=1), SF_010A2)
Sizer.Add(SectionHead(self, orientation=wx.VERTICAL, tag=("Z", "B"), shape="C", zOrder=1), SF_010A2)
Sizer.Add(SectionHead(self, orientation=wx.VERTICAL, tag="Z", shape="L", zOrder=1), SF_010A2)
Sizer.Add(SectionHead(self, orientation=wx.VERTICAL, tag=("Z", "T"), shape="R", zOrder=1), SF_010A2)
SubSizer = wx.BoxSizer(wx.VERTICAL)
SubSizer.Add(SectionHead(self, orientation=wx.HORIZONTAL, tag=" TWO ARROW ", shape="B"), SF_010A2)
SubSizer.Add(SectionHead(self, orientation=wx.HORIZONTAL, tag=" RECTANGLE ", shape="S"), SF_010A2)
SubSizer.Add(SectionHead(self, orientation=wx.HORIZONTAL, tag=" TWO CORNER ", shape="C"), SF_010A2)
SubSizer.Add(SectionHead(self, orientation=wx.HORIZONTAL, tag=" LEFT ARROW ", shape="L"), SF_010A2)
SubSizer.Add(SectionHead(self, orientation=wx.HORIZONTAL, tag=" RIGHT ARROW", shape="R"), SF_010A2)
Sizer.Add(SubSizer, 1)
SubSizer = wx.BoxSizer(wx.VERTICAL)
SubSizer.Add(SectionHead(self, orientation=wx.HORIZONTAL, tag=" TWO ARROW ", shape="B", zOrder=1), SF_010A2)
SubSizer.Add(SectionHead(self, orientation=wx.HORIZONTAL, tag=" RECTANGLE ", shape="S", zOrder=1), SF_010A2)
SubSizer.Add(SectionHead(self, orientation=wx.HORIZONTAL, tag=" TWO CORNER ", shape="C", zOrder=1), SF_010A2)
SubSizer.Add(SectionHead(self, orientation=wx.HORIZONTAL, tag=" LEFT ARROW ", shape="L", zOrder=1), SF_010A2)
SubSizer.Add(SectionHead(self, orientation=wx.HORIZONTAL, tag=" RIGHT ARROW", shape="R", zOrder=1), SF_010A2)
Sizer.Add(SubSizer, 1)
if __name__ == "__main__":
App = wx.App(redirect=0)
Resource = Resource()
Setting = Setting()
Locale = Locale()
MAIN_PATH = os.path.dirname(os.path.realpath(__file__))
Here = lambda f="": os.path.join(MAIN_PATH, f)
Resource["TOOL_SETTING"] = GetBitmaps(wx.Bitmap(Here("image1.png")), 20, 20)
Resource["UI_IMAGE2"] = wx.Bitmap(Here("image2.png"))
Resource["UI_IMAGE3"] = wx.Bitmap(Here("image3.png"))
Resource["UI_IMAGE4"] = wx.Bitmap(Here("image4.jpg"))
Locale["TOOL_SETTING"] = "Tool Buttons"
Frame = ExampleUI(Resource, Setting, Locale)
Frame.Show()
App.MainLoop()
| yadizhou/DynaUI | DynaUI/demo/demo.py | Python | gpl-3.0 | 17,491 |
__version_info__ = (0, 2, 0, 'final', 0)
def get_version():
version = '%s.%s' % (__version_info__[0], __version_info__[1])
if __version_info__[2]:
version = '%s.%s' % (version, __version_info__[2])
if __version_info__[3] != 'final':
version = '%s%s' % (version, __version_info__[3])
if __version_info__[4]:
version = '%s%s' % (version, __version_info__[4])
return version
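# For example, the current (0, 2, 0, 'final', 0) yields '0.2', while a
# hypothetical (0, 3, 1, 'beta', 2) would yield '0.3.1beta2'.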
__version__ = get_version()
| zsiciarz/pyaavso | pyaavso/__init__.py | Python | mit | 456 |
from django.conf.urls import patterns, include, url
from pdp import views
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'enableIndia.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', views.login),
url(r'^login/', views.login, name='login'),
url(r'^loginconfirm/', views.loginconfirm, name='loginconfirm'),
url(r'^mentor/', views.mentor_dashboard, name='mentor_dashboard'),
url(r'^mentee/', views.mentee_dashboard, name='mentee_dashboard'),
url(r'^administrator/', views.admin_dashboard, name='admin_dashboard'),
url(r'^moderator/', views.moderator_dashboard, name='moderator_dashboard'),
url(r'^register/', views.register, name='register'),
url(r'^search/', views.search, name='search'),
url(r'^checklist/[a-zA-Z]+/[a-zA-Z:/\.]+/', views.checklist, name='checklist'),
)
| pratheekms/cfg14 | enableIndia/urls.py | Python | mit | 961 |
"""initial migration
Revision ID: 310d770bc073
Revises:
Create Date: 2017-03-26 19:56:35.383426
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '310d770bc073'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table('Entity_Types',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('delete_ts', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table('Entities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('entity_type_id_fk', sa.Integer(), nullable=False),
sa.Column('parent_id_fk', sa.Integer(), nullable=True),
sa.Column('last_data_fetch_ts', sa.Integer(), nullable=False, default=0),
sa.Column('delete_ts', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['entity_type_id_fk'], ['Entity_Types.id'], ),
sa.ForeignKeyConstraint(['parent_id_fk'], ['Entities.id'], ),
sa.PrimaryKeyConstraint('id'))
op.create_table('Meta_Attributes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('entity_type_id_fk', sa.Integer(), nullable=False),
sa.Column('delete_ts', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['entity_type_id_fk'], ['Entity_Types.id'], ),
sa.PrimaryKeyConstraint('id'))
op.create_table('Series_Attributes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('entity_type_id_fk', sa.Integer(), nullable=False),
sa.Column('delete_ts', sa.Integer(), nullable=True),
sa.Column('type', sa.Enum('real', 'enum', name='series_type'), nullable=False,
default='real'),
sa.Column('refresh_time', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['entity_type_id_fk'], ['Entity_Types.id'], ),
sa.PrimaryKeyConstraint('id'))
op.create_table('Tag_Attributes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('entity_type_id_fk', sa.Integer(), nullable=False),
sa.Column('delete_ts', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['entity_type_id_fk'], ['Entity_Types.id'], ),
sa.PrimaryKeyConstraint('id'))
op.create_table('Entity_Meta',
sa.Column('entity_id_fk', sa.Integer(), nullable=False),
sa.Column('meta_id_fk', sa.Integer(), nullable=False),
sa.Column('value', sa.String(length=255), nullable=False),
sa.Column('delete_ts', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['entity_id_fk'], ['Entities.id'], ),
sa.ForeignKeyConstraint(['meta_id_fk'], ['Meta_Attributes.id'], ),
sa.PrimaryKeyConstraint('entity_id_fk', 'meta_id_fk'))
op.create_table('Entity_Tags',
sa.Column('entity_id_fk', sa.Integer(), nullable=False),
sa.Column('tag_id_fk', sa.Integer(), nullable=False),
sa.Column('value', sa.String(length=255), nullable=False),
sa.Column('delete_ts', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['entity_id_fk'], ['Entities.id'], ),
sa.ForeignKeyConstraint(['tag_id_fk'], ['Tag_Attributes.id'], ),
sa.PrimaryKeyConstraint('entity_id_fk', 'tag_id_fk'))
def downgrade():
op.drop_table('Entity_Tags')
op.drop_table('Entity_Meta')
op.drop_table('Tag_Attributes')
op.drop_table('Series_Attributes')
op.drop_table('Meta_Attributes')
op.drop_table('Entities')
op.drop_table('Entity_Types')
| qiubit/luminis | backend/alembic/versions/310d770bc073_initial_migration.py | Python | mit | 4,336 |
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.distance import Distance
from geocoder.location import Location
from geocoder.arcgis import ArcgisQuery
from geocoder.baidu import BaiduQuery
from geocoder.bing import BingQuery, BingQueryDetail
from geocoder.canadapost import CanadapostQuery
from geocoder.freegeoip import FreeGeoIPQuery
from geocoder.gaode import GaodeQuery
from geocoder.geocodefarm import GeocodeFarmQuery
from geocoder.geolytica import GeolyticaQuery
from geocoder.gisgraphy import GisgraphyQuery
from geocoder.here import HereQuery
from geocoder.ipinfo import IpinfoQuery
from geocoder.komoot import KomootQuery
from geocoder.locationiq import LocationIQQuery
from geocoder.mapbox import MapboxQuery
from geocoder.mapquest import MapquestQuery
from geocoder.mapzen import MapzenQuery
from geocoder.maxmind import MaxmindQuery
from geocoder.opencage import OpenCageQuery
from geocoder.osm import OsmQuery, OsmQueryDetail
from geocoder.ottawa import OttawaQuery
from geocoder.tamu import TamuQuery
from geocoder.tomtom import TomtomQuery
from geocoder.tgos import TgosQuery
from geocoder.uscensus import USCensusQuery
from geocoder.yahoo import YahooQuery
from geocoder.yandex import YandexQuery
from geocoder.w3w import W3WQuery
from geocoder.arcgis_reverse import ArcgisReverse
from geocoder.baidu_reverse import BaiduReverse
from geocoder.bing_reverse import BingReverse
from geocoder.gaode_reverse import GaodeReverse
from geocoder.geocodefarm_reverse import GeocodeFarmReverse
from geocoder.gisgraphy_reverse import GisgraphyReverse
from geocoder.here_reverse import HereReverse
from geocoder.locationiq_reverse import LocationIQReverse
from geocoder.komoot_reverse import KomootReverse
from geocoder.mapbox_reverse import MapboxReverse
from geocoder.mapquest_reverse import MapquestReverse
from geocoder.mapzen_reverse import MapzenReverse
from geocoder.opencage_reverse import OpenCageReverse
from geocoder.osm_reverse import OsmReverse
from geocoder.uscensus_reverse import USCensusReverse
from geocoder.w3w_reverse import W3WReverse
from geocoder.yandex_reverse import YandexReverse
from geocoder.mapquest_batch import MapquestBatch
from geocoder.bing_batch_forward import BingBatchForward
from geocoder.bing_batch_reverse import BingBatchReverse
from geocoder.uscensus_batch import USCensusBatch
# Geonames Services
from geocoder.geonames import GeonamesQuery
from geocoder.geonames_details import GeonamesDetails
from geocoder.geonames_children import GeonamesChildren
from geocoder.geonames_hierarchy import GeonamesHierarchy
# Google Services
from geocoder.google import GoogleQuery
from geocoder.google_timezone import TimezoneQuery
from geocoder.google_reverse import GoogleReverse
from geocoder.google_elevation import ElevationQuery
from geocoder.google_places import PlacesQuery
options = {
'osm': {
'geocode': OsmQuery,
'details': OsmQueryDetail,
'reverse': OsmReverse,
},
'tgos': {
'geocode': TgosQuery
},
'here': {
'geocode': HereQuery,
'reverse': HereReverse,
},
'baidu': {
'geocode': BaiduQuery,
'reverse': BaiduReverse
},
'gaode': {
'geocode': GaodeQuery,
'reverse': GaodeReverse
},
'yahoo': {'geocode': YahooQuery},
'tomtom': {'geocode': TomtomQuery},
'arcgis': {
'geocode': ArcgisQuery,
'reverse': ArcgisReverse
},
'ottawa': {'geocode': OttawaQuery},
'mapbox': {
'geocode': MapboxQuery,
'reverse': MapboxReverse,
},
'maxmind': {'geocode': MaxmindQuery},
'ipinfo': {'geocode': IpinfoQuery},
'geonames': {
'geocode': GeonamesQuery,
'details': GeonamesDetails,
'timezone': GeonamesDetails,
'children': GeonamesChildren,
'hierarchy': GeonamesHierarchy
},
'freegeoip': {'geocode': FreeGeoIPQuery},
'w3w': {
'geocode': W3WQuery,
'reverse': W3WReverse,
},
'yandex': {
'geocode': YandexQuery,
'reverse': YandexReverse
},
'mapquest': {
'geocode': MapquestQuery,
'reverse': MapquestReverse,
'batch': MapquestBatch
},
'geolytica': {'geocode': GeolyticaQuery},
'canadapost': {'geocode': CanadapostQuery},
'opencage': {
'geocode': OpenCageQuery,
'reverse': OpenCageReverse,
},
'bing': {
'geocode': BingQuery,
'details': BingQueryDetail,
'reverse': BingReverse,
'batch': BingBatchForward,
'batch_reverse': BingBatchReverse
},
'google': {
'geocode': GoogleQuery,
'reverse': GoogleReverse,
'timezone': TimezoneQuery,
'elevation': ElevationQuery,
'places': PlacesQuery,
},
'mapzen': {
'geocode': MapzenQuery,
'reverse': MapzenReverse,
},
'komoot': {
'geocode': KomootQuery,
'reverse': KomootReverse,
},
'tamu': {
'geocode': TamuQuery
},
'geocodefarm': {
'geocode': GeocodeFarmQuery,
'reverse': GeocodeFarmReverse,
},
'uscensus': {
'geocode': USCensusQuery,
'reverse': USCensusReverse,
'batch': USCensusBatch
},
'locationiq': {
'geocode': LocationIQQuery,
'reverse': LocationIQReverse,
},
'gisgraphy': {
'geocode': GisgraphyQuery,
'reverse': GisgraphyReverse,
},
}
def get(location, **kwargs):
"""Get Geocode
:param ``location``: Your search location you want geocoded.
:param ``provider``: The geocoding engine you want to use.
    :param ``method``: Define the method (geocode, reverse, details, etc.).
"""
provider = kwargs.get('provider', 'bing').lower().strip()
method = kwargs.get('method', 'geocode').lower().strip()
if isinstance(location, (list, dict)) and method == 'geocode':
raise ValueError("Location should be a string")
if provider not in options:
raise ValueError("Invalid provider")
else:
if method not in options[provider]:
raise ValueError("Invalid method")
return options[provider][method](location, **kwargs)
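# A minimal usage sketch (illustrative values; network access and, for some
# providers, API keys passed as keyword arguments are assumed):
#
#   g = get('Ottawa, ON', provider='osm')                       # forward geocode
#   r = get([45.42, -75.69], provider='osm', method='reverse')  # reverse geocode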
def distance(*args, **kwargs):
"""Distance tool measures the distance between two or multiple points.
:param ``location``: (min 2x locations) Your search location you want geocoded.
:param ``units``: (default=kilometers) Unit of measurement.
> kilometers
> miles
> feet
> meters
"""
return Distance(*args, **kwargs)
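# Sketch based on the docstring above (the coordinates are placeholders):
#
#   d = distance([45.42, -75.69], [43.65, -79.38], units='kilometers')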
def location(location, **kwargs):
"""Parser for different location formats
"""
return Location(location, **kwargs)
def google(location, **kwargs):
"""Google Provider
:param ``location``: Your search location you want geocoded.
:param ``method``: (default=geocode) Use the following:
> geocode
> places
> reverse
> batch
> timezone
> elevation
"""
return get(location, provider='google', **kwargs)
def mapbox(location, **kwargs):
"""Mapbox Provider
:param ``location``: Your search location you want geocoded.
:param ``proximity``: Search nearby [lat, lng]
:param ``method``: (default=geocode) Use the following:
> geocode
> reverse
> batch
"""
return get(location, provider='mapbox', **kwargs)
def yandex(location, **kwargs):
"""Yandex Provider
:param ``location``: Your search location you want geocoded.
:param ``maxRows``: (default=1) Max number of results to fetch
    :param ``lang``: Choose one of the following languages:
> ru-RU — Russian (by default)
> uk-UA — Ukrainian
> be-BY — Belarusian
> en-US — American English
> en-BR — British English
> tr-TR — Turkish (only for maps of Turkey)
:param ``kind``: Type of toponym (only for reverse geocoding):
> house - house or building
> street - street
> metro - subway station
> district - city district
> locality - locality (city, town, village, etc.)
"""
return get(location, provider='yandex', **kwargs)
def w3w(location, **kwargs):
"""what3words Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: W3W API key.
    :param ``method``: Choose a method (geocode, reverse).
"""
return get(location, provider='w3w', **kwargs)
def baidu(location, **kwargs):
"""Baidu Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: Baidu API key.
:param ``referer``: Baidu API referer website.
"""
return get(location, provider='baidu', **kwargs)
def gaode(location, **kwargs):
"""Gaode Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: Gaode API key.
:param ``referer``: Gaode API referer website.
"""
return get(location, provider='gaode', **kwargs)
def komoot(location, **kwargs):
"""Ottawa Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='komoot', **kwargs)
def ottawa(location, **kwargs):
"""Ottawa Provider
:param ``location``: Your search location you want geocoded.
:param ``maxRows``: (default=1) Max number of results to fetch
"""
return get(location, provider='ottawa', **kwargs)
def elevation(location, **kwargs):
"""Elevation - Google Provider
:param ``location``: Your search location you want to retrieve elevation data.
"""
return get(location, method='elevation', provider='google', **kwargs)
def places(location, **kwargs):
"""Places - Google Provider
:param ``location``: Your search location you want geocoded.
:param ``proximity``: Search within given area (bbox, bounds, or around latlng)
"""
return get(location, method='places', provider='google', **kwargs)
def timezone(location, **kwargs):
"""Timezone - Google Provider
:param ``location``: Your search location you want to retrieve timezone data.
:param ``timestamp``: Define your own specified time to calculate timezone.
"""
return get(location, method='timezone', provider='google', **kwargs)
def reverse(location, provider="google", **kwargs):
"""Reverse Geocoding
:param ``location``: Your search location you want to reverse geocode.
:param ``key``: (optional) use your own API Key from Bing.
:param ``provider``: (default=google) Use the following:
> google
> bing
"""
return get(location, method='reverse', provider=provider, **kwargs)
def bing(location, **kwargs):
"""Bing Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) use your own API Key from Bing.
:param ``maxRows``: (default=1) Max number of results to fetch
:param ``method``: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='bing', **kwargs)
def yahoo(location, **kwargs):
"""Yahoo Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='yahoo', **kwargs)
def geolytica(location, **kwargs):
"""Geolytica (Geocoder.ca) Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='geolytica', **kwargs)
def opencage(location, **kwargs):
"""Opencage Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) use your own API Key from OpenCage.
"""
return get(location, provider='opencage', **kwargs)
def arcgis(location, **kwargs):
"""ArcGIS Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='arcgis', **kwargs)
def here(location, **kwargs):
"""HERE Provider
:param ``location``: Your search location you want geocoded.
:param ``app_code``: (optional) use your own Application Code from HERE.
:param ``app_id``: (optional) use your own Application ID from HERE.
:param ``maxRows``: (default=1) Max number of results to fetch
:param ``method``: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='here', **kwargs)
def nokia(location, **kwargs):
"""HERE Provider
:param ``location``: Your search location you want geocoded.
:param ``app_code``: (optional) use your own Application Code from HERE.
:param ``app_id``: (optional) use your own Application ID from HERE.
:param ``method``: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='here', **kwargs)
def tomtom(location, **kwargs):
"""TomTom Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) use your own API Key from TomTom.
:param ``maxRows``: (default=1) Max number of results to fetch
"""
return get(location, provider='tomtom', **kwargs)
def mapquest(location, **kwargs):
"""MapQuest Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) use your own API Key from MapQuest.
:param ``maxRows``: (default=1) Max number of results to fetch
:param ``method``: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='mapquest', **kwargs)
def osm(location, **kwargs):
"""OSM Provider
:param ``location``: Your search location you want geocoded.
:param ``url``: Custom OSM Server URL location
(ex: http://nominatim.openstreetmap.org/search)
"""
return get(location, provider='osm', **kwargs)
def maxmind(location='me', **kwargs):
"""MaxMind Provider
:param ``location``: Your search IP Address you want geocoded.
:param ``location``: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='maxmind', **kwargs)
def ipinfo(location='', **kwargs):
"""IP Info.io Provider
:param ``location``: Your search IP Address you want geocoded.
:param ``location``: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='ipinfo', **kwargs)
def freegeoip(location, **kwargs):
"""FreeGeoIP Provider
:param ``location``: Your search IP Address you want geocoded.
:param ``location``: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='freegeoip', **kwargs)
def ip(location, **kwargs):
"""IP Address lookup
:param ``location``: Your search IP Address you want geocoded.
:param ``location``: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='ipinfo', **kwargs)
def canadapost(location, **kwargs):
"""CanadaPost Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) API Key from CanadaPost Address Complete.
:param ``language``: (default=en) Output language preference.
:param ``country``: (default=ca) Geofenced query by country.
:param ``maxRows``: (default=1) Max number of results to fetch
"""
return get(location, provider='canadapost', **kwargs)
def postal(location, **kwargs):
"""CanadaPost Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) use your own API Key from
CanadaPost Address Complete.
"""
return get(location, provider='canadapost', **kwargs)
def geonames(location, **kwargs):
"""GeoNames Provider
:param ``location``: Your search location you want geocoded.
:param ``geonameid``: The place you want children / hierarchy for.
:param ``key``: (required) geonames *username*: needs to be passed with each request.
:param ``maxRows``: (default=1) Max number of results to fetch
:param ``proximity``: Search within given area (bbox, bounds, or around latlng)
:param ``method``: (default=geocode) Use the following:
> geocode
        > details (mainly for administrative data and timezone)
> timezone (alias for details)
> children
> hierarchy
"""
return get(location, provider='geonames', **kwargs)
def mapzen(location, **kwargs):
"""Mapzen Provider
:param ``location``: Your search location you want geocoded.
:param ``maxRows``: (default=1) Max number of results to fetch
"""
return get(location, provider='mapzen', **kwargs)
def tamu(location, **kwargs):
"""TAMU Provider
Params
------
:param ``location``: The street address of the location you want geocoded.
:param ``city``: The city of the location to geocode.
:param ``state``: The state of the location to geocode.
:param ``zipcode``: The zipcode of the location to geocode.
:param ``key``: The API key (use API key "demo" for testing).
API Reference
-------------
https://geoservices.tamu.edu/Services/Geocode/WebService
"""
return get(location, provider='tamu', **kwargs)
def geocodefarm(location, **kwargs):
"""GeocodeFarm Provider
Params
------
:param ``location``: The string to search for. Usually a street address.
:param ``key``: (optional) API Key. Only Required for Paid Users.
:param ``lang``: (optional) 2 digit language code to return results in. Currently only "en"(English) or "de"(German) supported.
:param ``country``: (optional) The country to return results in. Used for biasing purposes and may not fully filter results to this specific country.
:param ``maxRows``: (default=1) Max number of results to fetch
API Reference
-------------
https://geocode.farm/geocoding/free-api-documentation/
"""
return get(location, provider='geocodefarm', **kwargs)
def tgos(location, **kwargs):
"""TGOS Provider
:param ``location``: Your search location you want geocoded.
:param ``language``: (default=taiwan) Use the following:
> taiwan
> english
> chinese
:param ``method``: (default=geocode) Use the following:
> geocode
API Reference
-------------
http://api.tgos.nat.gov.tw/TGOS_MAP_API/Web/Default.aspx
"""
return get(location, provider='tgos', **kwargs)
def uscensus(location, **kwargs):
"""US Census Provider
Params
------
:param ``location``: Your search location(s) you want geocoded.
:param ``benchmark``: (default=4) Use the following:
> Public_AR_Current or 4
> Public_AR_ACSYYYY or 8
> Public_AR_Census2010 or 9
:param ``vintage``: (default=4, not available with batch method) Use the following:
> Current_Current or 4
> Census2010_Current or 410
> ACS2013_Current or 413
> ACS2014_Current or 414
> ACS2015_Current or 415
> Current_ACS2015 or 8
> Census2010_ACS2015 or 810
> ACS2013_ACS2015 or 813
> ACS2014_ACS2015 or 814
> ACS2015_ACS2015 or 815
> Census2010_Census2010 or 910
> Census2000_Census2010 or 900
:param ``method``: (default=geocode) Use the following:
> geocode
> reverse
> batch
API Reference
-------------
https://geocoding.geo.census.gov/geocoder/Geocoding_Services_API.pdf
"""
return get(location, provider='uscensus', **kwargs)
def locationiq(location, **kwargs):
"""LocationIQ Provider
Params
------
:param ``location``: Your search location you want geocoded.
:param ``method``: (default=geocode) Use the following:
> geocode
> reverse
API Reference
-------------
https://locationiq.org/
"""
return get(location, provider='locationiq', **kwargs)
def gisgraphy(location, **kwargs):
"""Gisgraphy Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='gisgraphy', **kwargs)
| DenisCarriere/geocoder | geocoder/api.py | Python | mit | 20,334 |
from dockyard.var import GLOBAL
from dockyard.utils.mongo import Mongo
class Log(Mongo):
"""
_id
level
msg
origin
"""
LEVEL = "level"
MSG = "msg"
ORIGIN = "origin"
def _warn(self, msgs, origin):
self.__save(msgs, GLOBAL.LOG_WARN, origin)
def _fatal(self, msgs, origin):
self.__save(msgs, GLOBAL.LOG_FATAL, origin)
def _info(self, msgs, origin):
self.__save(msgs, GLOBAL.LOG_INFO, origin)
def _success(self, msgs, origin):
self.__save(msgs, GLOBAL.LOG_SUCCESS, origin)
def _error(self, msgs, origin):
self.__save(msgs, GLOBAL.LOG_ERROR, origin)
def __save(self, msgs, level, origin):
if not isinstance(msgs, list):
msgs = [msgs]
for msg in msgs:
data = {self.LEVEL: level,
self.MSG: msg,
self.ORIGIN: origin or GLOBAL.SYS_ORIGIN}
self.append(data)
self.flush() | galileo-project/Galileo-dockyard | server/dockyard/driver/log/_model/__init__.py | Python | mit | 985 |
#!/usr/bin/python
#####################################################################
# This script tests performance in frames per second.
# Change iterations, resolution, window visibility, and whether get_state is used.
# It should give you some idea how fast the framework can work on
# your hardware. The test involves copying the state to make it more
# similar to any reasonable usage. Comment out the line with get_state
# to exclude the copying process.
#####################################################################
from __future__ import print_function
from vizdoom import *
from random import choice
from vizdoom import ScreenResolution as res
from time import time
# Some options:
resolution = res.RES_320X240
screen_format = ScreenFormat.DEPTH_BUFFER8
iterations = 10000
game = DoomGame()
game.load_config("../config/basic.cfg")
game.set_screen_resolution(resolution)
game.set_screen_format(screen_format)
game.set_window_visible(False)
game.init()
actions = [[True,False,False],[False,True,False],[False,False,True]]
left = actions[0]
right = actions[1]
shoot = actions[2]
idle = [False,False,False]
start = time()
print("Checking FPS rating. It may take some time. Be patient.")
for i in range(iterations):
if game.is_episode_finished():
game.new_episode()
# Copying happens here
s = game.get_state()
game.make_action(choice(actions))
end = time()
t = end - start
print("Results:")
print("Iterations:", iterations)
print("Resolution:", resolution)
print("time:",round(t,3))
print("fps: ",round(iterations/t,2))
game.close()
| gdb/doom-py | doom_py/examples/python/fps.py | Python | mit | 1,577 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, and detaching persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMISCSIDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from osprofiler import profiler
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import glance
from cinder import manager
from cinder.openstack.common import periodic_task
from cinder import quota
from cinder import utils
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from eventlet import greenpool
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = ('available', 'in-use',)
VALID_CREATE_CG_SRC_SNAP_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMISCSIDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
default='none',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.huawei.huawei_hvs.HuaweiHVSISCSIDriver':
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver',
'cinder.volume.drivers.huawei.huawei_hvs.HuaweiHVSFCDriver':
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver', }
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
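# Illustrative sketch of how the decorator is applied to a manager method
# (the method shown here is hypothetical; the real decorated operations are
# defined further down in this class):
#
#   @locked_volume_operation
#   def delete_volume(self, context, volume_id, **kwargs):
#       ...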
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.23'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self._tp = greenpool.GreenPool()
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if its not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s" % vol_db_empty)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty)
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Invalid JSON: %s" %
self.driver.configuration.extra_capabilities)
def _add_to_threadpool(self, func, *args, **kwargs):
self._tp.spawn_n(func, *args, **kwargs)
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception as err:
LOG.error(_LE('Fetch volume pool name failed.'),
resource=volume)
LOG.exception(err)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
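        # After init_host() has run this for every volume, self.stats has
        # roughly this shape (pool name and numbers are illustrative only):
        #     {'allocated_capacity_gb': 30,
        #      'pools': {'pool0': {'allocated_capacity_gb': 30}}}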
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)") %
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception as ex:
LOG.error(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
LOG.exception(ex)
# we don't want to continue since we failed
# to initialize the driver correctly.
return
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
# FIXME volume count for exporting is wrong
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception as export_ex:
LOG.error(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
LOG.exception(export_ex)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
elif volume['status'] in ('downloading', 'creating'):
LOG.warning(_LW("Detected volume stuck "
"in %s(curr_status)s "
"status, setting to ERROR."),
{'curr_status': volume['status']},
resource=volume)
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
else:
pass
snapshots = self.db.snapshot_get_by_host(ctxt,
self.host,
{'status': 'creating'})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
self.db.snapshot_update(ctxt,
snapshot['id'],
{'status': 'error'})
except Exception as ex:
LOG.error(_LE("Error during re-export on driver init."),
resource=volume)
LOG.exception(ex)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
self.driver.set_initialized()
for volume in volumes:
if volume['status'] == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'])
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'])
LOG.info(_LI("Resume volume delete completed successfully."),
resource=volume)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
# conditionally run replication status task
stats = self.driver.get_volume_stats(refresh=True)
if stats and stats.get('replication', False):
@periodic_task.periodic_task
def run_replication_task(self, ctxt):
self._update_replication_relationship_status(ctxt)
self.add_periodic_task(run_replication_task)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
snapshot_id=None, image_id=None, source_volid=None,
source_replicaid=None, consistencygroup_id=None,
cgsnapshot_id=None):
"""Creates the volume."""
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume_id,
allow_reschedule,
context,
request_spec,
filter_properties,
snapshot_id=snapshot_id,
image_id=image_id,
source_volid=source_volid,
source_replicaid=source_replicaid,
consistencygroup_id=consistencygroup_id,
cgsnapshot_id=cgsnapshot_id)
except Exception:
msg = _("Create manager volume flow failed.")
            LOG.exception(msg,
                          resource={'type': 'volume',
                                    'id': volume_id})
raise exception.CinderException(msg)
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
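        # The lock name built here mirrors the one the lock decorators build
        # from (id, f.__name__): e.g. creating from snapshot X acquires the
        # "X-delete_snapshot" lock, the same name locked_snapshot_operation
        # uses around delete_snapshot(X), so the create and the delete
        # serialize instead of racing.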
def _run_flow():
# This code executes create volume flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
except Exception as e:
if hasattr(e, 'rescheduled'):
rescheduled = e.rescheduled
raise
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound as e:
# Flow was reverted, fetching volume_ref from the DB.
vol_ref = self.db.volume_get(context, volume_id)
if not rescheduled:
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
LOG.info(_LI("Created volume successfully."), resource=vol_ref)
return vol_ref['id']
@locked_volume_operation
def delete_volume(self, context, volume_id, unmanage_only=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
Delete a volume and update quotas.
2. Delete a migration source volume
If deleting the source volume in a migration, we want to skip
quotas. Also we want to skip other database updates for source
volume because these update will be handled at
migrate_volume_completion properly.
3. Delete a migration destination volume
If deleting the destination volume in a migration, we want to
skip quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
volume_ref = self.db.volume_get(context, volume_id)
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s",
volume_id)
return True
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if (vol_utils.extract_host(volume_ref['host']) != self.host):
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
is_migrating = volume_ref['migration_status'] is not None
is_migrating_dest = (is_migrating and
volume_ref['migration_status'].startswith(
'target:'))
self._notify_about_volume_usage(context, volume_ref, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume_ref)
if unmanage_only:
self.driver.unmanage(volume_ref)
else:
self.driver.delete_volume(volume_ref)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume_ref)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume_ref,
'available')
return True
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume_ref,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume_ref)
# If deleting the source volume in a migration, we should skip database
# update here. In other cases, continue to update database entries.
if not is_migrating or is_migrating_dest:
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume_ref, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume_ref['host'], 'pool', True)
size = volume_ref['size']
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
LOG.info(_LI("Deleted volume successfully."), resource=volume_ref)
return True
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
self.db.volume_destroy(context, volume_ref['id'])
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
self.db.volume_update(context,
volume_ref['id'],
{'status': status})
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save(context)
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = 'error'
snapshot.save(context)
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
                # If the volume is not created from an image, no Glance
                # metadata will be available for it in the volume glance
                # metadata table.
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = 'error'
snapshot.save(context)
raise exception.MetadataCopyFailure(reason=ex)
snapshot.status = 'available'
snapshot.progress = '100%'
snapshot.save(context)
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot):
"""Deletes and unexports snapshot."""
context = context.elevated()
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = 'available'
snapshot.save()
return True
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = 'error_deleting'
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy(context)
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
LOG.info(_LI("Delete snapshot completed successfully"),
resource=snapshot)
return True
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
msg = _("being attached by different mode")
raise exception.InvalidVolume(reason=msg)
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
msg = _("volume is already attached")
raise exception.InvalidVolume(reason=msg)
attachment = None
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachment = \
self.db.volume_attachment_get_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachment = \
self.db.volume_attachment_get_by_host(context, volume_id,
host_name_sanitized)
if attachment is not None:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
if volume['migration_status']:
self.db.volume_update(context, volume_id,
{'migration_status': None})
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
volume = self.db.volume_get(context, volume_id)
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.error(_LE("Find attachment in detach_volume failed."),
resource=volume)
raise
else:
# We can try and degrade gracefully here by trying to detach
# a volume without the attachment_id here if the volume only has
# one attachment. This is for backwards compatibility.
attachments = self.db.volume_attachment_get_used_by_volume_id(
context, volume_id)
if len(attachments) > 1:
                # There is more than one attachment for this volume,
                # so an attachment_id is required.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
msg = _("Detach volume failed, because there are currently no "
"active attachments.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
# NOTE(jdg): We used to do an ensure export here to
# catch upgrades while volumes were attached (E->F)
# this was necessary to convert in-use volumes from
# int ID's to UUID's. Don't need this any longer
# We're going to remove the export here
# (delete the iscsi target)
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id, reason=ex)
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
self.driver.copy_volume_to_image(context, volume, image_service,
image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
            if image_status in ('queued', 'saving'):
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def _driver_data_namespace(self):
        return (self.driver.configuration.safe_get('driver_data_namespace')
                or self.driver.configuration.safe_get('volume_backend_name')
                or self.driver.__class__.__name__)
def _get_driver_initiator_data(self, context, connector):
data = None
initiator = connector.get('initiator', False)
if initiator:
namespace = self._driver_data_namespace()
try:
data = self.db.driver_initiator_data_get(
context,
initiator,
namespace
)
except exception.CinderException:
LOG.exception(_LE("Failed to get driver initiator data for"
" initiator %(initiator)s and namespace"
" %(namespace)s"),
{'initiator': initiator,
'namespace': namespace})
raise
return data
def _save_driver_initiator_data(self, context, connector, model_update):
if connector.get('initiator', False) and model_update:
namespace = self._driver_data_namespace()
try:
self.db.driver_initiator_data_update(context,
connector['initiator'],
namespace,
model_update)
except exception.CinderException:
LOG.exception(_LE("Failed to update initiator data for"
" initiator %(initiator)s and backend"
" %(backend)s"),
{'initiator': connector['initiator'],
'backend': namespace})
raise
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=err)
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err))."), {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume)
except exception.CinderException:
err_msg = (_("Create export for volume failed."))
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=ex)
initiator_data = self._get_driver_initiator_data(context, connector)
try:
if initiator_data:
conn_info = self.driver.initialize_connection(volume,
connector,
initiator_data)
else:
conn_info = self.driver.initialize_connection(volume,
connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s)."), {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
initiator_update = conn_info.get('initiator_update', None)
if initiator_update:
self._save_driver_initiator_data(context, connector,
initiator_update)
del conn_info['initiator_update']
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
if conn_info['data'].get('access_mode') is None:
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
# NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
# for those that do allow them to return updated model info
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
new_vol_values = {}
for k, v in volume.iteritems():
new_vol_values[k] = v
del new_vol_values['id']
del new_vol_values['_name_id']
# We don't copy volume_type because the db sets that according to
# volume_type_id, which we do copy
del new_vol_values['volume_type']
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_vol_values['host'] = host['host']
new_vol_values['status'] = 'creating'
        # FIXME(jdg): using a : delimiter is confusing to
# me below here. We're adding a string member to a dict
# using a :, which is kind of a poor choice in this case
# I think
new_vol_values['migration_status'] = 'target:%s' % volume['id']
new_vol_values['attach_status'] = 'detached'
new_vol_values['volume_attachment'] = []
new_volume = self.db.volume_create(ctxt, new_vol_values)
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
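        # Poll with a growing back-off: the time.sleep(tries ** 2) below waits
        # 1s, 4s, 9s, ... between status checks until the new volume becomes
        # available, goes to error, or the deadline above is reached.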
new_volume = self.db.volume_get(ctxt, new_volume['id'])
tries = 0
while new_volume['status'] != 'available':
tries += 1
now = time.time()
if new_volume['status'] == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'],
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'],
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
new_volume = self.db.volume_get(ctxt, new_volume['id'])
# Copy the source volume to the destination volume
try:
attachments = volume['volume_attachment']
if not attachments:
self.driver.copy_volume_data(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume['id'],
new_volume['id'],
error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume['id'],
new_volume['id'])
except Exception:
with excutils.save_and_reraise_exception():
msg = _LE("Failed to copy volume %(vol1)s to %(vol2)s")
LOG.error(msg, {'vol1': volume['id'],
'vol2': new_volume['id']})
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'])
def _get_original_status(self, volume):
attachments = volume['volume_attachment']
if not attachments:
return 'available'
else:
return 'in-use'
def _clean_temporary_volume(self, ctxt, volume_id, new_volume_id,
clean_db_only=False):
volume = self.db.volume_get(ctxt, volume_id)
# If we're in the migrating phase, we need to cleanup
# destination volume because source volume is remaining
if volume['migration_status'] == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
self.db.volume_destroy(ctxt, new_volume_id)
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi.delete_volume(ctxt, volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume_id})
else:
# If we're in the completing phase don't delete the
# destination because we may have already deleted the
# source! But the migration_status in database should
# be cleared to handle volume after migration failure
try:
updates = {'migration_status': None}
self.db.volume_update(ctxt, new_volume_id, updates)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume_id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume_id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
msg = _("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s")
LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
volume = self.db.volume_get(ctxt, volume_id)
new_volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = self._get_original_status(volume)
if error:
msg = _("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s")
LOG.info(msg % {'vol1': volume['id'],
'vol2': new_volume['id']})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': None, 'status': orig_volume_status}
self.db.volume_update(ctxt, volume_id, updates)
return volume_id
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'completing'})
# Delete the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume['volume_attachment']
for attachment in attachments:
self.detach_volume(ctxt, volume_id, attachment['id'])
self.delete_volume(ctxt, volume_id)
except Exception as ex:
msg = _LE("Delete migration source volume failed: %(err)s")
LOG.error(msg, {'err': six.text_type(ex)}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
# Note this needs to go through rpc to the host of the new volume
# the current host and driver object is for the "existing" volume
rpcapi.update_migrated_volume(ctxt,
volume,
new_volume)
self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
self.db.volume_destroy(ctxt, new_volume_id)
if orig_volume_status == 'in-use':
updates = {'migration_status': 'completing',
'status': orig_volume_status}
else:
updates = {'migration_status': None}
self.db.volume_update(ctxt, volume_id, updates)
if orig_volume_status == 'in-use':
attachments = volume['volume_attachment']
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume['id']
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
volume_ref = self.db.volume_get(ctxt, volume_id)
model_update = None
moved = False
status_update = None
if volume_ref['status'] == 'retyping':
status_update = {'status': self._get_original_status(volume_ref)}
self.db.volume_update(ctxt, volume_ref['id'],
{'migration_status': 'migrating'})
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume_ref)
moved, model_update = self.driver.migrate_volume(ctxt,
volume_ref,
host)
if moved:
updates = {'host': host['host'],
'migration_status': None}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume_ref = self.db.volume_update(ctxt,
volume_ref['id'],
updates)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
self.db.volume_update(ctxt, volume_ref['id'], updates)
if not moved:
try:
self._migrate_volume_generic(ctxt, volume_ref, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
self.db.volume_update(ctxt, volume_ref['id'], updates)
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume_ref)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def notification(self, context, event):
LOG.info(_LI("Notification {%s} received"), event)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group['id'])
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot['id'])
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_extending'})
volume = self.db.volume_get(context, volume_id)
project_id = volume['project_id']
size_increase = (int(new_size)) - volume['size']
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume['id'],
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume_id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume = self.db.volume_update(context,
volume['id'],
{'size': int(new_size),
'status': 'available'})
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None):
def _retype_error(context, volume_id, old_reservations,
new_reservations, status_update):
try:
self.db.volume_update(context, volume_id, status_update)
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
volume_ref = self.db.volume_get(ctxt, volume_id)
status_update = {'status': self._get_original_status(volume_ref)}
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
self.db.volume_update(context, volume_id, status_update)
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
self.db.volume_update(context, volume_id, status_update)
LOG.exception(_LE("Failed to update usages "
"while retyping volume."))
raise exception.CinderException(_("Failed to get old volume type"
" quota reservations"))
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume_ref.get('volume_type_id'), new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
if not retyped:
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume_ref,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
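                # e.g. a driver may return a bare True/False, or a tuple such
                # as (True, {'provider_location': ...}); the dict shown here
                # is only a hypothetical example of a model update.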
                if isinstance(ret, tuple):
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume_id)
except Exception as ex:
retyped = False
LOG.error(_LE("Volume %s: driver error when trying to retype, "
"falling back to generic mechanism."),
volume_ref['id'])
LOG.exception(ex)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = self.db.snapshot_get_all_for_volume(context,
volume_ref['id'])
if snaps:
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume_ref['replication_status']
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.db.volume_update(context, volume_ref['id'],
{'migration_status': 'starting'})
try:
self.migrate_volume(context, volume_id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
self.db.volume_update(context, volume_id, model_update)
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume_ref)
def manage_existing(self, ctxt, volume_id, ref=None):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
LOG.exception(_LE("Failed to create manage_existing flow."),
resource={'type': 'volume',
'id': volume_id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Promote volume replica failed."),
resource=volume)
try:
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
LOG.info(_LI("Promote volume replica completed successfully."),
resource=volume)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Sync volume replica failed."),
resource=volume)
try:
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_("Synchronizing secondary volume to primary failed."))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Get replication status for volume failed."),
resource=vol)
def create_consistencygroup(self, context, group_id):
"""Creates the consistency group."""
context = context.elevated()
group_ref = self.db.consistencygroup_get(context, group_id)
group_ref['host'] = self.host
status = 'available'
model_update = False
self._notify_about_consistencygroup_usage(
context, group_ref, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group_ref['name'])
model_update = self.driver.create_consistencygroup(context,
group_ref)
if model_update:
group_ref = self.db.consistencygroup_update(
context, group_ref['id'], model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_ref['id'],
{'status': 'error'})
LOG.error(_LE("Consistency group %s: create failed"),
group_ref['name'])
now = timeutils.utcnow()
self.db.consistencygroup_update(context,
group_ref['id'],
{'status': status,
'created_at': now})
LOG.info(_LI("Consistency group %s: created successfully"),
group_ref['name'])
self._notify_about_consistencygroup_usage(
context, group_ref, "create.end")
LOG.info(_LI("Create consistency group completed successfully."),
resource={'type': 'consistency_group',
'id': group_ref['id']})
return group_ref['id']
def create_consistencygroup_from_src(self, context, group_id,
cgsnapshot_id=None):
"""Creates the consistency group from source.
Currently the source can only be a cgsnapshot.
"""
group_ref = self.db.consistencygroup_get(context, group_id)
try:
volumes = self.db.volume_get_all_by_group(
context, group_id)
cgsnapshot = None
snapshots = None
if cgsnapshot_id:
try:
cgsnapshot = self.db.cgsnapshot_get(context, cgsnapshot_id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot_id},
resource={'type': 'consistency_group',
'id': group_ref['id']})
raise
if cgsnapshot:
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot_id)
for snap in snapshots:
if (snap['status'] not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group_id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
self._notify_about_consistencygroup_usage(
context, group_ref, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group_ref, volumes, cgsnapshot,
sorted_snapshots))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group_ref = self.db.consistencygroup_update(
context, group_id, model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_id,
{'status': 'error'})
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed."),
{'snap': cgsnapshot_id},
resource={'type': 'consistency_group',
'id': group_ref['id']})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update,
group_id=group_id)
self._update_allocated_capacity(vol)
self.db.consistencygroup_update(context,
group_id,
{'status': status,
'created_at': now})
self._notify_about_consistencygroup_usage(
context, group_ref, "create.end")
LOG.info(_LI("Create consistency group "
"from snapshot-%(snap)s completed successfully."),
{'snap': cgsnapshot_id},
resource={'type': 'consistency_group',
'id': group_ref['id']})
return group_ref['id']
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = filter(
lambda snap: snap['id'] == vol['snapshot_id'], snapshots)
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _update_volume_from_src(self, context, vol, update, group_id=None):
try:
snapshot_ref = self.db.snapshot_get(context,
vol['snapshot_id'])
orig_vref = self.db.volume_get(context,
snapshot_ref['volume_id'])
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], vol['snapshot_id'])
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot_ref['volume_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata.") %
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise exception.MetadataCopyFailure(reason=ex)
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group_id):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
group_ref = self.db.consistencygroup_get(context, group_id)
project_id = group_ref['project_id']
if context.project_id != group_ref['project_id']:
project_id = group_ref['project_id']
else:
project_id = context.project_id
volumes = self.db.volume_get_all_by_group(context, group_id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group_ref, "delete.start")
try:
utils.require_driver_initialized(self.driver)
model_update, volumes = self.driver.delete_consistencygroup(
context, group_ref)
if volumes:
for volume in volumes:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.exception(msg,
resource={'type': 'consistency_group',
'id': group_ref['id']})
raise exception.VolumeDriverException(message=msg)
else:
self.db.consistencygroup_update(context, group_ref['id'],
model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_ref['id'],
{'status': 'error_deleting'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group_id})
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group_id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
self.db.consistencygroup_destroy(context, group_id)
self._notify_about_consistencygroup_usage(
context, group_ref, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group_id})
return True
def update_consistencygroup(self, context, group_id,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
group = self.db.consistencygroup_get(context, group_id)
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol_ref['id']},
resource={'type': 'consistency_group',
'id': group_id})
raise
if add_vol_ref['status'] not in ['in-use', 'available']:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group_id,
'status': add_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol_ref['id']},
resource={'type': 'consistency_group',
'id': group_id})
raise
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in ['error']:
msg = (_('Error occurred when updating consistency group '
'%s.') % group_id)
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
self.db.consistencygroup_update(context, group_id,
model_update)
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group_id})
self.db.consistencygroup_update(context, group_id,
{'status': 'error'})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group['id']})
self.db.consistencygroup_update(context, group_id,
{'status': 'error'})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
self.db.consistencygroup_update(context, group_id,
{'status': 'available',
'updated_at': now})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group_id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group_id})
return True
def create_cgsnapshot(self, context, group_id, cgsnapshot_id):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot_id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot_ref['context'] = caller_context
for snapshot in snapshots:
snapshot['context'] = caller_context
model_update, snapshots = \
self.driver.create_cgsnapshot(context, cgsnapshot_ref)
if snapshots:
for snapshot in snapshots:
# Update db if status is error
if snapshot['status'] == 'error':
update = {'status': snapshot['status']}
self.db.snapshot_update(context, snapshot['id'],
update)
# If status for one snapshot is error, make sure
# the status for the cgsnapshot is also error
if model_update['status'] != 'error':
model_update['status'] = snapshot['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot_ref['id'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
except Exception:
with excutils.save_and_reraise_exception():
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'error'})
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot['id'], volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
self.db.snapshot_update(context,
snapshot['id'],
{'status': 'error'})
raise exception.MetadataCopyFailure(reason=ex)
self.db.snapshot_update(context,
snapshot['id'], {'status': 'available',
'progress': '100%'})
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'available'})
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot_ref['id'])
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "create.end")
return cgsnapshot_id
def delete_cgsnapshot(self, context, cgsnapshot_id):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
project_id = cgsnapshot_ref['project_id']
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "delete.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot_id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot_ref['context'] = caller_context
for snapshot in snapshots:
snapshot['context'] = caller_context
model_update, snapshots = \
self.driver.delete_cgsnapshot(context, cgsnapshot_ref)
if snapshots:
for snapshot in snapshots:
update = {'status': snapshot['status']}
self.db.snapshot_update(context, snapshot['id'],
update)
if snapshot['status'] in ['error_deleting', 'error'] and \
model_update['status'] not in \
['error_deleting', 'error']:
model_update['status'] = snapshot['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot_ref['id'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
self.db.cgsnapshot_update(context, cgsnapshot_ref['id'],
model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'error_deleting'})
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.db.cgsnapshot_destroy(context, cgsnapshot_id)
LOG.info(_LI("cgsnapshot %s: deleted successfully"),
cgsnapshot_ref['id'])
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "delete.end", snapshots)
return True
def update_migrated_volume(self, ctxt, volume, new_volume):
"""Finalize migration process on backend device."""
model_update = None
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume)
if model_update:
self.db.volume_update(ctxt.elevated(),
volume['id'],
model_update)
| rakeshmi/cinder | cinder/volume/manager.py | Python | apache-2.0 | 114,688 |
import xml.etree.cElementTree
from os import environ, unlink, symlink, path
from Tools.Directories import SCOPE_SKIN, resolveFilename
import time
from Tools.StbHardware import setRTCoffset
class Timezones:
def __init__(self):
self.timezones = []
self.readTimezonesFromFile()
def readTimezonesFromFile(self):
try:
root = xml.etree.cElementTree.parse(resolveFilename(SCOPE_SKIN, 'timezone.xml')).getroot()
for zone in root.findall("zone"):
self.timezones.append((zone.get('name',""), zone.get('zone',"")))
except:
pass
if len(self.timezones) == 0:
self.timezones = [("UTC", "UTC")]
def activateTimezone(self, index):
if len(self.timezones) <= index:
return
environ['TZ'] = self.timezones[index][1]
try:
unlink("/etc/localtime")
except OSError:
pass
try:
symlink("/usr/share/zoneinfo/%s" %(self.timezones[index][1]), "/etc/localtime")
except OSError:
pass
try:
time.tzset()
except:
from enigma import e_tzset
e_tzset()
if path.exists("/proc/stb/fp/rtc_offset"):
setRTCoffset()
def getTimezoneList(self):
return [ str(x[0]) for x in self.timezones ]
def getDefaultTimezone(self):
# TODO return something more useful - depending on country-settings?
t = "(GMT+01:00) Amsterdam, Berlin, Bern, Rome, Vienna"
for (a,b) in self.timezones:
if a == t:
return a
return self.timezones[0][0]
timezones = Timezones()
| OpenSPA/dvbapp | lib/python/Components/Timezones.py | Python | gpl-2.0 | 1,411 |
__author__ = 'nmaurer'
import os.path
current_dir = os.path.dirname(os.path.abspath(__file__))
import cherrypy
| ybonjour/nuus | web/__init__.py | Python | mit | 113 |
from ..base import Event
class OpenTabEvent(Event):
def __init__(self, tab):
self.__tab = tab
@property
def tab(self):
return self.__tab
| umlfri/umlfri2 | umlfri2/application/events/tabs/open.py | Python | gpl-3.0 | 172 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Interproscan(Package):
"""InterProScan is the software package that allows sequences
(protein and nucleic) to be scanned against InterPro's signatures.
Signatures are predictive models, provided by several different
databases, that make up the InterPro consortium."""
homepage = "https://www.ebi.ac.uk/interpro/interproscan.html"
url = "https://github.com/ebi-pf-team/interproscan/archive/5.36-75.0.tar.gz"
version('5.38-76.0', sha256='cb191ff8eee275689b789167a57b368ea5c06bbcd36b4de23e8bbbbdc0fc7434')
version('5.36-75.0', sha256='383d7431e47c985056c856ceb6d4dcf7ed2559a4a3d5c210c01ce3975875addb')
version('4.8',
sha256='f1cb0ae1218eb05ed59ad7f94883f474eb9a6185a56ad3a93a364acb73506a3f',
url='ftp://ftp.ebi.ac.uk/pub/software/unix/iprscan/4/RELEASE/4.8/iprscan_v4.8.tar.gz')
resource(
when='@:4.8',
name='binaries',
url="http://ftp.ebi.ac.uk/pub/databases/interpro/iprscan/BIN/4.x/iprscan_bin4.x_Linux64.tar.gz",
sha256='551610a4682b112522f3ded5268f76ba9a47399a72e726fafb17cc938a50e7ee',
)
depends_on('java@8.0:8.9', type=('build', 'run'), when='@5:5.36-99.0')
depends_on('java@11.0:', type=('build', 'run'), when='@5.37-76.0:')
depends_on('maven', type='build', when='@5:')
depends_on('perl@5:', type=('build', 'run'))
depends_on('python@3:', when='@5:', type=('build', 'run'))
depends_on('perl-cgi', when='@:4.8', type=('build', 'run'))
depends_on('perl-mailtools', when='@:4.8', type=('build', 'run'))
depends_on('perl-xml-quote', when='@:4.8', type=('build', 'run'))
depends_on('perl-xml-parser', when='@:4.8', type=('build', 'run'))
depends_on('perl-io-string', when='@:4.8', type=('build', 'run'))
depends_on('perl-io-stringy', when='@:4.8', type=('build', 'run'))
patch('large-gid.patch', when='@5:')
patch('non-interactive.patch', when='@:4.8')
patch('ps_scan.patch', when='@:4.8')
def install(self, spec, prefix):
with working_dir('core'):
which('mvn')('clean', 'install')
install_tree('.', prefix)
# link the main shell script into the PATH
ips_bin_suffix = 'core/jms-implementation/target/interproscan-5-dist'
symlink(join_path(prefix, ips_bin_suffix), prefix.bin)
@when('@:4.8')
def install(self, spec, prefix):
perl = which('perl')
src = join_path(self.stage.source_path, 'iprscan', 'bin', 'Linux')
dst = join_path(self.stage.source_path, 'bin', 'binaries')
force_symlink(src, dst)
install_tree('.', prefix)
with working_dir(prefix):
perl('Config.pl')
| rspavel/spack | var/spack/repos/builtin/packages/interproscan/package.py | Python | lgpl-2.1 | 2,898 |
#----------------------------------------------------------------------
# Copyright (c) 2011-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
| ahelsing/geni-ch | plugins/pgch/__init__.py | Python | mit | 1,217 |
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = "ZenPacks.Nova.Windows.SNMPPerfMonitorSimple"
VERSION = "1.6"
AUTHOR = "Ryan Matte"
LICENSE = ""
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.Nova', 'ZenPacks.Nova.Windows']
PACKAGES = ['ZenPacks', 'ZenPacks.Nova', 'ZenPacks.Nova.Windows', 'ZenPacks.Nova.Windows.SNMPPerfMonitorSimple']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = ">=2.3"
PREV_ZENPACK_NAME = "ZenPacks.Nova.Windows.SNMPPerfMonitor.Simple"
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME,
version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
# installed then a zenpack of this name if installed will be upgraded.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# The MANIFEST.in file is the recommended way of including additional files
# in your ZenPack. package_data is another.
package_data = {
'':['../COPYRIGHT.txt','../LICENSE.txt']
},
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
# tries to put add/delete the names it manages at the beginning of this
# list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified too
# dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
)
| anksp21/Community-Zenpacks | ZenPacks.Nova.Windows.SNMPPerfMonitorSimple/setup.py | Python | gpl-2.0 | 2,765 |
# Code for extracting the feature matrix of the second-to-last (or any) layer of a pretrained deep neural network in Keras.
# For installation, follow these two guides:
# 1. http://www.pyimagesearch.com/2016/08/10/imagenet-classification-with-python-and-keras/
# 2. https://github.com/fchollet/deep-learning-models (code comes from here)
# Note: it takes around 1 min to output the features of the last layer, so house images cannot be processed in real time.
# The output matrix has shape (1, 512, 7, 7); the outer dimension "1" equals the number of images you want features for.
from vgg19 import VGG19
from keras.preprocessing import image
from imagenet_utils import preprocess_input
from keras.models import Model
import numpy as np
from imagenet_utils import preprocess_input, decode_predictions
import sys
import os
base_model = VGG19(weights='imagenet')
model = Model(input=base_model.input, output=base_model.get_layer('block5_pool').output)
#model = Model(weights='imagenet')
path="../src/python/GUI/image_retrieval/images/"
print os.listdir(path)
for img_path in os.listdir(path):
print path+img_path
img = image.load_img(path+img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
block5_pool_features = model.predict(x)
#print block5_pool_features
tag=img_path.split(".")[0]
outfile="../src/python/GUI/image_retrieval/features/"+tag+".npy"
np.save(outfile,block5_pool_features)
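# Hedged follow-up sketch (added; not part of the original script): the saved
# feature arrays could be reloaded and flattened into vectors for retrieval.
# The directory path mirrors the one used above; uncomment to use.
# features_dir = "../src/python/GUI/image_retrieval/features/"
# for f in os.listdir(features_dir):
#     feats = np.load(features_dir + f)          # shape (1, 512, 7, 7)
#     vector = feats.reshape(feats.shape[0], -1) # shape (1, 25088)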
| LinFelix/FlatHack | deep-learning-models/img_classify.py | Python | lgpl-3.0 | 1,481 |
"""
OVERALL CREDIT TO:
t0mm0, Eldorado, VOINAGE, BSTRDMKR, tknorris, smokdpi, TheHighway
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class NxloadResolver(UrlResolver):
name = "nxload"
domains = ["nxload.com"]
pattern = '(?://|\.)(nxload\.com)/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.RAND_UA}
html = self.net.http_GET(web_url, headers=headers).content
if html:
match = re.search('''['"]?sources['"]?\s*:\s*\[(.*?)\]''', html, re.DOTALL)
if match:
sources = [(source.rsplit('/', 1).pop(1), source) for source in
re.findall('''['"](.*?)["']''', match.group(1), re.DOTALL)]
return helpers.pick_source(sources) + helpers.append_headers(headers)
raise ResolverError("Video not found")
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
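# Hedged usage sketch (added; not part of the plugin): resolver plugins like
# this are normally invoked through the urlresolver front end, e.g.
# urlresolver.resolve(embed_url). The embed URL below is hypothetical.
#
# import urlresolver
# stream_url = urlresolver.resolve('http://nxload.com/embed-abc123xyz')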
| koditr/xbmc-tr-team-turkish-addons | script.module.urlresolver/lib/urlresolver/plugins/nxload.py | Python | gpl-2.0 | 1,879 |
# ------------------------------------- #
# Python Package Importing #
# ------------------------------------- #
# Importing Necessary System Packages
import sys, os, math
import numpy as np
import time as tp
from optparse import OptionParser
import glob
# Importing cPickle/Pickle
try:
import pickle as pickle
except:
import pickle
# Import the Amuse Base Packages
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import particle_attributes
from amuse.io import *
from amuse.lab import *
from tycho import util
# Import the Amuse Gravity & Close-Encounter Packages
from amuse.community.smalln.interface import SmallN
from amuse.community.kepler.interface import Kepler
# Import the Tycho Packages
from tycho import create, util, read, write, stellar_systems
# ------------------------------------- #
# Defining Functions #
# ------------------------------------- #
def CutOrAdvance(enc_bodies, primary_sysID, converter=None, **kwargs):
bodies = enc_bodies.copy()
KeplerWorkerList = kwargs.get("kepler_workers", None)
    # Initialize Kepler Workers locally if they were not provided by the caller.
    # Remember this so the locally created workers can be stopped before returning.
    locally_created_workers = KeplerWorkerList is None
    if locally_created_workers:
if converter == None:
converter = nbody_system.nbody_to_si(bodies.mass.sum(), 2 * np.max(bodies.radius.number) | bodies.radius.unit)
KeplerWorkerList = []
for i in range(3):
KeplerWorkerList.append(Kepler(unit_converter = converter, redirection = 'none'))
KeplerWorkerList[-1].initialize_code()
systems = stellar_systems.get_heirarchical_systems_from_set(bodies, \
kepler_workers=KeplerWorkerList[:2], \
RelativePosition=False)
# Deal with Possible Key Issues with Encounters with 3+ Star Particles Being Run More than Other Systems ...
if int(primary_sysID) not in list(systems.keys()):
print("...: Error: Previously run binary system has been found! Not running this system ...")
print(primary_sysID)
print(list(systems.keys()))
print("---------------------------------")
return None
# As this function is pulling from Multiples, there should never be more or less than 2 "Root" Particles ...
if len(systems) != 2:
print("...: Error: Encounter has more roots than expected! Total Root Particles:", len(systems))
print(bodies)
print("---------------------------------")
return None
# Assign the Primary System to #1 and Perturbing System to #2
sys_1 = systems[int(primary_sysID)]
secondary_sysID = [key for key in list(systems.keys()) if key!=int(primary_sysID)][0]
sys_2 = systems[secondary_sysID]
print('All System Keys:', list(systems.keys()))
print('Primary System Key:', primary_sysID)
print('System 1 IDs:', sys_1.id)
print('System 2 IDs:', sys_2.id)
# Calculate Useful Quantities
mass_ratio = sys_2.mass.sum()/sys_1.mass.sum()
total_mass = sys_1.mass.sum() + sys_2.mass.sum()
rel_pos = sys_1.center_of_mass() - sys_2.center_of_mass()
rel_vel = sys_1.center_of_mass_velocity() - sys_2.center_of_mass_velocity()
# Initialize Kepler Worker
kep = KeplerWorkerList[-1]
kep.initialize_from_dyn(total_mass, rel_pos[0], rel_pos[1], rel_pos[2], rel_vel[0], rel_vel[1], rel_vel[2])
# Check to See if the Periastron is within the Ignore Distance for 10^3 Perturbation
p = kep.get_periastron()
ignore_distance = mass_ratio**(1./3.) * 600 | units.AU
if p > ignore_distance:
print("Encounter Ignored due to Periastron of", p.in_(units.AU), "and an IgnoreDistance of",ignore_distance)
        if locally_created_workers:
for K in KeplerWorkerList:
K.stop()
print("---------------------------------")
return None
# Move the Particles to be Relative to their Respective Center of Mass
cm_sys_1, cm_sys_2 = sys_1.center_of_mass(), sys_2.center_of_mass()
cmv_sys_1, cmv_sys_2 = sys_1.center_of_mass_velocity(), sys_2.center_of_mass_velocity()
for particle in sys_1:
particle.position -= cm_sys_1
particle.velocity -= cmv_sys_1
for particle in sys_2:
particle.position -= cm_sys_2
particle.velocity -= cmv_sys_2
# Check to See if the Planets are Closer than the Ignore Distance
# Note: This shouldn't happen in the main code, but this prevents overshooting the periastron in debug mode.
if kep.get_separation() > ignore_distance:
kep.advance_to_radius(ignore_distance)
# Advance the Center of Masses to the Desired Distance in Reduced Mass Coordinates
x, y, z = kep.get_separation_vector()
rel_pos_f = rel_pos.copy()
rel_pos_f[0], rel_pos_f[1], rel_pos_f[2] = x, y, z
vx, vy, vz = kep.get_velocity_vector()
rel_vel_f = rel_vel.copy()
rel_vel_f[0], rel_vel_f[1], rel_vel_f[2] = vx, vy, vz
# Transform to Absolute Coordinates from Kepler Reduced Mass Coordinates
cm_pos_1, cm_pos_2 = sys_2.mass.sum() * rel_pos_f / total_mass, -sys_1.mass.sum() * rel_pos_f / total_mass
cm_vel_1, cm_vel_2 = sys_2.mass.sum() * rel_vel_f / total_mass, -sys_1.mass.sum() * rel_vel_f / total_mass
    # Move the Particles to the New Positions of their Respective Center of Mass
for particle in sys_1:
particle.position += cm_pos_1
particle.velocity += cm_vel_1
for particle in sys_2:
particle.position += cm_pos_2
particle.velocity += cm_vel_2
    # If the workers were created locally, stop them before returning the Systems as a Particle Set
    if locally_created_workers:
for K in KeplerWorkerList:
K.stop()
# Collect the Collective Particle Set to be Returned Back
final_set = Particles()
final_set.add_particles(sys_1)
final_set.add_particles(sys_2)
print("---------------------------------")
return final_set
# ------------------------------------- #
# Main Production Script #
# ------------------------------------- #
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--cluster-name", dest="cluster_name", default=None, type="str",
help="Enter the name of the cluster with suffixes.")
(options, args) = parser.parse_args()
if options.cluster_name != None:
cluster_name = options.cluster_name
else:
directory = os.getcwd()
cluster_name = directory.split("/")[-1]
base_planet_ID = 50000
orig_stdout = sys.stdout
log_file = open(os.getcwd()+"/cut_encounters.log","w")
sys.stdout = log_file
# Create the Kepler Workers
KeplerWorkerList = []
converter = nbody_system.nbody_to_si(1 | units.MSun, 100 |units.AU)
for i in range(3):
KeplerWorkerList.append(Kepler(unit_converter = converter, redirection = 'none'))
KeplerWorkerList[-1].initialize_code()
# Read in Encounter Directory
encounter_file = open(os.getcwd()+"/"+cluster_name+"_encounters.pkl", "rb")
encounter_db = pickle.load(encounter_file)
encounter_file.close()
sys.stdout.flush()
print(util.timestamp(), "Performing First Cut on Encounter Database ...")
print(len(encounter_db.keys()))
sys.stdout.flush()
# Perform a Cut on the Encounter Database
for star_ID in list(encounter_db.keys()):
# Cut Out Stars Recorded with Only Initialization Pickups
if len(encounter_db[star_ID]) == 0:
del encounter_db[star_ID]
if len(encounter_db[star_ID]) == 1:
# Check to Ensure it is an Actual Multiples Initialization (AKA: 1 System)
temp = stellar_systems.get_heirarchical_systems_from_set(encounter_db[star_ID][0], kepler_workers=KeplerWorkerList[:2])
print(temp)
if len(temp.keys()) <= 1:
print(encounter_db[star_ID][0].id)
del encounter_db[star_ID]
print("After Removal of Just Initializations", len(encounter_db.keys()))
for star_ID in list(encounter_db.keys()):
# Cut Out Stars with No Planets
enc_id_to_cut = []
for enc_id, encounter in enumerate(encounter_db[star_ID]):
# Refine "No Planet" Cut to Deal with Hierarchical Stellar Systems
# We are Looping Through Encounters to Deal with Rogue Jupiter Captures
print(star_ID, encounter.id)
sys.stdout.flush()
if len([ID for ID in encounter.id if ID >= base_planet_ID]) == 0:
enc_id_to_cut.append(enc_id)
elif len([ID for ID in encounter.id if ID >= base_planet_ID]) > 0:
if len([ID for ID in encounter.id if ID <= base_planet_ID]) == 1:
enc_id_to_cut.append(enc_id)
for enc_id in sorted(enc_id_to_cut, reverse=True):
del encounter_db[star_ID][enc_id]
print("After no planet encounters are removed", len(encounter_db.keys()))
sys.stdout.flush()
print(util.timestamp(), "Performing Second Cut on Encounter Database ...")
sys.stdout.flush()
star_id_to_cut = []
for star_ID in list(encounter_db.keys()):
if len(encounter_db[star_ID]) == 0:
star_id_to_cut.append(star_ID)
print(len(star_id_to_cut))
for star_ID in sorted(star_id_to_cut, reverse=True):
del encounter_db[star_ID]
# Perform Cut & Advancement on Systems to Lower Integration Time
for star_ID in list(encounter_db.keys()):
enc_id_to_cut = []
for enc_id, encounter in enumerate(encounter_db[star_ID]):
PeriastronCut = CutOrAdvance(encounter, star_ID, kepler_workers=KeplerWorkerList)
if PeriastronCut != None:
encounter_db[star_ID][enc_id] = PeriastronCut
elif PeriastronCut == None:
enc_id_to_cut.append(enc_id)
for enc_id in sorted(enc_id_to_cut, reverse=True):
del encounter_db[star_ID][enc_id]
star_id_to_cut = []
for star_ID in list(encounter_db.keys()):
if len(encounter_db[star_ID]) == 0:
star_id_to_cut.append(star_ID)
print("Star IDs to Cut:", star_id_to_cut)
for star_ID in sorted(star_id_to_cut, reverse=True):
del encounter_db[star_ID]
print(encounter_db.keys())
encounter_cut_file = open(os.getcwd()+"/"+cluster_name+"_encounters_cut.pkl", "wb")
pickle.dump(encounter_db, encounter_cut_file)
encounter_cut_file.close()
sys.stdout = orig_stdout
log_file.close()
for K in KeplerWorkerList:
K.stop()
print("Finished cutting encounter database.")
| JPGlaser/Tycho | cut_encounters.py | Python | mit | 10,626 |
# -*- coding: utf-8 -*-
import KBEngine
from KBEDebug import *
import dialogmgr
class Dialog:
def __init__(self):
pass
def dialog(self, srcEntityID, targetID, dialogID):
"""
exposed.
		Cast a skill on a target entity.
"""
if srcEntityID != self.id:
return
if not KBEngine.entities.has_key(targetID):
DEBUG_MSG("Dialog::dialog: %i not found targetID:%i" % (self.id, dialogID))
return
dialogmgr.onGossip(dialogID, self, KBEngine.entities[targetID])
Dialog._timermap = {}
| LaoZhongGu/kbengine | demo/res/scripts/cell/interfaces/Dialog.py | Python | lgpl-3.0 | 537 |
import os
import threading
import txaio
txaio.use_twisted()
from txaio import make_logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.wamp.types import PublishOptions
from autobahn.twisted.wamp import ApplicationSession
class MyPublisher(ApplicationSession):
log = make_logger()
def __init__(self, config):
self.ident = '{}:{}'.format(os.getpid(), threading.get_ident())
self.log.info('{klass}[{ident}].__init__(config={config})',
klass=self.__class__.__name__, ident=self.ident, config=str(config))
ApplicationSession.__init__(self, config)
@inlineCallbacks
def onJoin(self, details):
self.log.info('{klass}[{ident}].onJoin(details={details})',
klass=self.__class__.__name__, ident=self.ident, details=details)
n = 2
running = True
last_error = None
while running and n <= 2**25:
data = os.urandom(n)
try:
res = yield self.publish('com.example.topic1', data,
options=PublishOptions(acknowledge=True, exclude_me=False))
except Exception as e:
self.log.failure()
running = False
last_error = e
else:
self.log.info('{klass}[{ident}].publish(): succeeded for n={n}, res={res}',
klass=self.__class__.__name__, ident=self.ident, n=n, res=res)
n = n * 2
yield sleep(1)
if last_error:
self.log.info('Encountered error at n={n}', n=n)
else:
self.log.info('Finished (without error) at n={n}', n=n)
yield sleep(1)
yield self.publish('com.example.topic1', os.urandom(16), options=PublishOptions(acknowledge=True))
self.log.info('Ok, session still working - leaving now ..')
yield self.leave()
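# Hedged usage sketch (added; not part of the original example): a MyPublisher
# session is normally started through an ApplicationRunner. The router URL and
# realm below are assumptions for illustration only.
if __name__ == '__main__':
    from autobahn.twisted.wamp import ApplicationRunner
    runner = ApplicationRunner(u"ws://localhost:8080/ws", u"realm1")
    runner.run(MyPublisher)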
| crossbario/crossbar-examples | containers/max_message_size/publisher.py | Python | apache-2.0 | 1,986 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from email import utils as email_utils
import re
from oslo_log import log as logging
import six
from six.moves.urllib import parse
from stackalytics.processor import utils
LOG = logging.getLogger(__name__)
EMAIL_HEADER_PATTERN = ('From \S+(?: at \S+)?\s+'
'\w{3}\s+\w{3}\s+\d{1,2}\s+\d{2}:\d{2}(?::\d{2})?'
'(?:\s+\S+)?\s+\d{4}.*?\n')
MAIL_BOX_PATTERN = re.compile(
'^' + EMAIL_HEADER_PATTERN +
'From: (?P<author_email>\S+(?: at \S+))'
'(?:\W+(?P<author_name>\w+(?:\s\w+)*))?.*?\n'
'Date: (?P<date>.*?)\n'
'Subject: (?P<subject>.*?)(?=\n\S+:)'
'.*?Message-ID: (?P<message_id>\S+)\n'
'\n(?P<body>.*?)\n'
'(?=' + EMAIL_HEADER_PATTERN + 'From: )',
flags=re.MULTILINE | re.DOTALL)
MESSAGE_PATTERNS = {
'bug_id': re.compile(r'https://bugs.launchpad.net/bugs/(?P<id>\d+)',
re.IGNORECASE),
'blueprint_id': re.compile(r'https://blueprints.launchpad.net/'
r'(?P<module>[^\/]+)/\+spec/(?P<id>[a-z0-9-]+)',
re.IGNORECASE),
}
TRAILING_RECORD = ('From ishakhat at mirantis.com Tue Sep 17 07:30:43 2013\n'
'From: ')
def _get_mail_archive_links(uri):
content = utils.read_uri(uri)
if not content:
LOG.warning('Mail archive list is not found at %s', uri)
return []
links = set(re.findall(r'\shref\s*=\s*[\'"]([^\'"]*\.txt\.gz)', content,
flags=re.IGNORECASE))
return [parse.urljoin(uri, link) for link in links]
def _uri_content_changed(uri, runtime_storage_inst):
LOG.debug('Check changes for mail archive located at: %s', uri)
last_modified = utils.get_uri_last_modified(uri)
if last_modified != runtime_storage_inst.get_by_key('mail_link:' + uri):
LOG.debug('Mail archive changed, last modified at: %s', last_modified)
runtime_storage_inst.set_by_key('mail_link:' + uri, last_modified)
return True
return False
def _optimize_body(email_body):
result = []
for line in email_body.split('\n'):
line = line.strip()
if line[:1] == '>' or line[:8] == '--------':
continue # ignore replies and part delimiters
if (not result) or (result and result[-1] != line):
result.append(line)
return '\n'.join(result)
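# Illustrative note (added): _optimize_body drops quoted reply lines ("> ..."),
# part delimiters ("--------"), and consecutive duplicate lines; e.g. a body of
# "hi\n> quoted\nthere\nthere" is reduced to "hi\nthere".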
def _retrieve_mails(uri):
LOG.debug('Retrieving mail archive from: %s', uri)
content = utils.read_gzip_from_uri(uri)
if not content:
LOG.error('Error reading mail archive from: %s', uri)
return
LOG.debug('Mail archive is loaded, start processing')
content += TRAILING_RECORD
for rec in re.finditer(MAIL_BOX_PATTERN, content):
email = rec.groupdict()
email['author_email'] = email['author_email'].replace(' at ', '@', 1)
if not utils.check_email_validity(email['author_email']):
continue
email['date'] = int(email_utils.mktime_tz(
email_utils.parsedate_tz(email['date'])))
email['body'] = _optimize_body(email['body'])
for pattern_name, pattern in six.iteritems(MESSAGE_PATTERNS):
collection = set()
for item in re.finditer(pattern, email['body']):
groups = item.groupdict()
item_id = groups['id']
if 'module' in groups:
item_id = groups['module'] + ':' + item_id
email['module'] = groups['module']
collection.add(item_id)
email[pattern_name] = list(collection)
yield email
def log(uri, runtime_storage_inst):
links = _get_mail_archive_links(uri)
for link in links:
if _uri_content_changed(link, runtime_storage_inst):
for mail in _retrieve_mails(link):
LOG.debug('New mail: %s', mail['message_id'])
yield mail
| 0xf2/stackalytics | stackalytics/processor/mls.py | Python | apache-2.0 | 4,490 |
# Add new column with constant value to first resource
# Column name and value are taken from the processor's parameters
from datapackage_pipelines.wrapper import process
def modify_datapackage(datapackage, parameters, _):
datapackage['resources'][0]['schema']['fields'].append({
'name': parameters['column-name'],
'type': 'string'
})
return datapackage
def process_row(row, _1, _2, resource_index, parameters, _):
if resource_index == 0:
row[parameters['column-name']] = parameters['value']
return row
process(modify_datapackage=modify_datapackage,
process_row=process_row)
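# Hedged usage sketch (added; not part of the original sample): a
# pipeline-spec.yaml step invoking this processor might look like the
# following; the pipeline name, column name, and value are hypothetical.
#
#   my-pipeline:
#     pipeline:
#       - run: add_constant
#         parameters:
#           column-name: source
#           value: example-source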
| frictionlessdata/datapackage-pipelines | samples/add_constant.py | Python | mit | 629 |
from django.db import models
from django.contrib.auth.models import Group
class Category(models.Model):
title = models.CharField(max_length=200)
def __unicode__(self):
return self.title
class Entry(models.Model):
category = models.ForeignKey(Category)
groups = models.ManyToManyField(Group, blank=True)
title = models.CharField(max_length=200)
year = models.IntegerField(blank=True, null=True)
description = models.TextField(blank=True, null=True)
content = models.TextField()
is_public = models.BooleanField(default=True)
def __unicode__(self):
return self.title
| UTAlan/ginniBeam.net | gin/writings/models.py | Python | gpl-2.0 | 632 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
import os
def data_generator():
data = [0, 1, 2, 3]
for val in data:
yield val
class TestDistributedReader(unittest.TestCase):
def test_distributed_reader(self):
trainer_num = 4
os.environ['PADDLE_TRAINER_ID'] = str(1)
os.environ['PADDLE_TRAINERS_NUM'] = str(trainer_num)
reader = fluid.contrib.reader.distributed_batch_reader(data_generator)
data = next(reader())
assert data == 1
os.unsetenv('PADDLE_TRAINER_ID')
os.unsetenv('PADDLE_TRAINERS_NUM')
if __name__ == '__main__':
unittest.main()
| tensor-tang/Paddle | python/paddle/fluid/contrib/tests/test_distributed_reader.py | Python | apache-2.0 | 1,305 |
from __future__ import unicode_literals
import json
import requests
from rest_framework import viewsets, permissions, views
from django.contrib.auth.models import User
from rest_framework.authentication import TokenAuthentication, BasicAuthentication
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import UserSerializer, AccountSerializer, ExpensesSerializer, TransactionsSerializer, \
UserDetailsSerializer
from .models import Account, Expenses, UserDetails, Transactions
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (permissions.AllowAny,)
class AccountViewSet(viewsets.ModelViewSet):
queryset = Account.objects.all()
serializer_class = AccountSerializer
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def perform_create(self, serializer):
serializer.save(accountUser=self.request.user)
class ExpenseViewSet(viewsets.ModelViewSet):
queryset = Expenses.objects.all()
serializer_class = ExpensesSerializer
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def perform_create(self, serializer):
serializer.save(expenseUser=self.request.user)
class UserDetailViewSet(viewsets.ModelViewSet):
queryset = UserDetails.objects.all()
serializer_class = UserDetailsSerializer
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def perform_create(self, serializer):
serializer.save(userId=self.request.user)
class TransactionViewSet(viewsets.ModelViewSet):
queryset = Transactions.objects.all()
serializer_class = TransactionsSerializer
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def perform_create(self, serializer):
serializer.save(fromUser=self.request.user)
class CustomObtainAuthToken(ObtainAuthToken):
def post(self, request, *args, **kwargs):
response = super(CustomObtainAuthToken, self).post(request, *args, **kwargs)
token = Token.objects.get(key=response.data['token'])
return Response({'token': token.key, 'id': token.user_id})
# class TokenViewSet(views.APIView):
# permission_classes = (permissions.AllowAny,)
#
# def post(self, request):
# print request.data['email']
# data = request.data['email']
# if request.data['isvarified']:
# user = User.objects.get(email=data)
# token = Token.objects.get(user=user)
# authtoken = {'bonapacheT': str(token)}
# return JsonResponse(authtoken)
# else:
# return JsonResponse({'error': 'maakda'})
@api_view(['POST'])
def SocialAuthFacebook(request):
if request.method == 'POST':
token = request.data['token']
url = 'https://graph.facebook.com/me?fields=id,name,email,first_name,last_name,picture&access_token=' + token
r = requests.get(url)
data = json.loads(r.text)
if 'error' in data:
resData = {'error': 'Invalid Auth Token ! Beware this incident will be reported !'}
else:
try:
user = User.objects.get(username=data['email'])
serializer = UserSerializer(user)
token = Token.objects.get(user=user)
userDetail, created = UserDetails.objects.get_or_create(userId=user)
if created:
userDetail.userFacebookId = data['id']
userDetail.userPicUrl = data['picture']['data']['url']
userDetail.save()
userDetailSerializer = UserDetailsSerializer(userDetail)
except User.DoesNotExist:
newUser = {'username': data['email'], 'email': data['email'], 'first_name': data['first_name'],
'last_name': data['last_name'], 'password': 'shotu123'}
serializer = UserSerializer(data=newUser)
if serializer.is_valid():
serializer.save()
user = User.objects.get(username=serializer.data['email'])
userDetail, created = UserDetails.objects.get_or_create(userId=user)
if created:
userDetail.userFacebookId = data['id']
userDetail.userPicUrl = data['picture']['data']['url']
userDetail.save()
userDetailSerializer = UserDetailsSerializer(userDetail)
token, created = Token.objects.get_or_create(user=user)
resData = {'token': token.key, 'userData': serializer.data, 'userDetail': userDetailSerializer.data}
return Response(resData)
@api_view(['POST'])
def SocialAuthGoogle(request):
if request.method == 'POST':
token = request.data['token']
url = 'https://www.googleapis.com/userinfo/v2/me'
header = {'Authorization': 'Bearer ' + token}
r = requests.get(url, headers=header)
data = json.loads(r.text)
print data
if 'error' in data:
resData = {'Error': 'Invalid Credentials ! This event will be reported'}
else:
try:
user = User.objects.get(username=data['email'])
serializer = UserSerializer(user)
token = Token.objects.get(user=user)
userDetail, created = UserDetails.objects.get_or_create(userId=user)
if created:
userDetail.userGoogleId = data['id']
userDetail.userPicUrl = data['picture']
userDetail.save()
userDetailSerializer = UserDetailsSerializer(userDetail)
except User.DoesNotExist:
newUser = {'username': data['email'], 'email': data['email'], 'first_name': data['given_name'],
'last_name': data['family_name'], 'password': 'shotu123'}
serializer = UserSerializer(data=newUser)
if serializer.is_valid():
serializer.save()
user = User.objects.get(username=serializer.data['email'])
userDetail, created = UserDetails.objects.get_or_create(userId=user)
if created:
userDetail.userPicUrl = data['picture']
userDetail.userGoogleId = data['id']
userDetail.save()
userDetailSerializer = UserDetailsSerializer(userDetail)
token, created = Token.objects.get_or_create(user=user)
resData = {'token': token.key, 'userData': serializer.data, 'userDetail': userDetailSerializer.data}
return Response(resData)
| mr-someone/mozo-rest-django | mozorest/views.py | Python | mit | 7,433 |
#######################################################################
# Implements a topological sort algorithm.
#
# Copyright 2014 True Blade Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Notes:
# Based on http://code.activestate.com/recipes/578272-topological-sort
# with these major changes:
# Added unittests.
# Deleted doctests (maybe not the best idea in the world, but it cleans
# up the docstring).
# Moved functools import to the top of the file.
# Changed assert to a ValueError.
# Changed iter[items|keys] to [items|keys], for python 3
# compatibility.
# Copy the input so as to leave it unmodified.
# Renamed function from toposort2 to toposort.
# Handle empty input.
# Switch tests to use set literals.
#
########################################################################
from functools import reduce as _reduce
__all__ = ['toposort', 'toposort_flatten']
def toposort(data):
"""Dependencies are expressed as a dictionary whose keys are items
and whose values are a set of dependent items. Output is a list of
sets in topological order. The first set consists of items with no
dependencies, each subsequent set consists of items that depend upon
items in the preceding sets.
"""
# Special case empty input.
if len(data) == 0:
return
# Copy the input so as to leave it unmodified.
data = data.copy()
# Ignore self dependencies.
for k, v in data.items():
v.discard(k)
# Find all items that don't depend on anything.
extra_items_in_deps = _reduce(set.union, data.values()) - set(data.keys())
# Add empty dependencies where needed.
data.update({item: set() for item in extra_items_in_deps})
while True:
ordered = set(item for item, dep in data.items() if len(dep) == 0)
if not ordered:
break
yield ordered
data = {item: (dep - ordered)
for item, dep in data.items()
if item not in ordered}
if len(data) != 0:
raise ValueError('Cyclic dependencies exist among these items: {}'.format(
', '.join(repr(x) for x in data.items())))
def toposort_flatten(data, sort=True):
"""Returns a single list of dependencies. For any set returned by
toposort(), those items are sorted and appended to the result (just to
make the results deterministic)."""
result = []
for d in toposort(data):
result.extend((sorted if sort else list)(d))
return result
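# Illustrative usage sketch (added; not part of the original module): shows the
# expected shape of input and output for toposort()/toposort_flatten(). The
# dependency data below is a made-up example.
if __name__ == '__main__':
    _example_deps = {
        2: {11},
        9: {11, 8},
        10: {11, 3},
        11: {7, 5},
        8: {7, 3},
    }
    # Each yielded set only depends on items from earlier sets:
    # [{3, 5, 7}, {8, 11}, {2, 9, 10}]
    print(list(toposort(_example_deps)))
    # Flattened, deterministic ordering: [3, 5, 7, 8, 11, 2, 9, 10]
    print(toposort_flatten(_example_deps))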
| wangmiao1981/spark | dev/sparktestsupport/toposort.py | Python | apache-2.0 | 3,002 |
import importlib
import logging
import sys
import unittest
from os.path import sep, abspath
from queue import Queue
from tempfile import mkdtemp
from threading import Thread
import pytest
from errbot.rendering import text
from errbot.backends.base import Message, Room, Person, RoomOccupant, ONLINE
from errbot.core_plugins.wsview import reset_app
from errbot.errBot import ErrBot
from errbot.main import setup_bot
# Can't use __name__ because of Yapsy
log = logging.getLogger('errbot.backends.test')
QUIT_MESSAGE = '$STOP$'
STZ_MSG = 1
STZ_PRE = 2
STZ_IQ = 3
class TestPerson(Person):
"""
This is an identifier just represented as a string.
DO NOT USE THIS DIRECTLY AS IT IS NOT COMPATIBLE WITH MOST BACKENDS,
use self.build_identifier(identifier_as_string) instead.
Note to back-end implementors: You should provide a custom
<yourbackend>Identifier object that adheres to this interface.
You should not directly inherit from SimpleIdentifier, inherit
from object instead and make sure it includes all properties and
methods exposed by this class.
"""
def __init__(self, person, client=None, nick=None, fullname=None):
self._person = person
self._client = client
self._nick = nick
self._fullname = fullname
@property
def person(self):
"""This needs to return the part of the identifier pointing to a person."""
return self._person
@property
def client(self):
"""This needs to return the part of the identifier pointing to a client from which a person is sending a message from.
Returns None is unspecified"""
return self._client
@property
def nick(self):
"""This needs to return a short display name for this identifier e.g. gbin.
        Returns None if unspecified"""
return self._nick
@property
def fullname(self):
"""This needs to return a long display name for this identifier e.g. Guillaume Binet.
        Returns None if unspecified"""
return self._fullname
aclattr = person
def __unicode__(self):
if self.client:
return self._person + "/" + self._client
return self._person
__str__ = __unicode__
def __eq__(self, other):
return self.person == other.person
# noinspection PyAbstractClass
class TestOccupant(TestPerson, RoomOccupant):
""" This is a MUC occupant represented as a string.
DO NOT USE THIS DIRECTLY AS IT IS NOT COMPATIBLE WITH MOST BACKENDS,
"""
def __init__(self, person, room):
super().__init__(person)
self._room = room
@property
def room(self):
return self._room
def __unicode__(self):
return self._person + '@' + str(self._room)
__str__ = __unicode__
def __eq__(self, other):
return self.person == other.person and self.room == other.room
class TestRoom(Room):
def invite(self, *args):
pass
def __init__(self, name, occupants=None, topic=None, bot=None):
"""
:param name: Name of the room
:param occupants: Occupants of the room
:param topic: The MUC's topic
"""
if occupants is None:
occupants = []
self._occupants = occupants
self._topic = topic
self._bot = bot
self._name = name
self._bot_mucid = TestOccupant(self._bot.bot_config.BOT_IDENTITY['username'], self._name)
@property
def occupants(self):
return self._occupants
def find_croom(self):
""" find back the canonical room from a this room"""
for croom in self._bot._rooms:
if croom == self:
return croom
return None
@property
def joined(self):
room = self.find_croom()
if room:
return self._bot_mucid in room.occupants
return False
def join(self, username=None, password=None):
if self.joined:
logging.warning("Attempted to join room '{!s}', but already in this room".format(self))
return
if not self.exists:
log.debug("Room {!s} doesn't exist yet, creating it".format(self))
self.create()
room = self.find_croom()
room._occupants.append(self._bot_mucid)
log.info("Joined room {!s}".format(self))
self._bot.callback_room_joined(room)
def leave(self, reason=None):
if not self.joined:
logging.warning("Attempted to leave room '{!s}', but not in this room".format(self))
return
room = self.find_croom()
room._occupants.remove(self._bot_mucid)
log.info("Left room {!s}".format(self))
self._bot.callback_room_left(room)
@property
def exists(self):
return self.find_croom() is not None
def create(self):
if self.exists:
logging.warning("Room {!s} already created".format(self))
return
self._bot._rooms.append(self)
log.info("Created room {!s}".format(self))
def destroy(self):
if not self.exists:
logging.warning("Cannot destroy room {!s}, it doesn't exist".format(self))
return
self._bot._rooms.remove(self)
log.info("Destroyed room {!s}".format(self))
@property
def topic(self):
return self._topic
@topic.setter
def topic(self, topic):
self._topic = topic
room = self.find_croom()
room._topic = self._topic
log.info("Topic for room {!s} set to '{}'".format(self, topic))
self._bot.callback_room_topic(self)
def __unicode__(self):
return self._name
def __str__(self):
return self._name
def __eq__(self, other):
return self._name == other._name
class TestBackend(ErrBot):
def change_presence(self, status: str = ONLINE, message: str = '') -> None:
pass
def __init__(self, config):
config.BOT_LOG_LEVEL = logging.DEBUG
        config.CHATROOM_PRESENCE = ('testroom',) # we are testing with simple identifiers
        config.BOT_IDENTITY = {'username': 'err'} # we are testing with simple identifiers
self.bot_identifier = self.build_identifier('Err') # whatever
super().__init__(config)
self.incoming_stanza_queue = Queue()
self.outgoing_message_queue = Queue()
self.sender = self.build_identifier(config.BOT_ADMINS[0]) # By default, assume this is the admin talking
self.reset_rooms()
self.md = text()
def send_message(self, mess):
log.info("\n\n\nMESSAGE:\n%s\n\n\n", mess.body)
super().send_message(mess)
self.outgoing_message_queue.put(self.md.convert(mess.body))
def serve_forever(self):
        self.connect_callback() # notify that the connection occurred
try:
while True:
print('waiting on queue')
stanza_type, entry = self.incoming_stanza_queue.get()
print('message received')
if entry == QUIT_MESSAGE:
log.info("Stop magic message received, quitting...")
break
if stanza_type is STZ_MSG:
msg = Message(entry)
msg.frm = self.sender
msg.to = self.bot_identifier # To me only
self.callback_message(msg)
# implements the mentions.
mentioned = [self.build_identifier(word[1:]) for word in entry.split() if word.startswith('@')]
if mentioned:
self.callback_mention(msg, mentioned)
elif stanza_type is STZ_PRE:
log.info("Presence stanza received.")
self.callback_presence(entry)
elif stanza_type is STZ_IQ:
log.info("IQ stanza received.")
else:
log.error("Unknown stanza type.")
except EOFError:
pass
except KeyboardInterrupt:
pass
finally:
log.debug("Trigger disconnect callback")
self.disconnect_callback()
log.debug("Trigger shutdown")
self.shutdown()
def connect(self):
return
def build_identifier(self, text_representation):
return TestPerson(text_representation)
def build_reply(self, mess, text=None, private=False):
msg = self.build_message(text)
msg.frm = self.bot_identifier
msg.to = mess.frm
return msg
@property
def mode(self):
return 'test'
def rooms(self):
return [r for r in self._rooms if r.joined]
def query_room(self, room):
try:
return [r for r in self._rooms if str(r) == str(room)][0]
except IndexError:
r = TestRoom(room, bot=self)
return r
def prefix_groupchat_reply(self, message, identifier):
super().prefix_groupchat_reply(message, identifier)
message.body = '@{0} {1}'.format(identifier.nick, message.body)
def pop_message(self, timeout=5, block=True):
return self.outgoing_message_queue.get(timeout=timeout, block=block)
def push_message(self, msg):
self.incoming_stanza_queue.put((STZ_MSG, msg), timeout=5)
def push_presence(self, presence):
""" presence must at least duck type base.Presence
"""
self.incoming_stanza_queue.put((STZ_PRE, presence), timeout=5)
def zap_queues(self):
while not self.incoming_stanza_queue.empty():
msg = self.incoming_stanza_queue.get(block=False)
            log.error('Message left in the incoming queue during a test: %s' % msg)
while not self.outgoing_message_queue.empty():
msg = self.outgoing_message_queue.get(block=False)
            log.error('Message left in the outgoing queue during a test: %s' % msg)
def reset_rooms(self):
"""Reset/clear all rooms"""
self._rooms = []
class ShallowConfig(object):
pass
class TestBot(object):
"""
A minimal bot utilizing the TestBackend, for use with unit testing.
Only one instance of this class should globally be active at any one
time.
End-users should not use this class directly. Use
:func:`~errbot.backends.test.testbot` or
:class:`~errbot.backends.test.FullStackTest` instead, which use this
class under the hood.
"""
bot_thread = None
def __init__(self, extra_plugin_dir=None, loglevel=logging.DEBUG, extra_config=None):
self.setup(extra_plugin_dir=extra_plugin_dir, loglevel=loglevel, extra_config=extra_config)
def setup(self, extra_plugin_dir=None, loglevel=logging.DEBUG, extra_config=None):
"""
:param extra_config: Piece of extra configuration you want to inject to the config.
:param extra_plugin_dir: Path to a directory from which additional
plugins should be loaded.
:param loglevel: Logging verbosity. Expects one of the constants
defined by the logging module.
"""
tempdir = mkdtemp()
# This is for test isolation.
config = ShallowConfig()
config.__dict__.update(importlib.import_module('errbot.config-template').__dict__)
config.BOT_DATA_DIR = tempdir
config.BOT_LOG_FILE = tempdir + sep + 'log.txt'
config.STORAGE = 'Memory'
if extra_config is not None:
log.debug('Merging %s to the bot config.' % repr(extra_config))
for k, v in extra_config.items():
setattr(config, k, v)
# reset logging to console
logging.basicConfig(format='%(levelname)s:%(message)s')
file = logging.FileHandler(config.BOT_LOG_FILE, encoding='utf-8')
self.logger = logging.getLogger('')
self.logger.setLevel(loglevel)
self.logger.addHandler(file)
config.BOT_EXTRA_PLUGIN_DIR = extra_plugin_dir
config.BOT_LOG_LEVEL = loglevel
self.bot_config = config
def start(self):
"""
Start the bot
Calling this method when the bot has already started will result
in an Exception being raised.
"""
if self.bot_thread is not None:
raise Exception("Bot has already been started")
self.bot = setup_bot('Test', self.logger, self.bot_config)
self.bot_thread = Thread(target=self.bot.serve_forever, name='TestBot main thread')
self.bot_thread.setDaemon(True)
self.bot_thread.start()
self.bot.push_message("!echo ready")
# Ensure bot is fully started and plugins are loaded before returning
assert self.bot.pop_message(timeout=60) == "ready"
def stop(self):
"""
Stop the bot
Calling this method before the bot has started will result in an
Exception being raised.
"""
if self.bot_thread is None:
raise Exception("Bot has not yet been started")
self.bot.push_message(QUIT_MESSAGE)
self.bot_thread.join()
reset_app() # empty the bottle ... hips!
log.info("Main bot thread quits")
self.bot.zap_queues()
self.bot.reset_rooms()
self.bot_thread = None
def pop_message(self, timeout=5, block=True):
return self.bot.pop_message(timeout, block)
def push_message(self, msg):
return self.bot.push_message(msg)
def push_presence(self, presence):
""" presence must at least duck type base.Presence
"""
return self.bot.push_presence(presence)
def zap_queues(self):
return self.bot.zap_queues()
def assertCommand(self, command, response, timeout=5):
"""Assert the given command returns the given response"""
self.bot.push_message(command)
assert response in self.bot.pop_message(timeout)
def assertCommandFound(self, command, timeout=5):
"""Assert the given command does not exist"""
self.bot.push_message(command)
assert 'not found' not in self.bot.pop_message(timeout)
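# Illustrative sketch (not part of the original module): the assertion helpers
# above are typically used from a FullStackTest subclass, e.g.
#
#     class TestCommands(FullStackTest):
#         def test_about(self):
#             self.assertCommand('!about', 'Err version')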
class FullStackTest(unittest.TestCase, TestBot):
"""
Test class for use with Python's unittest module to write tests
against a fully functioning bot.
For example, if you wanted to test the builtin `!about` command,
you could write a test file with the following::
from errbot.backends.test import FullStackTest
class TestCommands(FullStackTest):
def test_about(self):
self.push_message('!about')
self.assertIn('Err version', self.pop_message())
"""
def setUp(self, extra_plugin_dir=None, extra_test_file=None, loglevel=logging.DEBUG, extra_config=None):
"""
:param extra_plugin_dir: Path to a directory from which additional
plugins should be loaded.
:param extra_test_file: [Deprecated but kept for backward-compatibility,
use extra_plugin_dir instead]
Path to an additional plugin which should be loaded.
:param loglevel: Logging verbosity. Expects one of the constants
defined by the logging module.
:param extra_config: Piece of extra bot config in a dict.
"""
if extra_plugin_dir is None and extra_test_file is not None:
extra_plugin_dir = sep.join(abspath(extra_test_file).split(sep)[:-2])
self.setup(extra_plugin_dir=extra_plugin_dir, loglevel=loglevel, extra_config=extra_config)
self.start()
def tearDown(self):
self.stop()
@pytest.fixture
def testbot(request):
"""
Pytest fixture to write tests against a fully functioning bot.
For example, if you wanted to test the builtin `!about` command,
you could write a test file with the following::
from errbot.backends.test import testbot
def test_about(testbot):
testbot.push_message('!about')
assert "Err version" in testbot.pop_message()
It's possible to provide additional configuration to this fixture,
by setting variables at module level or as class attributes (the
latter taking precedence over the former). For example::
from errbot.backends.test import testbot
extra_plugin_dir = '/foo/bar'
def test_about(testbot):
            testbot.push_message('!about')
assert "Err version" in testbot.pop_message()
..or::
from errbot.backends.test import testbot
extra_plugin_dir = '/foo/bar'
class Tests(object):
# Wins over `extra_plugin_dir = '/foo/bar'` above
extra_plugin_dir = '/foo/baz'
def test_about(self, testbot):
testbot.push_message('!about')
assert "Err version" in testbot.pop_message()
..to load additional plugins from the directory `/foo/bar` or
`/foo/baz` respectively. This works for the following items, which are
passed to the constructor of :class:`~errbot.backends.test.TestBot`:
* `extra_plugin_dir`
* `loglevel`
"""
def on_finish():
bot.stop()
    # set up the logging to something digestible.
logger = logging.getLogger('')
logging.getLogger('yapsy').setLevel(logging.ERROR) # this one is way too verbose in debug
logging.getLogger('MARKDOWN').setLevel(logging.ERROR) # this one is way too verbose in debug
logging.getLogger('Rocket.Errors').setLevel(logging.ERROR) # this one is way too verbose in debug
logger.setLevel(logging.DEBUG)
console_hdlr = logging.StreamHandler(sys.stdout)
console_hdlr.setFormatter(logging.Formatter("%(levelname)-8s %(name)-25s %(message)s"))
logger.handlers = []
logger.addHandler(console_hdlr)
kwargs = {}
for attr, default in (('extra_plugin_dir', None), ('loglevel', logging.DEBUG),):
if hasattr(request, 'instance'):
kwargs[attr] = getattr(request.instance, attr, None)
if kwargs[attr] is None:
kwargs[attr] = getattr(request.module, attr, default)
bot = TestBot(**kwargs)
bot.start()
request.addfinalizer(on_finish)
return bot
| Synforge/err | errbot/backends/test.py | Python | gpl-3.0 | 18,173 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
class Args(object):
def __init__(self, *args, **kargs):
object.__init__(self)
self.args = args
self.kargs = kargs
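# Illustrative usage (not part of the original module):
#     a = Args(1, 2, key='value')
#     a.args == (1, 2) and a.kargs == {'key': 'value'} # -> True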
| ganeshgore/myremolab | server/src/voodoo/gen/generators/Args.py | Python | bsd-2-clause | 520 |
"""
Various linear algebra utilities.
Notes on the fast Hadamard transform (Alistair Reid, NICTA 2015):
The Hadamard transform is a recursive application of sums and differences
on a vector of length 2^n. The log(n) recursive applications allow computation
in n log(n) operations per vector, instead of the naive n^2 that would result
from constructing and then multiplying by an explicit Hadamard matrix.
Note: the Walsh ordering is naturally computed by the recursive algorithm. The
sequence ordering requires a re-ordering afterwards incurring a similar
overhead to the original computation.
.. code:: python
# Larger tests against calling Julia: [length 4*512 vectors:]
M = np.argsort(np.sin(np.arange(64))).astype(float)
Julia: 570us (sequence ordered)
Al's single optimised: 560us (sequence ordered), 30us (natural)
Al's basic vectorised: 1370us (sequence ordered)
"""
from __future__ import division
import numpy as np
from scipy.linalg import cholesky, cho_solve, svd, LinAlgError
# Numerical constants / thresholds
CHOLTHRESH = 1e-5
def cho_log_det(L):
"""
Compute the log of the determinant of :math:`A`, given its (upper or lower)
Cholesky factorization :math:`LL^T`.
Parameters
----------
L: ndarray
an upper or lower Cholesky factor
Examples
--------
>>> A = np.array([[ 2, -1, 0],
... [-1, 2, -1],
... [ 0, -1, 2]])
>>> Lt = cholesky(A)
>>> np.isclose(cho_log_det(Lt), np.log(np.linalg.det(A)))
True
>>> L = cholesky(A, lower=True)
>>> np.isclose(cho_log_det(L), np.log(np.linalg.det(A)))
True
"""
return 2 * np.sum(np.log(L.diagonal()))
def svd_log_det(s):
"""
Compute the log of the determinant of :math:`A`, given its singular values
from an SVD factorisation (i.e. :code:`s` from :code:`U, s, Ut = svd(A)`).
Parameters
----------
s: ndarray
the singular values from an SVD decomposition
Examples
--------
>>> A = np.array([[ 2, -1, 0],
... [-1, 2, -1],
... [ 0, -1, 2]])
>>> _, s, _ = np.linalg.svd(A)
>>> np.isclose(svd_log_det(s), np.log(np.linalg.det(A)))
True
"""
return np.sum(np.log(s))
def solve_posdef(A, b):
"""
Solve the system :math:`A X = b` for :math:`X` where :math:`A` is a
positive semi-definite matrix.
    This first tries a Cholesky factorisation and, if that proves numerically
    unstable, falls back to a truncated SVD solve (see svd_solve).
The log-determinant of :math:`A` is also returned since it requires minimal
overhead.
Parameters
----------
A: ndarray
A positive semi-definite matrix.
b: ndarray
An array or matrix
Returns
-------
X: ndarray
The result of :math:`X = A^-1 b`
logdet: float
The log-determinant of :math:`A`
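    Examples
    --------
    A minimal illustrative check (assumes a small, well-conditioned positive
    definite matrix, so the Cholesky branch is taken):
    >>> A = np.array([[ 2., -1.,  0.],
    ...               [-1.,  2., -1.],
    ...               [ 0., -1.,  2.]])
    >>> b = np.array([1., 2., 3.])
    >>> X, logdet = solve_posdef(A, b)
    >>> np.allclose(A.dot(X), b)
    True
    >>> np.isclose(logdet, np.log(np.linalg.det(A)))
    True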
"""
# Try cholesky for speed
try:
lower = False
L = cholesky(A, lower=lower)
if any(L.diagonal() < CHOLTHRESH):
raise LinAlgError("Unstable cholesky factor detected")
X = cho_solve((L, lower), b)
logdet = cho_log_det(L)
# Failed cholesky, use svd to do the inverse
except LinAlgError:
U, s, V = svd(A)
X = svd_solve(U, s, V, b)
logdet = svd_log_det(s)
return X, logdet
def svd_solve(U, s, V, b, s_tol=1e-15):
"""
Solve the system :math:`A X = b` for :math:`X`.
    Here :math:`A` is a positive semi-definite matrix, solved via its singular
    value decomposition. Singular values smaller than :code:`s_tol` are clamped
    so that near-zero singular values cannot destabilise the solution.
Parameters
----------
U: ndarray
The :code:`U` factor of :code:`U, s, V = svd(A)` positive
semi-definite matrix.
s: ndarray
The :code:`s` factor of :code:`U, s, V = svd(A)` positive
semi-definite matrix.
V: ndarray
The :code:`V` factor of :code:`U, s, V = svd(A)` positive
semi-definite matrix.
b: ndarray
An array or matrix
s_tol: float
Cutoff for small singular values. Singular values smaller than
:code:`s_tol` are clamped to :code:`s_tol`.
Returns
-------
X: ndarray
The result of :math:`X = A^-1 b`
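    Examples
    --------
    An illustrative check (assumes a symmetric positive definite matrix, for
    which this SVD-based solve recovers :math:`A^{-1} b`):
    >>> A = np.array([[ 2., -1.,  0.],
    ...               [-1.,  2., -1.],
    ...               [ 0., -1.,  2.]])
    >>> U, s, V = np.linalg.svd(A)
    >>> b = np.array([1., 2., 3.])
    >>> X = svd_solve(U, s, V, b)
    >>> np.allclose(A.dot(X), b)
    True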
"""
# Test shapes for efficient computations
n = U.shape[0]
assert(b.shape[0] == n)
m = b.shape[1] if np.ndim(b) > 1 else 1
# Auto clamp SVD based on threshold
sclamp = np.maximum(s, s_tol)
# Inversion factors
ss = 1. / np.sqrt(sclamp)
U2 = U * ss[np.newaxis, :]
V2 = ss[:, np.newaxis] * V
if m < n:
# Few queries
X = U2.dot(V2.dot(b)) # O(n^2 (2m))
else:
X = U2.dot(V2).dot(b) # O(n^2 (m + n))
return X
def hadamard(Y, ordering=True):
"""
*Very fast* Hadamard transform for single vector at a time.
Parameters
----------
Y: ndarray
the n*2^k data to be 1d hadamard transformed
ordering: bool, optional
reorder from Walsh to sequence
Returns
-------
H: ndarray
hadamard transformed data.
Examples
--------
from https://en.wikipedia.org/wiki/Hadamard_transform with normalisation
>>> y = np.array([[1, 0, 1, 0, 0, 1, 1, 0]])
>>> hadamard(y, ordering=False)
array([[ 0.5 , 0.25, 0. , -0.25, 0. , 0.25, 0. , 0.25]])
>>> hadamard(y, ordering=True)
array([[ 0.5 , 0. , 0. , 0. , -0.25, 0.25, 0.25, 0.25]])
"""
    # np.dot is a sum product over the last axis of a and the second-to-last of b.
    # np.transpose with explicit axes (0, 2, 1) swaps the last two axes below.
n_vectors, n_Y = Y.shape
matching = (n_vectors, 2, int(n_Y / 2))
H = np.array([[1, 1], [1, -1]]) / 2. # Julia uses 2 and not sqrt(2)?
steps = int(np.log(n_Y) / np.log(2))
assert(2**steps == n_Y) # required
for _ in range(steps):
Y = np.transpose(Y.reshape(matching), (0, 2, 1)).dot(H)
Y = Y.reshape((n_vectors, n_Y))
if ordering:
Y = Y[:, _sequency(n_Y)]
return Y
def _sequency(length):
# http://fourier.eng.hmc.edu/e161/lectures/wht/node3.html
    # (although its explanation of Gray codes is not quite right)
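    # Illustration (worked by hand, not from the original source):
    # _sequency(8) -> [0, 4, 6, 2, 3, 7, 5, 1], i.e. the permutation that maps
    # the natural (Walsh) ordering of the transform to sequency ordering.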
s = np.arange(length).astype(int)
s = (s >> 1) ^ s # Grey code ...
# Reverse bits
order = np.zeros(s.shape).astype(int)
n = int(1)
m = length // 2
while n < length:
order |= m * (n & s) // n
n <<= 1
m >>= 1
return order
| NICTA/revrand | revrand/mathfun/linalg.py | Python | apache-2.0 | 6,602 |
import os
import entity2vec.node2vec as node2vec
from os import path
import networkx as nx
def main(config):
what = config.feature
if what is None:
raise RuntimeError('You must specify the feature using -f or --feature')
print('loading edgelists...')
G = nx.read_edgelist(path.join(config.edgelistDir, '%s.edgelist' % what), nodetype=str, create_using=nx.DiGraph())
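    # Note (added for clarity; the weight values are from the original code):
    # primary feature edges get a low weight (.3) while dependency edges added
    # below get a higher weight (6), presumably biasing node2vec's weighted
    # random walks towards the dependency relations.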
for edge in G.edges():
G[edge[0]][edge[1]]['weight'] = .3
if what in config.featureList:
feat = config.featureList[what]
if 'dependencies' in feat:
dependencies = feat['dependencies']
for eg in dependencies:
H = nx.read_edgelist(path.join(config.edgelistDir, '%s.edgelist' % eg), nodetype=str,
create_using=nx.DiGraph())
for edge in H.edges():
H[edge[0]][edge[1]]['weight'] = 6
G = nx.compose(G, H)
G = G.to_undirected()
n2vOpt = config.node2vec
directed = n2vOpt["directed"]
preprocessing = n2vOpt["preprocessing"]
weighted = n2vOpt["weighted"]
p = n2vOpt["p"]
q = n2vOpt["q"]
walk_length = n2vOpt["walk_length"]
num_walks = n2vOpt["num_walks"]
dimensions = n2vOpt["dimensions"]
window_size = n2vOpt["window_size"]
workers = n2vOpt["workers"]
iter = n2vOpt["iter"]
print(n2vOpt)
node2vec_graph = node2vec.Node2Vec(directed, preprocessing, weighted, p, q, walk_length,
num_walks, dimensions, window_size, workers, iter)
node2vec_graph.G = G
node2vec_graph.learn_embeddings('%s/%s.emb' % (config.embDir, what), 'text')
def init(config):
if not os.path.exists(config.edgelistDir):
os.makedirs(config.edgelistDir)
| DOREMUS-ANR/recommender | recsystem/embedder/embed.py | Python | mit | 1,797 |