hexsha
stringlengths 40
40
| size
int64 2
1.05M
| ext
stringclasses 9
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
193
| max_stars_repo_name
stringlengths 6
109
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
sequence | max_stars_count
int64 1
36.6k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
193
| max_issues_repo_name
stringlengths 6
109
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
sequence | max_issues_count
int64 1
29.8k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
193
| max_forks_repo_name
stringlengths 6
109
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
sequence | max_forks_count
int64 1
11.2k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.05M
| avg_line_length
float64 1
404k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73e669cb3bac808cd6231439760e24915b76113d | 830 | py | Python | tests/basic_stages/test_schematize.py | DavidKatz-il/pdpipe | 5ddd066425d99886bfc51cf19ab78b2bf8c7791a | [
"MIT"
] | 269 | 2019-12-21T13:30:37.000Z | 2022-03-27T18:41:36.000Z | tests/basic_stages/test_schematize.py | DavidKatz-il/pdpipe | 5ddd066425d99886bfc51cf19ab78b2bf8c7791a | [
"MIT"
] | 68 | 2019-12-21T12:51:51.000Z | 2022-03-13T13:06:14.000Z | tests/basic_stages/test_schematize.py | DavidKatz-il/pdpipe | 5ddd066425d99886bfc51cf19ab78b2bf8c7791a | [
"MIT"
] | 30 | 2019-12-21T12:18:18.000Z | 2022-03-17T05:53:19.000Z | """Testing the ColReorder stage."""
import pytest
import pandas as pd
from pdpipe import Schematize
from pdpipe.exceptions import FailedPreconditionError
def _df():
return pd.DataFrame([[2, 4, 8], [3, 6, 9]], [1, 2], ['a', 'b', 'c'])
def test_schematize():
    """Schematize keeps only the requested columns, in the requested order,
    and raises FailedPreconditionError for columns missing from the frame."""
    df = _df()

    stage = Schematize(['a', 'c'])
    res = stage(df)
    assert list(res.columns) == ['a', 'c']
    expected = {(0, 0): 2, (1, 0): 3, (0, 1): 8, (1, 1): 9}
    for (row, col), value in expected.items():
        assert res.iloc[row, col] == value

    stage = Schematize(['c', 'b'])
    res = stage(df)
    assert list(res.columns) == ['c', 'b']
    expected = {(0, 0): 8, (1, 0): 9, (0, 1): 4, (1, 1): 6}
    for (row, col), value in expected.items():
        assert res.iloc[row, col] == value

    # unknown column 'g' must fail the stage's precondition
    stage = Schematize(['a', 'g'])
    with pytest.raises(FailedPreconditionError):
        stage(df)
| 23.714286 | 72 | 0.571084 |
73e66f2200986a9c8e9ce37d9c22e9a32b812074 | 789 | py | Python | arjuna/engine/data/record.py | ChandraMouliDisturbs/arjuna | 4965622fbb01a5e5b6459110c413accc5c483424 | [
"Apache-2.0"
] | null | null | null | arjuna/engine/data/record.py | ChandraMouliDisturbs/arjuna | 4965622fbb01a5e5b6459110c413accc5c483424 | [
"Apache-2.0"
] | null | null | null | arjuna/engine/data/record.py | ChandraMouliDisturbs/arjuna | 4965622fbb01a5e5b6459110c413accc5c483424 | [
"Apache-2.0"
] | null | null | null | # This file is a part of Arjuna
# Copyright 2015-2020 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna.tpi.data.record import DataRecord
class DummyDataRecord(DataRecord):
def __init__(self):
super().__init__(process=False) | 34.304348 | 74 | 0.757921 |
73e67842fde253a90d4c258da79532085b5d4b1a | 2,101 | py | Python | conduit/tests/test_parameterization.py | elenimath/saber | 71acab9798cf3aee1c4d64b09453e5234f8fdf1e | [
"Apache-2.0"
] | 12 | 2018-05-14T17:43:18.000Z | 2021-11-16T04:03:33.000Z | conduit/tests/test_parameterization.py | elenimath/saber | 71acab9798cf3aee1c4d64b09453e5234f8fdf1e | [
"Apache-2.0"
] | 34 | 2019-05-06T19:13:36.000Z | 2021-05-06T19:12:35.000Z | conduit/tests/test_parameterization.py | elenimath/saber | 71acab9798cf3aee1c4d64b09453e5234f8fdf1e | [
"Apache-2.0"
] | 3 | 2019-10-08T17:42:17.000Z | 2021-07-28T05:52:02.000Z |
import unittest
import yaml
import json
import os
import itertools
import numpy as np
from conduit.utils.parameterization import parameterize
from conduit.tests.testing_utils import load_test_data
class TestParameterization(unittest.TestCase):
    """Tests for conduit.utils.parameterization.parameterize."""

    def setUp(self):
        # fixture with 'metaparam1' / 'metaparam2' sweep definitions
        self._test_data = load_test_data('test_parameterization.yml')

    def test_parameterize_single(self):
        """One metaparameter sweeps a single variable across all steps."""
        data = {"metaparam1": self._test_data['metaparam1']}
        p = parameterize(data)
        expected_dict_format = {
            "step1": {
                "param1": "{a}"
            },
            "step2": {
                "param1": "{a}"
            },
            "step3": {
                "param1": "{a}"
            }
        }
        for i, step in enumerate(p):
            self.assertDictLike(expected_dict_format, step, a=0.1 * i)

    def test_parameterize_multiple(self):
        """Two metaparameters produce the cartesian product of their grids."""
        data = {
            "metaparam1": self._test_data['metaparam1'],
            "metaparam2": self._test_data['metaparam2'],
        }
        p = parameterize(data)
        expected_dict_format = {
            "step1": {
                "param1": "{a}",
                "param2": "{b}"
            },
            "step2": {
                "param1": "{a}",
            },
            "step3": {
                "param1": "{a}",
            }
        }
        # grids assumed to match the fixture file's sweep ranges
        vals = list(itertools.product(np.arange(0.0, 1, 0.1), np.arange(0.0, 0.2, 0.1)))
        self.assertEqual(len(p), len(vals))
        for step, (a, b) in zip(p, vals):
            self.assertDictLike(expected_dict_format, step, a=a, b=b)

    def assertDictLike(self, d1, d2, *args, **kwargs):
        """Assert d2 equals d1 after substituting d1's `{}` placeholders.

        Both dicts are round-tripped through YAML so that equivalent
        scalar representations compare equal.
        """
        yaml.Dumper.ignore_aliases = lambda *args: True  # dump repeats verbatim
        d1str = yaml.dump(d1, default_flow_style=False)
        d2str = yaml.dump(d2, default_flow_style=False)
        d1str = d1str.format(*args, **kwargs)
        # safe_load: yaml.load without an explicit Loader is deprecated
        # (removed in PyYAML 6) and unsafe on untrusted input.
        d1l = yaml.safe_load(d1str)
        d2l = yaml.safe_load(d2str)
        self.assertEqual(d1l, d2l)
if __name__ == "__main__":
unittest.main() | 30.014286 | 87 | 0.519752 |
73e6b51c58711541532e5955ad2c0be42fbbe9e7 | 3,343 | py | Python | melody/constraints/equal_pitch_constraint.py | dpazel/music_rep | 2f9de9b98b13df98f1a0a2120b84714725ce527e | [
"MIT"
] | 1 | 2021-05-06T19:45:54.000Z | 2021-05-06T19:45:54.000Z | melody/constraints/equal_pitch_constraint.py | dpazel/music_rep | 2f9de9b98b13df98f1a0a2120b84714725ce527e | [
"MIT"
] | null | null | null | melody/constraints/equal_pitch_constraint.py | dpazel/music_rep | 2f9de9b98b13df98f1a0a2120b84714725ce527e | [
"MIT"
] | null | null | null | """
File: equal_pitch_policy.py
Purpose: Defines a two note policy where the second note's pitch must equal first note's pitch.
"""
from melody.constraints.abstract_constraint import AbstractConstraint
from structure.note import Note
from tonalmodel.pitch_scale import PitchScale
class EqualPitchConstraint(AbstractConstraint):
    """
    Multi-note constraint asserting that all mapped notes share the same pitch.

    Actors are v_notes; the constraint holds when every actor's mapped note
    has the same diatonic pitch (enharmonic equivalents allowed — see verify).
    """
    def __init__(self, equal_notes):
        """
        Constructor.

        :param equal_notes: list of all v_notes that map to notes; the mapped
            notes must all have the same pitch. At least two are required.
        :raises Exception: if fewer than two notes are given.
        """
        AbstractConstraint.__init__(self, equal_notes)

        if len(equal_notes) <= 1:
            raise Exception('EqualNotePolicy must have two or v-more notes.')

    def clone(self, new_actors=None):
        # Copy this constraint, optionally rebinding it to a new actor list.
        return EqualPitchConstraint(new_actors if new_actors is not None else self.actors)

    def verify(self, p_map):
        """
        Check that all mapped actors have identical pitches.

        :param p_map: mapping from v_notes to mapped-note wrappers; every
            actor of this constraint must be a key.
        :return: True if all mapped pitches agree; False if any actor is
            unmapped (note is None) or pitches differ.
        :raises Exception: if an actor is missing from p_map.
        """
        equal_notes = self.actors
        for v_note in equal_notes:
            if v_note not in p_map:
                raise Exception('Improper parameter map in equal note constraints.')
            if p_map[v_note].note is None:
                return False

        # We compare diatonic distances, as the notes may be enharmonic due to differing tonalities.
        for i in range(0, len(equal_notes) - 1):
            if p_map[equal_notes[i]].note.diatonic_pitch.diatonic_distance != \
                    p_map[equal_notes[i + 1]].note.diatonic_pitch.diatonic_distance:
                return False
        return True

    def values(self, p_map, v_note):
        """
        Return the candidate note set for v_note under this constraint:
        all tonal pitches when nothing is assigned yet, the already-assigned
        note if v_note is assigned, otherwise a note matching the first
        assigned actor's pitch (enharmonically adjusted to v_note's tonality).
        """
        assigned = p_map.assigned_actors(self)
        unassigned = p_map.unassigned_actors(self)
        if len(assigned) == 0:
            # No actor fixed yet: any tonal pitch in range is a candidate.
            pitches = p_map.all_tonal_pitches(v_note)
            return {Note(p, v_note.base_duration, v_note.num_dots) for p in pitches}

        if v_note in assigned:
            return {p_map[v_note].note}
        if v_note not in unassigned:
            raise Exception('{0} is not in actor list of equal pitch constraints.'.format(v_note.note))

        return EqualPitchConstraint.compute_note(p_map, assigned[0], v_note)

    @staticmethod
    def compute_note(p_map, assigned_note, unassigned_note):
        """
        For an assigned note and an unassigned note, return for unassigned a
        note with the same pitch as assigned, but with a pitch spelling
        enharmonic to the unassigned note's tonality.

        :param p_map: mapping from v_notes to mapped-note wrappers.
        :param assigned_note: v_note whose mapped pitch is already fixed.
        :param unassigned_note: v_note to produce a candidate note for.
        :return: singleton set containing the computed Note.
        """
        # select a pitch representation closest to the tonality, if it exists.
        policy_context = p_map[unassigned_note].policy_context
        pitch = p_map[assigned_note].note.diatonic_pitch
        for p in pitch.enharmonics():
            for t in PitchScale(policy_context.harmonic_context.tonality, policy_context.pitch_range).tone_scale:
                if p.diatonic_tone == t:
                    pitch = p
                    break

        actual_note = Note(pitch, unassigned_note.base_duration, unassigned_note.num_dots)
        return {actual_note}

    def __str__(self):
        # Short debug tag: "e.p.p" + comma-joined actor reprs.
        note_str = ','.join([str(x) for x in self.actors])
        return 'e.p.p{0}'.format(note_str)
| 37.988636 | 113 | 0.64822 |
73e6cc6359764d42fe0a659613e617637178d799 | 5,560 | py | Python | city_scrapers/spiders/chi_library.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | city_scrapers/spiders/chi_library.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | city_scrapers/spiders/chi_library.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
All spiders should yield data shaped according to the Open Civic Data
specification (http://docs.opencivicdata.org/en/latest/data/event.html).
"""
import re
import requests
import json
import datetime
from city_scrapers.spider import Spider
class Chi_librarySpider(Spider):
    """Scrapes the Chicago Public Library board meeting schedule page."""

    name = 'chi_library'
    long_name = 'Chicago Public Library'
    allowed_domains = ['https://www.chipublib.org/']
    start_urls = ['https://www.chipublib.org/board-of-directors/board-meeting-schedule/']

    def __init__(self, session=None):
        """
        Initialize a spider with a session object to use in the
        _get_lib_info function.

        :param session: optional requests.Session to reuse (e.g. injected in
            tests). FIX: the previous default ``session=requests.Session()``
            was a mutable default argument, evaluated once at import time and
            shared by every spider instance; a fresh session is now created
            per instance when none is supplied.
        """
        self.session = session if session is not None else requests.Session()

    def parse(self, response):
        """
        `parse` should always `yield` a dict that follows the `Open Civic Data
        event standard <http://docs.opencivicdata.org/en/latest/data/event.html>`_.

        Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
        needs.
        """
        # The following turns the HTML glob into one list of strings per
        # event. The first line is *always* the date and the last line is
        # *always* the address. Events with 4 lines join lines 2 and 3 to
        # form the location name (see find_name); otherwise the middle line
        # is the location name.
        events = response.css('div.entry-content p').extract()
        year = response.css('div.entry-content h2').extract()

        # compile once; used for every event and the year heading
        tag_re = re.compile('<.*?>')

        def cleanhtml(raw_html):
            # strip HTML tags, leaving only text content
            return tag_re.sub('', raw_html)

        all_clean_events = [cleanhtml(val).splitlines() for val in events]

        # grab general information for event description
        description_str = ' '.join(all_clean_events[0] + all_clean_events[1])
        # remove first two informational lines from events array
        events_only = all_clean_events[2:]

        # get library info from City of Chicago API
        lib_info = self._get_lib_info()

        # the year heading applies to all listed meetings; hoist out of loop
        yr = cleanhtml(year[0])
        for item in events_only:
            start_time = self._parse_start(item, yr)
            data = {
                '_type': 'event',
                'name': 'Chicago Public Library Board Meeting',
                'description': description_str,
                'classification': 'Board meeting',
                'start_time': start_time,
                'end_time': None,  # no end time listed
                'all_day': False,  # default is false
                'timezone': 'America/Chicago',
                'status': self._parse_status(item),  # default is tentative, but there is no status info on site
                'location': self._parse_location(item, lib_info),
                'sources': self._parse_sources(response)
            }
            data['id'] = self._generate_id(data)
            yield data

    def _get_lib_info(self):
        """
        Returns a list of dictionaries of information about each library
        from the City of Chicago's API.
        """
        r = self.session.get("https://data.cityofchicago.org/resource/psqp-6rmg.json")
        return json.loads(r.text)

    def _parse_classification(self, item):
        """
        Parse or generate classification (e.g. town hall).
        """
        return 'Not classified'

    def _parse_status(self, item):
        """
        Parse or generate status of meeting. Can be one of:

        * cancelled
        * tentative
        * confirmed
        * passed

        @TODO determine correct status; the site exposes none, so this
        always returns 'tentative'.
        """
        return 'tentative'

    def find_name(self, li):
        """Return the location name: lines 2-3 joined for 4-line events,
        otherwise the single middle line."""
        if len(li) == 4:
            return ', '.join(li[1:3])
        else:
            return li[1]

    def _parse_location(self, item, lib_info):
        """
        Parse or generate location. Url, latitude and longitude are all
        optional and may be more trouble than they're worth to collect.
        """
        return {
            'url': None,
            'name': self.find_name(item),
            'coordinates': {
                'latitude': None,
                'longitude': None,
            },
            'address': self._parse_address(item, lib_info)
        }

    def _parse_address(self, item, lib_info):
        """
        Compare the event's address line to the library API addresses until a
        match is found, then concatenate the address line with city/state/zip.

        NOTE(review): implicitly returns None when no library matches —
        confirm callers tolerate a None address.
        """
        # last line is the address: index 3 for 4-line events, else index 2
        if len(item) == 4:
            addr = 3
        else:
            addr = 2
        for i in range(len(lib_info)):
            if item[addr] == lib_info[i]['address']:
                match = lib_info[i]
                return match['address'] + ', ' + match['city'] + ' ' + match['state'] + ' ' + match['zip']

    def _parse_start(self, item, year):
        """
        Parse start date and time from the event's first line plus the
        page-level year heading (format e.g. 'Tuesday January 15 9 am 2019').
        """
        # TODO: turn every event array's first string into correct date format
        date = item[0]
        date = date.replace(',', '')
        date = date.replace('.', '')
        date = date + ' ' + year
        datetime_object = datetime.datetime.strptime(date, '%A %B %d %I %p %Y')
        return self._naive_datetime_to_tz(datetime_object)

    def _parse_sources(self, response):
        """
        Parse sources.
        """
        return [{'url': response.url, 'note': ''}]
| 34.968553 | 112 | 0.577518 |
73e6d49701842df30038213a510474181808b907 | 1,810 | py | Python | src/CompartmentalSystems/example_smooth_model_runs.py | goujou/CompartmentalSystems | 4724555c33f11395ddc32738e8dfed7349ee155f | [
"MIT"
] | null | null | null | src/CompartmentalSystems/example_smooth_model_runs.py | goujou/CompartmentalSystems | 4724555c33f11395ddc32738e8dfed7349ee155f | [
"MIT"
] | null | null | null | src/CompartmentalSystems/example_smooth_model_runs.py | goujou/CompartmentalSystems | 4724555c33f11395ddc32738e8dfed7349ee155f | [
"MIT"
] | null | null | null | import numpy as np
from sympy import symbols
from . import example_smooth_reservoir_models as ESRM
from .smooth_model_run import SmoothModelRun
def critics():
    """Build the example SmoothModelRun for the 'critics' reservoir model."""
    sym_tuple = symbols("t k_01 k_10 k_0o k_1o")
    t, k_01, k_10, k_0o, k_1o = sym_tuple
    model = ESRM.critics(sym_tuple)
    # numeric values for the rate symbols
    parameter_dict = {k_0o: 0.01, k_1o: 0.08, k_01: 0.09, k_10: 1}
    x0 = np.array([0.001, 0.001])
    time_grid = np.linspace(0, 100, 1000)
    return SmoothModelRun(model, parameter_dict, x0, time_grid)
def nonlinear_two_pool():
    """Build the example SmoothModelRun for the nonlinear two-pool model."""
    sym_tuple = symbols("t k_01 k_10 k_0o k_1o")
    t, k_01, k_10, k_0o, k_1o = sym_tuple
    model = ESRM.nonlinear_two_pool(sym_tuple)

    # now create the modelrun
    rate_values = {
        k_01: 1/100,
        k_10: 1/100,
        k_0o: 1/2,
        k_1o: 1/2
    }
    time_grid = np.linspace(0, 20, 1600)  # time grid forward
    x0 = np.array([1, 2])
    return SmoothModelRun(model, rate_values, x0, time_grid)
def emanuel_1():
    """Build the example SmoothModelRun for the 5-pool Emanuel model."""
    sym_tuple = symbols(
        """
        I_1 I_3 x_1 x_2 x_3 x_4 x_5 t F_1 F_2 F_3 F_4 F_5
        F_21 F_41 F_42 F_52 F_43 F_53 F_54
        """
    )
    (I_1, I_3, x_1, x_2, x_3, x_4, x_5, t, F_1, F_2, F_3, F_4, F_5,
     F_21, F_41, F_42, F_52, F_43, F_53, F_54) = sym_tuple
    model = ESRM.emanuel(sym_tuple)

    # now create the modelrun: inputs, pool fluxes and transfer coefficients
    parameter_dict = {
        I_1: 77, I_3: 36,
        F_1: 2.081, F_2: 0.0686, F_3: 0.5217, F_4: 0.5926, F_5: 9.813e-3,
        F_21: 0.8378, F_41: 0.5676, F_42: 0.0322, F_52: 4.425e-3,
        F_43: 0.1739, F_53: 0.0870, F_54: 0.0370
    }
    x0 = np.array(
        [37.00144, 451.89224, 69.00518, 80.2446, 1118.12122]
    )
    time_grid = np.arange(0, (10+(1/365)), 1/365)  # time grid forward
    return SmoothModelRun(model, parameter_dict, x0, time_grid)
| 30.166667 | 73 | 0.603867 |
73e6fbc6cab087e31cba335b15c12100556ea575 | 69,705 | py | Python | pymatgen/ext/matproj.py | chunweizhu/pymatgen | acfe5899ee50add1e2a0dd6385ee4fba78122e0f | [
"MIT"
] | 921 | 2015-01-25T22:17:05.000Z | 2022-03-27T20:58:38.000Z | pymatgen/ext/matproj.py | chunweizhu/pymatgen | acfe5899ee50add1e2a0dd6385ee4fba78122e0f | [
"MIT"
] | 1,631 | 2015-01-05T21:05:04.000Z | 2022-03-31T18:40:17.000Z | pymatgen/ext/matproj.py | chunweizhu/pymatgen | acfe5899ee50add1e2a0dd6385ee4fba78122e0f | [
"MIT"
] | 851 | 2015-01-01T17:38:00.000Z | 2022-03-31T02:14:07.000Z | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to interface with the Materials Project REST
API v2 to enable the creation of data structures and pymatgen objects using
Materials Project data.
To make use of the Materials API, you need to be a registered user of the
Materials Project, and obtain an API key by going to your dashboard at
https://www.materialsproject.org/dashboard.
"""
import itertools
import json
import logging
import platform
import re
import sys
import warnings
from typing import List
from enum import Enum, unique
from time import sleep
import requests
from monty.json import MontyDecoder, MontyEncoder
from monty.serialization import dumpfn
from pymatgen.core import SETTINGS, SETTINGS_FILE, yaml
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.surface import get_symmetrically_equivalent_miller_indices
from pymatgen.entries.computed_entries import ComputedEntry, ComputedStructureEntry
from pymatgen.entries.exp_entries import ExpEntry
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.sequence import PBar, get_chunks
from pymatgen.core import __version__ as PMG_VERSION
logger = logging.getLogger(__name__)
@unique
class TaskType(Enum):
    """Calculation task types available in the Materials Project.

    Each value is the exact human-readable task-type string used by the
    MP database; GGA and GGA+U variants exist for most task kinds.
    """

    # structure optimizations
    GGA_OPT = "GGA Structure Optimization"
    GGAU_OPT = "GGA+U Structure Optimization"
    SCAN_OPT = "SCAN Structure Optimization"
    # non-self-consistent band-structure runs (line / uniform k-meshes)
    GGA_LINE = "GGA NSCF Line"
    GGAU_LINE = "GGA+U NSCF Line"
    GGA_UNIFORM = "GGA NSCF Uniform"
    GGAU_UNIFORM = "GGA+U NSCF Uniform"
    # static (single-point) calculations
    GGA_STATIC = "GGA Static"
    GGAU_STATIC = "GGA+U Static"
    # static dielectric calculations
    GGA_STATIC_DIEL = "GGA Static Dielectric"
    GGAU_STATIC_DIEL = "GGA+U Static Dielectric"
    # deformation (elasticity) calculations
    GGA_DEF = "GGA Deformation"
    GGAU_DEF = "GGA+U Deformation"
    LDA_STATIC_DIEL = "LDA Static Dielectric"
class MPRester:
"""
A class to conveniently interface with the Materials Project REST
interface. The recommended way to use MPRester is with the "with" context
manager to ensure that sessions are properly closed after usage::
with MPRester("API_KEY") as m:
do_something
MPRester uses the "requests" package, which provides for HTTP connection
pooling. All connections are made via https for security.
For more advanced uses of the Materials API, please consult the API
documentation at https://github.com/materialsproject/mapidoc.
"""
supported_properties = (
"energy",
"energy_per_atom",
"volume",
"formation_energy_per_atom",
"nsites",
"unit_cell_formula",
"pretty_formula",
"is_hubbard",
"elements",
"nelements",
"e_above_hull",
"hubbards",
"is_compatible",
"spacegroup",
"task_ids",
"band_gap",
"density",
"icsd_id",
"icsd_ids",
"cif",
"total_magnetization",
"material_id",
"oxide_type",
"tags",
"elasticity",
)
supported_task_properties = (
"energy",
"energy_per_atom",
"volume",
"formation_energy_per_atom",
"nsites",
"unit_cell_formula",
"pretty_formula",
"is_hubbard",
"elements",
"nelements",
"e_above_hull",
"hubbards",
"is_compatible",
"spacegroup",
"band_gap",
"density",
"icsd_id",
"cif",
)
    def __init__(
        self,
        api_key=None,
        endpoint=None,
        notify_db_version=True,
        include_user_agent=True,
    ):
        """
        Args:
            api_key (str): A String API key for accessing the MaterialsProject
                REST interface. Please obtain your API key at
                https://www.materialsproject.org/dashboard. If this is None,
                the code will check if there is a "PMG_MAPI_KEY" setting.
                If so, it will use that environment variable. This makes
                easier for heavy users to simply add this environment variable to
                their setups and MPRester can then be called without any arguments.
            endpoint (str): Url of endpoint to access the MaterialsProject REST
                interface. Defaults to the standard Materials Project REST
                address at "https://materialsproject.org/rest/v2", but
                can be changed to other urls implementing a similar interface.
            notify_db_version (bool): If True, the current MP database version will
                be retrieved and logged locally in the ~/.pmgrc.yaml. If the database
                version changes, you will be notified. The current database version is
                also printed on instantiation. These local logs are not sent to
                materialsproject.org and are not associated with your API key, so be
                aware that a notification may not be presented if you run MPRester
                from multiple computing environments.
            include_user_agent (bool): If True, will include a user agent with the
                HTTP request including information on pymatgen and system version
                making the API request. This helps MP support pymatgen users, and
                is similar to what most web browsers send with each page request.
                Set to False to disable the user agent.
        """
        # Resolve the API key: explicit argument wins over the PMG_MAPI_KEY setting.
        if api_key is not None:
            self.api_key = api_key
        else:
            self.api_key = SETTINGS.get("PMG_MAPI_KEY", "")
        # Resolve the endpoint the same way; warn on non-default endpoints.
        if endpoint is not None:
            self.preamble = endpoint
        else:
            self.preamble = SETTINGS.get("PMG_MAPI_ENDPOINT", "https://materialsproject.org/rest/v2")

        if self.preamble != "https://materialsproject.org/rest/v2":
            warnings.warn("Non-default endpoint used: {}".format(self.preamble))

        # One pooled HTTPS session for the rester's lifetime (closed in __exit__).
        self.session = requests.Session()
        self.session.headers = {"x-api-key": self.api_key}
        if include_user_agent:
            # Browser-style UA string: pymatgen, Python and OS versions.
            pymatgen_info = "pymatgen/" + PMG_VERSION
            python_info = "Python/{}.{}.{}".format(
                sys.version_info.major, sys.version_info.minor, sys.version_info.micro
            )
            platform_info = "{}/{}".format(platform.system(), platform.release())
            self.session.headers["user-agent"] = "{} ({} {})".format(pymatgen_info, python_info, platform_info)

        if notify_db_version:
            db_version = self.get_database_version()
            logger.debug(f"Connection established to Materials Project database, version {db_version}.")

            # Load the local settings file to track which DB versions were seen.
            try:
                with open(SETTINGS_FILE, "rt") as f:
                    d = yaml.safe_load(f)
            except IOError:
                d = {}

            # safe_load may return None for an empty file; normalize to a dict.
            d = d if d else {}

            if "MAPI_DB_VERSION" not in d:
                d["MAPI_DB_VERSION"] = {"LOG": {}, "LAST_ACCESSED": None}

            # store a log of what database versions are being connected to
            if db_version not in d["MAPI_DB_VERSION"]["LOG"]:
                d["MAPI_DB_VERSION"]["LOG"][db_version] = 1
            else:
                d["MAPI_DB_VERSION"]["LOG"][db_version] += 1

            # alert user if db version changed
            last_accessed = d["MAPI_DB_VERSION"]["LAST_ACCESSED"]
            if last_accessed and last_accessed != db_version:
                print(
                    f"This database version has changed from the database last accessed ({last_accessed}).\n"
                    f"Please see release notes on materialsproject.org for information about what has changed."
                )
            d["MAPI_DB_VERSION"]["LAST_ACCESSED"] = db_version

            # write out new database log if possible
            # bare except is not ideal (perhaps a PermissionError, etc.) but this is not critical
            # and should be allowed to fail regardless of reason
            try:
                dumpfn(d, SETTINGS_FILE)
            except Exception:
                pass
def __enter__(self):
"""
Support for "with" context.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Support for "with" context.
"""
self.session.close()
    def _make_request(self, sub_url, payload=None, method="GET", mp_decode=True):
        """Issue a request against the REST endpoint and unwrap the response.

        Args:
            sub_url (str): path appended to self.preamble.
            payload: query params (GET) or form data (POST).
            method (str): "GET" or "POST".
            mp_decode (bool): if True, decode with MontyDecoder so pymatgen
                objects embedded in the JSON are reconstituted.

        Returns:
            The "response" field of the API envelope.

        Raises:
            MPRestError: on API-reported errors, non-200/400 status codes, or
                any transport/decoding failure (original exception text plus
                response content is included).
        """
        response = None
        url = self.preamble + sub_url
        try:
            if method == "POST":
                response = self.session.post(url, data=payload, verify=True)
            else:
                response = self.session.get(url, params=payload, verify=True)
            # 400 is included because the API returns structured error
            # envelopes with that status.
            if response.status_code in [200, 400]:
                if mp_decode:
                    data = json.loads(response.text, cls=MontyDecoder)
                else:
                    data = json.loads(response.text)
                if data["valid_response"]:
                    if data.get("warning"):
                        warnings.warn(data["warning"])
                    return data["response"]
                raise MPRestError(data["error"])

            raise MPRestError("REST query returned with error status code {}".format(response.status_code))

        except Exception as ex:
            # Attach the raw response body when available to aid debugging.
            msg = "{}. Content: {}".format(str(ex), response.content) if hasattr(response, "content") else str(ex)
            raise MPRestError(msg)
def get_database_version(self):
"""
The Materials Project database is periodically updated and has a
database version associated with it. When the database is updated,
consolidated data (information about "a material") may and does
change, while calculation data about a specific calculation task
remains unchanged and available for querying via its task_id.
The database version is set as a date in the format YYYY-MM-DD,
where "-DD" may be optional. An additional numerical suffix
might be added if multiple releases happen on the same day.
Returns: database version as a string
"""
d = self._make_request("/api_check")
return d["version"]["db"]
def get_materials_id_from_task_id(self, task_id):
"""
Returns a new MP materials id from a task id (which can be
equivalent to an old materials id)
Args:
task_id (str): A task id.
Returns:
materials_id (str)
"""
return self._make_request("/materials/mid_from_tid/%s" % task_id)
def get_materials_id_references(self, material_id):
"""
Returns all references for a materials id.
Args:
material_id (str): A material id.
Returns:
BibTeX (str)
"""
return self._make_request("/materials/%s/refs" % material_id)
def get_data(self, chemsys_formula_id, data_type="vasp", prop=""):
"""
Flexible method to get any data using the Materials Project REST
interface. Generally used by other methods for more specific queries.
Format of REST return is *always* a list of dict (regardless of the
number of pieces of data returned. The general format is as follows:
[{"material_id": material_id, "property_name" : value}, ...]
This is generally a call to
https://www.materialsproject.org/rest/v2/materials/vasp/<prop>.
See https://github.com/materialsproject/mapidoc for details.
Args:
chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
data_type (str): Type of data to return. Currently can either be
"vasp" or "exp".
prop (str): Property to be obtained. Should be one of the
MPRester.supported_task_properties. Leave as empty string for a
general list of useful properties.
"""
sub_url = "/materials/%s/%s" % (chemsys_formula_id, data_type)
if prop:
sub_url += "/" + prop
return self._make_request(sub_url)
def get_materials_ids(self, chemsys_formula):
"""
Get all materials ids for a formula or chemsys.
Args:
chemsys_formula (str): A chemical system (e.g., Li-Fe-O),
or formula (e.g., Fe2O3).
Returns:
([str]) List of all materials ids.
"""
return self._make_request("/materials/%s/mids" % chemsys_formula, mp_decode=False)
def get_doc(self, materials_id):
"""
Get the entire data document for one materials id. Use this judiciously.
REST Endpoint: https://www.materialsproject.org/materials/<mp-id>/doc.
Args:
materials_id (str): E.g., mp-1143 for Al2O3
Returns:
Dict of json document of all data that is displayed on a materials
details page.
"""
return self._make_request("/materials/%s/doc" % materials_id, mp_decode=False)
def get_xas_data(self, material_id, absorbing_element):
"""
Get X-ray absorption spectroscopy data for absorbing element in the
structure corresponding to a material_id. Only X-ray Absorption Near Edge
Structure (XANES) for K-edge is supported.
REST Endpoint:
https://www.materialsproject.org/materials/<mp-id>/xas/<absorbing_element>.
Args:
material_id (str): E.g., mp-1143 for Al2O3
absorbing_element (str): The absorbing element in the corresponding
structure. E.g., Al in Al2O3
"""
element_list = self.get_data(material_id, prop="elements")[0]["elements"]
if absorbing_element not in element_list:
raise ValueError(
"{} element not contained in corresponding structure with "
"mp_id: {}".format(absorbing_element, material_id)
)
data = self._make_request(
"/materials/{}/xas/{}".format(material_id, absorbing_element),
mp_decode=False,
)
return data[0]
def get_task_data(self, chemsys_formula_id, prop=""):
"""
Flexible method to get any data using the Materials Project REST
interface. Generally used by other methods for more specific queries.
Unlike the :func:`get_data`_, this method queries the task collection
for specific run information.
Format of REST return is *always* a list of dict (regardless of the
number of pieces of data returned. The general format is as follows:
[{"material_id": material_id, "property_name" : value}, ...]
Args:
chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
prop (str): Property to be obtained. Should be one of the
MPRester.supported_properties. Leave as empty string for a
general list of useful properties.
"""
sub_url = "/tasks/%s" % chemsys_formula_id
if prop:
sub_url += "/" + prop
return self._make_request(sub_url)
def get_structures(self, chemsys_formula_id, final=True):
"""
Get a list of Structures corresponding to a chemical system, formula,
or materials_id.
Args:
chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
final (bool): Whether to get the final structure, or the initial
(pre-relaxation) structure. Defaults to True.
Returns:
List of Structure objects.
"""
prop = "final_structure" if final else "initial_structure"
data = self.get_data(chemsys_formula_id, prop=prop)
return [d[prop] for d in data]
    def find_structure(self, filename_or_structure):
        """
        Finds matching structures on the Materials Project site.

        Args:
            filename_or_structure: filename (any format Structure.from_file
                understands) or Structure object

        Returns:
            A list of matching materials project ids for structure.

        Raises:
            MPRestError: on bad input type, API-reported errors, or any
                transport failure (all exceptions are re-wrapped).
        """
        try:
            # Normalize the input to a Structure.
            if isinstance(filename_or_structure, str):
                s = Structure.from_file(filename_or_structure)
            elif isinstance(filename_or_structure, Structure):
                s = filename_or_structure
            else:
                raise MPRestError("Provide filename or Structure object.")
            # POST the serialized structure; MontyEncoder handles pymatgen types.
            payload = {"structure": json.dumps(s.as_dict(), cls=MontyEncoder)}
            response = self.session.post("{}/find_structure".format(self.preamble), data=payload)

            # 400 is included because the API returns structured error
            # envelopes with that status.
            if response.status_code in [200, 400]:
                resp = json.loads(response.text, cls=MontyDecoder)
                if resp["valid_response"]:
                    return resp["response"]
                raise MPRestError(resp["error"])
            raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
        except Exception as ex:
            raise MPRestError(str(ex))
def get_entries(
    self,
    chemsys_formula_id_criteria,
    compatible_only=True,
    inc_structure=None,
    property_data=None,
    conventional_unit_cell=False,
    sort_by_e_above_hull=False,
):
    """
    Get a list of ComputedEntries or ComputedStructureEntries corresponding
    to a chemical system, formula, or materials_id or full criteria.

    Args:
        chemsys_formula_id_criteria (str/dict): A chemical system
            (e.g., Li-Fe-O), or formula (e.g., Fe2O3) or materials_id
            (e.g., mp-1234) or full Mongo-style dict criteria.
        compatible_only (bool): Whether to return only "compatible"
            entries. Compatible entries are entries that have been
            processed using the MaterialsProject2020Compatibility class,
            which performs adjustments to allow mixing of GGA and GGA+U
            calculations for more accurate phase diagrams and reaction
            energies.
        inc_structure (str): If None, entries returned are
            ComputedEntries. If inc_structure="initial",
            ComputedStructureEntries with initial structures are returned.
            Otherwise, ComputedStructureEntries with final structures
            are returned.
        property_data (list): Specify additional properties to include in
            entry.data. If None, no data. Should be a subset of
            supported_properties.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell
        sort_by_e_above_hull (bool): Whether to sort the list of entries by
            e_above_hull (will query e_above_hull as a property_data if True).

    Returns:
        List of ComputedEntry or ComputedStructureEntry objects.
    """
    # TODO: This is a very hackish way of doing this. It should be fixed
    # on the REST end.
    # Parameters needed to reconstruct the calculation provenance for
    # compatibility processing (run type, Hubbard U values, POTCARs, ...).
    params = [
        "run_type",
        "is_hubbard",
        "pseudo_potential",
        "hubbards",
        "potcar_symbols",
        "oxide_type",
    ]
    props = ["energy", "unit_cell_formula", "task_id"] + params
    if sort_by_e_above_hull:
        # NOTE: this appends to the caller-supplied property_data list
        # in place when one is given.
        if property_data and "e_above_hull" not in property_data:
            property_data.append("e_above_hull")
        elif not property_data:
            property_data = ["e_above_hull"]
    if property_data:
        props += property_data
    if inc_structure:
        if inc_structure == "initial":
            props.append("initial_structure")
        else:
            props.append("structure")
    # Strings are parsed into Mongo-style criteria; dicts pass through.
    if not isinstance(chemsys_formula_id_criteria, dict):
        criteria = MPRester.parse_criteria(chemsys_formula_id_criteria)
    else:
        criteria = chemsys_formula_id_criteria
    data = self.query(criteria, props)
    entries = []
    for d in data:
        # Rebuild the "<functional> <label>" POTCAR symbol strings the
        # compatibility schemes expect.
        d["potcar_symbols"] = [
            "%s %s" % (d["pseudo_potential"]["functional"], l) for l in d["pseudo_potential"]["labels"]
        ]
        # NOTE: rebinding `data` here shadows the query-result list; the
        # for-loop keeps iterating the original list via its iterator.
        data = {"oxide_type": d["oxide_type"]}
        if property_data:
            data.update({k: d[k] for k in property_data})
        if not inc_structure:
            e = ComputedEntry(
                d["unit_cell_formula"],
                d["energy"],
                parameters={k: d[k] for k in params},
                data=data,
                entry_id=d["task_id"],
            )
        else:
            prim = d["initial_structure"] if inc_structure == "initial" else d["structure"]
            if conventional_unit_cell:
                s = SpacegroupAnalyzer(prim).get_conventional_standard_structure()
                # Rescale total energy to the (possibly larger)
                # conventional cell by the atom-count ratio.
                energy = d["energy"] * (len(s) / len(prim))
            else:
                s = prim.copy()
                energy = d["energy"]
            e = ComputedStructureEntry(
                s,
                energy,
                parameters={k: d[k] for k in params},
                data=data,
                entry_id=d["task_id"],
            )
        entries.append(e)
    if compatible_only:
        from pymatgen.entries.compatibility import MaterialsProject2020Compatibility

        # suppress the warning about missing oxidation states
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="Failed to guess oxidation states.*")
            # clean=True strips any pre-existing energy adjustments
            # before applying the 2020 scheme; incompatible entries
            # are dropped from the returned list.
            entries = MaterialsProject2020Compatibility().process_entries(entries, clean=True)
    if sort_by_e_above_hull:
        entries = sorted(entries, key=lambda entry: entry.data["e_above_hull"])
    return entries
def get_pourbaix_entries(self, chemsys, solid_compat="MaterialsProject2020Compatibility"):
    """
    A helper function to get all entries necessary to generate
    a pourbaix diagram from the rest interface.

    Args:
        chemsys (str or [str]): Chemical system string comprising element
            symbols separated by dashes, e.g., "Li-Fe-O" or List of element
            symbols, e.g., ["Li", "Fe", "O"].
        solid_compat: Compatiblity scheme used to pre-process solid DFT energies prior to applying aqueous
            energy adjustments. May be passed as a class (e.g. MaterialsProject2020Compatibility) or an instance
            (e.g., MaterialsProject2020Compatibility()). If None, solid DFT energies are used as-is.
            Default: MaterialsProject2020Compatibility

    Returns:
        A list of PourbaixEntry objects (ion and solid entries).
    """
    # imports are not top-level due to expense
    from pymatgen.analysis.phase_diagram import PhaseDiagram
    from pymatgen.analysis.pourbaix_diagram import IonEntry, PourbaixEntry
    from pymatgen.core.ion import Ion
    from pymatgen.entries.compatibility import (
        Compatibility,
        MaterialsProjectAqueousCompatibility,
        MaterialsProject2020Compatibility,
        MaterialsProjectCompatibility,
    )

    # Resolve the solid-compatibility scheme: accept the two known scheme
    # names as strings, or any Compatibility instance.
    # NOTE(review): despite the docstring, solid_compat=None falls into the
    # ValueError branch here — confirm intended behavior.
    if solid_compat == "MaterialsProjectCompatibility":
        self.solid_compat = MaterialsProjectCompatibility()
    elif solid_compat == "MaterialsProject2020Compatibility":
        self.solid_compat = MaterialsProject2020Compatibility()
    elif isinstance(solid_compat, Compatibility):
        self.solid_compat = solid_compat
    else:
        raise ValueError(
            "Solid compatibility can only be 'MaterialsProjectCompatibility', "
            "'MaterialsProject2020Compatibility', or an instance of a Compatability class"
        )
    pbx_entries = []
    if isinstance(chemsys, str):
        chemsys = chemsys.split("-")
    # Get ion entries first, because certain ions have reference
    # solids that aren't necessarily in the chemsys (Na2SO4)
    url = "/pourbaix_diagram/reference_data/" + "-".join(chemsys)
    ion_data = self._make_request(url)
    ion_ref_comps = [Composition(d["Reference Solid"]) for d in ion_data]
    ion_ref_elts = list(itertools.chain.from_iterable(i.elements for i in ion_ref_comps))
    # Always include O and H so the hydrate/oxide reference states exist.
    ion_ref_entries = self.get_entries_in_chemsys(
        list(set([str(e) for e in ion_ref_elts] + ["O", "H"])),
        property_data=["e_above_hull"],
        compatible_only=False,
    )
    # suppress the warning about supplying the required energies; they will be calculated from the
    # entries we get from MPRester
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message="You did not provide the required O2 and H2O energies.",
        )
        compat = MaterialsProjectAqueousCompatibility(solid_compat=self.solid_compat)
    # suppress the warning about missing oxidation states
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="Failed to guess oxidation states.*")
        ion_ref_entries = compat.process_entries(ion_ref_entries)
    ion_ref_pd = PhaseDiagram(ion_ref_entries)

    # position the ion energies relative to most stable reference state
    for n, i_d in enumerate(ion_data):
        ion = Ion.from_formula(i_d["Name"])
        refs = [e for e in ion_ref_entries if e.composition.reduced_formula == i_d["Reference Solid"]]
        if not refs:
            raise ValueError("Reference solid not contained in entry list")
        stable_ref = sorted(refs, key=lambda x: x.data["e_above_hull"])[0]
        rf = stable_ref.composition.get_reduced_composition_and_factor()[1]
        # Shift between our computed formation energy of the reference
        # solid and the tabulated reference solid energy, per formula unit.
        solid_diff = ion_ref_pd.get_form_energy(stable_ref) - i_d["Reference solid energy"] * rf
        elt = i_d["Major_Elements"][0]
        # Scale the shift by the ratio of the major element's amount in
        # the ion vs. in the reference solid.
        correction_factor = ion.composition[elt] / stable_ref.composition[elt]
        energy = i_d["Energy"] + solid_diff * correction_factor
        ion_entry = IonEntry(ion, energy)
        pbx_entries.append(PourbaixEntry(ion_entry, "ion-{}".format(n)))

    # Construct the solid pourbaix entries from filtered ion_ref entries
    extra_elts = set(ion_ref_elts) - {Element(s) for s in chemsys} - {Element("H"), Element("O")}
    for entry in ion_ref_entries:
        entry_elts = set(entry.composition.elements)
        # Ensure no OH chemsys or extraneous elements from ion references
        if not (entry_elts <= {Element("H"), Element("O")} or extra_elts.intersection(entry_elts)):
            # Create new computed entry
            form_e = ion_ref_pd.get_form_energy(entry)
            new_entry = ComputedEntry(entry.composition, form_e, entry_id=entry.entry_id)
            pbx_entry = PourbaixEntry(new_entry)
            pbx_entries.append(pbx_entry)
    return pbx_entries
def get_structure_by_material_id(self, material_id, final=True, conventional_unit_cell=False):
    """
    Get a Structure corresponding to a material_id.

    Args:
        material_id (str): Materials Project material_id (a string,
            e.g., mp-1234).
        final (bool): Whether to get the final structure, or the initial
            (pre-relaxation) structure. Defaults to True.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell

    Returns:
        Structure object.

    Raises:
        MPRestError: if the id is unknown and cannot be mapped to a
            canonical materials id.
    """
    prop = "final_structure" if final else "initial_structure"
    data = self.get_data(material_id, prop=prop)
    if not data:
        # The id may be a calculation task id rather than a canonical
        # materials id; try to resolve it before giving up.
        try:
            new_material_id = self.get_materials_id_from_task_id(material_id)
            if new_material_id:
                warnings.warn(
                    "The calculation task {} is mapped to canonical mp-id {}, "
                    "so structure for {} returned. "
                    "This is not an error, see documentation. "
                    "If original task data for {} is required, "
                    "use get_task_data(). To find the canonical mp-id from a task id "
                    "use get_materials_id_from_task_id().".format(
                        material_id, new_material_id, new_material_id, material_id
                    )
                )
                # Bug fix: propagate `final` and `conventional_unit_cell`
                # to the recursive call. Previously they were dropped, so
                # e.g. asking for an initial structure via a task id
                # silently returned the final, primitive structure.
                return self.get_structure_by_material_id(
                    new_material_id,
                    final=final,
                    conventional_unit_cell=conventional_unit_cell,
                )
        except MPRestError:
            raise MPRestError(
                "material_id {} unknown, if this seems like "
                "an error please let us know at "
                "matsci.org/materials-project".format(material_id)
            )
    if conventional_unit_cell:
        data[0][prop] = SpacegroupAnalyzer(data[0][prop]).get_conventional_standard_structure()
    return data[0][prop]
def get_entry_by_material_id(
    self,
    material_id,
    compatible_only=True,
    inc_structure=None,
    property_data=None,
    conventional_unit_cell=False,
):
    """
    Get a ComputedEntry corresponding to a material_id.

    Args:
        material_id (str): Materials Project material_id (a string,
            e.g., mp-1234).
        compatible_only (bool): Whether to return only "compatible"
            entries, i.e. entries processed with the
            MaterialsProject2020Compatibility class, which adjusts
            energies to allow mixing of GGA and GGA+U calculations.
        inc_structure (str): If None, a ComputedEntry is returned. If
            "final", a ComputedStructureEntry with the final structure is
            returned; otherwise one with the initial structure.
        property_data (list): Additional properties to include in
            entry.data. Should be a subset of supported_properties.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell

    Returns:
        ComputedEntry or ComputedStructureEntry object.
    """
    # Delegate to the bulk query and keep only the first hit.
    return self.get_entries(
        material_id,
        compatible_only=compatible_only,
        inc_structure=inc_structure,
        property_data=property_data,
        conventional_unit_cell=conventional_unit_cell,
    )[0]
def get_dos_by_material_id(self, material_id):
    """
    Get a Dos corresponding to a material_id.

    REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/dos

    Args:
        material_id (str): Materials Project material_id (a string,
            e.g., mp-1234).

    Returns:
        A Dos object.
    """
    documents = self.get_data(material_id, prop="dos")
    return documents[0]["dos"]
def get_bandstructure_by_material_id(self, material_id, line_mode=True):
    """
    Get a BandStructure corresponding to a material_id.

    REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure or
    https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure_uniform

    Args:
        material_id (str): Materials Project material_id.
        line_mode (bool): If True, fetch a BandStructureSymmLine object
            (default). If False, return the uniform band structure.

    Returns:
        A BandStructure object.
    """
    bs_prop = "bandstructure" if line_mode else "bandstructure_uniform"
    documents = self.get_data(material_id, prop=bs_prop)
    return documents[0][bs_prop]
def get_phonon_dos_by_material_id(self, material_id):
    """
    Get phonon density of states data corresponding to a material_id.

    Args:
        material_id (str): Materials Project material_id.

    Returns:
        CompletePhononDos: A phonon DOS object.
    """
    endpoint = "/materials/{}/phonondos".format(material_id)
    return self._make_request(endpoint)
def get_phonon_bandstructure_by_material_id(self, material_id):
    """
    Get phonon dispersion data corresponding to a material_id.

    Args:
        material_id (str): Materials Project material_id.

    Returns:
        PhononBandStructureSymmLine: A phonon band structure.
    """
    endpoint = "/materials/{}/phononbs".format(material_id)
    return self._make_request(endpoint)
def get_phonon_ddb_by_material_id(self, material_id):
    """
    Get ABINIT Derivative Data Base (DDB) output for phonon calculations.

    Args:
        material_id (str): Materials Project material_id.

    Returns:
        str: ABINIT DDB file as a string.
    """
    endpoint = "/materials/{}/abinit_ddb".format(material_id)
    return self._make_request(endpoint)
def get_entries_in_chemsys(
    self,
    elements,
    compatible_only=True,
    inc_structure=None,
    property_data=None,
    conventional_unit_cell=False,
):
    """
    Helper method to get a list of ComputedEntries in a chemical system.

    For example, elements = ["Li", "Fe", "O"] will return a list of all
    entries in the Li-Fe-O chemical system, i.e., all LixOy,
    FexOy, LixFey, LixFeyOz, Li, Fe and O phases. Extremely useful for
    creating phase diagrams of entire chemical systems.

    Args:
        elements (str or [str]): Chemical system string comprising element
            symbols separated by dashes, e.g., "Li-Fe-O" or List of element
            symbols, e.g., ["Li", "Fe", "O"].
        compatible_only (bool): Whether to return only "compatible"
            entries, i.e. entries processed with the
            MaterialsProject2020Compatibility class, which adjusts
            energies to allow mixing of GGA and GGA+U calculations.
        inc_structure (str): If None, ComputedEntries are returned. If
            "final", ComputedStructureEntries with final structures are
            returned; otherwise ones with initial structures.
        property_data (list): Additional properties to include in
            entry.data. Should be a subset of supported_properties.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell

    Returns:
        List of ComputedEntries.
    """
    if isinstance(elements, str):
        elements = elements.split("-")
    # Enumerate every non-empty subsystem of the requested chemical
    # system as a sorted, dash-joined chemsys string.
    all_chemsyses = [
        "-".join(sorted(subset))
        for size in range(1, len(elements) + 1)
        for subset in itertools.combinations(elements, size)
    ]
    return self.get_entries(
        {"chemsys": {"$in": all_chemsyses}},
        compatible_only=compatible_only,
        inc_structure=inc_structure,
        property_data=property_data,
        conventional_unit_cell=conventional_unit_cell,
    )
def get_exp_thermo_data(self, formula):
    """
    Get a list of ThermoData objects associated with a formula using the
    Materials Project REST interface.

    Args:
        formula (str): A formula to search for.

    Returns:
        List of ThermoData objects.
    """
    # Experimental thermochemistry lives under the "exp" data type.
    return self.get_data(formula, data_type="exp")
def get_exp_entry(self, formula):
    """
    Returns an ExpEntry object, which is the experimental equivalent of a
    ComputedEntry and can be used for analyses using experimental data.

    Args:
        formula (str): A formula to search for.

    Returns:
        An ExpEntry object.
    """
    thermo_data = self.get_exp_thermo_data(formula)
    return ExpEntry(Composition(formula), thermo_data)
def query(
    self,
    criteria,
    properties,
    chunk_size=500,
    max_tries_per_chunk=5,
    mp_decode=True,
):
    r"""
    Performs an advanced query using MongoDB-like syntax for directly
    querying the Materials Project database. This allows one to perform
    queries which are otherwise too cumbersome to perform using the standard
    convenience methods.

    Please consult the Materials API documentation at
    https://github.com/materialsproject/mapidoc, which provides a
    comprehensive explanation of the document schema used in the Materials
    Project (supported criteria and properties) and guidance on how best to
    query for the relevant information you need.

    For queries that request data on more than CHUNK_SIZE materials at once,
    this method will chunk a query by first retrieving a list of material
    IDs that satisfy CRITERIA, and then merging the criteria with a
    restriction to one chunk of materials at a time of size CHUNK_SIZE. You
    can opt out of this behavior by setting CHUNK_SIZE=0. To guard against
    intermittent server errors in the case of many chunks per query,
    possibly-transient server errors will result in re-trying a give chunk
    up to MAX_TRIES_PER_CHUNK times.

    Args:
        criteria (str/dict): Criteria of the query as a string or
            mongo-style dict.

            If string, it supports a powerful but simple string criteria.
            E.g., "Fe2O3" means search for materials with reduced_formula
            Fe2O3. Wild cards are also supported. E.g., "\\*2O" means get
            all materials whose formula can be formed as \\*2O, e.g.,
            Li2O, K2O, etc.

            Other syntax examples:
                mp-1234: Interpreted as a Materials ID.
                Fe2O3 or *2O3: Interpreted as reduced formulas.
                Li-Fe-O or *-Fe-O: Interpreted as chemical systems.

            You can mix and match with spaces, which are interpreted as
            "OR". E.g. "mp-1234 FeO" means query for all compounds with
            reduced formula FeO or with materials_id mp-1234.

            Using a full dict syntax, even more powerful queries can be
            constructed. For example, {"elements":{"$in":["Li",
            "Na", "K"], "$all": ["O"]}, "nelements":2} selects all Li, Na
            and K oxides. {"band_gap": {"$gt": 1}} selects all materials
            with band gaps greater than 1 eV.
        properties (list): Properties to request for as a list. For
            example, ["formula", "formation_energy_per_atom"] returns
            the formula and formation energy per atom.
        chunk_size (int): Number of materials for which to fetch data at a
            time. More data-intensive properties may require smaller chunk
            sizes. Use chunk_size=0 to force no chunking -- this is useful
            when fetching only properties such as 'material_id'.
        max_tries_per_chunk (int): How many times to re-try fetching a given
            chunk when the server gives a 5xx error (e.g. a timeout error).
        mp_decode (bool): Whether to do a decoding to a Pymatgen object
            where possible. In some cases, it might be useful to just get
            the raw python dict, i.e., set to False.

    Returns:
        List of results. E.g.,
        [{u'formula': {u'O': 1, u'Li': 2.0}},
        {u'formula': {u'Na': 2.0, u'O': 2.0}},
        {u'formula': {u'K': 1, u'O': 3.0}},
        ...]
    """
    if not isinstance(criteria, dict):
        criteria = self.parse_criteria(criteria)
    payload = {
        "criteria": json.dumps(criteria),
        "properties": json.dumps(properties),
    }
    # chunk_size == 0 means "no chunking": issue a single request.
    if chunk_size == 0:
        return self._make_request("/query", payload=payload, method="POST", mp_decode=mp_decode)

    # Count-only probe to decide whether chunking is needed at all.
    count_payload = payload.copy()
    count_payload["options"] = json.dumps({"count_only": True})
    num_results = self._make_request("/query", payload=count_payload, method="POST")
    if num_results <= chunk_size:
        return self._make_request("/query", payload=payload, method="POST", mp_decode=mp_decode)

    data = []
    # First fetch all matching ids (unchunked), then re-query one chunk
    # of ids at a time with the original criteria narrowed by "$in".
    mids = [d["material_id"] for d in self.query(criteria, ["material_id"], chunk_size=0)]
    chunks = get_chunks(mids, size=chunk_size)
    progress_bar = PBar(total=len(mids))
    for chunk in chunks:
        chunk_criteria = criteria.copy()
        chunk_criteria.update({"material_id": {"$in": chunk}})
        num_tries = 0
        while num_tries < max_tries_per_chunk:
            try:
                data.extend(
                    self.query(
                        chunk_criteria,
                        properties,
                        chunk_size=0,
                        mp_decode=mp_decode,
                    )
                )
                break
            except MPRestError as e:
                # pylint: disable=E1101
                # Only 5xx (server-side) errors are treated as transient.
                match = re.search(r"error status code (\d+)", str(e))
                if match:
                    if not match.group(1).startswith("5"):
                        raise e
                    num_tries += 1
                    print(
                        "Unknown server error. Trying again in five "
                        "seconds (will try at most {} times)...".format(max_tries_per_chunk)
                    )
                    sleep(5)
                # NOTE(review): an MPRestError without a status code in its
                # message never increments num_tries, so this loop can retry
                # indefinitely — confirm whether that is intended.
        # NOTE(review): if all retries are exhausted, the chunk is silently
        # skipped and the result is missing its data — confirm.
        progress_bar.update(len(chunk))
    return data
def submit_structures(
    self,
    structures,
    authors,
    projects=None,
    references="",
    remarks=None,
    data=None,
    histories=None,
    created_at=None,
):
    """
    Submits a list of structures to the Materials Project as SNL files.
    The argument list mirrors the arguments for the StructureNL object,
    except that a list of structures with the same metadata is used as an
    input.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        structures: A list of Structure objects
        authors (list): List of {"name":'', "email":''} dicts,
            *list* of Strings as 'John Doe <johndoe@gmail.com>',
            or a single String with commas separating authors
        projects ([str]): List of Strings ['Project A', 'Project B'].
            This applies to all structures.
        references (str): A String in BibTeX format. Again, this applies to
            all structures.
        remarks ([str]): List of Strings ['Remark A', 'Remark B']
        data ([dict]): A list of free form dict. Namespaced at the root
            level with an underscore, e.g. {"_materialsproject":<custom
            data>}. The length of data should be the same as the list of
            structures if not None.
        histories: List of list of dicts - [[{'name':'', 'url':'',
            'description':{}}], ...] The length of histories should be the
            same as the list of structures if not None.
        created_at (datetime): A datetime object

    Returns:
        A list of inserted submission ids.
    """
    from pymatgen.util.provenance import StructureNL

    snl_list = StructureNL.from_structures(
        structures,
        authors,
        projects,
        references,
        remarks,
        data,
        histories,
        created_at,
    )
    # Bug fix: the documented "list of inserted submission ids" was
    # previously discarded — submit_snl's return value must be returned.
    return self.submit_snl(snl_list)
def submit_snl(self, snl):
    """
    Submits a list of StructureNL to the Materials Project site.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        snl (StructureNL/[StructureNL]): A single StructureNL, or a list
            of StructureNL objects

    Returns:
        A list of inserted submission ids.

    Raises:
        MPRestError
    """
    try:
        # Normalize a lone StructureNL into a one-element list.
        snl_list = [snl] if not isinstance(snl, list) else snl
        serialized = [item.as_dict() for item in snl_list]
        payload = {"snl": json.dumps(serialized, cls=MontyEncoder)}
        response = self.session.post("{}/snl/submit".format(self.preamble), data=payload)
        if response.status_code not in [200, 400]:
            raise MPRestError(
                "REST error with status code {} and error {}".format(response.status_code, response.text)
            )
        resp = json.loads(response.text, cls=MontyDecoder)
        if not resp["valid_response"]:
            raise MPRestError(resp["error"])
        if resp.get("warning"):
            warnings.warn(resp["warning"])
        return resp["inserted_ids"]
    except Exception as ex:
        raise MPRestError(str(ex))
def delete_snl(self, snl_ids):
    """
    Delete earlier submitted SNLs.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        snl_ids: List of SNL ids.

    Raises:
        MPRestError
    """
    try:
        payload = {"ids": json.dumps(snl_ids)}
        response = self.session.post("{}/snl/delete".format(self.preamble), data=payload)
        if response.status_code not in [200, 400]:
            raise MPRestError(
                "REST error with status code {} and error {}".format(response.status_code, response.text)
            )
        resp = json.loads(response.text, cls=MontyDecoder)
        if not resp["valid_response"]:
            raise MPRestError(resp["error"])
        if resp.get("warning"):
            warnings.warn(resp["warning"])
        return resp
    except Exception as ex:
        raise MPRestError(str(ex))
def query_snl(self, criteria):
    """
    Query for submitted SNLs.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        criteria (dict): Query criteria.

    Returns:
        A dict, with a list of submitted SNLs in the "response" key.

    Raises:
        MPRestError
    """
    try:
        payload = {"criteria": json.dumps(criteria)}
        response = self.session.post("{}/snl/query".format(self.preamble), data=payload)
        if response.status_code not in [200, 400]:
            raise MPRestError(
                "REST error with status code {} and error {}".format(response.status_code, response.text)
            )
        # Note: plain json.loads here — SNL query results are not
        # decoded into pymatgen objects.
        resp = json.loads(response.text)
        if not resp["valid_response"]:
            raise MPRestError(resp["error"])
        if resp.get("warning"):
            warnings.warn(resp["warning"])
        return resp["response"]
    except Exception as ex:
        raise MPRestError(str(ex))
def submit_vasp_directory(
    self,
    rootdir,
    authors,
    projects=None,
    references="",
    remarks=None,
    master_data=None,
    master_history=None,
    created_at=None,
    ncpus=None,
):
    """
    Assimilates all vasp run directories beneath a particular
    directory using BorgQueen to obtain structures, and then submits them
    to the Materials Project as SNL files. VASP related meta data like
    initial structure and final energies are automatically incorporated.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        rootdir (str): Rootdir to start assimilating VASP runs from.
        authors: *List* of {"name":'', "email":''} dicts,
            *list* of Strings as 'John Doe <johndoe@gmail.com>',
            or a single String with commas separating authors. The same
            list of authors should apply to all runs.
        projects ([str]): List of Strings ['Project A', 'Project B'].
            This applies to all structures.
        references (str): A String in BibTeX format. Again, this applies to
            all structures.
        remarks ([str]): List of Strings ['Remark A', 'Remark B']
        master_data (dict): A free form dict. Namespaced at the root
            level with an underscore, e.g. {"_materialsproject":<custom
            data>}. This data is added to all structures detected in the
            directory, in addition to other vasp data on a per structure
            basis.
        master_history: A master history to be added to all entries.
        created_at (datetime): A datetime object
        ncpus (int): Number of cpus to use in using BorgQueen to
            assimilate. Defaults to None, which means serial.
    """
    from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
    from pymatgen.apps.borg.queen import BorgQueen

    # Crawl rootdir for VASP runs; each assimilated run becomes a
    # ComputedStructureEntry carrying the filename and initial structure.
    drone = VaspToComputedEntryDrone(inc_structure=True, data=["filename", "initial_structure"])
    queen = BorgQueen(drone, number_of_drones=ncpus)
    queen.parallel_assimilate(rootdir)
    structures = []
    metadata = []
    histories = []
    for e in queen.get_data():
        structures.append(e.structure)
        # Per-structure VASP provenance, namespaced under "_vasp".
        m = {
            "_vasp": {
                "parameters": e.parameters,
                "final_energy": e.energy,
                "final_energy_per_atom": e.energy_per_atom,
                "initial_structure": e.data["initial_structure"].as_dict(),
            }
        }
        # NOTE(review): histories is only appended for runs that carry a
        # "history" parameter, so len(histories) may not match
        # len(structures) — confirm submit_structures tolerates this.
        if "history" in e.parameters:
            histories.append(e.parameters["history"])
        if master_data is not None:
            m.update(master_data)
        metadata.append(m)
    if master_history is not None:
        # NOTE(review): list repetition produces a flat list of length
        # len(master_history) * len(structures); presumably master_history
        # is a one-element list of history lists — verify against callers.
        histories = master_history * len(structures)
    return self.submit_structures(
        structures,
        authors,
        projects=projects,
        references=references,
        remarks=remarks,
        data=metadata,
        histories=histories,
        created_at=created_at,
    )
def get_stability(self, entries):
    """
    Returns the stability of all entries, as computed server-side from
    the Materials Project phase diagram endpoint.

    Args:
        entries: Entries to evaluate (serialized via MontyEncoder).

    Raises:
        MPRestError
    """
    try:
        payload = {"entries": json.dumps(entries, cls=MontyEncoder)}
        response = self.session.post(
            "{}/phase_diagram/calculate_stability".format(self.preamble),
            data=payload,
        )
        if response.status_code not in [200, 400]:
            raise MPRestError(
                "REST error with status code {} and error {}".format(response.status_code, response.text)
            )
        resp = json.loads(response.text, cls=MontyDecoder)
        if not resp["valid_response"]:
            raise MPRestError(resp["error"])
        if resp.get("warning"):
            warnings.warn(resp["warning"])
        return resp["response"]
    except Exception as ex:
        raise MPRestError(str(ex))
def get_cohesive_energy(self, material_id, per_atom=False):
    """
    Gets the cohesive for a material (eV per formula unit). Cohesive energy
    is defined as the difference between the bulk energy and the sum of
    total DFT energy of isolated atoms for atom elements in the bulk.

    Args:
        material_id (str): Materials Project material_id, e.g. 'mp-123'.
        per_atom (bool): Whether or not to return cohesive energy per atom

    Returns:
        Cohesive energy (eV).
    """
    entry = self.get_entry_by_material_id(material_id)
    # Bulk energy per (integer) formula unit.
    ebulk = entry.energy / entry.composition.get_integer_formula_and_factor()[1]
    comp_dict = entry.composition.reduced_composition.as_dict()

    # Sum isolated-atom reference energies over the reduced formula.
    isolated_sum = 0
    natoms = 0
    for el, amt in comp_dict.items():
        task = self._make_request("/element/%s/tasks/isolated_atom" % (el), mp_decode=False)[0]
        isolated_sum += task["output"]["final_energy_per_atom"] * amt
        natoms += amt

    ecoh_per_formula = isolated_sum - ebulk
    if per_atom:
        return ecoh_per_formula / natoms
    return ecoh_per_formula
def get_reaction(self, reactants, products):
    """
    Gets a reaction from the Materials Project.

    Args:
        reactants ([str]): List of formulas
        products ([str]): List of formulas

    Returns:
        rxn
    """
    payload = {"reactants[]": reactants, "products[]": products}
    return self._make_request("/reaction", payload=payload, mp_decode=False)
def get_substrates(self, material_id, number=50, orient=None):
    """
    Get a substrate list for a material id. The list is in order of
    increasing elastic energy if a elastic tensor is available for
    the material_id. Otherwise the list is in order of increasing
    matching area.

    Args:
        material_id (str): Materials Project material_id, e.g. 'mp-123'.
        orient (list) : substrate orientation to look for
        number (int) : number of substrates to return
            n=0 returns all available matches

    Returns:
        list of dicts with substrate matches
    """
    endpoint = "/materials/{}/substrates?n={}".format(material_id, number)
    if orient:
        # Miller indices are passed as a space-separated query parameter.
        endpoint += "&orient={}".format(" ".join(str(i) for i in orient))
    return self._make_request(endpoint)
def get_all_substrates(self):
    """
    Gets the list of all possible substrates considered in the
    Materials Project substrate database

    Returns:
        list of material_ids corresponding to possible substrates
    """
    endpoint = "/materials/all_substrate_ids"
    return self._make_request(endpoint)
def get_surface_data(self, material_id, miller_index=None, inc_structures=False):
    """
    Gets surface data for a material. Useful for Wulff shapes.

    Reference for surface data:

    Tran, R., Xu, Z., Radhakrishnan, B., Winston, D., Sun, W., Persson, K.
    A., & Ong, S. P. (2016). Data Descripter: Surface energies of elemental
    crystals. Scientific Data, 3(160080), 1–13.
    http://dx.doi.org/10.1038/sdata.2016.80

    Args:
        material_id (str): Materials Project material_id, e.g. 'mp-123'.
        miller_index (list of integer): The miller index of the surface.
            e.g., [3, 2, 1]. If miller_index is provided, only one dictionary
            of this specific plane will be returned.
        inc_structures (bool): Include final surface slab structures.
            These are unnecessary for Wulff shape construction.

    Returns:
        Surface data for material. Energies are given in SI units (J/m^2).
    """
    endpoint = "/materials/{}/surfaces".format(material_id)
    if inc_structures:
        endpoint += "?include_structures=true"

    if not miller_index:
        return self._make_request(endpoint)

    # A specific plane was requested: match it against all surfaces that
    # are symmetrically equivalent in the conventional cell.
    surfaces = self._make_request(endpoint)["surfaces"]
    conventional_cell = self.get_structure_by_material_id(material_id, conventional_unit_cell=True)
    equivalent = get_symmetrically_equivalent_miller_indices(conventional_cell, miller_index)
    for surface in surfaces:
        if tuple(surface["miller_index"]) in equivalent:
            return surface
    raise ValueError("Bad miller index.")
def get_wulff_shape(self, material_id):
    """
    Constructs a Wulff shape for a material.

    Args:
        material_id (str): Materials Project material_id, e.g. 'mp-123'.

    Returns:
        pymatgen.analysis.wulff.WulffShape
    """
    from pymatgen.analysis.wulff import WulffShape
    from pymatgen.symmetry.analyzer import SpacegroupAnalyzer

    structure = self.get_structure_by_material_id(material_id)
    surfaces = self.get_surface_data(material_id)["surfaces"]
    lattice = SpacegroupAnalyzer(structure).get_conventional_standard_structure().lattice

    energy_by_miller = {}
    for surface in surfaces:
        index = tuple(surface["miller_index"])
        # Prefer reconstructed surfaces, which have lower surface energies.
        if index not in energy_by_miller or surface["is_reconstructed"]:
            energy_by_miller[index] = surface["surface_energy"]

    millers = list(energy_by_miller.keys())
    energies = [energy_by_miller[m] for m in millers]
    return WulffShape(lattice, millers, energies)
def get_gb_data(
    self,
    material_id=None,
    pretty_formula=None,
    chemsys=None,
    sigma=None,
    gb_plane=None,
    rotation_axis=None,
    include_work_of_separation=False,
):
    """
    Gets grain boundary data for a material.

    Args:
        material_id (str): Materials Project material_id, e.g., 'mp-129'.
        pretty_formula (str): The formula of metals. e.g., 'Fe'
        chemsys (str): Chemical system restriction.
        sigma(int): The sigma value of a certain type of grain boundary
        gb_plane(list of integer): The Miller index of grain
            boundary plane. e.g., [1, 1, 1]
        rotation_axis(list of integer): The Miller index of rotation
            axis. e.g., [1, 0, 0], [1, 1, 0], and [1, 1, 1]
            Sigma value is determined by the combination of rotation axis and
            rotation angle. The five degrees of freedom (DOF) of one grain boundary
            include: rotation axis (2 DOFs), rotation angle (1 DOF), and grain
            boundary plane (2 DOFs).
        include_work_of_separation (bool): whether to include the work of separation
            (in unit of (J/m^2)). If you want to query the work of separation, please
            specify the material_id.

    Returns:
        A list of grain boundaries that satisfy the query conditions (sigma, gb_plane).
        Energies are given in SI units (J/m^2).
    """
    # Miller indices are serialized as comma-separated strings.
    if gb_plane:
        gb_plane = ",".join(str(i) for i in gb_plane)
    if rotation_axis:
        rotation_axis = ",".join(str(i) for i in rotation_axis)

    payload = {
        "material_id": material_id,
        "pretty_formula": pretty_formula,
        "chemsys": chemsys,
        "sigma": sigma,
        "gb_plane": gb_plane,
        "rotation_axis": rotation_axis,
    }

    if not (include_work_of_separation and material_id):
        return self._make_request("/grain_boundaries", payload=payload)

    # Augment each grain boundary with the work of separation, derived
    # from the surface energy of the matching plane.
    gb_list = self._make_request("/grain_boundaries", payload=payload)
    for gb_dict in gb_list:
        plane = gb_dict["gb_plane"]
        surface_energy = self.get_surface_data(material_id=material_id, miller_index=plane)["surface_energy"]
        # calculate the work of separation
        gb_dict["work_of_separation"] = 2 * surface_energy - gb_dict["gb_energy"]
    return gb_list
def get_interface_reactions(
self,
reactant1,
reactant2,
open_el=None,
relative_mu=None,
use_hull_energy=False,
):
"""
Gets critical reactions between two reactants.
Get critical reactions ("kinks" in the mixing ratio where
reaction products change) between two reactants. See the
`pymatgen.analysis.interface_reactions` module for more info.
Args:
reactant1 (str): Chemical formula for reactant
reactant2 (str): Chemical formula for reactant
open_el (str): Element in reservoir available to system
relative_mu (float): Relative chemical potential of element in
reservoir with respect to pure substance. Must be non-positive.
use_hull_energy (bool): Whether to use the convex hull energy for a
given composition for the reaction energy calculation. If false,
the energy of the ground state structure will be preferred; if a
ground state can not be found for a composition, the convex hull
energy will be used with a warning message.
Returns:
list: list of dicts of form {ratio,energy,rxn} where `ratio` is the
reactant mixing ratio, `energy` is the reaction energy
in eV/atom, and `rxn` is a
`pymatgen.analysis.reaction_calculator.Reaction`.
"""
payload = {
"reactants": " ".join([reactant1, reactant2]),
"open_el": open_el,
"relative_mu": relative_mu,
"use_hull_energy": use_hull_energy,
}
return self._make_request("/interface_reactions", payload=payload, method="POST")
def get_download_info(self, material_ids, task_types=None, file_patterns=None):
"""
get a list of URLs to retrieve raw VASP output files from the NoMaD repository
Args:
material_ids (list): list of material identifiers (mp-id's)
task_types (list): list of task types to include in download (see TaskType Enum class)
file_patterns (list): list of wildcard file names to include for each task
Returns:
a tuple of 1) a dictionary mapping material_ids to task_ids and
task_types, and 2) a list of URLs to download zip archives from
NoMaD repository. Each zip archive will contain a manifest.json with
metadata info, e.g. the task/external_ids that belong to a directory
"""
# task_id's correspond to NoMaD external_id's
task_types = [t.value for t in task_types if isinstance(t, TaskType)] if task_types else []
meta = {}
for doc in self.query({"task_id": {"$in": material_ids}}, ["task_id", "blessed_tasks"]):
for task_type, task_id in doc["blessed_tasks"].items():
if task_types and task_type not in task_types:
continue
mp_id = doc["task_id"]
if meta.get(mp_id) is None:
meta[mp_id] = [{"task_id": task_id, "task_type": task_type}]
else:
meta[mp_id].append({"task_id": task_id, "task_type": task_type})
if not meta:
raise ValueError(f"No tasks found for material id {material_ids}.")
# return a list of URLs for NoMaD Downloads containing the list of files
# for every external_id in `task_ids`
# For reference, please visit https://nomad-lab.eu/prod/rae/api/
# check if these task ids exist on NOMAD
prefix = "https://nomad-lab.eu/prod/rae/api/repo/?"
if file_patterns is not None:
for file_pattern in file_patterns:
prefix += f"file_pattern={file_pattern}&"
prefix += "external_id="
task_ids = [t["task_id"] for tl in meta.values() for t in tl]
nomad_exist_task_ids = self._check_get_download_info_url_by_task_id(prefix=prefix, task_ids=task_ids)
if len(nomad_exist_task_ids) != len(task_ids):
self._print_help_message(nomad_exist_task_ids, task_ids, file_patterns, task_types)
# generate download links for those that exist
prefix = "https://nomad-lab.eu/prod/rae/api/raw/query?"
if file_patterns is not None:
for file_pattern in file_patterns:
prefix += f"file_pattern={file_pattern}&"
prefix += "external_id="
urls = [prefix + tids for tids in nomad_exist_task_ids]
return meta, urls
@staticmethod
def _print_help_message(nomad_exist_task_ids, task_ids, file_patterns, task_types):
non_exist_ids = set(task_ids) - set(nomad_exist_task_ids)
warnings.warn(
f"For file patterns [{file_patterns}] and task_types [{task_types}], \n"
f"the following ids are not found on NOMAD [{list(non_exist_ids)}]. \n"
f"If you need to upload them, please contact Patrick Huck at phuck@lbl.gov"
)
def _check_get_download_info_url_by_task_id(self, prefix, task_ids) -> List[str]:
nomad_exist_task_ids: List[str] = []
prefix = prefix.replace("/raw/query", "/repo/")
for task_id in task_ids:
url = prefix + task_id
if self._check_nomad_exist(url):
nomad_exist_task_ids.append(task_id)
return nomad_exist_task_ids
@staticmethod
def _check_nomad_exist(url) -> bool:
response = requests.get(url=url)
if response.status_code != 200:
return False
content = json.loads(response.text)
if content["pagination"]["total"] == 0:
return False
return True
@staticmethod
def parse_criteria(criteria_string):
"""
Parses a powerful and simple string criteria and generates a proper
mongo syntax criteria.
Args:
criteria_string (str): A string representing a search criteria.
Also supports wild cards. E.g.,
something like "*2O" gets converted to
{'pretty_formula': {'$in': [u'B2O', u'Xe2O', u"Li2O", ...]}}
Other syntax examples:
mp-1234: Interpreted as a Materials ID.
Fe2O3 or *2O3: Interpreted as reduced formulas.
Li-Fe-O or *-Fe-O: Interpreted as chemical systems.
You can mix and match with spaces, which are interpreted as
"OR". E.g., "mp-1234 FeO" means query for all compounds with
reduced formula FeO or with materials_id mp-1234.
Returns:
A mongo query dict.
"""
toks = criteria_string.split()
def parse_sym(sym):
if sym == "*":
return [el.symbol for el in Element]
m = re.match(r"\{(.*)\}", sym)
if m:
return [s.strip() for s in m.group(1).split(",")]
return [sym]
def parse_tok(t):
if re.match(r"\w+-\d+", t):
return {"task_id": t}
if "-" in t:
elements = [parse_sym(sym) for sym in t.split("-")]
chemsyss = []
for cs in itertools.product(*elements):
if len(set(cs)) == len(cs):
# Check for valid symbols
cs = [Element(s).symbol for s in cs]
chemsyss.append("-".join(sorted(cs)))
return {"chemsys": {"$in": chemsyss}}
all_formulas = set()
explicit_els = []
wild_card_els = []
for sym in re.findall(r"(\*[\.\d]*|\{.*\}[\.\d]*|[A-Z][a-z]*)[\.\d]*", t):
if ("*" in sym) or ("{" in sym):
wild_card_els.append(sym)
else:
m = re.match(r"([A-Z][a-z]*)[\.\d]*", sym)
explicit_els.append(m.group(1))
nelements = len(wild_card_els) + len(set(explicit_els))
parts = re.split(r"(\*|\{.*\})", t)
parts = [parse_sym(s) for s in parts if s != ""]
for f in itertools.product(*parts):
c = Composition("".join(f))
if len(c) == nelements:
# Check for valid Elements in keys.
for e in c.keys():
Element(e.symbol)
all_formulas.add(c.reduced_formula)
return {"pretty_formula": {"$in": list(all_formulas)}}
if len(toks) == 1:
return parse_tok(toks[0])
return {"$or": list(map(parse_tok, toks))}
class MPRestError(Exception):
"""
Exception class for MPRestAdaptor.
Raised when the query has problems, e.g., bad query format.
"""
pass
| 40.954759 | 120 | 0.593659 |
73e70175f6717da73eee89411870e07cd2ce6039 | 792 | py | Python | Part2/hash_table.py | ParashRahman/Database-Project | 3a934d82289b58dcf83497b658970b2d336c1fba | [
"Apache-2.0"
] | null | null | null | Part2/hash_table.py | ParashRahman/Database-Project | 3a934d82289b58dcf83497b658970b2d336c1fba | [
"Apache-2.0"
] | null | null | null | Part2/hash_table.py | ParashRahman/Database-Project | 3a934d82289b58dcf83497b658970b2d336c1fba | [
"Apache-2.0"
] | null | null | null | import bsddb
from DB import DB
# HashTable (subclass of DB)
class HashTable(DB):
def __init__(self, db_address):
# Database="Hash_table.db"
# self.address=db.address #contains address to db file's location
self.db = bsddb.hashopen(db_address, 'c')
DB.__init__(self, self.db, db_address)
return
# Traverses entire database and checks if key
# is within constraints.
def retrieve_range( self, low_key, high_key ):
ret = []
current = self.db.first()
while ( True ):
if ( low_key <= current[0] and high_key >= current[0] ):
ret.append( current )
try:
current = self.db.next()
except KeyError:
break
return ret
| 24.75 | 73 | 0.564394 |
73e73809f88ac0ca5abf35d40d4f5139e3b0155e | 1,486 | py | Python | raiden/tests/unit/storage/test_serialization.py | ExchangeUnion/raiden | 2217bcb698fcfce3499dc1f41ad919ed82e8e45f | [
"MIT"
] | null | null | null | raiden/tests/unit/storage/test_serialization.py | ExchangeUnion/raiden | 2217bcb698fcfce3499dc1f41ad919ed82e8e45f | [
"MIT"
] | 12 | 2019-08-09T19:12:17.000Z | 2019-12-05T15:49:29.000Z | raiden/tests/unit/storage/test_serialization.py | ExchangeUnion/raiden | 2217bcb698fcfce3499dc1f41ad919ed82e8e45f | [
"MIT"
] | null | null | null | import pytest
from raiden.storage.serialization.fields import (
BytesField,
OptionalIntegerToStringField,
QueueIdentifierField,
)
from raiden.tests.utils import factories
from raiden.transfer.identifiers import QueueIdentifier
def assert_roundtrip(field, value):
serialized = field._serialize(value, None, None)
assert field._deserialize(serialized, None, None) == value
@pytest.fixture()
def queue_identifier():
return QueueIdentifier(
recipient=factories.make_address(),
canonical_identifier=factories.make_canonical_identifier(),
)
def test_queue_identifier_field_roundtrip(queue_identifier):
assert_roundtrip(QueueIdentifierField(), queue_identifier)
def test_queue_identifier_field_invalid_inputs(queue_identifier):
serialized = QueueIdentifierField()._serialize(queue_identifier, None, None)
wrong_delimiter = serialized.replace("|", ":")
# TODO check for address and chain/channel id validity in QueueIdentifier too, add tests here
for string in (wrong_delimiter,):
with pytest.raises(ValueError):
QueueIdentifierField()._deserialize(string, None, None)
def test_optional_integer_to_string_field_roundtrip():
field = OptionalIntegerToStringField()
assert_roundtrip(field, 42)
assert_roundtrip(field, None)
def test_bytes_field_roundtrip():
field = BytesField()
assert_roundtrip(field, b"foo")
assert_roundtrip(field, b"")
assert_roundtrip(field, None)
| 29.137255 | 97 | 0.76245 |
73e7398c67ff139f2c9f71344e23bf8c64ce19bc | 4,424 | py | Python | SciPy2016/MTwork/inv3d_HPK1/run4/findDiam_inversion.py | simpeg/simpegExamples | 38b8064fb854d809f72b7f1ca8b8096bca696af1 | [
"MIT"
] | 1 | 2021-08-07T13:46:54.000Z | 2021-08-07T13:46:54.000Z | SciPy2016/MTwork/inv3d_HPK1/run4/findDiam_inversion.py | simpeg/simpegExamples | 38b8064fb854d809f72b7f1ca8b8096bca696af1 | [
"MIT"
] | 1 | 2016-07-27T22:20:36.000Z | 2016-07-27T22:20:36.000Z | SciPy2016/MTwork/inv3d_HPK1/run4/findDiam_inversion.py | simpeg/simpegExamples | 38b8064fb854d809f72b7f1ca8b8096bca696af1 | [
"MIT"
] | null | null | null | # Import modules
import numpy as np, sys, os, time, gzip, cPickle as pickle, scipy
sys.path.append('/tera_raid/gudni/gitCodes/simpeg')
sys.path.append('/tera_raid/gudni')
from pymatsolver import MumpsSolver
import SimPEG as simpeg
from SimPEG import NSEM
from numpy.lib import recfunctions as rFunc
# Function to get data and data info
def getDataInfo(MTdata):
dL, freqL, rxTL = [], [], []
for src in MTdata.survey.srcList:
for rx in src.rxList:
dL.append(MTdata[src,rx])
freqL.append(np.ones(rx.nD)*src.freq)
rxTL.extend( ((rx.rxType+' ')*rx.nD).split())
return np.concatenate(dL), np.concatenate(freqL), np.array(rxTL)
# Script to read MT data and run an inversion.
# Load the data
drecAll = np.load('../../MTdataStArr_nsmesh_HKPK1.npy')
# Select larger frequency band for the MT data
indMTFreq = np.sum( [drecAll['freq'] == val for val in np.unique(drecAll['freq'])[5::]] ,axis=0,dtype=bool)
mtRecArr = drecAll[indMTFreq][['freq','x','y','z','tzx','tzy']]
dUse = NSEM.Data.fromRecArray(mtRecArr)
# Extract to survey
survey = dUse.survey
# # Add noise to the data
dobs, freqArr, rxT = getDataInfo(dUse)
# Set the data
survey.dobs = dobs
# Assign std based on- and off-diagonal parts of the impedance tensor
std = np.ones_like(dobs)*.025 # 5% on all off-diagonals
# std[np.array([ ('xx' in l or 'yy' in l) for l in rxT])] = 0.15 # 15% on the on-diagonal
survey.std = np.abs(survey.dobs*std) #+ 0.01*np.linalg.norm(survey.dobs) #survey.dobs*0 + std
# Estimate a floor for the data.
# Use the 10% of the mean of the off-diagonals for each frequency
floor = np.zeros_like(dobs)
offind = np.array([('zxy' in l or 'zyx' in l) for l in rxT],bool)
onind = np.array([('zxx' in l or 'zyy' in l) for l in rxT],bool)
tipind = np.array([('tzx' in l or 'tzy' in l) for l in rxT],bool)
for f in np.unique(freqArr):
freqInd = freqArr == f
floorFreq = floor[freqInd]
offD = np.sort(np.abs(dobs[freqInd*offind]))
floor[freqInd] = 0.0001*np.mean(offD)
onD = np.sort(np.abs(dobs[freqInd*onind]))
floor[freqInd*onind] = 0.1*np.mean(onD)
# Constant floor for the tipper.
floor[freqInd*tipind] = 0.001
# Assign the data weight
Wd = 1./(survey.std + floor)
# Make the mesh
mesh, modDict = simpeg.Mesh.TensorMesh.readVTK('../../nsmesh_HPVK1_inv.vtr')
sigma = modDict['S/m']
# Make the mapping
active = sigma > 9.999e-7
actMap = simpeg.Maps.ActiveCells(mesh, active, np.log(1e-8), nC=mesh.nC)
mappingExpAct = simpeg.Maps.ExpMap(mesh) * actMap
# sigma1d = 1e-8 * mesh.vectorCCz
# sigma1d[mesh.vectorCCz < 750] = 1e-2
sigmaBG = np.ones_like(sigma)*1e-8
sigmaBG[active] = 1e-4
sigma1d = mesh.r(sigmaBG,'CC','CC','M')[0,0,:]
# Make teh starting model
m_0 = np.log(sigmaBG[active])
## Setup the problem object
problem = NSEM.Problem3D_ePrimSec(mesh,mapping=mappingExpAct,sigmaPrimary = sigma1d)
problem.verbose = True
# Change the solver
problem.Solver = MumpsSolver
problem.pair(survey)
## Setup the inversion proceedure
C = simpeg.Utils.Counter()
# Set the optimization
# opt = simpeg.Optimization.ProjectedGNCG(maxIter = 50)
# opt.lower = np.log(1e-5) # Set bounds to
# opt.upper = np.log(10)
opt = simpeg.Optimization.InexactGaussNewton(maxIter = 36)
opt.counter = C
opt.LSshorten = 0.5
opt.remember('xc')
# Need to add to the number of iter per beta.
# Data misfit
dmis = simpeg.DataMisfit.l2_DataMisfit(survey)
dmis.Wd = Wd
# Regularization
# regMap = simpeg.Maps.InjectActiveCellsTopo(mesh,active) # valInactive=
reg = simpeg.Regularization.Tikhonov(mesh,indActive=active)
reg.mref = m_0
reg.alpha_s = 1e-7
reg.alpha_x = 1.
reg.alpha_y = 1.
reg.alpha_z = 1.
reg.alpha_xx = 0.
reg.alpha_yy = 0.
reg.alpha_zz = 0.
# Inversion problem
invProb = simpeg.InvProblem.BaseInvProblem(dmis, reg, opt)
invProb.counter = C
# Beta cooling
beta = simpeg.Directives.BetaSchedule()
beta.coolingRate = 3 # Number of beta iterations
beta.coolingFactor = 8.
betaest = simpeg.Directives.BetaEstimate_ByEig(beta0_ratio=1000.)
# invProb.beta = 1e6.
targmis = simpeg.Directives.TargetMisfit()
targmis.target = 0.5 * survey.nD
# saveModel = simpeg.Directives.SaveModelEveryIteration()
saveDict = simpeg.Directives.SaveOutputDictEveryIteration()
# Create an inversion object
inv = simpeg.Inversion.BaseInversion(invProb, directiveList=[beta,targmis,saveDict])
# Print
print 'Target Misfit is: {:.1f}'.format(targmis.target)
# Run the inversion
mopt = inv.run(m_0) | 33.263158 | 108 | 0.711799 |
73e75ef8295e6d9a5a07f2246991a25d1fca8903 | 262 | py | Python | file_utils.py | andrewlavaia/ParticleSimulation | afa1eaa95b68c1baaa607d9b0ff0f39717ad8e8b | [
"MIT"
] | null | null | null | file_utils.py | andrewlavaia/ParticleSimulation | afa1eaa95b68c1baaa607d9b0ff0f39717ad8e8b | [
"MIT"
] | null | null | null | file_utils.py | andrewlavaia/ParticleSimulation | afa1eaa95b68c1baaa607d9b0ff0f39717ad8e8b | [
"MIT"
] | null | null | null | import yaml
def load_config(file_string):
with open(file_string) as f:
dataMap = yaml.safe_load(f)
return dataMap
def set_config(data):
with open('config.yml', 'w') as outfile:
yaml.safe_dump(data, outfile, default_flow_style=False) | 26.2 | 63 | 0.694656 |
73e7877030afcf43c322e9a3e8a43a7ac0251a0a | 30,890 | py | Python | appengine/findit/first_party/logdog/annotations_pb2.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | appengine/findit/first_party/logdog/annotations_pb2.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | appengine/findit/first_party/logdog/annotations_pb2.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: annotations.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='annotations.proto',
package='milo',
syntax='proto3',
serialized_pb=_b('\n\x11\x61nnotations.proto\x12\x04milo\x1a\x1fgoogle/protobuf/timestamp.proto\"\xd8\x01\n\x0e\x46\x61ilureDetails\x12\'\n\x04type\x18\x01 \x01(\x0e\x32\x19.milo.FailureDetails.Type\x12\x0c\n\x04text\x18\x02 \x01(\t\x12*\n\x14\x66\x61iled_dm_dependency\x18\x03 \x03(\x0b\x32\x0c.milo.DMLink\"c\n\x04Type\x12\x0b\n\x07GENERAL\x10\x00\x12\r\n\tEXCEPTION\x10\x01\x12\t\n\x05INFRA\x10\x02\x12\x18\n\x14\x44M_DEPENDENCY_FAILED\x10\x03\x12\r\n\tCANCELLED\x10\x04\x12\x0b\n\x07\x45XPIRED\x10\x05\"\xbb\x06\n\x04Step\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x07\x63ommand\x18\x02 \x01(\x0b\x32\x12.milo.Step.Command\x12\x1c\n\x06status\x18\x03 \x01(\x0e\x32\x0c.milo.Status\x12-\n\x0f\x66\x61ilure_details\x18\x04 \x01(\x0b\x32\x14.milo.FailureDetails\x12#\n\x07substep\x18\x05 \x03(\x0b\x32\x12.milo.Step.Substep\x12)\n\rstdout_stream\x18\x06 \x01(\x0b\x32\x12.milo.LogdogStream\x12)\n\rstderr_stream\x18\x07 \x01(\x0b\x32\x12.milo.LogdogStream\x12+\n\x07started\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x05\x65nded\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04text\x18\x14 \x03(\t\x12%\n\x08progress\x18\x15 \x01(\x0b\x32\x13.milo.Step.Progress\x12\x18\n\x04link\x18\x16 \x01(\x0b\x32\n.milo.Link\x12\x1f\n\x0bother_links\x18\x17 \x03(\x0b\x32\n.milo.Link\x12%\n\x08property\x18\x18 \x03(\x0b\x32\x13.milo.Step.Property\x1a\x8e\x01\n\x07\x43ommand\x12\x14\n\x0c\x63ommand_line\x18\x01 \x03(\t\x12\x0b\n\x03\x63wd\x18\x02 \x01(\t\x12\x30\n\x07\x65nviron\x18\x03 \x03(\x0b\x32\x1f.milo.Step.Command.EnvironEntry\x1a.\n\x0c\x45nvironEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x61\n\x07Substep\x12\x1a\n\x04step\x18\x01 \x01(\x0b\x32\n.milo.StepH\x00\x12/\n\x11\x61nnotation_stream\x18\x02 \x01(\x0b\x32\x12.milo.LogdogStreamH\x00\x42\t\n\x07substep\x1a,\n\x08Progress\x12\r\n\x05total\x18\x01 \x01(\x05\x12\x11\n\tcompleted\x18\x02 \x01(\x05\x1a\'\n\x08Property\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xbf\x01\n\x04Link\x12\r\n\x05label\x18\x01 \x01(\t\x12\x13\n\x0b\x61lias_label\x18\x02 \x01(\t\x12\r\n\x03url\x18\x03 \x01(\tH\x00\x12+\n\rlogdog_stream\x18\x04 \x01(\x0b\x32\x12.milo.LogdogStreamH\x00\x12-\n\x0eisolate_object\x18\x05 \x01(\x0b\x32\x13.milo.IsolateObjectH\x00\x12\x1f\n\x07\x64m_link\x18\x06 \x01(\x0b\x32\x0c.milo.DMLinkH\x00\x42\x07\n\x05value\"<\n\x0cLogdogStream\x12\x0e\n\x06server\x18\x01 \x01(\t\x12\x0e\n\x06prefix\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\"-\n\rIsolateObject\x12\x0e\n\x06server\x18\x01 \x01(\t\x12\x0c\n\x04hash\x18\x02 \x01(\t\"K\n\x06\x44MLink\x12\x0e\n\x06server\x18\x01 \x01(\t\x12\r\n\x05quest\x18\x02 \x01(\t\x12\x0f\n\x07\x61ttempt\x18\x03 \x01(\x03\x12\x11\n\texecution\x18\x04 \x01(\x03*<\n\x06Status\x12\x0b\n\x07RUNNING\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x0b\n\x07\x46\x41ILURE\x10\x02\x12\x0b\n\x07PENDING\x10\x03\x62\x06proto3')
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='milo.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='RUNNING', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAILURE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PENDING', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1489,
serialized_end=1549,
)
_sym_db.RegisterEnumDescriptor(_STATUS)
Status = enum_type_wrapper.EnumTypeWrapper(_STATUS)
RUNNING = 0
SUCCESS = 1
FAILURE = 2
PENDING = 3
_FAILUREDETAILS_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='milo.FailureDetails.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='GENERAL', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXCEPTION', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INFRA', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DM_DEPENDENCY_FAILED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANCELLED', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXPIRED', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=178,
serialized_end=277,
)
_sym_db.RegisterEnumDescriptor(_FAILUREDETAILS_TYPE)
_FAILUREDETAILS = _descriptor.Descriptor(
name='FailureDetails',
full_name='milo.FailureDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='milo.FailureDetails.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text', full_name='milo.FailureDetails.text', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='failed_dm_dependency', full_name='milo.FailureDetails.failed_dm_dependency', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FAILUREDETAILS_TYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=277,
)
_STEP_COMMAND_ENVIRONENTRY = _descriptor.Descriptor(
name='EnvironEntry',
full_name='milo.Step.Command.EnvironEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='milo.Step.Command.EnvironEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='milo.Step.Command.EnvironEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=875,
serialized_end=921,
)
_STEP_COMMAND = _descriptor.Descriptor(
name='Command',
full_name='milo.Step.Command',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='command_line', full_name='milo.Step.Command.command_line', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cwd', full_name='milo.Step.Command.cwd', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='environ', full_name='milo.Step.Command.environ', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_STEP_COMMAND_ENVIRONENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=779,
serialized_end=921,
)
_STEP_SUBSTEP = _descriptor.Descriptor(
name='Substep',
full_name='milo.Step.Substep',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step', full_name='milo.Step.Substep.step', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='annotation_stream', full_name='milo.Step.Substep.annotation_stream', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='substep', full_name='milo.Step.Substep.substep',
index=0, containing_type=None, fields=[]),
],
serialized_start=923,
serialized_end=1020,
)
_STEP_PROGRESS = _descriptor.Descriptor(
name='Progress',
full_name='milo.Step.Progress',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='total', full_name='milo.Step.Progress.total', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='completed', full_name='milo.Step.Progress.completed', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1022,
serialized_end=1066,
)
_STEP_PROPERTY = _descriptor.Descriptor(
name='Property',
full_name='milo.Step.Property',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='milo.Step.Property.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='milo.Step.Property.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1068,
serialized_end=1107,
)
_STEP = _descriptor.Descriptor(
name='Step',
full_name='milo.Step',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='milo.Step.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='command', full_name='milo.Step.command', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='status', full_name='milo.Step.status', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='failure_details', full_name='milo.Step.failure_details', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='substep', full_name='milo.Step.substep', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stdout_stream', full_name='milo.Step.stdout_stream', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stderr_stream', full_name='milo.Step.stderr_stream', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='started', full_name='milo.Step.started', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ended', full_name='milo.Step.ended', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text', full_name='milo.Step.text', index=9,
number=20, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='progress', full_name='milo.Step.progress', index=10,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='link', full_name='milo.Step.link', index=11,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='other_links', full_name='milo.Step.other_links', index=12,
number=23, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='property', full_name='milo.Step.property', index=13,
number=24, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_STEP_COMMAND, _STEP_SUBSTEP, _STEP_PROGRESS, _STEP_PROPERTY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=280,
serialized_end=1107,
)
# Auto-generated protobuf Descriptor for the milo.Link message (protoc
# output for annotations.proto) -- do not edit by hand.  A Link carries a
# display label plus exactly one target, expressed through the 'value'
# oneof declared below.
_LINK = _descriptor.Descriptor(
  name='Link',
  full_name='milo.Link',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='label', full_name='milo.Link.label', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='alias_label', full_name='milo.Link.alias_label', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='url', full_name='milo.Link.url', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='logdog_stream', full_name='milo.Link.logdog_stream', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='isolate_object', full_name='milo.Link.isolate_object', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='dm_link', full_name='milo.Link.dm_link', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    # Mutually-exclusive payload of a Link; its member fields are attached
    # after all descriptors exist (see the cross-linking statements below).
    _descriptor.OneofDescriptor(
      name='value', full_name='milo.Link.value',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=1110,
  serialized_end=1301,
)
# Auto-generated protobuf Descriptor for milo.LogdogStream (protoc output)
# -- do not edit by hand.  Identifies a LogDog stream by server, prefix and
# stream name; all three fields are plain strings.
_LOGDOGSTREAM = _descriptor.Descriptor(
  name='LogdogStream',
  full_name='milo.LogdogStream',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='server', full_name='milo.LogdogStream.server', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='prefix', full_name='milo.LogdogStream.prefix', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='name', full_name='milo.LogdogStream.name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1303,
  serialized_end=1363,
)
# Auto-generated protobuf Descriptor for milo.IsolateObject (protoc output)
# -- do not edit by hand.  References an isolate blob by server URL and
# content hash.
_ISOLATEOBJECT = _descriptor.Descriptor(
  name='IsolateObject',
  full_name='milo.IsolateObject',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='server', full_name='milo.IsolateObject.server', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hash', full_name='milo.IsolateObject.hash', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1365,
  serialized_end=1410,
)
# Auto-generated protobuf Descriptor for milo.DMLink (protoc output) -- do
# not edit by hand.  Points at a DM quest attempt/execution; attempt and
# execution are int64 fields (type=3), the rest are strings.
_DMLINK = _descriptor.Descriptor(
  name='DMLink',
  full_name='milo.DMLink',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='server', full_name='milo.DMLink.server', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='quest', full_name='milo.DMLink.quest', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='attempt', full_name='milo.DMLink.attempt', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='execution', full_name='milo.DMLink.execution', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1412,
  serialized_end=1487,
)
# ---------------------------------------------------------------------------
# Generated cross-linking (protoc output) -- do not edit by hand.  The
# descriptors above are declared with None placeholders for message/enum
# references and empty oneof field lists; these statements patch in the real
# references now that every descriptor object exists.
_FAILUREDETAILS.fields_by_name['type'].enum_type = _FAILUREDETAILS_TYPE
_FAILUREDETAILS.fields_by_name['failed_dm_dependency'].message_type = _DMLINK
_FAILUREDETAILS_TYPE.containing_type = _FAILUREDETAILS
_STEP_COMMAND_ENVIRONENTRY.containing_type = _STEP_COMMAND
_STEP_COMMAND.fields_by_name['environ'].message_type = _STEP_COMMAND_ENVIRONENTRY
_STEP_COMMAND.containing_type = _STEP
_STEP_SUBSTEP.fields_by_name['step'].message_type = _STEP
_STEP_SUBSTEP.fields_by_name['annotation_stream'].message_type = _LOGDOGSTREAM
_STEP_SUBSTEP.containing_type = _STEP
# Attach the two mutually-exclusive members of the Substep 'substep' oneof.
_STEP_SUBSTEP.oneofs_by_name['substep'].fields.append(
  _STEP_SUBSTEP.fields_by_name['step'])
_STEP_SUBSTEP.fields_by_name['step'].containing_oneof = _STEP_SUBSTEP.oneofs_by_name['substep']
_STEP_SUBSTEP.oneofs_by_name['substep'].fields.append(
  _STEP_SUBSTEP.fields_by_name['annotation_stream'])
_STEP_SUBSTEP.fields_by_name['annotation_stream'].containing_oneof = _STEP_SUBSTEP.oneofs_by_name['substep']
_STEP_PROGRESS.containing_type = _STEP
_STEP_PROPERTY.containing_type = _STEP
_STEP.fields_by_name['command'].message_type = _STEP_COMMAND
_STEP.fields_by_name['status'].enum_type = _STATUS
_STEP.fields_by_name['failure_details'].message_type = _FAILUREDETAILS
_STEP.fields_by_name['substep'].message_type = _STEP_SUBSTEP
_STEP.fields_by_name['stdout_stream'].message_type = _LOGDOGSTREAM
_STEP.fields_by_name['stderr_stream'].message_type = _LOGDOGSTREAM
_STEP.fields_by_name['started'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_STEP.fields_by_name['ended'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_STEP.fields_by_name['progress'].message_type = _STEP_PROGRESS
_STEP.fields_by_name['link'].message_type = _LINK
_STEP.fields_by_name['other_links'].message_type = _LINK
_STEP.fields_by_name['property'].message_type = _STEP_PROPERTY
_LINK.fields_by_name['logdog_stream'].message_type = _LOGDOGSTREAM
_LINK.fields_by_name['isolate_object'].message_type = _ISOLATEOBJECT
_LINK.fields_by_name['dm_link'].message_type = _DMLINK
# Attach the four mutually-exclusive members of the Link 'value' oneof.
_LINK.oneofs_by_name['value'].fields.append(
  _LINK.fields_by_name['url'])
_LINK.fields_by_name['url'].containing_oneof = _LINK.oneofs_by_name['value']
_LINK.oneofs_by_name['value'].fields.append(
  _LINK.fields_by_name['logdog_stream'])
_LINK.fields_by_name['logdog_stream'].containing_oneof = _LINK.oneofs_by_name['value']
_LINK.oneofs_by_name['value'].fields.append(
  _LINK.fields_by_name['isolate_object'])
_LINK.fields_by_name['isolate_object'].containing_oneof = _LINK.oneofs_by_name['value']
_LINK.oneofs_by_name['value'].fields.append(
  _LINK.fields_by_name['dm_link'])
_LINK.fields_by_name['dm_link'].containing_oneof = _LINK.oneofs_by_name['value']
# Register all top-level messages and enums on the file descriptor.
DESCRIPTOR.message_types_by_name['FailureDetails'] = _FAILUREDETAILS
DESCRIPTOR.message_types_by_name['Step'] = _STEP
DESCRIPTOR.message_types_by_name['Link'] = _LINK
DESCRIPTOR.message_types_by_name['LogdogStream'] = _LOGDOGSTREAM
DESCRIPTOR.message_types_by_name['IsolateObject'] = _ISOLATEOBJECT
DESCRIPTOR.message_types_by_name['DMLink'] = _DMLINK
DESCRIPTOR.enum_types_by_name['Status'] = _STATUS

# Concrete message classes, built from the descriptors via the generated
# message metaclass and registered with the default symbol database.
FailureDetails = _reflection.GeneratedProtocolMessageType('FailureDetails', (_message.Message,), dict(
  DESCRIPTOR = _FAILUREDETAILS,
  __module__ = 'annotations_pb2'
  # @@protoc_insertion_point(class_scope:milo.FailureDetails)
  ))
_sym_db.RegisterMessage(FailureDetails)

Step = _reflection.GeneratedProtocolMessageType('Step', (_message.Message,), dict(
  Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), dict(
    EnvironEntry = _reflection.GeneratedProtocolMessageType('EnvironEntry', (_message.Message,), dict(
      DESCRIPTOR = _STEP_COMMAND_ENVIRONENTRY,
      __module__ = 'annotations_pb2'
      # @@protoc_insertion_point(class_scope:milo.Step.Command.EnvironEntry)
      ))
    ,
    DESCRIPTOR = _STEP_COMMAND,
    __module__ = 'annotations_pb2'
    # @@protoc_insertion_point(class_scope:milo.Step.Command)
    ))
  ,
  Substep = _reflection.GeneratedProtocolMessageType('Substep', (_message.Message,), dict(
    DESCRIPTOR = _STEP_SUBSTEP,
    __module__ = 'annotations_pb2'
    # @@protoc_insertion_point(class_scope:milo.Step.Substep)
    ))
  ,
  Progress = _reflection.GeneratedProtocolMessageType('Progress', (_message.Message,), dict(
    DESCRIPTOR = _STEP_PROGRESS,
    __module__ = 'annotations_pb2'
    # @@protoc_insertion_point(class_scope:milo.Step.Progress)
    ))
  ,
  Property = _reflection.GeneratedProtocolMessageType('Property', (_message.Message,), dict(
    DESCRIPTOR = _STEP_PROPERTY,
    __module__ = 'annotations_pb2'
    # @@protoc_insertion_point(class_scope:milo.Step.Property)
    ))
  ,
  DESCRIPTOR = _STEP,
  __module__ = 'annotations_pb2'
  # @@protoc_insertion_point(class_scope:milo.Step)
  ))
_sym_db.RegisterMessage(Step)
_sym_db.RegisterMessage(Step.Command)
_sym_db.RegisterMessage(Step.Command.EnvironEntry)
_sym_db.RegisterMessage(Step.Substep)
_sym_db.RegisterMessage(Step.Progress)
_sym_db.RegisterMessage(Step.Property)

Link = _reflection.GeneratedProtocolMessageType('Link', (_message.Message,), dict(
  DESCRIPTOR = _LINK,
  __module__ = 'annotations_pb2'
  # @@protoc_insertion_point(class_scope:milo.Link)
  ))
_sym_db.RegisterMessage(Link)

LogdogStream = _reflection.GeneratedProtocolMessageType('LogdogStream', (_message.Message,), dict(
  DESCRIPTOR = _LOGDOGSTREAM,
  __module__ = 'annotations_pb2'
  # @@protoc_insertion_point(class_scope:milo.LogdogStream)
  ))
_sym_db.RegisterMessage(LogdogStream)

IsolateObject = _reflection.GeneratedProtocolMessageType('IsolateObject', (_message.Message,), dict(
  DESCRIPTOR = _ISOLATEOBJECT,
  __module__ = 'annotations_pb2'
  # @@protoc_insertion_point(class_scope:milo.IsolateObject)
  ))
_sym_db.RegisterMessage(IsolateObject)

DMLink = _reflection.GeneratedProtocolMessageType('DMLink', (_message.Message,), dict(
  DESCRIPTOR = _DMLINK,
  __module__ = 'annotations_pb2'
  # @@protoc_insertion_point(class_scope:milo.DMLink)
  ))
_sym_db.RegisterMessage(DMLink)

# Mark Step.Command.environ's entry type as a protobuf map entry
# ('8\001' is the serialized map_entry=true MessageOptions).
_STEP_COMMAND_ENVIRONENTRY.has_options = True
_STEP_COMMAND_ENVIRONENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
| 38.182942 | 2,922 | 0.728585 |
73e7ac15ee74fe3b8d7613812fef0f76a8218708 | 7,503 | py | Python | src/utils.py | a11to1n3/X-Detector | 8d89c55d2578e03d42cb0b587165d0f17dede1b7 | [
"BSD-3-Clause"
] | null | null | null | src/utils.py | a11to1n3/X-Detector | 8d89c55d2578e03d42cb0b587165d0f17dede1b7 | [
"BSD-3-Clause"
] | null | null | null | src/utils.py | a11to1n3/X-Detector | 8d89c55d2578e03d42cb0b587165d0f17dede1b7 | [
"BSD-3-Clause"
] | null | null | null | import os
import copy
import pandas as pd
import cv2 as cv
import numpy as np
import torch
from PIL import Image, ImageDraw
from torchvision import transforms, utils, models
from sklearn.cluster import DBSCAN
from matplotlib import cm
import matplotlib.pyplot as plt
# Erase a rectangular region of interest by painting over it.
def eraseWithROI(image, start_point, end_point):
    """Fill the rectangle from start_point to end_point with the colour
    sampled at start_point, effectively erasing that region in place.

    Points are (x, y) pixel coordinates as expected by OpenCV; the image is
    indexed [row, col], hence the swapped order when sampling the colour.
    """
    sample_row, sample_col = start_point[1], start_point[0]
    fill_color = tuple(int(image[sample_row, sample_col, channel]) for channel in range(3))
    return cv.rectangle(image, start_point, end_point, fill_color, -1)
# multiplying instance
def multiply(image, instance, knowledge_base, times):
    """Paste `instance` into `image` at `times` random non-overlapping spots.

    Both `image` and `knowledge_base` are mutated in place; each accepted
    placement is appended to `knowledge_base` as
    (row_min, row_max, col_min, col_max) and the patch is copied into the
    image.  Returns `image` for convenience.

    Fixes over the previous version:
    * a single candidate position is drawn per attempt and checked against
      *all* recorded regions (previously a fresh position was drawn for each
      region, so only the last comparison was meaningful);
    * an empty `knowledge_base` no longer raises NameError (previously
      `exit_loop`/`position` were unbound when the loop body never ran).

    NOTE: like the original, this loops forever if no valid placement exists
    (e.g. the instance cannot fit without overlapping).
    """
    inst_h, inst_w = instance.shape[0], instance.shape[1]
    for _ in range(times):
        # Rejection-sample a top-left corner until the patch fits inside the
        # image and does not overlap any previously placed region.
        while True:
            position = (np.random.randint(image.shape[0]),
                        np.random.randint(image.shape[1]))
            if (position[0] + inst_h > image.shape[0]
                    or position[1] + inst_w > image.shape[1]):
                continue
            overlaps = any(
                set(range(position[0], position[0] + inst_h)) & set(range(k[0], k[1]))
                and set(range(position[1], position[1] + inst_w)) & set(range(k[2], k[3]))
                for k in knowledge_base
            )
            if not overlaps:
                break
        knowledge_base.append((position[0], position[0] + inst_h,
                               position[1], position[1] + inst_w))
        image[position[0]:position[0] + inst_h,
              position[1]:position[1] + inst_w] = instance
    return image
# Multiplying instance
def multiplyAndWrite(image, instance, knowledge_base, times, class_of_interest, count):
    """Paste `instance` into `image` at `times` random non-overlapping spots,
    writing one augmented image to ./augmented_img after each paste, then
    write a horizontally- and a vertically-flipped variant for every region
    recorded in `knowledge_base`.

    `knowledge_base` entries are (row_min, row_max, col_min, col_max); both
    `image` and `knowledge_base` are mutated in place.  Returns the image,
    the knowledge base and the next unused file counter.

    Fixes over the previous version (same placement defects as multiply()):
    a single candidate position is drawn per attempt and checked against all
    recorded regions, and an empty `knowledge_base` no longer raises
    NameError.  NOTE: loops forever if no valid placement exists.
    """
    inst_h, inst_w = instance.shape[0], instance.shape[1]
    for _ in range(times):
        while True:
            position = (np.random.randint(image.shape[0]),
                        np.random.randint(image.shape[1]))
            if (position[0] + inst_h > image.shape[0]
                    or position[1] + inst_w > image.shape[1]):
                continue
            overlaps = any(
                set(range(position[0], position[0] + inst_h)) & set(range(k[0], k[1]))
                and set(range(position[1], position[1] + inst_w)) & set(range(k[2], k[3]))
                for k in knowledge_base
            )
            if not overlaps:
                break
        knowledge_base.append((position[0], position[0] + inst_h,
                               position[1], position[1] + inst_w))
        image[position[0]:position[0] + inst_h,
              position[1]:position[1] + inst_w] = instance
        cv.imwrite(f'./augmented_img/{class_of_interest}_{str(count)}.bmp', image)
        count += 1
    for k in knowledge_base:
        start_point = (k[0], k[2])
        end_point = (k[1], k[3])
        # Flip horizontally
        image_copy = copy.deepcopy(image)
        instance1 = cv.flip(image_copy[start_point[0]:end_point[0], start_point[1]:end_point[1]], 1)
        image_copy[start_point[0]:end_point[0], start_point[1]:end_point[1]] = instance1
        cv.imwrite(f'./augmented_img/{class_of_interest}_{str(count)}.bmp', image_copy)
        count += 1
        # Flip vertically
        image_copy = copy.deepcopy(image)
        instance1 = cv.flip(image_copy[start_point[0]:end_point[0], start_point[1]:end_point[1]], 0)
        image_copy[start_point[0]:end_point[0], start_point[1]:end_point[1]] = instance1
        cv.imwrite(f'./augmented_img/{class_of_interest}_{str(count)}.bmp', image_copy)
        count += 1
    return image, knowledge_base, count
def find_bounding_boxes_per_class(CAM_explainer, img_path, class_names, class_labels, class_colors, plot=False):
    """Derive per-class bounding boxes from CAM explanations of one image.

    For every class, the image is run through `CAM_explainer`, the explanation
    heat-map is thresholded at 0.6, the surviving pixels are clustered with
    DBSCAN, and each cluster's extent becomes a bounding box.  Returns two
    dicts keyed by class name: `class_boxes` with [xmin, ymin, xmax, ymax]
    lists, and `class_scores` with the mean heat-map activation per box.
    With plot=True the boxes are also drawn on a cumulative overlay image.

    NOTE(review): assumes `CAM_explainer` returns (heat-map array, model
    output) and accepts (tensor, bool, label) -- confirm against the
    explainer implementation.
    """
    strToLabel = {n:l for n,l in zip(class_names, class_labels)}
    strToColor = {n:c for n,c in zip(class_names, class_colors)}
    if plot:
        fig = plt.figure(figsize=(20,70))
        ax = fig.gca()
    # One box list and one score list per class name.
    class_boxes = {}
    class_scores = {}
    for class_oi in list(strToLabel.keys()):
        class_boxes[class_oi] = []
        class_scores[class_oi] = []
    for class_oi in list(strToLabel.keys()):
        img = cv.imread(img_path)
        # Standard ImageNet preprocessing to the 224x224 input the model expects.
        data_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        img_norm = data_transform(img)
        ex, out = CAM_explainer(torch.tensor(img_norm.reshape(1,3,224,224),dtype=torch.float),False,strToLabel[class_oi])
        cmap = cm.get_cmap('jet')
        ex = Image.fromarray(ex)
        img = Image.fromarray(img)
        # Upsample the explanation to the original image size.
        overlay = ex.resize(img.size, resample=Image.BILINEAR)
        overlay = np.asarray(overlay)
        overlay1 = overlay>0.6
        # Collect (x, y) coordinates of all pixels above the 0.6 threshold.
        X = []
        for i in range(overlay1.shape[1]):
            for j in range(overlay1.shape[0]):
                if overlay1.T[i,j] == 1:
                    X.append([i,j])
        X = np.array(X)
        if len(X.shape) > 1:
            # Cluster the activated pixels; each cluster (label != -1, i.e.
            # not noise) yields one bounding box from its coordinate extremes.
            clustering = DBSCAN(eps=3, min_samples=2).fit(X)
            xmins = [min(np.array(X)[clustering.labels_ == l,0]) for l in np.unique(clustering.labels_) if l != -1]
            xmaxs = [max(np.array(X)[clustering.labels_ == l,0]) for l in np.unique(clustering.labels_) if l != -1]
            ymins = [min(np.array(X)[clustering.labels_ == l,1]) for l in np.unique(clustering.labels_) if l != -1]
            ymaxs = [max(np.array(X)[clustering.labels_ == l,1]) for l in np.unique(clustering.labels_) if l != -1]
        else:
            if len(X) == 1:
                # Single activated pixel: reshape so DBSCAN accepts it.
                clustering = DBSCAN(eps=3, min_samples=2).fit(X.reshape(1, -1))
                xmins = [min(np.array(X)[clustering.labels_ == l,0]) for l in np.unique(clustering.labels_) if l != -1]
                xmaxs = [max(np.array(X)[clustering.labels_ == l,0]) for l in np.unique(clustering.labels_) if l != -1]
                ymins = [min(np.array(X)[clustering.labels_ == l,1]) for l in np.unique(clustering.labels_) if l != -1]
                ymaxs = [max(np.array(X)[clustering.labels_ == l,1]) for l in np.unique(clustering.labels_) if l != -1]
            else:
                # No pixel survived the threshold: degenerate zero box.
                xmins, ymins, xmaxs, ymaxs = [0], [0], [0], [0]
        [class_boxes[class_oi].append([xmins[i], ymins[i], xmaxs[i], ymaxs[i]]) for i in range(len(xmins))]
        # Score each box with its mean heat-map activation.
        [class_scores[class_oi].append(np.mean(overlay[class_boxes[class_oi][i][0]:class_boxes[class_oi][i][2],class_boxes[class_oi][i][1]:class_boxes[class_oi][i][3]])) for i in range(len(xmins))]
        if plot:
            overlay = overlay * (overlay > 0.5).astype(np.uint8)
            overlay = (255 * cmap(overlay ** 2)[:, :, :3]).astype(np.uint8)
            # First iteration: `overlayed_img` is still unbound, so the bare
            # except seeds it from the base image; later iterations blend the
            # new class overlay into the accumulated image.
            try:
                overlayed_img = Image.fromarray((0.95 * np.asarray(overlayed_img) + (1 - 0.95) * overlay).astype(np.uint8))
            except:
                overlayed_img = Image.fromarray((0.7 * np.asarray(img) + (1 - 0.7) * overlay).astype(np.uint8))
            draw = ImageDraw.Draw(overlayed_img)
            [draw.rectangle(((xmins[i], ymins[i]), (xmaxs[i], ymaxs[i])),outline=strToColor[class_oi]) for i in range(len(xmins))]
            # Put the class label above the box when there is room, else below.
            if np.all(np.array(ymins)-10 > 10):
                [draw.text(((xmins[i], ymins[i]-10)),class_oi, fill=strToColor[class_oi]) for i in range(len(xmins))]
            else:
                [draw.text(((xmins[i], ymaxs[i]+10)),class_oi, fill=strToColor[class_oi]) for i in range(len(xmins))]
            ax.imshow(np.array(overlayed_img))
    if plot:
        plt.show()
    return class_boxes, class_scores
| 46.602484 | 197 | 0.610422 |
73e7d15f45545c5c8759ccddfaba33e651999b42 | 12,237 | py | Python | tempest/api/compute/servers/test_create_server.py | midokura/tempest | b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/servers/test_create_server.py | midokura/tempest | b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/servers/test_create_server.py | midokura/tempest | b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import netaddr
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest import test
CONF = config.CONF
class ServersTestJSON(base.BaseV2ComputeTest):
    """Tests for the server create API.

    resource_setup() builds one shared server (with metadata, access IPs
    and a personality file); the tests then verify the stored attributes,
    server listings, scheduler hints, NIC ordering, and -- when SSH
    validation is enabled -- the guest itself.
    """

    # AUTO disk partitioning; ServersTestManualDisk re-runs with MANUAL.
    disk_config = 'AUTO'

    @classmethod
    def resource_setup(cls):
        cls.prepare_instance_network()
        super(ServersTestJSON, cls).resource_setup()
        cls.meta = {'hello': 'world'}
        cls.accessIPv4 = '1.1.1.1'
        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
        cls.name = data_utils.rand_name('server')
        file_contents = 'This is a test file.'
        personality = [{'path': '/test.txt',
                        'contents': base64.b64encode(file_contents)}]
        cls.client = cls.servers_client
        cls.network_client = cls.os.network_client
        disk_config = cls.disk_config
        cls.server_initial = cls.create_test_server(name=cls.name,
                                                    meta=cls.meta,
                                                    accessIPv4=cls.accessIPv4,
                                                    accessIPv6=cls.accessIPv6,
                                                    personality=personality,
                                                    disk_config=disk_config)
        cls.password = cls.server_initial['adminPass']
        cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
        cls.server = cls.client.get_server(cls.server_initial['id'])

    @test.attr(type='smoke')
    def test_verify_server_details(self):
        # Verify the specified server attributes are set correctly
        self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
        # NOTE(maurosr): See http://tools.ietf.org/html/rfc5952 (section 4)
        # Here we compare directly with the canonicalized format.
        self.assertEqual(self.server['accessIPv6'],
                         str(netaddr.IPAddress(self.accessIPv6)))
        self.assertEqual(self.name, self.server['name'])
        self.assertEqual(self.image_ref, self.server['image']['id'])
        self.assertEqual(self.flavor_ref, self.server['flavor']['id'])
        self.assertEqual(self.meta, self.server['metadata'])

    @test.attr(type='smoke')
    def test_list_servers(self):
        # The created server should be in the list of all servers
        resp, body = self.client.list_servers()
        servers = body['servers']
        found = any([i for i in servers if i['id'] == self.server['id']])
        self.assertTrue(found)

    @test.attr(type='smoke')
    def test_list_servers_with_detail(self):
        # The created server should be in the detailed list of all servers
        resp, body = self.client.list_servers_with_detail()
        servers = body['servers']
        found = any([i for i in servers if i['id'] == self.server['id']])
        self.assertTrue(found)

    @testtools.skipUnless(CONF.compute.run_ssh,
                          'Instance validation tests are disabled.')
    @test.attr(type='gate')
    def test_verify_created_server_vcpus(self):
        # Verify that the number of vcpus reported by the instance matches
        # the amount stated by the flavor
        flavor = self.flavors_client.get_flavor_details(self.flavor_ref)
        linux_client = remote_client.RemoteClient(self.server, self.ssh_user,
                                                  self.password)
        self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())

    @testtools.skipUnless(CONF.compute.run_ssh,
                          'Instance validation tests are disabled.')
    @test.attr(type='gate')
    def test_host_name_is_same_as_server_name(self):
        # Verify the instance host name is the same as the server name
        linux_client = remote_client.RemoteClient(self.server, self.ssh_user,
                                                  self.password)
        self.assertTrue(linux_client.hostname_equals_servername(self.name))

    @test.attr(type='gate')
    def test_create_server_with_scheduler_hint_group(self):
        # Create a server with the scheduler hint "group".
        name = data_utils.rand_name('server_group')
        policies = ['affinity']
        resp, body = self.client.create_server_group(name=name,
                                                     policies=policies)
        self.assertEqual(200, resp.status)
        group_id = body['id']
        self.addCleanup(self.client.delete_server_group, group_id)

        hints = {'group': group_id}
        server = self.create_test_server(sched_hints=hints,
                                         wait_until='ACTIVE')

        # Check a server is in the group
        resp, server_group = self.client.get_server_group(group_id)
        self.assertEqual(200, resp.status)
        self.assertIn(server['id'], server_group['members'])

    @testtools.skipUnless(CONF.service_available.neutron,
                          'Neutron service must be available.')
    def test_verify_multiple_nics_order(self):
        # Verify that the networks order given at the server creation is
        # preserved within the server.
        name_net1 = data_utils.rand_name(self.__class__.__name__)
        net1 = self.network_client.create_network(name=name_net1)
        self.addCleanup(self.network_client.delete_network,
                        net1['network']['id'])

        name_net2 = data_utils.rand_name(self.__class__.__name__)
        net2 = self.network_client.create_network(name=name_net2)
        self.addCleanup(self.network_client.delete_network,
                        net2['network']['id'])

        subnet1 = self.network_client.create_subnet(
            network_id=net1['network']['id'],
            cidr='19.80.0.0/24',
            ip_version=4)
        self.addCleanup(self.network_client.delete_subnet,
                        subnet1['subnet']['id'])

        subnet2 = self.network_client.create_subnet(
            network_id=net2['network']['id'],
            cidr='19.86.0.0/24',
            ip_version=4)
        self.addCleanup(self.network_client.delete_subnet,
                        subnet2['subnet']['id'])

        networks = [{'uuid': net1['network']['id']},
                    {'uuid': net2['network']['id']}]

        server_multi_nics = self.create_test_server(
            networks=networks, wait_until='ACTIVE')

        # Cleanup server; this is needed in the test case because with the LIFO
        # nature of the cleanups, if we don't delete the server first, the port
        # will still be part of the subnet and we'll get a 409 from Neutron
        # when trying to delete the subnet. The tear down in the base class
        # will try to delete the server and get a 404 but it's ignored so
        # we're OK.
        def cleanup_server():
            self.client.delete_server(server_multi_nics['id'])
            self.client.wait_for_server_termination(server_multi_nics['id'])

        self.addCleanup(cleanup_server)

        _, addresses = self.client.list_addresses(server_multi_nics['id'])

        # We can't predict the ip addresses assigned to the server on networks.
        # Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
        # other times ['19.80.0.3', '19.86.0.3']. So we check if the first
        # address is in first network, similarly second address is in second
        # network.
        addr = [addresses[name_net1][0]['addr'],
                addresses[name_net2][0]['addr']]
        networks = [netaddr.IPNetwork('19.80.0.0/24'),
                    netaddr.IPNetwork('19.86.0.0/24')]
        for address, network in zip(addr, networks):
            self.assertIn(address, network)
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
    """Verifies ephemeral disk handling for servers built from custom flavors.

    Uses the admin flavors client to create throwaway flavors with and
    without an ephemeral disk, boots one server from each, and compares the
    guests' partition counts over SSH.
    """

    disk_config = 'AUTO'

    @classmethod
    def resource_setup(cls):
        cls.prepare_instance_network()
        super(ServersWithSpecificFlavorTestJSON, cls).resource_setup()
        cls.flavor_client = cls.os_adm.flavors_client
        cls.client = cls.servers_client

    @testtools.skipUnless(CONF.compute.run_ssh,
                          'Instance validation tests are disabled.')
    @test.attr(type='gate')
    def test_verify_created_server_ephemeral_disk(self):
        # Verify that the ephemeral disk is created when creating server

        def create_flavor_with_extra_specs():
            flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
            flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = 64
            vcpus = 1
            disk = 0

            # Create a flavor with extra specs
            flavor = (self.flavor_client.
                      create_flavor(flavor_with_eph_disk_name,
                                    ram, vcpus, disk,
                                    flavor_with_eph_disk_id,
                                    ephemeral=1))
            self.addCleanup(flavor_clean_up, flavor['id'])

            return flavor['id']

        def create_flavor_without_extra_specs():
            flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
            flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)

            ram = 64
            vcpus = 1
            disk = 0

            # Create a flavor without extra specs
            flavor = (self.flavor_client.
                      create_flavor(flavor_no_eph_disk_name,
                                    ram, vcpus, disk,
                                    flavor_no_eph_disk_id))
            self.addCleanup(flavor_clean_up, flavor['id'])

            return flavor['id']

        def flavor_clean_up(flavor_id):
            # Delete the throwaway flavor and wait until it is really gone.
            self.flavor_client.delete_flavor(flavor_id)
            self.flavor_client.wait_for_resource_deletion(flavor_id)

        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
        flavor_no_eph_disk_id = create_flavor_without_extra_specs()

        admin_pass = self.image_ssh_password

        server_no_eph_disk = (self.create_test_server(
                              wait_until='ACTIVE',
                              adminPass=admin_pass,
                              flavor=flavor_no_eph_disk_id))
        server_with_eph_disk = (self.create_test_server(
                                wait_until='ACTIVE',
                                adminPass=admin_pass,
                                flavor=flavor_with_eph_disk_id))
        # Get partition number of server without extra specs.
        server_no_eph_disk = self.client.get_server(
            server_no_eph_disk['id'])
        linux_client = remote_client.RemoteClient(server_no_eph_disk,
                                                  self.ssh_user, admin_pass)
        partition_num = len(linux_client.get_partitions().split('\n'))

        server_with_eph_disk = self.client.get_server(
            server_with_eph_disk['id'])
        linux_client = remote_client.RemoteClient(server_with_eph_disk,
                                                  self.ssh_user, admin_pass)
        partition_num_emph = len(linux_client.get_partitions().split('\n'))
        # The ephemeral flavor should yield exactly one extra partition.
        self.assertEqual(partition_num + 1, partition_num_emph)
class ServersTestManualDisk(ServersTestJSON):
    """Re-run the ServersTestJSON suite with MANUAL disk partitioning."""

    disk_config = 'MANUAL'

    @classmethod
    def resource_setup(cls):
        # Skip the whole class when the DiskConfig extension is unavailable.
        if not CONF.compute_feature_enabled.disk_config:
            raise cls.skipException("DiskConfig extension not enabled.")
        super(ServersTestManualDisk, cls).resource_setup()
73e7fe0630f465871f86d3c1d30ea72e6c52ae84 | 2,147 | py | Python | gtld_data/tools/create_data_from_zonefile.py | NCommander/gtld-innovation-health | 5dac47ce7678f8867e056eb361b4ddddbab109bd | [
"MIT"
] | null | null | null | gtld_data/tools/create_data_from_zonefile.py | NCommander/gtld-innovation-health | 5dac47ce7678f8867e056eb361b4ddddbab109bd | [
"MIT"
] | null | null | null | gtld_data/tools/create_data_from_zonefile.py | NCommander/gtld-innovation-health | 5dac47ce7678f8867e056eb361b4ddddbab109bd | [
"MIT"
] | null | null | null | #!/usr/bin/env pyhton3
# Copyright 2018 Michael Casadevall <michael@casadevall.pro>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
'''Dumps all the nameservers seen in a given zone with their quantity'''
import argparse
import operator
from gtld_data import gtld_db
import gtld_data
def main():
    """Parse CLI arguments, load a zone file, and persist it to the database."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('zonefile', help='Zonefile to load')
    arg_parser.add_argument('--origin', help="Origin of the zone file")
    cli_args = arg_parser.parse_args()

    # Initialize the database and open a transaction for the whole import.
    print("Creating database ...")
    gtld_data.gtld_db.create_database()
    gtld_db.database_connection.begin()
    db_cursor = gtld_db.database_connection.cursor()

    # Parse the zone file and resolve reverse-zone information for its domains.
    print("Loading zone data, this may take a moment")
    parsed_zone = gtld_data.ZoneData.load_from_file(
        db_cursor, cli_args.zonefile, origin=cli_args.origin)
    print(f"Unique domains: {len(parsed_zone.domains)}")

    processor = gtld_data.ZoneProcessor(parsed_zone)
    processor.get_reverse_zone_information(db_cursor, parsed_zone.domains)

    # Persist everything and commit the transaction.
    parsed_zone.to_db(db_cursor)
    gtld_db.database_connection.commit()
    db_cursor.close()

if __name__ == "__main__":
    main()
73e84b2acebc09dc07b07cbff66efb0b46922fed | 7,263 | py | Python | tests/test_schedule_wiki_storage.py | slow-start-fans/slow-start-rewatch | 7cf0fc9434084b3b9fa23497b08b4c9e6c512250 | [
"MIT"
] | 1 | 2020-05-31T02:41:48.000Z | 2020-05-31T02:41:48.000Z | tests/test_schedule_wiki_storage.py | slow-start-fans/slow-start-rewatch | 7cf0fc9434084b3b9fa23497b08b4c9e6c512250 | [
"MIT"
] | 33 | 2020-05-27T01:17:42.000Z | 2021-03-01T16:09:33.000Z | tests/test_schedule_wiki_storage.py | slow-start-fans/slow-start-rewatch | 7cf0fc9434084b3b9fa23497b08b4c9e6c512250 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from pathlib import Path
from unittest.mock import MagicMock, Mock, PropertyMock
import pytest
from praw.models.reddit.subreddit import SubredditWiki
from prawcore.exceptions import Forbidden, NotFound, PrawcoreException
from slow_start_rewatch.exceptions import (
InvalidWikiLink,
MissingPost,
MissingSchedule,
RedditError,
)
from slow_start_rewatch.schedule.schedule_wiki_storage import (
ScheduleWikiStorage,
)
from tests.conftest import TEST_ROOT_DIR, MockConfig
from tests.test_schedule_storage import POST_BODY, SCHEDULE_DATA
# NOTE(review): `wiki` aliases the SubredditWiki class but appears unused in
# this chunk of the module — presumably leftover; confirm before removing.
wiki = SubredditWiki
# On-disk location and filenames of the schedule fixtures.
TEST_SCHEDULE_PATH = Path(TEST_ROOT_DIR).joinpath("test_schedule")
SCHEDULE_FILENAME = "schedule.yml"
POST_BODY_FILENAME = "episode_01.md"
def test_load_schedule_data(schedule_wiki_storage_config, reddit_with_wiki):
    """The Schedule data stored in the wiki is loaded verbatim."""
    storage = ScheduleWikiStorage(schedule_wiki_storage_config, reddit_with_wiki)

    assert storage.load_schedule_data() == SCHEDULE_DATA
def test_load_schedule_data_not_found(reddit_with_wiki):
    """Loading the Schedule from a nonexistent wiki page raises MissingSchedule."""
    storage = ScheduleWikiStorage(
        MockConfig({"schedule_wiki_url": "/r/anime/wiki/not-found"}),
        reddit_with_wiki,
    )

    with pytest.raises(MissingSchedule) as excinfo:
        storage.load_schedule_data()

    # Comply with PT012: https://pypi.org/project/flake8-pytest-style/
    message = str(excinfo.value)  # noqa: WPS441
    assert "wiki page not found" in message
    assert "/r/anime/wiki/not-found" in message
def test_load_schedule_data_forbidden(reddit_with_wiki):
    """Loading the Schedule from an inaccessible wiki page raises MissingSchedule."""
    storage = ScheduleWikiStorage(
        MockConfig({"schedule_wiki_url": "/r/anime/wiki/forbidden"}),
        reddit_with_wiki,
    )

    with pytest.raises(MissingSchedule) as excinfo:
        storage.load_schedule_data()

    # Comply with PT012: https://pypi.org/project/flake8-pytest-style/
    message = str(excinfo.value)  # noqa: WPS441
    assert "permissions to access" in message
    assert "/r/anime/wiki/forbidden" in message
def test_load_post_body(schedule_wiki_storage_config, reddit_with_wiki):
    """The Post body stored in the wiki is loaded verbatim."""
    storage = ScheduleWikiStorage(schedule_wiki_storage_config, reddit_with_wiki)

    assert storage.load_post_body("episode_01") == POST_BODY
def test_load_post_body_not_found(reddit_with_wiki):
    """Loading the Post body from a nonexistent wiki page raises MissingPost."""
    storage = ScheduleWikiStorage(
        MockConfig({"schedule_wiki_url": "/r/anime/wiki/not-found"}),
        reddit_with_wiki,
    )

    with pytest.raises(MissingPost) as excinfo:
        storage.load_post_body("episode_01")

    # Comply with PT012: https://pypi.org/project/flake8-pytest-style/
    message = str(excinfo.value)  # noqa: WPS441
    assert "wiki page not found" in message
    assert "/r/anime/wiki/not-found/episode_01" in message
def test_load_post_body_forbidden(reddit_with_wiki):
    """Loading the Post body from an inaccessible wiki page raises MissingPost."""
    storage = ScheduleWikiStorage(
        MockConfig({"schedule_wiki_url": "/r/anime/wiki/forbidden"}),
        reddit_with_wiki,
    )

    with pytest.raises(MissingPost) as excinfo:
        storage.load_post_body("episode_01")

    # Comply with PT012: https://pypi.org/project/flake8-pytest-style/
    message = str(excinfo.value)  # noqa: WPS441
    assert "permissions to access" in message
    assert "/r/anime/wiki/forbidden/episode_01" in message
def test_save_schedule_data(schedule_wiki_storage_config, reddit_with_wiki):
    """
    Saving the Schedule data edits the wiki page.

    The saved content must be indented by 4 spaces.
    """
    storage = ScheduleWikiStorage(schedule_wiki_storage_config, reddit_with_wiki)
    storage.save_schedule_data(SCHEDULE_DATA)

    edit_mock = reddit_with_wiki.subreddit().wiki["slow-start-rewatch"].edit
    edit_kwargs = edit_mock.call_args[1]

    assert "anime\n    posts:" in edit_kwargs["content"]
    assert edit_kwargs["reason"] == "Rewatch Update"
def test_save_schedule_data_error(reddit_with_wiki):
    """Saving the Schedule data to a broken wiki page raises RedditError."""
    storage = ScheduleWikiStorage(
        MockConfig({"schedule_wiki_url": "/r/anime/wiki/not-found"}),
        reddit_with_wiki,
    )

    with pytest.raises(RedditError) as excinfo:
        storage.save_schedule_data(SCHEDULE_DATA)

    # Comply with PT012: https://pypi.org/project/flake8-pytest-style/
    message = str(excinfo.value)  # noqa: WPS441
    assert "Failed to update" in message
    assert "/r/anime/wiki/not-found" in message
def test_invalid_config(reddit_with_wiki):
    """Initializing `ScheduleWikiStorage` with an invalid config fails."""
    config = MockConfig({"schedule_wiki_url": None})

    # A missing wiki URL is a programming error.
    with pytest.raises(RuntimeError):
        ScheduleWikiStorage(config, reddit_with_wiki)

    bad_url = "/r/anime/slow-start-rewatch"
    config["schedule_wiki_url"] = bad_url

    with pytest.raises(InvalidWikiLink) as excinfo:
        ScheduleWikiStorage(config, reddit_with_wiki)

    # Comply with PT012: https://pypi.org/project/flake8-pytest-style/
    assert bad_url in str(excinfo.value)  # noqa: WPS441
@pytest.fixture()
def schedule_wiki_storage_config():
    """Return a mock Config containing a wiki URL.

    The ``tmpdir`` fixture previously requested here was never used, so it is
    no longer pulled in (avoids creating a temporary directory for every test).
    """
    return MockConfig({
        "schedule_wiki_url": "/r/anime/wiki/slow-start-rewatch",
    })
@pytest.fixture()
def reddit_with_wiki():
    """Return a mock `Reddit` instance exposing a mock subreddit wiki."""
    def page_with_content(content):
        page = Mock()
        page.content_md = content
        return page

    missing_page = Mock()
    # Reading the page raises NotFound; editing it raises PrawcoreException.
    type(missing_page).content_md = PropertyMock(
        side_effect=NotFound(response=MagicMock()),
    )
    type(missing_page).edit = PropertyMock(
        side_effect=PrawcoreException,
    )

    forbidden_page = Mock()
    type(forbidden_page).content_md = PropertyMock(
        side_effect=Forbidden(response=MagicMock()),
    )

    reddit = Mock()
    reddit.subreddit().wiki = {
        "slow-start-rewatch": page_with_content(SCHEDULE_DATA),
        "slow-start-rewatch/episode_01": page_with_content(POST_BODY),
        "not-found": missing_page,
        "not-found/episode_01": missing_page,
        "forbidden": forbidden_page,
        "forbidden/episode_01": forbidden_page,
    }
    return reddit
73e85b36ac2b88009da4bd434f55fbcf54f994ea | 2,355 | py | Python | tests/test_addons/test_addon_word_getter.py | cangirolave/loglan_db | 62508356e31624bc36c833cc12c3feaf64ff9767 | [
"MIT"
] | null | null | null | tests/test_addons/test_addon_word_getter.py | cangirolave/loglan_db | 62508356e31624bc36c833cc12c3feaf64ff9767 | [
"MIT"
] | 4 | 2021-10-01T09:38:13.000Z | 2021-10-22T04:20:58.000Z | tests/test_addons/test_addon_word_getter.py | cangirolave/loglan_db | 62508356e31624bc36c833cc12c3feaf64ff9767 | [
"MIT"
] | 3 | 2021-10-01T09:41:07.000Z | 2021-10-10T23:26:47.000Z | # -*- coding: utf-8 -*-
# pylint: disable=R0201, R0903, C0116, C0103
"""Base Model unit tests."""
import pytest
from loglan_db.model_db.base_definition import BaseDefinition as Definition
from loglan_db.model_db.base_event import BaseEvent as Event
from loglan_db.model_db.base_key import BaseKey as Key
from loglan_db.model_db.base_word import BaseWord
from loglan_db.model_db.addons.addon_word_getter import AddonWordGetter
from tests.data import changed_words, all_events, doubled_words
from tests.data import connect_keys
from tests.data import keys, definitions, words, events
from tests.functions import db_connect_keys, db_add_objects
class Word(BaseWord, AddonWordGetter):
    """Concrete word model: BaseWord combined with the getter addon under test."""
@pytest.mark.usefixtures("db")
class TestWord:
    """Tests for the Word getter addon."""

    def test_by_event(self):
        """Filtering words by event id."""
        db_add_objects(Word, changed_words + words)
        db_add_objects(Event, all_events)

        assert len(Word.get_all()) == 13
        assert len(Word.by_event(1).all()) == 10
        assert len(Word.by_event(5).all()) == 9
        # With no argument the result matches event 5 for this dataset.
        assert len(Word.by_event().all()) == 9

    def test_by_name(self):
        """Finding words by name."""
        db_add_objects(Word, doubled_words)
        db_add_objects(Event, events)

        matches = Word.by_name("duo").all()
        assert Word.by_name("duo").count() == 2
        assert isinstance(matches, list)
        assert isinstance(matches[0], Word)
        assert sorted(w.type_id for w in matches) == [2, 17]
        assert isinstance(Word.by_name("duo").first(), Word)

    def test_by_key(self):
        """Finding words by key, with case-insensitive matching and language filter."""
        db_add_objects(Word, words)
        db_add_objects(Definition, definitions)
        db_add_objects(Key, keys)
        db_add_objects(Event, all_events)
        db_connect_keys(connect_keys)

        assert Word.by_key("test").count() == 5
        assert Word.by_key("Test").count() == 5
        assert [w.name for w in Word.by_key("test").all()] == [
            'pru', 'pruci', 'prukao',
        ]
        assert Word.by_key("test", language="es").count() == 0
        assert Word.by_key("test", language="en").count() == 5
73e85fce131436144bb36fcb7143fcabf6fe48a9 | 491 | py | Python | test/filter1.py | sergey-serebryakov/mpipe | 5a1804cf64271931f0cd3e4fff3e2b38291212dd | [
"MIT"
] | null | null | null | test/filter1.py | sergey-serebryakov/mpipe | 5a1804cf64271931f0cd3e4fff3e2b38291212dd | [
"MIT"
] | null | null | null | test/filter1.py | sergey-serebryakov/mpipe | 5a1804cf64271931f0cd3e4fff3e2b38291212dd | [
"MIT"
] | null | null | null | import time
from mpipe import OrderedStage, FilterStage, Pipeline
def passthru(value):
    """Return *value* unchanged after a short artificial delay (simulated work)."""
    time.sleep(0.013)
    return value
# A filter stage wrapping a single ordered worker stage; at most one task
# may be in flight at a time.
_worker = OrderedStage(passthru)
s1 = FilterStage((_worker,), max_tasks=1)
p1 = Pipeline(s1)
def pull(task):
    """Drain results from p1, printing each task that produced a non-empty result."""
    for done_task, result in p1.results():
        if result:
            print('{0} {1}'.format(done_task, result[0]))
# Second pipeline whose single stage drains p1's results.
p2 = Pipeline(OrderedStage(pull))
p2.put(True)

# Feed ten numbers into the first pipeline, pacing the producer slightly.
for value in range(10):
    p1.put(value)
    time.sleep(0.010)

# Signal shutdown on both pipelines.
p1.put(None)
p2.put(None)
73e86865a81a672e8e4a1c4d58e9b04b5edec774 | 489 | py | Python | mbrl/planning/__init__.py | MarkSelden/mbrl-lib | 26845b3a5d8930e00726cc56bf17707a7ac45083 | [
"MIT"
] | null | null | null | mbrl/planning/__init__.py | MarkSelden/mbrl-lib | 26845b3a5d8930e00726cc56bf17707a7ac45083 | [
"MIT"
] | null | null | null | mbrl/planning/__init__.py | MarkSelden/mbrl-lib | 26845b3a5d8930e00726cc56bf17707a7ac45083 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .core import Agent, RandomAgent, complete_agent_cfg, load_agent
from .trajectory_opt import (
CEMOptimizer,
ICEMOptimizer,
MPPIOptimizer,
TrajectoryOptimizer,
TrajectoryOptimizerAgent,
create_trajectory_optim_agent_for_model,
)
from .marks_optimizer import *
| 27.166667 | 71 | 0.773006 |
73e8758dfbf2aa53baf368cecf31a4631046c220 | 892 | py | Python | PyXAB/algos/VPCT.py | WilliamLwj/PyXAB | a95c713090fe875bc557bc7b0e11ab000a921407 | [
"MIT"
] | null | null | null | PyXAB/algos/VPCT.py | WilliamLwj/PyXAB | a95c713090fe875bc557bc7b0e11ab000a921407 | [
"MIT"
] | null | null | null | PyXAB/algos/VPCT.py | WilliamLwj/PyXAB | a95c713090fe875bc557bc7b0e11ab000a921407 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Implementation of VPCT (Li et al. 2021)
"""
# Author: Wenjie Li <li3549@purdue.edu>
# License: MIT
import numpy as np
from PyXAB.algos.Algo import Algorithm
from PyXAB.algos.VHCT import VHCT
from PyXAB.algos.GPO import GPO
class VPCT(Algorithm):
    """VPCT: the GPO meta-algorithm wrapped around VHCT (Li et al. 2021)."""

    def __init__(self, numax=1, rhomax=0.9, rounds=1000, domain=None, partition=None):
        """Initialize VPCT.

        Parameters
        ----------
        numax, rhomax :
            Smoothness-related parameters forwarded to GPO.
        rounds : int
            Total number of rounds, forwarded to GPO.
        domain :
            The parameter space (required).
        partition :
            Partition of the parameter space (required).

        Raises
        ------
        ValueError
            If ``domain`` or ``partition`` is not provided.
        """
        super(VPCT, self).__init__()
        # Validate the required arguments *before* building the inner GPO
        # instance. Previously GPO was constructed first with the very
        # arguments being validated, so a missing domain/partition could
        # fail inside GPO instead of raising the intended ValueError here.
        if domain is None:
            raise ValueError("Parameter space is not given.")
        if partition is None:
            raise ValueError("Partition of the parameter space is not given.")
        self.algorithm = GPO(numax=numax, rhomax=rhomax, rounds=rounds,
                             domain=domain, partition=partition, algo=VHCT)

    def pull(self, time):
        """Return the point chosen by the wrapped GPO algorithm at round `time`."""
        return self.algorithm.pull(time)

    def receive_reward(self, time, reward):
        """Forward the observed `reward` at round `time` to the wrapped algorithm."""
        return self.algorithm.receive_reward(time, reward)
73e89fc36e39029cfb0c0425ff4239b8400a8512 | 21,811 | py | Python | trainer_base_mul_apex.py | SparkJiao/MERIt | e887dd11bd2969345a5fb07c47d49bd0245e41e6 | [
"MIT"
] | 8 | 2022-03-01T09:02:44.000Z | 2022-03-18T14:41:56.000Z | trainer_base_mul_apex.py | SparkJiao/MERIt | e887dd11bd2969345a5fb07c47d49bd0245e41e6 | [
"MIT"
] | 1 | 2022-03-09T12:12:22.000Z | 2022-03-10T09:08:42.000Z | trainer_base_mul_apex.py | SparkJiao/MERIt | e887dd11bd2969345a5fb07c47d49bd0245e41e6 | [
"MIT"
] | 2 | 2022-03-02T01:46:52.000Z | 2022-03-02T13:51:53.000Z | # coding=utf-8
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import random
import sys
from typing import Dict, Union
import apex.parallel
import hydra
import numpy as np
import torch
from apex import amp
from omegaconf import DictConfig, OmegaConf
from torch import distributed as dist
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset, Dataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (AdamW, get_linear_schedule_with_warmup, AutoTokenizer, PreTrainedTokenizer)
from general_util.logger import setting_logger
from general_util.training_utils import set_seed, batch_to_device, unwrap_model
try:
from tensorboardX import SummaryWriter
except ImportError:
from torch.utils.tensorboard import SummaryWriter
"""
Requirements: torch==1.8.1
"""
logger: logging.Logger
def save_model(model: torch.nn.Module, cfg: DictConfig, output_dir: str, tokenizer: PreTrainedTokenizer = None):
    """Save the model checkpoint (and, on the main process, tokenizer/config) to `output_dir`."""
    if cfg.local_rank == -1:
        # Single-process training: save directly.
        model.save_pretrained(output_dir)
    else:
        # Distributed: every rank materializes the state dict, but only
        # rank 0 writes it to disk.
        state_dict = model.state_dict()
        if cfg.local_rank == 0:
            unwrap_model(model).save_pretrained(output_dir, state_dict=state_dict)

    # Tokenizer and training config are written only by the main process.
    if cfg.local_rank in [-1, 0]:
        if tokenizer is not None:
            tokenizer.save_pretrained(output_dir)
        OmegaConf.save(cfg, os.path.join(output_dir, "training_config.yaml"))
        logger.info("Saving model checkpoint to %s", output_dir)
def forward_step(model, optimizer, inputs: Dict[str, torch.Tensor], cfg, delay_unscale: bool):
    """Run one forward/backward pass and return the (scaled) loss value as a float."""
    loss = model(**inputs)["loss"]

    # Average over GPUs under DataParallel (not distributed) training.
    if cfg.n_gpu > 1:
        loss = loss.mean()
    # Scale down so accumulated gradients sum to the true batch gradient.
    if cfg.gradient_accumulation_steps > 1:
        loss = loss / cfg.gradient_accumulation_steps

    if not cfg.fp16:
        loss.backward()
    else:
        # Apex AMP loss scaling; `delay_unscale` is passed through to amp.scale_loss.
        with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale) as scaled_loss:
            scaled_loss.backward()

    return loss.item()
def train(cfg, model, tokenizer, continue_from_global_step=0):
    """Train the model over (possibly multiple) training files.

    Handles TensorBoard logging, LR scheduling with warmup, Apex AMP mixed
    precision, DataParallel/DistributedDataParallel wrapping, gradient
    accumulation, periodic checkpointing and (optional) evaluation.
    Returns ``(global_step, average_training_loss)``.
    """
    # TensorBoard writer is only created on the main process; logs go under
    # a sibling "runs" directory derived from cfg.output_dir.
    if cfg.local_rank in [-1, 0]:
        _dir_splits = cfg.output_dir.split('/')
        _log_dir = '/'.join([_dir_splits[0], 'runs'] + _dir_splits[1:])
        tb_writer = SummaryWriter(log_dir=_log_dir)
    else:
        tb_writer = None
    cfg.train_batch_size = cfg.per_gpu_train_batch_size * max(1, cfg.n_gpu)
    num_examples = 0
    # cfg.train_file may be a single path or a glob pattern for several files.
    if os.path.exists(cfg.train_file):
        train_files = [cfg.train_file]
    else:
        train_files = list(glob.glob(cfg.train_file))
    # Pre-load each file once just to count examples (needed to size the
    # LR schedule); datasets are freed immediately afterwards.
    logger.info("Pre-loading dataset(s) to count the total steps.")
    for _train_file in train_files:
        _sub_train_dataset, _ = load_and_cache_examples(cfg, tokenizer, _split="train", _file=_train_file)
        num_examples += len(_sub_train_dataset)
        del _sub_train_dataset
    if "do_preprocess" in cfg and cfg.do_preprocess:
        exit(0)
    if cfg.local_rank != -1:
        cum_steps = int(num_examples * 1.0 / cfg.train_batch_size / dist.get_world_size())
    else:
        cum_steps = int(num_examples * 1.0 / cfg.train_batch_size)
    if "extended_vocab" in cfg and cfg.extended_vocab:
        model.resize_token_embeddings(model.config.vocab_size + hydra.utils.call(cfg.extended_vocab))
    # t_total: total number of optimizer updates; max_steps overrides epochs.
    if cfg.max_steps > 0:
        t_total = cfg.max_steps
        cfg.num_train_epochs = cfg.max_steps // (cum_steps // cfg.gradient_accumulation_steps) + 1
    else:
        t_total = cum_steps // cfg.gradient_accumulation_steps * cfg.num_train_epochs
    num_warmup_steps = int(t_total * cfg.warmup_proportion) if cfg.warmup_proportion else cfg.warmup_steps
    # Prepare optimizer and schedule (linear warmup and decay)
    # Bias and LayerNorm weights are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': cfg.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    if "optimizer" in cfg and cfg.optimizer == 'lamb':
        # if cfg.local_rank == -1:
        from apex.optimizers.fused_lamb import FusedLAMB
        # else:
        # from apex.contrib.optimizers.distributed_fused_lamb import DistributedFusedLAMB as FusedLAMB
        optimizer = FusedLAMB(optimizer_grouped_parameters,
                              lr=cfg.learning_rate,
                              betas=eval(cfg.adam_betas),
                              eps=cfg.adam_epsilon,
                              use_nvlamb=(cfg.use_nvlamb if "use_nvlamb" in cfg else False),
                              max_grad_norm=cfg.max_grad_norm)
    else:
        optimizer = AdamW(optimizer_grouped_parameters, lr=cfg.learning_rate, eps=cfg.adam_epsilon, betas=eval(cfg.adam_betas))
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
    if cfg.fp16:
        model, optimizer = amp.initialize(model, optimizer, opt_level=cfg.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    model_single_gpu = model
    if cfg.n_gpu > 1:
        model = torch.nn.DataParallel(model_single_gpu)
    # Distributed training (should be after apex fp16 initialization)
    if cfg.local_rank != -1:
        # if cfg.fp16_opt_level == 'O2':
        # model = apex.parallel.DistributedDataParallel(model, delay_allreduce=True)
        # else:
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          find_unused_parameters=True,
                                                          device_ids=[cfg.local_rank],
                                                          output_device=cfg.local_rank)
    logger.info(optimizer)
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", num_examples)
    logger.info(" Num Epochs = %d", cfg.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", cfg.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                cfg.train_batch_size * cfg.gradient_accumulation_steps * (dist.get_world_size() if cfg.local_rank != -1 else 1))
    logger.info(" Gradient Accumulation steps = %d", cfg.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    logger.info(" Warmup steps = %d", num_warmup_steps)
    if continue_from_global_step > 0:
        logger.info("Fast forwarding to global step %d to resume training from latest checkpoint...", continue_from_global_step)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(cfg.num_train_epochs), desc="Epoch", disable=cfg.local_rank not in [-1, 0])
    set_seed(cfg) # Added here for reproducibility (even between python 2 and 3)
    train_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
    for epoch in train_iterator:
        # Shuffle the order in which training files are visited each epoch.
        random.shuffle(train_files)
        for _file_index, _train_file in enumerate(train_files):
            logger.info(f"Loading tensors from {_train_file}")
            _sub_train_dataset, _ = load_and_cache_examples(cfg, tokenizer, _split="train", _file=_train_file)
            _sub_train_sampler = RandomSampler(_sub_train_dataset) if cfg.local_rank == -1 else DistributedSampler(_sub_train_dataset)
            train_dataloader = DataLoader(dataset=_sub_train_dataset, sampler=_sub_train_sampler, batch_size=cfg.train_batch_size,
                                          collate_fn=train_collator, num_workers=cfg.num_workers, pin_memory=True,
                                          prefetch_factor=cfg.prefetch_factor)
            epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=cfg.local_rank not in [-1, 0], dynamic_ncols=True)
            if cfg.local_rank != -1:
                # Distinct seed per (epoch, file) so DistributedSampler reshuffles.
                train_dataloader.sampler.set_epoch(epoch * len(train_files) + _file_index)
            for step, batch in enumerate(epoch_iterator):
                # If training is continued from a checkpoint, fast forward
                # to the state of that checkpoint.
                if global_step < continue_from_global_step:
                    if (step + 1) % cfg.gradient_accumulation_steps == 0:
                        scheduler.step() # Update learning rate schedule
                        global_step += 1
                    continue
                model.train()
                batch = batch_to_device(batch, cfg.device)
                if (step + 1) % cfg.gradient_accumulation_steps != 0 and cfg.local_rank != -1:
                    # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
                    # if cfg.fp16_opt_level != 'O2':
                    with model.no_sync():
                        loss = forward_step(model, optimizer, batch, cfg, delay_unscale=True)
                    # else:
                    # loss = forward_step(model, optimizer, batch, cfg, delay_unscale=True)
                else:
                    loss = forward_step(model, optimizer, batch, cfg, delay_unscale=False)
                tr_loss += loss
                if (step + 1) % cfg.gradient_accumulation_steps == 0:
                    # FusedLAMB clips internally (max_grad_norm passed at
                    # construction), so skip explicit clipping for lamb.
                    if cfg.max_grad_norm and not ("optimizer" in cfg and cfg.optimizer == "lamb"):
                        if hasattr(optimizer, "clip_grad_norm"):
                            optimizer.clip_grad_norm(cfg.max_grad_norm)
                        elif hasattr(model, "clip_grad_norm_"):
                            model.clip_grad_norm_(cfg.max_grad_norm)
                        else:
                            if cfg.fp16:
                                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), cfg.max_grad_norm)
                            else:
                                torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.max_grad_norm)
                    optimizer.step()
                    scheduler.step() # Update learning rate schedule
                    model.zero_grad()
                    global_step += 1
                    # Log metrics
                    if cfg.local_rank in [-1, 0] and cfg.logging_steps > 0 and global_step % cfg.logging_steps == 0:
                        tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                        tb_writer.add_scalar('loss', (tr_loss - logging_loss) / cfg.logging_steps, global_step)
                        logging_loss = tr_loss
                    # Save model checkpoint
                    if cfg.save_steps > 0 and global_step % cfg.save_steps == 0:
                        output_dir = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
                        if cfg.local_rank in [-1, 0] and not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        save_model(model, cfg, output_dir, tokenizer)
                    # Evaluation
                    if cfg.evaluate_during_training and cfg.eval_steps > 0 and global_step % cfg.eval_steps == 0:
                        if cfg.local_rank in [-1, 0]:
                            results = evaluate(cfg, model, tokenizer, prefix=str(global_step), _split="dev")
                            for key, value in results.items():
                                tb_writer.add_scalar(f"eval/{key}", value, global_step)
                if 0 < cfg.max_steps < global_step:
                    epoch_iterator.close()
                    break
            # Free the per-file dataset/loader before moving to the next file.
            del _sub_train_dataset
            del _sub_train_sampler
            del train_dataloader
            if 0 < cfg.max_steps < global_step:
                train_iterator.close()
                break
        if 0 < cfg.max_steps < global_step:
            train_iterator.close()
            break
    if cfg.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(cfg, model, tokenizer: PreTrainedTokenizer, prefix="", _split="dev"):
    """Evaluate `model` on `_split` and dump predictions/probabilities under `prefix`.

    Returns the metrics dict produced by the model's `get_eval_log`.
    """
    dataset, _ = load_and_cache_examples(cfg, tokenizer, _split=_split)

    output_dir = os.path.join(cfg.output_dir, prefix)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    cfg.eval_batch_size = cfg.per_gpu_eval_batch_size
    eval_sampler = SequentialSampler(dataset)  # Note that DistributedSampler samples randomly
    eval_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=cfg.eval_batch_size,
                                 collate_fn=eval_collator)

    # Reset any metrics accumulated on the underlying (unwrapped) model.
    single_model_gpu = unwrap_model(model)
    single_model_gpu.get_eval_log(reset=True)

    # Eval!
    logger.info("***** Running evaluation {}.{} *****".format(_split, prefix))
    logger.info(" Num examples = %d", len(dataset))
    logger.info(" Batch size = %d", cfg.eval_batch_size)

    # Seems FSDP does not need to unwrap the model for evaluating.
    model.eval()
    pred_list = []
    prob_list = []
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = batch_to_device(batch, cfg.device)
        with torch.no_grad():
            outputs = model(**batch)
            probs = outputs["logits"].softmax(dim=-1).detach().cpu().float()
            prob, pred = probs.max(dim=-1)
            pred_list.extend(pred.tolist())
            prob_list.extend(prob.tolist())

    metric_log, results = single_model_gpu.get_eval_log(reset=True)
    logger.info("****** Evaluation Results ******")
    logger.info(f"Global Steps: {prefix}")
    logger.info(metric_log)

    np.save(os.path.join(output_dir, "eval_predictions.npy"), pred_list)
    # Previously the probabilities file was opened without being closed
    # (file-handle leak); use a context manager so it is always closed.
    with open(os.path.join(output_dir, "eval_probs.json"), "w") as prob_file:
        json.dump(prob_list, prob_file)

    return results
def load_and_cache_examples(cfg, tokenizer: PreTrainedTokenizer, _split="train", _file=None):
    """Read (and possibly cache) tensors for `_split`, returning ``(dataset, features)``."""
    if cfg.local_rank not in [-1, 0] and _split == "train":
        dist.barrier()  # Non-zero ranks wait so the first process can build the cache.

    # Resolve the input file: an explicit `_file` wins over the split name.
    # The cfg attributes are accessed lazily so only the needed one is read.
    if _file is not None:
        input_file = _file
    elif _split == "train":
        input_file = cfg.train_file
    elif _split == "dev":
        input_file = cfg.dev_file
    elif _split == "test":
        input_file = cfg.test_file
    else:
        raise RuntimeError(_split)

    examples, features, res = hydra.utils.call(cfg.read_tensor, file_path=input_file, tokenizer=tokenizer)

    if cfg.local_rank == 0 and _split == "train":
        dist.barrier()  # Release the waiting ranks once rank 0 has finished.

    # `read_tensor` may return a ready-made Dataset or raw tensors.
    if isinstance(res, Dataset):
        return res, features
    return TensorDataset(*res), features
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig):
    """Entry point: set up devices/distributed backend, build model and tokenizer,
    then run training and/or evaluation as requested by the config.
    """
    # Device / distributed setup.
    if cfg.local_rank == -1 or cfg.no_cuda:
        device = str(torch.device("cuda" if torch.cuda.is_available() and not cfg.no_cuda else "cpu"))
        cfg.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(cfg.local_rank)
        device = str(torch.device("cuda", cfg.local_rank))
        dist.init_process_group(backend='nccl')
        cfg.n_gpu = 1
    cfg.device = device
    # Rebind the module-level logger so helper functions share it.
    global logger
    logger = setting_logger(cfg.output_dir, local_rank=cfg.local_rank)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   cfg.local_rank, device, cfg.n_gpu, bool(cfg.local_rank != -1), cfg.fp16)
    # Set seed
    set_seed(cfg)
    # Load pre-trained model and tokenizer
    if cfg.local_rank not in [-1, 0]:
        dist.barrier() # Make sure only the first process in distributed training will download model & vocab
    # Optional pre-trained state dict loaded on CPU before model construction.
    if cfg.pretrain:
        pretrain_state_dict = torch.load(cfg.pretrain, map_location='cpu')
    else:
        pretrain_state_dict = None
    tokenizer = AutoTokenizer.from_pretrained(cfg.model_name_or_path)
    model = hydra.utils.call(cfg.model, cfg.model_name_or_path, state_dict=pretrain_state_dict)
    if cfg.local_rank == 0:
        dist.barrier() # Make sure only the first process in distributed training will download model & vocab
    # if cfg.local_rank == -1: # For FullyShardedDDP, place the model on cpu first.
    model.to(cfg.device)
    # logger.info("Training/evaluation parameters %s", OmegaConf.to_yaml(cfg))
    if cfg.local_rank in [-1, 0]:
        if not os.path.exists(cfg.output_dir):
            os.makedirs(cfg.output_dir)
        OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))
    # Training
    if cfg.do_train:
        # TODO: Add option for continuously training from checkpoint.
        # The operation should be introduced in ``train`` method since both the state dict
        # of schedule and optimizer (and scaler, if any) should be loaded.
        # If output files already exists, assume to continue training from latest checkpoint (unless overwrite_output_dir is set)
        continue_from_global_step = 0 # If set to 0, start training from the beginning
        # if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        # checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/*/' + WEIGHTS_NAME, recursive=True)))
        # if len(checkpoints) > 0:
        # checkpoint = checkpoints[-1]
        # logger.info("Resuming training from the latest checkpoint: %s", checkpoint)
        # continue_from_global_step = int(checkpoint.split('-')[-1])
        # model = model_class.from_pretrained(checkpoint)
        # model.to(args.device)
        # train_dataset, features = load_and_cache_examples(cfg, tokenizer, _split="train")
        global_step, tr_loss = train(cfg, model, tokenizer, continue_from_global_step)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Save the trained model and the tokenizer
    if cfg.do_train:
        # Create output directory if needed
        if not os.path.exists(cfg.output_dir) and cfg.local_rank in [-1, 0]:
            os.makedirs(cfg.output_dir)
        logger.info("Saving model checkpoint to %s", cfg.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        # model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
        # model_to_save.save_pretrained(cfg.output_dir)
        save_model(model, cfg, cfg.output_dir)
        if cfg.local_rank == -1 or dist.get_rank() == 0:
            tokenizer.save_pretrained(cfg.output_dir)
        # Good practice: save your training arguments together with the trained model
        # torch.save(cfg, os.path.join(cfg.output_dir, 'training_args.bin'))
        OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_args.yaml"))
    # Test
    results = {}
    if cfg.do_eval and cfg.local_rank in [-1, 0]:
        # Evaluate either the final output dir or every matching sub-checkpoint.
        checkpoints = [cfg.output_dir]
        if cfg.eval_sub_path:
            checkpoints = list(
                os.path.dirname(c) for c in
                sorted(glob.glob(cfg.output_dir + f"/{cfg.eval_sub_path}/" + "pytorch_model.bin", recursive=True))
            )
        logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
        logger.info(" the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            split = "dev"
            model = hydra.utils.call(cfg.model, checkpoint)
            model.to(device)
            # A configured test_file switches evaluation to the test split.
            if cfg.test_file:
                prefix = 'test-' + prefix
                split = "test"
            result = evaluate(cfg, model, tokenizer, prefix=prefix, _split=split)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
if __name__ == "__main__":
    # torch.distributed.launch passes options as `--key=value`; Hydra expects
    # bare `key=value` overrides, so strip the leading dashes before launching.
    sys.argv = [
        arg[len("--"):] if arg.startswith("--") else arg
        for arg in sys.argv
    ]
    main()
| 44.603272 | 137 | 0.641236 |
73e8aec8eec39a6fea22d6463ad11d269aa4d05f | 46 | py | Python | link/version.py | uhjish/link | b6a60b0c62547b5b6f3f1d6e89d0f5bfb798fbb9 | [
"Apache-2.0"
] | null | null | null | link/version.py | uhjish/link | b6a60b0c62547b5b6f3f1d6e89d0f5bfb798fbb9 | [
"Apache-2.0"
] | null | null | null | link/version.py | uhjish/link | b6a60b0c62547b5b6f3f1d6e89d0f5bfb798fbb9 | [
"Apache-2.0"
] | null | null | null | version = '0.1.1'
version_details = (0, 1, 1)
| 15.333333 | 27 | 0.608696 |
73e8be9cbee5420e2900177872da1fef4c557a7e | 3,243 | py | Python | tests/test_models/test_sngan_proj.py | liuhd073/mmgeneration | 2e09a6b63c5f0ddee850d429c5b739ae1e0cc76d | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_sngan_proj.py | liuhd073/mmgeneration | 2e09a6b63c5f0ddee850d429c5b739ae1e0cc76d | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_sngan_proj.py | liuhd073/mmgeneration | 2e09a6b63c5f0ddee850d429c5b739ae1e0cc76d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmgen.models.gans import BasicConditionalGAN
class TestSNGAN_PROJ:
@classmethod
def setup_class(cls):
cls.generator_cfg = dict(
type='SNGANGenerator',
output_scale=32,
base_channels=256,
num_classes=10)
cls.discriminator_cfg = dict(
type='ProjDiscriminator',
input_scale=32,
base_channels=128,
num_classes=10)
cls.disc_auxiliary_loss = None
cls.gan_loss = dict(type='GANLoss', gan_type='hinge')
cls.train_cfg = None
def test_sngan_proj_cpu(self):
# test default config
snganproj = BasicConditionalGAN(
self.generator_cfg,
self.discriminator_cfg,
self.gan_loss,
disc_auxiliary_loss=None,
train_cfg=self.train_cfg)
# test sample from noise
outputs = snganproj.sample_from_noise(None, num_batches=2)
assert outputs.shape == (2, 3, 32, 32)
outputs = snganproj.sample_from_noise(
None, num_batches=2, return_noise=True, sample_model='orig')
assert outputs['fake_img'].shape == (2, 3, 32, 32)
# test train step
img = torch.randn((2, 3, 32, 32))
lab = torch.randint(0, 10, (2, ))
data_input = dict(img=img, gt_label=lab)
optimizer_g = torch.optim.SGD(
snganproj.generator.parameters(), lr=0.01)
optimizer_d = torch.optim.SGD(
snganproj.discriminator.parameters(), lr=0.01)
optim_dict = dict(generator=optimizer_g, discriminator=optimizer_d)
model_outputs = snganproj.train_step(data_input, optim_dict)
assert 'results' in model_outputs
assert 'log_vars' in model_outputs
assert model_outputs['num_samples'] == 2
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_sngan_proj_cuda(self):
# test default config
snganproj = BasicConditionalGAN(
self.generator_cfg,
self.discriminator_cfg,
self.gan_loss,
disc_auxiliary_loss=self.disc_auxiliary_loss,
train_cfg=self.train_cfg).cuda()
# test sample from noise
outputs = snganproj.sample_from_noise(None, num_batches=2)
assert outputs.shape == (2, 3, 32, 32)
outputs = snganproj.sample_from_noise(
None, num_batches=2, return_noise=True, sample_model='orig')
assert outputs['fake_img'].shape == (2, 3, 32, 32)
# test train step
img = torch.randn((2, 3, 32, 32)).cuda()
lab = torch.randint(0, 10, (2, )).cuda()
data_input = dict(img=img, gt_label=lab)
optimizer_g = torch.optim.SGD(
snganproj.generator.parameters(), lr=0.01)
optimizer_d = torch.optim.SGD(
snganproj.discriminator.parameters(), lr=0.01)
optim_dict = dict(generator=optimizer_g, discriminator=optimizer_d)
model_outputs = snganproj.train_step(data_input, optim_dict)
assert 'results' in model_outputs
assert 'log_vars' in model_outputs
assert model_outputs['num_samples'] == 2
| 35.25 | 78 | 0.625039 |
73e8d06cf883e1469a91da290401a15b0ac22074 | 17,480 | py | Python | appengine/gce-backend/proto/config_pb2.py | stefb965/luci-py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | [
"Apache-2.0"
] | null | null | null | appengine/gce-backend/proto/config_pb2.py | stefb965/luci-py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | [
"Apache-2.0"
] | null | null | null | appengine/gce-backend/proto/config_pb2.py | stefb965/luci-py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | [
"Apache-2.0"
] | 1 | 2020-07-05T19:54:40.000Z | 2020-07-05T19:54:40.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='config.proto',
package='gce_backend',
syntax='proto2',
serialized_pb=_b('\n\x0c\x63onfig.proto\x12\x0bgce_backend\"\xb3\x04\n\x16InstanceTemplateConfig\x12G\n\ttemplates\x18\x01 \x03(\x0b\x32\x34.gce_backend.InstanceTemplateConfig.InstanceTemplate\x1a\xcf\x03\n\x10InstanceTemplate\x12\x11\n\tbase_name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x12\n\ndimensions\x18\x03 \x03(\t\x12\x12\n\nimage_name\x18\x04 \x01(\t\x12\x15\n\rimage_project\x18\n \x01(\t\x12\x11\n\tdisk_type\x18\x0f \x01(\t\x12\x14\n\x0c\x64isk_size_gb\x18\x05 \x01(\x05\x12]\n\x10service_accounts\x18\x06 \x03(\x0b\x32\x43.gce_backend.InstanceTemplateConfig.InstanceTemplate.ServiceAccount\x12\x0c\n\x04tags\x18\x07 \x03(\t\x12\x10\n\x08metadata\x18\x08 \x03(\t\x12\x1a\n\x12metadata_from_file\x18\x0e \x03(\t\x12\x14\n\x0cmachine_type\x18\t \x01(\t\x12\x13\n\x0bnetwork_url\x18\x0b \x01(\t\x12\x1f\n\x17\x61uto_assign_external_ip\x18\x0c \x01(\x08\x12\x18\n\x10min_cpu_platform\x18\r \x01(\t\x1a.\n\x0eServiceAccount\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06scopes\x18\x02 \x03(\t\"\xda\x01\n\x1aInstanceGroupManagerConfig\x12N\n\x08managers\x18\x01 \x03(\x0b\x32<.gce_backend.InstanceGroupManagerConfig.InstanceGroupManager\x1al\n\x14InstanceGroupManager\x12\x1a\n\x12template_base_name\x18\x01 \x01(\t\x12\x14\n\x0cminimum_size\x18\x02 \x01(\x05\x12\x14\n\x0cmaximum_size\x18\x03 \x01(\x05\x12\x0c\n\x04zone\x18\x04 \x01(\t\">\n\x0bSettingsCfg\x12\x1c\n\x14\x65nable_ts_monitoring\x18\x01 \x01(\x08\x12\x11\n\tmp_server\x18\x02 \x01(\t')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_INSTANCETEMPLATECONFIG_INSTANCETEMPLATE_SERVICEACCOUNT = _descriptor.Descriptor(
name='ServiceAccount',
full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.ServiceAccount',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.ServiceAccount.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scopes', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.ServiceAccount.scopes', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=593,
)
_INSTANCETEMPLATECONFIG_INSTANCETEMPLATE = _descriptor.Descriptor(
name='InstanceTemplate',
full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base_name', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.base_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='project', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.project', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dimensions', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.dimensions', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_name', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.image_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_project', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.image_project', index=4,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='disk_type', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.disk_type', index=5,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='disk_size_gb', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.disk_size_gb', index=6,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='service_accounts', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.service_accounts', index=7,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tags', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.tags', index=8,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metadata', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.metadata', index=9,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metadata_from_file', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.metadata_from_file', index=10,
number=14, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='machine_type', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.machine_type', index=11,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='network_url', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.network_url', index=12,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='auto_assign_external_ip', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.auto_assign_external_ip', index=13,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_cpu_platform', full_name='gce_backend.InstanceTemplateConfig.InstanceTemplate.min_cpu_platform', index=14,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_INSTANCETEMPLATECONFIG_INSTANCETEMPLATE_SERVICEACCOUNT, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=130,
serialized_end=593,
)
_INSTANCETEMPLATECONFIG = _descriptor.Descriptor(
name='InstanceTemplateConfig',
full_name='gce_backend.InstanceTemplateConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='templates', full_name='gce_backend.InstanceTemplateConfig.templates', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_INSTANCETEMPLATECONFIG_INSTANCETEMPLATE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=30,
serialized_end=593,
)
_INSTANCEGROUPMANAGERCONFIG_INSTANCEGROUPMANAGER = _descriptor.Descriptor(
name='InstanceGroupManager',
full_name='gce_backend.InstanceGroupManagerConfig.InstanceGroupManager',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='template_base_name', full_name='gce_backend.InstanceGroupManagerConfig.InstanceGroupManager.template_base_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='minimum_size', full_name='gce_backend.InstanceGroupManagerConfig.InstanceGroupManager.minimum_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maximum_size', full_name='gce_backend.InstanceGroupManagerConfig.InstanceGroupManager.maximum_size', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='zone', full_name='gce_backend.InstanceGroupManagerConfig.InstanceGroupManager.zone', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=706,
serialized_end=814,
)
_INSTANCEGROUPMANAGERCONFIG = _descriptor.Descriptor(
name='InstanceGroupManagerConfig',
full_name='gce_backend.InstanceGroupManagerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='managers', full_name='gce_backend.InstanceGroupManagerConfig.managers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_INSTANCEGROUPMANAGERCONFIG_INSTANCEGROUPMANAGER, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=596,
serialized_end=814,
)
_SETTINGSCFG = _descriptor.Descriptor(
name='SettingsCfg',
full_name='gce_backend.SettingsCfg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enable_ts_monitoring', full_name='gce_backend.SettingsCfg.enable_ts_monitoring', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mp_server', full_name='gce_backend.SettingsCfg.mp_server', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=816,
serialized_end=878,
)
_INSTANCETEMPLATECONFIG_INSTANCETEMPLATE_SERVICEACCOUNT.containing_type = _INSTANCETEMPLATECONFIG_INSTANCETEMPLATE
_INSTANCETEMPLATECONFIG_INSTANCETEMPLATE.fields_by_name['service_accounts'].message_type = _INSTANCETEMPLATECONFIG_INSTANCETEMPLATE_SERVICEACCOUNT
_INSTANCETEMPLATECONFIG_INSTANCETEMPLATE.containing_type = _INSTANCETEMPLATECONFIG
_INSTANCETEMPLATECONFIG.fields_by_name['templates'].message_type = _INSTANCETEMPLATECONFIG_INSTANCETEMPLATE
_INSTANCEGROUPMANAGERCONFIG_INSTANCEGROUPMANAGER.containing_type = _INSTANCEGROUPMANAGERCONFIG
_INSTANCEGROUPMANAGERCONFIG.fields_by_name['managers'].message_type = _INSTANCEGROUPMANAGERCONFIG_INSTANCEGROUPMANAGER
DESCRIPTOR.message_types_by_name['InstanceTemplateConfig'] = _INSTANCETEMPLATECONFIG
DESCRIPTOR.message_types_by_name['InstanceGroupManagerConfig'] = _INSTANCEGROUPMANAGERCONFIG
DESCRIPTOR.message_types_by_name['SettingsCfg'] = _SETTINGSCFG
InstanceTemplateConfig = _reflection.GeneratedProtocolMessageType('InstanceTemplateConfig', (_message.Message,), dict(
InstanceTemplate = _reflection.GeneratedProtocolMessageType('InstanceTemplate', (_message.Message,), dict(
ServiceAccount = _reflection.GeneratedProtocolMessageType('ServiceAccount', (_message.Message,), dict(
DESCRIPTOR = _INSTANCETEMPLATECONFIG_INSTANCETEMPLATE_SERVICEACCOUNT,
__module__ = 'config_pb2'
# @@protoc_insertion_point(class_scope:gce_backend.InstanceTemplateConfig.InstanceTemplate.ServiceAccount)
))
,
DESCRIPTOR = _INSTANCETEMPLATECONFIG_INSTANCETEMPLATE,
__module__ = 'config_pb2'
# @@protoc_insertion_point(class_scope:gce_backend.InstanceTemplateConfig.InstanceTemplate)
))
,
DESCRIPTOR = _INSTANCETEMPLATECONFIG,
__module__ = 'config_pb2'
# @@protoc_insertion_point(class_scope:gce_backend.InstanceTemplateConfig)
))
_sym_db.RegisterMessage(InstanceTemplateConfig)
_sym_db.RegisterMessage(InstanceTemplateConfig.InstanceTemplate)
_sym_db.RegisterMessage(InstanceTemplateConfig.InstanceTemplate.ServiceAccount)
InstanceGroupManagerConfig = _reflection.GeneratedProtocolMessageType('InstanceGroupManagerConfig', (_message.Message,), dict(
InstanceGroupManager = _reflection.GeneratedProtocolMessageType('InstanceGroupManager', (_message.Message,), dict(
DESCRIPTOR = _INSTANCEGROUPMANAGERCONFIG_INSTANCEGROUPMANAGER,
__module__ = 'config_pb2'
# @@protoc_insertion_point(class_scope:gce_backend.InstanceGroupManagerConfig.InstanceGroupManager)
))
,
DESCRIPTOR = _INSTANCEGROUPMANAGERCONFIG,
__module__ = 'config_pb2'
# @@protoc_insertion_point(class_scope:gce_backend.InstanceGroupManagerConfig)
))
_sym_db.RegisterMessage(InstanceGroupManagerConfig)
_sym_db.RegisterMessage(InstanceGroupManagerConfig.InstanceGroupManager)
SettingsCfg = _reflection.GeneratedProtocolMessageType('SettingsCfg', (_message.Message,), dict(
DESCRIPTOR = _SETTINGSCFG,
__module__ = 'config_pb2'
# @@protoc_insertion_point(class_scope:gce_backend.SettingsCfg)
))
_sym_db.RegisterMessage(SettingsCfg)
# @@protoc_insertion_point(module_scope)
| 43.591022 | 1,478 | 0.76127 |
73e8ecfc1a391b62d1032891ac8e8a249635cf2f | 951 | py | Python | buildscripts/gdb_test_ci.py | calcmogul/Robot-2020 | b416c202794fb7deea0081beff2f986de7001ed9 | [
"BSD-3-Clause"
] | 10 | 2020-02-07T04:13:15.000Z | 2022-02-26T00:13:39.000Z | buildscripts/gdb_test_ci.py | calcmogul/Robot-2020 | b416c202794fb7deea0081beff2f986de7001ed9 | [
"BSD-3-Clause"
] | 82 | 2020-02-12T03:05:15.000Z | 2022-02-18T02:14:38.000Z | buildscripts/gdb_test_ci.py | calcmogul/Robot-2020 | b416c202794fb7deea0081beff2f986de7001ed9 | [
"BSD-3-Clause"
] | 5 | 2020-02-14T16:24:01.000Z | 2022-03-31T09:10:01.000Z | #!/usr/bin/env python3
import os
import platform
import subprocess
if platform.system() == "Linux":
task_os = "linux"
elif platform.system() == "Darwin":
task_os = "osx"
elif platform.system() == "Windows":
task_os = "windows"
# Build tests
subprocess.run([
"./gradlew installFrcUserProgramTest" + task_os.capitalize() +
"x86-64DebugGoogleTestExe"
],
shell=True,
check=True)
# Go to directory for tests debug build
os.chdir(f"build/install/frcUserProgramTest/{task_os}x86-64/debug")
# Write non-interactive gdb commands to a file
with open("gdb-cmds.txt", "w") as output:
output.write("run\nbt\nquit\n")
# Make wrapper script run gdb
with open("frcUserProgramTest") as input:
content = input.read()
with open("frcUserProgramTest", "w") as output:
output.write(content.replace("exec ", "gdb -batch -x gdb-cmds.txt "))
subprocess.run(["./frcUserProgramTest"], shell=True, check=True)
| 26.416667 | 73 | 0.683491 |
73e9280839fc70cd960bf152f528ce7e653b2ed9 | 2,118 | py | Python | src/dataverk/connectors/jsonstat.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 3 | 2019-09-29T20:48:46.000Z | 2021-03-31T10:16:07.000Z | src/dataverk/connectors/jsonstat.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 148 | 2019-02-08T12:30:58.000Z | 2021-03-11T15:31:55.000Z | src/dataverk/connectors/jsonstat.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 1 | 2020-11-18T14:10:05.000Z | 2020-11-18T14:10:05.000Z | import requests
import json
from pyjstat import pyjstat
from collections import OrderedDict
import pandas as pd
import ast
from dataverk.abc.base import DataverkBase
class JSONStatConnector(DataverkBase):
"""JSONStat based connections
"""
def __init__(self):
super().__init__()
def get_pandas_df(self, url, params=None, table_format='json'):
"""
Get Pandas dataframe
"""
self.log(str(url))
if params == None:
params = json.dumps(self._full_json(url))
response=requests.post(url,params).content
response=response.decode('utf-8')
df = pyjstat.from_json_stat(json.loads(response))[0]
return df
def _get_table(self, url, table_format = 'json'):
if table_format == 'json':
response = requests.get(url)
df = pyjstat.from_json_stat(response.json(object_pairs_hook=OrderedDict))[0]
elif table_format == 'csv':
df = pd.read_csv(url)
else:
print("""table_format param must be either 'json' or 'csv'""")
df = None
return df
def _full_json(self,url):
variables = self._get_variables(url)
nvars = len(variables)
var_list = list(range(nvars))
query_element = {}
for x in var_list:
query_element[x] ='{{"code": "{code}", "selection": {{"filter": "item", "values": {values} }}}}'.format(
code = variables[x]['code'],
values = variables[x]['values'])
query_element[x] = query_element[x].replace("\'", '"')
all_elements = str(list(query_element.values()))
all_elements = all_elements.replace("\'", "")
query = '{{"query": {all_elements} , "response": {{"format": "json-stat" }}}}'.format(all_elements = all_elements)
query = ast.literal_eval(query)
return query
def _get_variables(self,url):
df = pd.read_json(url)
variables = [values for values in df.iloc[:,1]]
return variables
| 29.830986 | 122 | 0.573654 |
73e95f711f5ba25afd68c795f8b60e9ded907844 | 37 | py | Python | tests/assets/dependenciespackage/dependenciespackage/two.py | SimonBiggs/layer_linter | 9eb518b74118e4a2d8079e2f32ecc12612ca9e86 | [
"BSD-3-Clause"
] | 63 | 2018-06-21T10:39:54.000Z | 2021-06-04T14:28:44.000Z | tests/assets/dependenciespackage/dependenciespackage/two.py | SimonBiggs/layer_linter | 9eb518b74118e4a2d8079e2f32ecc12612ca9e86 | [
"BSD-3-Clause"
] | 86 | 2018-06-20T13:30:30.000Z | 2019-06-04T12:47:28.000Z | tests/assets/dependenciespackage/dependenciespackage/two.py | SimonBiggs/layer_linter | 9eb518b74118e4a2d8079e2f32ecc12612ca9e86 | [
"BSD-3-Clause"
] | 4 | 2018-08-14T08:49:55.000Z | 2019-02-16T09:24:47.000Z | from . import one # Relative import
| 18.5 | 36 | 0.72973 |
73e97fb50ba18a0b480bcaaabf2ca5cc867742c1 | 352 | py | Python | python/django/basic/pages/tests.py | y2ghost/work | b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3 | [
"MIT"
] | null | null | null | python/django/basic/pages/tests.py | y2ghost/work | b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3 | [
"MIT"
] | null | null | null | python/django/basic/pages/tests.py | y2ghost/work | b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3 | [
"MIT"
] | null | null | null | from django.test import SimpleTestCase
class SimpleTests(SimpleTestCase):
def test_home_page_status_code(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_about_page_status_code(self):
response = self.client.get('/about/')
self.assertEqual(response.status_code, 200)
| 29.333333 | 51 | 0.715909 |
73e98d2d17d64ab0a875d084db5be4081efffcc7 | 2,247 | py | Python | machinelearning/test/iteratortest.py | hayj/MachineLearning | 66a34b6776450f7d597acca05525120fb28c8deb | [
"MIT"
] | null | null | null | machinelearning/test/iteratortest.py | hayj/MachineLearning | 66a34b6776450f7d597acca05525120fb28c8deb | [
"MIT"
] | null | null | null | machinelearning/test/iteratortest.py | hayj/MachineLearning | 66a34b6776450f7d597acca05525120fb28c8deb | [
"MIT"
] | null | null | null | from systemtools.basics import *
from systemtools.logger import *
from systemtools.location import *
from datastructuretools.processing import *
from datatools.jsonutils import *
import random
from multiprocessing import cpu_count, Process, Pipe, Queue, JoinableQueue
import queue
from machinelearning.iterator import *
def useIt(seed, containers):
random.seed(seed)
def itemGenerator(container, **kwargs):
for a in NDJson(container):
yield a
def subProcessParseFunct(item, key=None, **kwargs):
return str(item)[:40] + " " + key
def mainProcessParseFunct(item, key=None, **kwargs):
return item + " " + key
cg = ConsistentIterator(containers, itemGenerator, subProcessParseFunct=subProcessParseFunct, mainProcessParseFunct=mainProcessParseFunct, subProcessParseFunctKwargs={"key": "aaa"}, mainProcessParseFunctKwargs={"key": "bbb"})
allElements = []
for a in cg:
allElements.append(a)
print(len(allElements))
print(allElements[0])
print(allElements[1000])
if len(allElements) >= 59000:
print(allElements[60000])
print(allElements[-2])
print(allElements[-1])
return allElements
def test1():
tt = TicToc()
tt.tic(display=False)
containers = sortedGlob("/home/hayj/tmp/Asa/asaminbis/asamin-train-2019.05.22-19.46/*.bz2")
# containers = sortedGlob("/home/hayj/tmp/Asa/asaminbis/aaa/*.bz2")
printLTS(containers)
tt.tic(display=False)
allElements1 = useIt(0, containers)
tt.tic()
allElements2 = useIt(1, containers)
tt.tic()
assert len(allElements1) == len(allElements2)
print("ok1")
for i in range(len(allElements1)):
assert allElements1[i] == allElements2[i]
print("ok2")
tt.tic(display=False)
count = 0
for file in containers:
for row in NDJson(file):
count += 1
tt.tic()
print(count)
assert count == len(allElements1)
print("ok3")
def test2():
containers = sortedGlob("/home/hayj/tmp/Asa/asaminbis/asamin-train-2019.05.22-19.46/*.bz2")
def itemGenerator(container, *args, **kwargs):
for a in NDJson(container):
yield str(a)[:30]
# gen = ConsistentIterator(containers, itemGenerator)
gen = AgainAndAgain(ConsistentIterator, containers, itemGenerator)
for i in range(3):
count = 0
for current in gen:
count += 1
print(count)
if __name__ == '__main__':
test1()
# test2()
| 29.181818 | 226 | 0.733867 |
73e99b5ecb770674fdee260defa32af93e9be770 | 571 | py | Python | Chapter28/15_1.py | PacktPublishing/GettingStartedwithPythonfortheInternetofThings- | a5a86ae38b3a4c625dfc1213d32a3f49e1e298c6 | [
"MIT"
] | 19 | 2018-06-28T15:48:47.000Z | 2022-01-08T12:40:52.000Z | Chapter28/15_1.py | PacktPublishing/GettingStartedwithPythonfortheInternetofThings- | a5a86ae38b3a4c625dfc1213d32a3f49e1e298c6 | [
"MIT"
] | null | null | null | Chapter28/15_1.py | PacktPublishing/GettingStartedwithPythonfortheInternetofThings- | a5a86ae38b3a4c625dfc1213d32a3f49e1e298c6 | [
"MIT"
] | 13 | 2018-06-30T10:33:52.000Z | 2021-12-29T11:31:31.000Z | import signal
import flicklib
import time
def message(value):
print value
@flicklib.move()
def move(x, y, z):
global xyztxt
xyztxt = '{:5.3f} {:5.3f} {:5.3f}'.format(x,y,z)
@flicklib.flick()
def flick(start,finish):
global flicktxt
flicktxt = 'FLICK-' + start[0].upper() + finish[0].upper()
message(flicktxt)
def main():
global xyztxt
global flicktxt
xyztxt = ''
flicktxt = ''
flickcount = 0
while True:
xyztxt = ''
if len(flicktxt) > 0 and flickcount < 5:
flickcount += 1
else:
flicktxt = ''
flickcount = 0
main() | 19.689655 | 61 | 0.621716 |
73e9ad342226b1f1d009c5030e3af16d31cabe63 | 48,915 | py | Python | rigify/legacy/rigs/biped/limb_common.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
] | 1 | 2018-06-18T09:46:10.000Z | 2018-06-18T09:46:10.000Z | rigify/legacy/rigs/biped/limb_common.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
] | null | null | null | rigify/legacy/rigs/biped/limb_common.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
] | null | null | null | #====================== BEGIN GPL LICENSE BLOCK ======================
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#======================= END GPL LICENSE BLOCK ========================
from math import pi
import bpy
from rna_prop_ui import rna_idprop_ui_prop_get
from mathutils import Vector
from ...utils import angle_on_plane, align_bone_roll, align_bone_z_axis
from ...utils import new_bone, copy_bone, put_bone, make_nonscaling_child
from ...utils import strip_org, make_mechanism_name, make_deformer_name, insert_before_lr
from ...utils import create_widget, create_limb_widget, create_line_widget, create_sphere_widget
class FKLimb:
    """FK control rig for a 3-bone limb chain (e.g. upper arm / forearm / hand).

    Builds FK control bones with a driver-based stretch mechanism,
    anti-stretch compensation bones, an optional rotation-isolation hinge
    (only when the chain root has a parent), and control widgets, then
    constrains the original (ORG) bones to follow the controls.
    """
    def __init__(self, obj, bone1, bone2, bone3, primary_rotation_axis, layers):
        # obj: the armature object being generated.
        # bone1/bone2/bone3: names of the ORG bones of the limb chain,
        #   root to tip.
        # primary_rotation_axis: axis string such as 'X' or '-X'; the axis
        #   the middle joint is allowed to bend on.
        # layers: bone-layer mask for the control bones, or a falsy value
        #   to leave layers unchanged.
        self.obj = obj
        self.org_bones = [bone1, bone2, bone3]
        # Get (optional) parent
        if self.obj.data.bones[bone1].parent is None:
            self.org_parent = None
        else:
            self.org_parent = self.obj.data.bones[bone1].parent.name
        # Get the rig parameters
        self.layers = layers
        self.primary_rotation_axis = primary_rotation_axis
    def generate(self):
        """Create the FK rig bones, drivers, constraints and widgets.

        Switches the armature into EDIT mode to create/position bones, then
        into OBJECT mode to configure pose bones, drivers and constraints.
        Returns the list [ulimb, flimb, elimb, elimb_mch] of created
        control/mechanism bone names.
        """
        bpy.ops.object.mode_set(mode='EDIT')
        # Create non-scaling parent bone
        if self.org_parent is not None:
            loc = Vector(self.obj.data.edit_bones[self.org_bones[0]].head)
            parent = make_nonscaling_child(self.obj, self.org_parent, loc, "_fk")
        else:
            parent = None
        # Create the control bones
        ulimb = copy_bone(self.obj, self.org_bones[0], strip_org(insert_before_lr(self.org_bones[0], ".fk")))
        flimb = copy_bone(self.obj, self.org_bones[1], strip_org(insert_before_lr(self.org_bones[1], ".fk")))
        elimb = copy_bone(self.obj, self.org_bones[2], strip_org(insert_before_lr(self.org_bones[2], ".fk")))
        # Create the end-limb mechanism bone
        elimb_mch = copy_bone(self.obj, self.org_bones[2], make_mechanism_name(strip_org(self.org_bones[2])))
        # Create the anti-stretch bones
        # These sit between a parent and its child, and counteract the
        # stretching of the parent so that the child is unaffected
        fantistr = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[0], "_antistr.fk"))))
        eantistr = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[1], "_antistr.fk"))))
        # Create the hinge bones
        if parent is not None:
            socket1 = copy_bone(self.obj, ulimb, make_mechanism_name(ulimb + ".socket1"))
            socket2 = copy_bone(self.obj, ulimb, make_mechanism_name(ulimb + ".socket2"))
        # Get edit bones
        eb = self.obj.data.edit_bones
        ulimb_e = eb[ulimb]
        flimb_e = eb[flimb]
        elimb_e = eb[elimb]
        fantistr_e = eb[fantistr]
        eantistr_e = eb[eantistr]
        elimb_mch_e = eb[elimb_mch]
        if parent is not None:
            socket1_e = eb[socket1]
            socket2_e = eb[socket2]
        # Parenting: chain is ulimb -> fantistr -> flimb -> eantistr -> elimb
        # -> elimb_mch, so each antistretch bone sits between a control and
        # its child.
        elimb_mch_e.use_connect = False
        elimb_mch_e.parent = elimb_e
        elimb_e.use_connect = False
        elimb_e.parent = eantistr_e
        eantistr_e.use_connect = False
        eantistr_e.parent = flimb_e
        flimb_e.use_connect = False
        flimb_e.parent = fantistr_e
        fantistr_e.use_connect = False
        fantistr_e.parent = ulimb_e
        if parent is not None:
            socket1_e.use_connect = False
            socket1_e.parent = eb[parent]
            socket2_e.use_connect = False
            socket2_e.parent = None
            ulimb_e.use_connect = False
            ulimb_e.parent = socket2_e
        # Positioning: shrink the antistretch bones and place them at the
        # joints of the chain.
        fantistr_e.length /= 8
        put_bone(self.obj, fantistr, Vector(ulimb_e.tail))
        eantistr_e.length /= 8
        put_bone(self.obj, eantistr, Vector(flimb_e.tail))
        if parent is not None:
            socket1_e.length /= 4
            socket2_e.length /= 3
        # Object mode, get pose bones
        bpy.ops.object.mode_set(mode='OBJECT')
        pb = self.obj.pose.bones
        ulimb_p = pb[ulimb]
        flimb_p = pb[flimb]
        elimb_p = pb[elimb]
        fantistr_p = pb[fantistr]
        eantistr_p = pb[eantistr]
        if parent is not None:
            socket2_p = pb[socket2]
        # Lock axes
        ulimb_p.lock_location = (True, True, True)
        flimb_p.lock_location = (True, True, True)
        elimb_p.lock_location = (True, True, True)
        # Set the elbow/knee to only bend on the primary rotation axis.
        flimb_p.rotation_mode = 'XYZ'
        if 'X' in self.primary_rotation_axis:
            flimb_p.lock_rotation = (False, True, True)
        elif 'Y' in self.primary_rotation_axis:
            flimb_p.lock_rotation = (True, False, True)
        else:
            flimb_p.lock_rotation = (True, True, False)
        # Set up custom properties
        if parent is not None:
            # "isolate": 0..1 blend between following the parent's rotation
            # and staying in place (driven below on the hinge constraint).
            prop = rna_idprop_ui_prop_get(ulimb_p, "isolate", create=True)
            ulimb_p["isolate"] = 0.0
            prop["soft_min"] = prop["min"] = 0.0
            prop["soft_max"] = prop["max"] = 1.0
        # "stretch_length": user-adjustable uniform stretch factor of the
        # whole limb, applied through the scale drivers below.
        prop = rna_idprop_ui_prop_get(ulimb_p, "stretch_length", create=True)
        ulimb_p["stretch_length"] = 1.0
        prop["min"] = 0.05
        prop["max"] = 20.0
        prop["soft_min"] = 0.25
        prop["soft_max"] = 4.0
        # Stretch drivers: scale Y by stretch_length, and X/Z by
        # 1/sqrt(stretch_length) so volume appears preserved.
        def add_stretch_drivers(pose_bone):
            driver = pose_bone.driver_add("scale", 1).driver
            var = driver.variables.new()
            var.name = "stretch_length"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = ulimb_p.path_from_id() + '["stretch_length"]'
            driver.type = 'SCRIPTED'
            driver.expression = "stretch_length"
            driver = pose_bone.driver_add("scale", 0).driver
            var = driver.variables.new()
            var.name = "stretch_length"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = ulimb_p.path_from_id() + '["stretch_length"]'
            driver.type = 'SCRIPTED'
            driver.expression = "1/sqrt(stretch_length)"
            driver = pose_bone.driver_add("scale", 2).driver
            var = driver.variables.new()
            var.name = "stretch_length"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = ulimb_p.path_from_id() + '["stretch_length"]'
            driver.type = 'SCRIPTED'
            driver.expression = "1/sqrt(stretch_length)"
        # Anti-stretch drivers: the exact inverse of the stretch drivers, so
        # a child parented to an antistretch bone is unaffected by stretch.
        def add_antistretch_drivers(pose_bone):
            driver = pose_bone.driver_add("scale", 1).driver
            var = driver.variables.new()
            var.name = "stretch_length"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = ulimb_p.path_from_id() + '["stretch_length"]'
            driver.type = 'SCRIPTED'
            driver.expression = "1/stretch_length"
            driver = pose_bone.driver_add("scale", 0).driver
            var = driver.variables.new()
            var.name = "stretch_length"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = ulimb_p.path_from_id() + '["stretch_length"]'
            driver.type = 'SCRIPTED'
            driver.expression = "sqrt(stretch_length)"
            driver = pose_bone.driver_add("scale", 2).driver
            var = driver.variables.new()
            var.name = "stretch_length"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = ulimb_p.path_from_id() + '["stretch_length"]'
            driver.type = 'SCRIPTED'
            driver.expression = "sqrt(stretch_length)"
        add_stretch_drivers(ulimb_p)
        add_stretch_drivers(flimb_p)
        add_antistretch_drivers(fantistr_p)
        add_antistretch_drivers(eantistr_p)
        # Hinge constraints / drivers
        if parent is not None:
            con = socket2_p.constraints.new('COPY_LOCATION')
            con.name = "copy_location"
            con.target = self.obj
            con.subtarget = socket1
            con = socket2_p.constraints.new('COPY_TRANSFORMS')
            con.name = "isolate_off"
            con.target = self.obj
            con.subtarget = socket1
            # Driver: influence = 1 - isolate, via a polynomial F-modifier
            # on an otherwise pass-through AVERAGE driver.
            fcurve = con.driver_add("influence")
            driver = fcurve.driver
            var = driver.variables.new()
            driver.type = 'AVERAGE'
            var.name = "var"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = ulimb_p.path_from_id() + '["isolate"]'
            mod = fcurve.modifiers[0]
            mod.poly_order = 1
            mod.coefficients[0] = 1.0
            mod.coefficients[1] = -1.0
        # Constrain org bones to controls
        con = pb[self.org_bones[0]].constraints.new('COPY_TRANSFORMS')
        con.name = "fk"
        con.target = self.obj
        con.subtarget = ulimb
        con = pb[self.org_bones[1]].constraints.new('COPY_TRANSFORMS')
        con.name = "fk"
        con.target = self.obj
        con.subtarget = flimb
        con = pb[self.org_bones[2]].constraints.new('COPY_TRANSFORMS')
        con.name = "fk"
        con.target = self.obj
        con.subtarget = elimb_mch
        # Set layers if specified
        if self.layers:
            ulimb_p.bone.layers = self.layers
            flimb_p.bone.layers = self.layers
            elimb_p.bone.layers = self.layers
        # Create control widgets
        create_limb_widget(self.obj, ulimb)
        create_limb_widget(self.obj, flimb)
        ob = create_widget(self.obj, elimb)
        if ob is not None:
            # Hand/foot-shaped wire widget, subdivided for smoothness.
            verts = [(0.7, 1.5, 0.0), (0.7, -0.25, 0.0), (-0.7, -0.25, 0.0), (-0.7, 1.5, 0.0), (0.7, 0.723, 0.0), (-0.7, 0.723, 0.0), (0.7, 0.0, 0.0), (-0.7, 0.0, 0.0)]
            edges = [(1, 2), (0, 3), (0, 4), (3, 5), (4, 6), (1, 6), (5, 7), (2, 7)]
            mesh = ob.data
            mesh.from_pydata(verts, edges, [])
            mesh.update()
            mod = ob.modifiers.new("subsurf", 'SUBSURF')
            mod.levels = 2
        return [ulimb, flimb, elimb, elimb_mch]
class IKLimb:
    """ An IK limb rig, with an optional ik/fk switch.

    Builds two parallel 2-bone IK chains (one stretchy, one non-stretch),
    blends them via an "auto_stretch" driver, creates a pole target with an
    optional "follow" parent, visual helper (VIS) bones, and constrains the
    ORG bones to the result — optionally blended against FK via an
    "ikfk_switch" custom property on the limb-end control.
    """
    def __init__(self, obj, bone1, bone2, bone3, pole_parent, pole_target_base_name, primary_rotation_axis, bend_hint, layers, ikfk_switch=False):
        # obj: armature object; bone1..bone3: ORG bone names, root to tip.
        # pole_parent: bone the pole target may follow (None = auto).
        # pole_target_base_name: base for the generated pole bone's name.
        # bend_hint: if truthy, add a tiny rotation bias so the IK solver
        #   bends the joint the intended way near the straight pose.
        # ikfk_switch: if True, drive the ORG constraints with an
        #   "ikfk_switch" custom property for IK/FK blending.
        self.obj = obj
        self.switch = ikfk_switch
        # Get the chain of 3 connected bones
        self.org_bones = [bone1, bone2, bone3]
        # Get (optional) parent
        if self.obj.data.bones[bone1].parent is None:
            self.org_parent = None
        else:
            self.org_parent = self.obj.data.bones[bone1].parent.name
        self.pole_parent = pole_parent
        # Get the rig parameters
        self.pole_target_base_name = pole_target_base_name
        self.layers = layers
        self.bend_hint = bend_hint
        self.primary_rotation_axis = primary_rotation_axis
    def generate(self):
        """Create the IK rig bones, constraints, drivers and widgets.

        Works in EDIT mode first (bone creation/placement, pole position
        math), then OBJECT mode (pose-bone settings, constraints, drivers).
        Returns [ulimb, flimb, elimb, elimb_mch, pole, vispole, viselimb].
        """
        bpy.ops.object.mode_set(mode='EDIT')
        # Create non-scaling parent bone
        if self.org_parent is not None:
            loc = Vector(self.obj.data.edit_bones[self.org_bones[0]].head)
            parent = make_nonscaling_child(self.obj, self.org_parent, loc, "_ik")
            if self.pole_parent is None:
                self.pole_parent = parent
        else:
            parent = None
        # Create the bones
        ulimb = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[0], ".ik"))))
        flimb = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[1], ".ik"))))
        elimb = copy_bone(self.obj, self.org_bones[2], strip_org(insert_before_lr(self.org_bones[2], ".ik")))
        elimb_mch = copy_bone(self.obj, self.org_bones[2], make_mechanism_name(strip_org(self.org_bones[2])))
        # Parallel chain without IK stretch, used as the rest pose of the
        # stretchy chain.
        ulimb_nostr = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[0], ".nostr.ik"))))
        flimb_nostr = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[1], ".nostr.ik"))))
        # Bones the ORG chain actually follows; they copy the IK chain and
        # re-apply volume preservation.
        ulimb_str = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[0], ".stretch.ik"))))
        flimb_str = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[1], ".stretch.ik"))))
        # Pole target name keeps the ".ik[.L/.R]" suffix of the chain root.
        pole_target_name = self.pole_target_base_name + "." + insert_before_lr(self.org_bones[0], ".ik").split(".", 1)[1]
        pole = copy_bone(self.obj, self.org_bones[0], pole_target_name)
        if self.pole_parent == self.org_bones[2]:
            self.pole_parent = elimb_mch
        if self.pole_parent is not None:
            pole_par = copy_bone(self.obj, self.pole_parent, make_mechanism_name(insert_before_lr(pole_target_name, "_parent")))
        # Non-selectable visual helper bones that draw lines to the targets.
        viselimb = copy_bone(self.obj, self.org_bones[2], "VIS-" + strip_org(insert_before_lr(self.org_bones[2], ".ik")))
        vispole = copy_bone(self.obj, self.org_bones[1], "VIS-" + strip_org(insert_before_lr(self.org_bones[0], "_pole.ik")))
        # Get edit bones
        eb = self.obj.data.edit_bones
        if parent is not None:
            parent_e = eb[parent]
        ulimb_e = eb[ulimb]
        flimb_e = eb[flimb]
        elimb_e = eb[elimb]
        elimb_mch_e = eb[elimb_mch]
        ulimb_nostr_e = eb[ulimb_nostr]
        flimb_nostr_e = eb[flimb_nostr]
        ulimb_str_e = eb[ulimb_str]
        flimb_str_e = eb[flimb_str]
        pole_e = eb[pole]
        if self.pole_parent is not None:
            pole_par_e = eb[pole_par]
        viselimb_e = eb[viselimb]
        vispole_e = eb[vispole]
        # Parenting
        ulimb_e.use_connect = False
        ulimb_nostr_e.use_connect = False
        if parent is not None:
            ulimb_e.parent = parent_e
            ulimb_nostr_e.parent = parent_e
        flimb_e.parent = ulimb_e
        flimb_nostr_e.parent = ulimb_nostr_e
        # The limb-end control floats free so it can be placed anywhere.
        elimb_e.use_connect = False
        elimb_e.parent = None
        elimb_mch_e.use_connect = False
        elimb_mch_e.parent = elimb_e
        ulimb_str_e.use_connect = False
        ulimb_str_e.parent = ulimb_e.parent
        flimb_str_e.use_connect = False
        flimb_str_e.parent = ulimb_e.parent
        pole_e.use_connect = False
        if self.pole_parent is not None:
            pole_par_e.parent = None
            pole_e.parent = pole_par_e
        viselimb_e.use_connect = False
        viselimb_e.parent = None
        vispole_e.use_connect = False
        vispole_e.parent = None
        # Misc
        elimb_e.use_local_location = False
        viselimb_e.hide_select = True
        vispole_e.hide_select = True
        # Positioning: place the pole target in the bend plane, offset from
        # the joint perpendicular to the chain, on the bend side given by
        # the primary rotation axis.
        v1 = flimb_e.tail - ulimb_e.head
        if 'X' in self.primary_rotation_axis or 'Y' in self.primary_rotation_axis:
            v2 = v1.cross(flimb_e.x_axis)
            if (v2 * flimb_e.z_axis) > 0.0:
                v2 *= -1.0
        else:
            v2 = v1.cross(flimb_e.z_axis)
            if (v2 * flimb_e.x_axis) < 0.0:
                v2 *= -1.0
        v2.normalize()
        v2 *= v1.length
        if '-' in self.primary_rotation_axis:
            v2 *= -1
        pole_e.head = flimb_e.head + v2
        pole_e.tail = pole_e.head + (Vector((0, 1, 0)) * (v1.length / 8))
        pole_e.roll = 0.0
        if parent is not None:
            pole_par_e.length *= 0.75
        viselimb_e.tail = viselimb_e.head + Vector((0, 0, v1.length / 32))
        vispole_e.tail = vispole_e.head + Vector((0, 0, v1.length / 32))
        # Determine the pole offset value
        plane = (flimb_e.tail - ulimb_e.head).normalized()
        vec1 = ulimb_e.x_axis.normalized()
        vec2 = (pole_e.head - ulimb_e.head).normalized()
        pole_offset = angle_on_plane(plane, vec1, vec2)
        # Object mode, get pose bones
        bpy.ops.object.mode_set(mode='OBJECT')
        pb = self.obj.pose.bones
        ulimb_p = pb[ulimb]
        flimb_p = pb[flimb]
        elimb_p = pb[elimb]
        ulimb_nostr_p = pb[ulimb_nostr]
        flimb_nostr_p = pb[flimb_nostr]
        ulimb_str_p = pb[ulimb_str]
        flimb_str_p = pb[flimb_str]
        pole_p = pb[pole]
        if self.pole_parent is not None:
            pole_par_p = pb[pole_par]
        viselimb_p = pb[viselimb]
        vispole_p = pb[vispole]
        # Set the elbow to only bend on the primary axis
        if 'X' in self.primary_rotation_axis:
            flimb_p.lock_ik_y = True
            flimb_p.lock_ik_z = True
            flimb_nostr_p.lock_ik_y = True
            flimb_nostr_p.lock_ik_z = True
        elif 'Y' in self.primary_rotation_axis:
            flimb_p.lock_ik_x = True
            flimb_p.lock_ik_z = True
            flimb_nostr_p.lock_ik_x = True
            flimb_nostr_p.lock_ik_z = True
        else:
            flimb_p.lock_ik_x = True
            flimb_p.lock_ik_y = True
            flimb_nostr_p.lock_ik_x = True
            flimb_nostr_p.lock_ik_y = True
        # Limb stretches
        ulimb_nostr_p.ik_stretch = 0.0
        flimb_nostr_p.ik_stretch = 0.0
        # This next bit is weird. The values calculated cause
        # ulimb and flimb to preserve their relative lengths
        # while stretching.
        l1 = ulimb_p.length
        l2 = flimb_p.length
        if l1 < l2:
            ulimb_p.ik_stretch = (l1 ** (1 / 3)) / (l2 ** (1 / 3))
            flimb_p.ik_stretch = 1.0
        else:
            ulimb_p.ik_stretch = 1.0
            flimb_p.ik_stretch = (l2 ** (1 / 3)) / (l1 ** (1 / 3))
        # Pole target only translates
        pole_p.lock_location = False, False, False
        pole_p.lock_rotation = True, True, True
        pole_p.lock_rotation_w = True
        pole_p.lock_scale = True, True, True
        # Set up custom properties
        if self.switch is True:
            # "ikfk_switch": 0 = FK, 1 = IK, drives the ORG constraints.
            prop = rna_idprop_ui_prop_get(elimb_p, "ikfk_switch", create=True)
            elimb_p["ikfk_switch"] = 0.0
            prop["soft_min"] = prop["min"] = 0.0
            prop["soft_max"] = prop["max"] = 1.0
        if self.pole_parent is not None:
            # "follow": how much the pole target follows its parent bone.
            prop = rna_idprop_ui_prop_get(pole_p, "follow", create=True)
            pole_p["follow"] = 1.0
            prop["soft_min"] = prop["min"] = 0.0
            prop["soft_max"] = prop["max"] = 1.0
        # "stretch_length": manual uniform stretch of the limb.
        prop = rna_idprop_ui_prop_get(elimb_p, "stretch_length", create=True)
        elimb_p["stretch_length"] = 1.0
        prop["min"] = 0.05
        prop["max"] = 20.0
        prop["soft_min"] = 0.25
        prop["soft_max"] = 4.0
        # "auto_stretch": blend toward the auto-stretching IK chain.
        prop = rna_idprop_ui_prop_get(elimb_p, "auto_stretch", create=True)
        elimb_p["auto_stretch"] = 1.0
        prop["soft_min"] = prop["min"] = 0.0
        prop["soft_max"] = prop["max"] = 1.0
        # Stretch parameter drivers: uniform scale on all three axes from
        # the "stretch_length" property.
        def add_stretch_drivers(pose_bone):
            driver = pose_bone.driver_add("scale", 1).driver
            var = driver.variables.new()
            var.name = "stretch_length"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = elimb_p.path_from_id() + '["stretch_length"]'
            driver.type = 'SCRIPTED'
            driver.expression = "stretch_length"
            driver = pose_bone.driver_add("scale", 0).driver
            var = driver.variables.new()
            var.name = "stretch_length"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = elimb_p.path_from_id() + '["stretch_length"]'
            driver.type = 'SCRIPTED'
            driver.expression = "stretch_length"
            driver = pose_bone.driver_add("scale", 2).driver
            var = driver.variables.new()
            var.name = "stretch_length"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = elimb_p.path_from_id() + '["stretch_length"]'
            driver.type = 'SCRIPTED'
            driver.expression = "stretch_length"
        add_stretch_drivers(ulimb_nostr_p)
        # Bend direction hint
        def add_bend_hint(pose_bone, axis):
            # Biases the joint by pi/10 on the given local axis so the IK
            # solver picks the intended bend direction when near-straight.
            con = pose_bone.constraints.new('LIMIT_ROTATION')
            con.name = "bend_hint"
            con.owner_space = 'LOCAL'
            if axis == 'X':
                con.use_limit_x = True
                con.min_x = pi / 10
                con.max_x = pi / 10
            elif axis == '-X':
                con.use_limit_x = True
                con.min_x = -pi / 10
                con.max_x = -pi / 10
            elif axis == 'Y':
                con.use_limit_y = True
                con.min_y = pi / 10
                con.max_y = pi / 10
            elif axis == '-Y':
                con.use_limit_y = True
                con.min_y = -pi / 10
                con.max_y = -pi / 10
            elif axis == 'Z':
                con.use_limit_z = True
                con.min_z = pi / 10
                con.max_z = pi / 10
            elif axis == '-Z':
                con.use_limit_z = True
                con.min_z = -pi / 10
                con.max_z = -pi / 10
        if self.bend_hint:
            add_bend_hint(flimb_p, self.primary_rotation_axis)
            add_bend_hint(flimb_nostr_p, self.primary_rotation_axis)
        # Constrain normal IK chain to no-stretch IK chain
        con = ulimb_p.constraints.new('COPY_TRANSFORMS')
        con.name = "pre_stretch"
        con.target = self.obj
        con.subtarget = ulimb_nostr
        con = flimb_p.constraints.new('COPY_TRANSFORMS')
        con.name = "pre_stretch"
        con.target = self.obj
        con.subtarget = flimb_nostr
        # IK Constraints
        con = flimb_nostr_p.constraints.new('IK')
        con.name = "ik"
        con.target = self.obj
        con.subtarget = elimb_mch
        con.pole_target = self.obj
        con.pole_subtarget = pole
        con.pole_angle = pole_offset
        con.chain_count = 2
        # Second IK (no pole) on the stretchy chain; its influence is
        # driven by "auto_stretch" below.
        con = flimb_p.constraints.new('IK')
        con.name = "ik"
        con.target = self.obj
        con.subtarget = elimb_mch
        con.chain_count = 2
        # Driver to enable/disable auto stretching IK chain
        fcurve = con.driver_add("influence")
        driver = fcurve.driver
        var = driver.variables.new()
        driver.type = 'AVERAGE'
        var.name = "var"
        var.targets[0].id_type = 'OBJECT'
        var.targets[0].id = self.obj
        var.targets[0].data_path = elimb_p.path_from_id() + '["auto_stretch"]'
        # Stretch bone constraints
        con = ulimb_str_p.constraints.new('COPY_TRANSFORMS')
        con.name = "anchor"
        con.target = self.obj
        con.subtarget = ulimb
        con = ulimb_str_p.constraints.new('MAINTAIN_VOLUME')
        con.name = "stretch"
        con.owner_space = 'LOCAL'
        con = flimb_str_p.constraints.new('COPY_TRANSFORMS')
        con.name = "anchor"
        con.target = self.obj
        con.subtarget = flimb
        con = flimb_str_p.constraints.new('MAINTAIN_VOLUME')
        con.name = "stretch"
        con.owner_space = 'LOCAL'
        # Pole target parent
        if self.pole_parent is not None:
            con = pole_par_p.constraints.new('COPY_TRANSFORMS')
            con.name = "parent"
            con.target = self.obj
            con.subtarget = self.pole_parent
            driver = con.driver_add("influence").driver
            var = driver.variables.new()
            var.name = "follow"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = pole_p.path_from_id() + '["follow"]'
            driver.type = 'SUM'
        # Constrain org bones
        con = pb[self.org_bones[0]].constraints.new('COPY_TRANSFORMS')
        con.name = "ik"
        con.target = self.obj
        con.subtarget = ulimb_str
        if self.switch is True:
            # IK/FK switch driver
            fcurve = con.driver_add("influence")
            driver = fcurve.driver
            var = driver.variables.new()
            driver.type = 'AVERAGE'
            var.name = "var"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = elimb_p.path_from_id() + '["ikfk_switch"]'
        con = pb[self.org_bones[1]].constraints.new('COPY_TRANSFORMS')
        con.name = "ik"
        con.target = self.obj
        con.subtarget = flimb_str
        if self.switch is True:
            # IK/FK switch driver
            fcurve = con.driver_add("influence")
            driver = fcurve.driver
            var = driver.variables.new()
            driver.type = 'AVERAGE'
            var.name = "var"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = elimb_p.path_from_id() + '["ikfk_switch"]'
        con = pb[self.org_bones[2]].constraints.new('COPY_TRANSFORMS')
        con.name = "ik"
        con.target = self.obj
        con.subtarget = elimb_mch
        if self.switch is True:
            # IK/FK switch driver
            fcurve = con.driver_add("influence")
            driver = fcurve.driver
            var = driver.variables.new()
            driver.type = 'AVERAGE'
            var.name = "var"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = elimb_p.path_from_id() + '["ikfk_switch"]'
        # VIS limb-end constraints
        con = viselimb_p.constraints.new('COPY_LOCATION')
        con.name = "copy_loc"
        con.target = self.obj
        con.subtarget = self.org_bones[2]
        con = viselimb_p.constraints.new('STRETCH_TO')
        con.name = "stretch_to"
        con.target = self.obj
        con.subtarget = elimb
        con.volume = 'NO_VOLUME'
        con.rest_length = viselimb_p.length
        # VIS pole constraints
        con = vispole_p.constraints.new('COPY_LOCATION')
        con.name = "copy_loc"
        con.target = self.obj
        con.subtarget = self.org_bones[1]
        con = vispole_p.constraints.new('STRETCH_TO')
        con.name = "stretch_to"
        con.target = self.obj
        con.subtarget = pole
        con.volume = 'NO_VOLUME'
        con.rest_length = vispole_p.length
        # Set layers if specified
        if self.layers:
            elimb_p.bone.layers = self.layers
            pole_p.bone.layers = self.layers
            viselimb_p.bone.layers = self.layers
            vispole_p.bone.layers = self.layers
        # Create widgets
        create_line_widget(self.obj, vispole)
        create_line_widget(self.obj, viselimb)
        create_sphere_widget(self.obj, pole)
        ob = create_widget(self.obj, elimb)
        if ob is not None:
            # Hand/foot-shaped wire widget, subdivided for smoothness.
            verts = [(0.7, 1.5, 0.0), (0.7, -0.25, 0.0), (-0.7, -0.25, 0.0), (-0.7, 1.5, 0.0), (0.7, 0.723, 0.0), (-0.7, 0.723, 0.0), (0.7, 0.0, 0.0), (-0.7, 0.0, 0.0)]
            edges = [(1, 2), (0, 3), (0, 4), (3, 5), (4, 6), (1, 6), (5, 7), (2, 7)]
            mesh = ob.data
            mesh.from_pydata(verts, edges, [])
            mesh.update()
            mod = ob.modifiers.new("subsurf", 'SUBSURF')
            mod.levels = 2
        return [ulimb, flimb, elimb, elimb_mch, pole, vispole, viselimb]
class RubberHoseLimb:
def __init__(self, obj, bone1, bone2, bone3, use_complex_limb, junc_base_name, primary_rotation_axis, layers):
self.obj = obj
# Get the chain of 3 connected bones
self.org_bones = [bone1, bone2, bone3]
# Get (optional) parent
if self.obj.data.bones[bone1].parent is None:
self.org_parent = None
else:
self.org_parent = self.obj.data.bones[bone1].parent.name
# Get rig parameters
self.layers = layers
self.primary_rotation_axis = primary_rotation_axis
self.use_complex_limb = use_complex_limb
self.junc_base_name = junc_base_name
def generate(self):
bpy.ops.object.mode_set(mode='EDIT')
# Create non-scaling parent bone
if self.org_parent is not None:
loc = Vector(self.obj.data.edit_bones[self.org_bones[0]].head)
parent = make_nonscaling_child(self.obj, self.org_parent, loc, "_rh")
else:
parent = None
if not self.use_complex_limb:
# Simple rig
# Create bones
ulimb = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(self.org_bones[0])))
flimb = copy_bone(self.obj, self.org_bones[1], make_deformer_name(strip_org(self.org_bones[1])))
elimb = copy_bone(self.obj, self.org_bones[2], make_deformer_name(strip_org(self.org_bones[2])))
# Get edit bones
eb = self.obj.data.edit_bones
ulimb_e = eb[ulimb]
flimb_e = eb[flimb]
elimb_e = eb[elimb]
# Parenting
elimb_e.parent = flimb_e
elimb_e.use_connect = True
flimb_e.parent = ulimb_e
flimb_e.use_connect = True
if parent is not None:
elimb_e.use_connect = False
ulimb_e.parent = eb[parent]
# Object mode, get pose bones
bpy.ops.object.mode_set(mode='OBJECT')
pb = self.obj.pose.bones
ulimb_p = pb[ulimb]
flimb_p = pb[flimb]
elimb_p = pb[elimb]
# Constrain def bones to org bones
con = ulimb_p.constraints.new('COPY_TRANSFORMS')
con.name = "def"
con.target = self.obj
con.subtarget = self.org_bones[0]
con = flimb_p.constraints.new('COPY_TRANSFORMS')
con.name = "def"
con.target = self.obj
con.subtarget = self.org_bones[1]
con = elimb_p.constraints.new('COPY_TRANSFORMS')
con.name = "def"
con.target = self.obj
con.subtarget = self.org_bones[2]
return []
else:
# Complex rig
# Get the .R or .L off the end of the upper limb name if it exists
lr = self.org_bones[0].split(".", 1)
if len(lr) == 1:
lr = ""
else:
lr = lr[1]
# Create bones
# Deformation bones
ulimb1 = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(insert_before_lr(self.org_bones[0], ".01"))))
ulimb2 = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(insert_before_lr(self.org_bones[0], ".02"))))
flimb1 = copy_bone(self.obj, self.org_bones[1], make_deformer_name(strip_org(insert_before_lr(self.org_bones[1], ".01"))))
flimb2 = copy_bone(self.obj, self.org_bones[1], make_deformer_name(strip_org(insert_before_lr(self.org_bones[1], ".02"))))
elimb = copy_bone(self.obj, self.org_bones[2], make_deformer_name(strip_org(self.org_bones[2])))
# Bones for switchable smooth bbone transition at elbow/knee
ulimb2_smoother = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[0], "_smth.02"))))
flimb1_smoother = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[1], "_smth.01"))))
flimb1_pos = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[1], ".01"))))
# Elbow/knee junction bone
junc = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[1], ".junc"))))
# Hose controls
uhoseend = new_bone(self.obj, strip_org(insert_before_lr(self.org_bones[0], "_hose_end")))
uhose = new_bone(self.obj, strip_org(insert_before_lr(self.org_bones[0], "_hose")))
jhose = new_bone(self.obj, self.junc_base_name + "_hose." + lr)
fhose = new_bone(self.obj, strip_org(insert_before_lr(self.org_bones[1], "_hose")))
fhoseend = new_bone(self.obj, strip_org(insert_before_lr(self.org_bones[1], "_hose_end")))
# Hose control parents
uhoseend_par = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(insert_before_lr(uhoseend, "_p"))))
uhose_par = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(insert_before_lr(uhose, "_p"))))
jhose_par = copy_bone(self.obj, junc, make_mechanism_name(strip_org(insert_before_lr(jhose, "_p"))))
fhose_par = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(fhose, "_p"))))
fhoseend_par = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(fhoseend, "_p"))))
# Get edit bones
eb = self.obj.data.edit_bones
if parent is not None:
parent_e = eb[parent]
else:
parent_e = None
ulimb1_e = eb[ulimb1]
ulimb2_e = eb[ulimb2]
flimb1_e = eb[flimb1]
flimb2_e = eb[flimb2]
elimb_e = eb[elimb]
ulimb2_smoother_e = eb[ulimb2_smoother]
flimb1_smoother_e = eb[flimb1_smoother]
flimb1_pos_e = eb[flimb1_pos]
junc_e = eb[junc]
uhoseend_e = eb[uhoseend]
uhose_e = eb[uhose]
jhose_e = eb[jhose]
fhose_e = eb[fhose]
fhoseend_e = eb[fhoseend]
uhoseend_par_e = eb[uhoseend_par]
uhose_par_e = eb[uhose_par]
jhose_par_e = eb[jhose_par]
fhose_par_e = eb[fhose_par]
fhoseend_par_e = eb[fhoseend_par]
# Parenting
if parent is not None:
ulimb1_e.use_connect = False
ulimb1_e.parent = parent_e
ulimb2_e.use_connect = False
ulimb2_e.parent = eb[self.org_bones[0]]
ulimb2_smoother_e.use_connect = True
ulimb2_smoother_e.parent = ulimb2_e
flimb1_e.use_connect = True
flimb1_e.parent = flimb1_smoother_e
flimb1_smoother_e.use_connect = False
flimb1_smoother_e.parent = flimb1_pos_e
flimb1_pos_e.use_connect = False
flimb1_pos_e.parent = eb[self.org_bones[1]]
flimb2_e.use_connect = False
flimb2_e.parent = eb[self.org_bones[1]]
elimb_e.use_connect = False
elimb_e.parent = eb[self.org_bones[2]]
junc_e.use_connect = False
junc_e.parent = eb[self.org_bones[0]]
uhoseend_e.use_connect = False
uhoseend_e.parent = uhoseend_par_e
uhose_e.use_connect = False
uhose_e.parent = uhose_par_e
jhose_e.use_connect = False
jhose_e.parent = jhose_par_e
fhose_e.use_connect = False
fhose_e.parent = fhose_par_e
fhoseend_e.use_connect = False
fhoseend_e.parent = fhoseend_par_e
uhoseend_par_e.use_connect = False
uhoseend_par_e.parent = parent_e
uhose_par_e.use_connect = False
uhose_par_e.parent = parent_e
jhose_par_e.use_connect = False
jhose_par_e.parent = parent_e
fhose_par_e.use_connect = False
fhose_par_e.parent = parent_e
fhoseend_par_e.use_connect = False
fhoseend_par_e.parent = parent_e
# Positioning
ulimb1_e.length *= 0.5
ulimb2_e.head = Vector(ulimb1_e.tail)
flimb1_e.length *= 0.5
flimb2_e.head = Vector(flimb1_e.tail)
align_bone_roll(self.obj, flimb2, elimb)
ulimb2_smoother_e.tail = Vector(flimb1_e.tail)
ulimb2_smoother_e.roll = flimb1_e.roll
flimb1_smoother_e.head = Vector(ulimb1_e.tail)
flimb1_pos_e.length *= 0.5
junc_e.length *= 0.2
uhoseend_par_e.length *= 0.25
uhose_par_e.length *= 0.25
jhose_par_e.length *= 0.15
fhose_par_e.length *= 0.25
fhoseend_par_e.length *= 0.25
put_bone(self.obj, uhoseend_par, Vector(ulimb1_e.head))
put_bone(self.obj, uhose_par, Vector(ulimb1_e.tail))
put_bone(self.obj, jhose_par, Vector(ulimb2_e.tail))
put_bone(self.obj, fhose_par, Vector(flimb1_e.tail))
put_bone(self.obj, fhoseend_par, Vector(flimb2_e.tail))
put_bone(self.obj, uhoseend, Vector(ulimb1_e.head))
put_bone(self.obj, uhose, Vector(ulimb1_e.tail))
put_bone(self.obj, jhose, Vector(ulimb2_e.tail))
put_bone(self.obj, fhose, Vector(flimb1_e.tail))
put_bone(self.obj, fhoseend, Vector(flimb2_e.tail))
if 'X' in self.primary_rotation_axis:
upoint = Vector(ulimb1_e.z_axis)
fpoint = Vector(flimb1_e.z_axis)
elif 'Z' in self.primary_rotation_axis:
upoint = Vector(ulimb1_e.x_axis)
fpoint = Vector(flimb1_e.x_axis)
else: # Y
upoint = Vector(ulimb1_e.z_axis)
fpoint = Vector(flimb1_e.z_axis)
if '-' not in self.primary_rotation_axis:
upoint *= -1
fpoint *= -1
if 'Y' in self.primary_rotation_axis:
uside = Vector(ulimb1_e.x_axis)
fside = Vector(flimb1_e.x_axis)
else:
uside = Vector(ulimb1_e.y_axis) * -1
fside = Vector(flimb1_e.y_axis) * -1
uhoseend_e.tail = uhoseend_e.head + upoint
uhose_e.tail = uhose_e.head + upoint
jhose_e.tail = fhose_e.head + upoint + fpoint
fhose_e.tail = fhose_e.head + fpoint
fhoseend_e.tail = fhoseend_e.head + fpoint
align_bone_z_axis(self.obj, uhoseend, uside)
align_bone_z_axis(self.obj, uhose, uside)
align_bone_z_axis(self.obj, jhose, uside + fside)
align_bone_z_axis(self.obj, fhose, fside)
align_bone_z_axis(self.obj, fhoseend, fside)
l = 0.125 * (ulimb1_e.length + ulimb2_e.length + flimb1_e.length + flimb2_e.length)
uhoseend_e.length = l
uhose_e.length = l
jhose_e.length = l
fhose_e.length = l
fhoseend_e.length = l
# Object mode, get pose bones
bpy.ops.object.mode_set(mode='OBJECT')
pb = self.obj.pose.bones
ulimb1_p = pb[ulimb1]
ulimb2_p = pb[ulimb2]
flimb1_p = pb[flimb1]
flimb2_p = pb[flimb2]
elimb_p = pb[elimb]
ulimb2_smoother_p = pb[ulimb2_smoother]
flimb1_smoother_p = pb[flimb1_smoother]
flimb1_pos_p = pb[flimb1_pos]
junc_p = pb[junc]
uhoseend_p = pb[uhoseend]
uhose_p = pb[uhose]
jhose_p = pb[jhose]
fhose_p = pb[fhose]
fhoseend_p = pb[fhoseend]
#uhoseend_par_p = pb[uhoseend_par]
uhose_par_p = pb[uhose_par]
jhose_par_p = pb[jhose_par]
fhose_par_p = pb[fhose_par]
fhoseend_par_p = pb[fhoseend_par]
# Lock axes
uhose_p.lock_rotation = (True, True, True)
uhose_p.lock_rotation_w = True
uhose_p.lock_scale = (True, True, True)
jhose_p.lock_rotation = (True, True, True)
jhose_p.lock_rotation_w = True
jhose_p.lock_scale = (True, True, True)
fhose_p.lock_rotation = (True, True, True)
fhose_p.lock_rotation_w = True
fhose_p.lock_scale = (True, True, True)
# B-bone settings
ulimb2_p.bone.bbone_segments = 16
ulimb2_p.bone.bbone_easein = 0.0
ulimb2_p.bone.bbone_easeout = 1.0
ulimb2_smoother_p.bone.bbone_segments = 16
ulimb2_smoother_p.bone.bbone_easein = 1.0
ulimb2_smoother_p.bone.bbone_easeout = 0.0
flimb1_p.bone.bbone_segments = 16
flimb1_p.bone.bbone_easein = 1.0
flimb1_p.bone.bbone_easeout = 0.0
flimb1_smoother_p.bone.bbone_segments = 16
flimb1_smoother_p.bone.bbone_easein = 0.0
flimb1_smoother_p.bone.bbone_easeout = 1.0
# Custom properties
prop = rna_idprop_ui_prop_get(jhose_p, "smooth_bend", create=True)
jhose_p["smooth_bend"] = 0.0
prop["soft_min"] = prop["min"] = 0.0
prop["soft_max"] = prop["max"] = 1.0
# Constraints
con = ulimb1_p.constraints.new('COPY_LOCATION')
con.name = "anchor"
con.target = self.obj
con.subtarget = uhoseend
con = ulimb1_p.constraints.new('COPY_SCALE')
con.name = "anchor"
con.target = self.obj
con.subtarget = self.org_bones[0]
con = ulimb1_p.constraints.new('DAMPED_TRACK')
con.name = "track"
con.target = self.obj
con.subtarget = uhose
con = ulimb1_p.constraints.new('STRETCH_TO')
con.name = "track"
con.target = self.obj
con.subtarget = uhose
con.volume = 'NO_VOLUME'
con = ulimb2_p.constraints.new('COPY_LOCATION')
con.name = "anchor"
con.target = self.obj
con.subtarget = uhose
con = ulimb2_p.constraints.new('DAMPED_TRACK')
con.name = "track"
con.target = self.obj
con.subtarget = jhose
con = ulimb2_p.constraints.new('STRETCH_TO')
con.name = "track"
con.target = self.obj
con.subtarget = jhose
con.volume = 'NO_VOLUME'
con = ulimb2_smoother_p.constraints.new('COPY_TRANSFORMS')
con.name = "smoother"
con.target = self.obj
con.subtarget = flimb1_pos
fcurve = con.driver_add("influence")
driver = fcurve.driver
var = driver.variables.new()
driver.type = 'SUM'
var.name = "var"
var.targets[0].id_type = 'OBJECT'
var.targets[0].id = self.obj
var.targets[0].data_path = jhose_p.path_from_id() + '["smooth_bend"]'
con = flimb1_pos_p.constraints.new('COPY_LOCATION')
con.name = "anchor"
con.target = self.obj
con.subtarget = jhose
con = flimb1_pos_p.constraints.new('DAMPED_TRACK')
con.name = "track"
con.target = self.obj
con.subtarget = fhose
con = flimb1_pos_p.constraints.new('STRETCH_TO')
con.name = "track"
con.target = self.obj
con.subtarget = fhose
con.volume = 'NO_VOLUME'
con = flimb1_p.constraints.new('COPY_TRANSFORMS')
con.name = "position"
con.target = self.obj
con.subtarget = flimb1_pos
con = flimb1_smoother_p.constraints.new('COPY_TRANSFORMS')
con.name = "smoother"
con.target = self.obj
con.subtarget = ulimb2
fcurve = con.driver_add("influence")
driver = fcurve.driver
var = driver.variables.new()
driver.type = 'SUM'
var.name = "var"
var.targets[0].id_type = 'OBJECT'
var.targets[0].id = self.obj
var.targets[0].data_path = jhose_p.path_from_id() + '["smooth_bend"]'
con = flimb1_smoother_p.constraints.new('STRETCH_TO')
con.name = "track"
con.target = self.obj
con.subtarget = jhose
con.volume = 'NO_VOLUME'
con = flimb2_p.constraints.new('COPY_LOCATION')
con.name = "anchor"
con.target = self.obj
con.subtarget = fhose
con = flimb2_p.constraints.new('COPY_ROTATION')
con.name = "twist"
con.target = self.obj
con.subtarget = elimb
con = flimb2_p.constraints.new('DAMPED_TRACK')
con.name = "track"
con.target = self.obj
con.subtarget = fhoseend
con = flimb2_p.constraints.new('STRETCH_TO')
con.name = "track"
con.target = self.obj
con.subtarget = fhoseend
con.volume = 'NO_VOLUME'
con = junc_p.constraints.new('COPY_TRANSFORMS')
con.name = "bend"
con.target = self.obj
con.subtarget = self.org_bones[1]
con.influence = 0.5
con = uhose_par_p.constraints.new('COPY_ROTATION')
con.name = "follow"
con.target = self.obj
con.subtarget = self.org_bones[0]
con.influence = 1.0
con = uhose_par_p.constraints.new('COPY_LOCATION')
con.name = "anchor"
con.target = self.obj
con.subtarget = self.org_bones[0]
con.influence = 1.0
con = uhose_par_p.constraints.new('COPY_LOCATION')
con.name = "anchor"
con.target = self.obj
con.subtarget = jhose
con.influence = 0.5
con = jhose_par_p.constraints.new('COPY_ROTATION')
con.name = "follow"
con.target = self.obj
con.subtarget = junc
con.influence = 1.0
con = jhose_par_p.constraints.new('COPY_LOCATION')
con.name = "anchor"
con.target = self.obj
con.subtarget = junc
con.influence = 1.0
con = fhose_par_p.constraints.new('COPY_ROTATION')
con.name = "follow"
con.target = self.obj
con.subtarget = self.org_bones[1]
con.influence = 1.0
con = fhose_par_p.constraints.new('COPY_LOCATION')
con.name = "anchor"
con.target = self.obj
con.subtarget = jhose
con.influence = 1.0
con = fhose_par_p.constraints.new('COPY_LOCATION')
con.name = "anchor"
con.target = self.obj
con.subtarget = self.org_bones[2]
con.influence = 0.5
con = fhoseend_par_p.constraints.new('COPY_ROTATION')
con.name = "follow"
con.target = self.obj
con.subtarget = self.org_bones[1]
con.influence = 1.0
con = fhoseend_par_p.constraints.new('COPY_LOCATION')
con.name = "anchor"
con.target = self.obj
con.subtarget = self.org_bones[2]
con.influence = 1.0
# Layers
if self.layers:
uhoseend_p.bone.layers = self.layers
uhose_p.bone.layers = self.layers
jhose_p.bone.layers = self.layers
fhose_p.bone.layers = self.layers
fhoseend_p.bone.layers = self.layers
else:
layers = list(pb[self.org_bones[0]].bone.layers)
uhoseend_p.bone.layers = layers
uhose_p.bone.layers = layers
jhose_p.bone.layers = layers
fhose_p.bone.layers = layers
fhoseend_p.bone.layers = layers
# Create widgets
create_sphere_widget(self.obj, uhoseend)
create_sphere_widget(self.obj, uhose)
create_sphere_widget(self.obj, jhose)
create_sphere_widget(self.obj, fhose)
create_sphere_widget(self.obj, fhoseend)
return [uhoseend, uhose, jhose, fhose, fhoseend]
| 37.977484 | 168 | 0.576858 |
73e9b65a192ba7a3c2a24d8871625e4b243a974a | 4,930 | py | Python | qiskit/optimization/algorithms/multistart_optimizer.py | georgios-ts/qiskit-aqua | 76f6a88bfc5f718e389da9c0e673a86101a8f452 | [
"Apache-2.0"
] | 1 | 2020-08-01T21:07:54.000Z | 2020-08-01T21:07:54.000Z | qiskit/optimization/algorithms/multistart_optimizer.py | georgios-ts/qiskit-aqua | 76f6a88bfc5f718e389da9c0e673a86101a8f452 | [
"Apache-2.0"
] | null | null | null | qiskit/optimization/algorithms/multistart_optimizer.py | georgios-ts/qiskit-aqua | 76f6a88bfc5f718e389da9c0e673a86101a8f452 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Defines an abstract class for multi-start optimizers. A multi-start optimizer is an optimizer
that may run a minimization algorithm several times with different initial guesses to achieve
better results. This implementation is suitable for local optimizers."""
import logging
import time
from abc import ABC
from typing import Optional, Callable
import numpy as np
from scipy.stats import uniform
from qiskit.optimization import QuadraticProgram, INFINITY
from qiskit.optimization.algorithms.optimization_algorithm import (OptimizationAlgorithm,
OptimizationResult)
logger = logging.getLogger(__name__)
# we disable a warning: "Method 'a method' is abstract in class 'OptimizationAlgorithm' but
# is not overridden (abstract-method) since this class is not intended for instantiation
# pylint: disable=W0223
class MultiStartOptimizer(OptimizationAlgorithm, ABC):
    """
    An abstract class that implements multi start optimization and should be sub-classed by
    other optimizers.
    """

    def __init__(self, trials: int = 1, clip: float = 100.) -> None:
        """
        Constructs an instance of this optimizer.

        Args:
            trials: The number of trials for multi-start method. The first trial is solved with
                the initial guess of zero. If more than one trial is specified then
                initial guesses are uniformly drawn from ``[lowerbound, upperbound]``
                with potential clipping.
            clip: Clipping parameter for the initial guesses in the multi-start method.
                If a variable is unbounded then the lower bound and/or upper bound are replaced
                with the ``-clip`` or ``clip`` values correspondingly for the initial guesses.
        """
        super().__init__()
        self._trials = trials
        self._clip = clip

    def multi_start_solve(self, minimize: Callable[[np.ndarray], np.ndarray],
                          problem: QuadraticProgram) -> OptimizationResult:
        """Applies a multi start method given a local optimizer.

        Args:
            minimize: A callable object that minimizes the problem specified
            problem: A problem to solve

        Returns:
            The result of the multi start algorithm applied to the problem.
        """
        # BUG FIX: track the incumbent in *minimization space* so that candidates from
        # different trials are compared consistently. The previous code stored the
        # sense-adjusted objective but compared it against the raw minimization value,
        # which selected the wrong solution for maximization problems.
        fval_min = INFINITY  # best value seen so far, in minimization space
        x_sol = None  # type: Optional[np.ndarray]
        # Implementation of multi-start optimizer
        for trial in range(self._trials):
            x_0 = np.zeros(problem.get_num_vars())
            if trial > 0:
                # Subsequent trials: draw an initial guess uniformly from the
                # (possibly clipped) variable bounds.
                for i, var in enumerate(problem.variables):
                    lowerbound = var.lowerbound if var.lowerbound > -INFINITY else -self._clip
                    upperbound = var.upperbound if var.upperbound < INFINITY else self._clip
                    x_0[i] = uniform.rvs(lowerbound, (upperbound - lowerbound))
            # run optimization
            t_0 = time.time()
            x = minimize(x_0)
            logger.debug("minimize done in: %s seconds", str(time.time() - t_0))
            # we minimize, to get actual objective value we must multiply by the sense value
            fval = problem.objective.evaluate(x) * problem.objective.sense.value
            # keep the best candidate in minimization space
            if fval < fval_min:
                fval_min = fval
                x_sol = x
        # convert back to the original sense of the problem exactly once
        fval_sol = fval_min * problem.objective.sense.value
        return OptimizationResult(x=x_sol, fval=fval_sol, variables=problem.variables,
                                  raw_results=x_sol)

    @property
    def trials(self) -> int:
        """ Returns the number of trials for this optimizer.

        Returns:
            The number of trials.
        """
        return self._trials

    @trials.setter
    def trials(self, trials: int) -> None:
        """Sets the number of trials.

        Args:
            trials: The number of trials to set.
        """
        self._trials = trials

    @property
    def clip(self) -> float:
        """ Returns the clip value for this optimizer.

        Returns:
            The clip value.
        """
        return self._clip

    @clip.setter
    def clip(self, clip: float) -> None:
        """Sets the clip value.

        Args:
            clip: The clip value to set.
        """
        self._clip = clip
| 37.067669 | 98 | 0.631034 |
73e9f3ffb7c1366b16978ba26a76789aa736a7b3 | 47,692 | py | Python | floris/simulation/turbine.py | tangsmall/floris | 51e4e15c6a9b0c8f55bf01772111799803acf224 | [
"Apache-2.0"
] | null | null | null | floris/simulation/turbine.py | tangsmall/floris | 51e4e15c6a9b0c8f55bf01772111799803acf224 | [
"Apache-2.0"
] | null | null | null | floris/simulation/turbine.py | tangsmall/floris | 51e4e15c6a9b0c8f55bf01772111799803acf224 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import math
import numpy as np
from scipy.stats import norm
from scipy.spatial import distance_matrix
from scipy.interpolate import interp1d
from ..utilities import cosd, sind, tand
from ..logging_manager import LoggerBase
from floris.simulation.wake_vortex.VortexCylinder import vc_tang_u, vc_longi_u, vc_root_u, vcs_tang_u, vcs_longi_u, vc_tang_u_doublet
from floris.simulation.wake_vortex.VortexDoublet import doublet_line_u
from floris.simulation.wake_vortex.SelfSimilar import ss_u
from floris.simulation.wake_vortex.VortexCylinderSkewed import svc_tang_u, svc_longi_u, svc_root_u, svcs_tang_u, svcs_longi_u
from floris.simulation.wake_vortex.Solver import Ct_const_cutoff, WakeVorticityFromCt, WakeVorticityFromGamma
class Turbine(LoggerBase):
"""
Turbine is a class containing objects pertaining to the individual
turbines.
Turbine is a model class representing a particular wind turbine. It
is largely a container of data and parameters, but also contains
methods to probe properties for output.
Args:
instance_dictionary: A dictionary that is generated from the
input_reader; it should have the following key-value pairs:
- **description** (*str*): A string containing a description of
the turbine.
- **properties** (*dict*): A dictionary containing the following
key-value pairs:
- **rotor_diameter** (*float*): The rotor diameter (m).
- **hub_height** (*float*): The hub height (m).
- **blade_count** (*int*): The number of blades.
- **pP** (*float*): The cosine exponent relating the yaw
misalignment angle to power.
- **pT** (*float*): The cosine exponent relating the rotor
tilt angle to power.
- **generator_efficiency** (*float*): The generator
efficiency factor used to scale the power production.
- **power_thrust_table** (*dict*): A dictionary containing the
following key-value pairs:
- **power** (*list(float)*): The coefficient of power at
different wind speeds.
- **thrust** (*list(float)*): The coefficient of thrust
at different wind speeds.
- **wind_speed** (*list(float)*): The wind speeds for
which the power and thrust values are provided (m/s).
- **yaw_angle** (*float*): The yaw angle of the turbine
relative to the wind direction (deg). A positive value
represents a counter-clockwise rotation relative to the
wind direction.
- **tilt_angle** (*float*): The tilt angle of the turbine
(deg). Positive values correspond to a downward rotation of
the rotor for an upstream turbine.
- **TSR** (*float*): The tip-speed ratio of the turbine. This
parameter is used in the "curl" wake model.
- **ngrid** (*int*, optional): The square root of the number
of points to use on the turbine grid. This number will be
squared so that the points can be evenly distributed.
Defaults to 5.
- **rloc** (*float, optional): A value, from 0 to 1, that determines
the width/height of the grid of points on the rotor as a ratio of
the rotor radius.
Defaults to 0.5.
Need to Update _________________________________________
- R: rotor radius
- r_hub: position of the turbine hub in global coordinate system
- e_shaft_yaw0: unit vector along the shaft (untitled for now), going downwind, when the turbine has zero yaw
- e_vert: unit vertical vector, about which positive yawing is done
- U0: Free stream velocity in global coordinates (can be changerd with `update_wind`)
- Ct: Thrust coefficient (can be changed with `update_loading`)
- Ground: Include ground effect in calculations
- Model: one of ['VC','VCFF', 'VD', 'SS']
'VCFF': Vortex cylinder with far-field approximation (fastest)
'VC': Vortex cylinder
'SS': Self similar model of Troldborg et al. (not good close to rotor)
'VD': Self similar model of Troldborg et al. (not good close to rotor)
Returns:
Turbine: An instantiated Turbine object.
"""
def __init__(self, instance_dictionary):
    """
    Initialize the turbine from an input dictionary.

    Args:
        instance_dictionary (dict): Turbine definition with keys
            "description" (str) and "properties" (dict); see the class
            docstring for the expected property entries.
    """
    self.description = instance_dictionary["description"]
    properties = instance_dictionary["properties"]
    self.rotor_diameter = properties["rotor_diameter"]
    self.hub_height = properties["hub_height"]
    self.blade_count = properties["blade_count"]
    self.pP = properties["pP"]
    self.pT = properties["pT"]
    self.generator_efficiency = properties["generator_efficiency"]
    self.power_thrust_table = properties["power_thrust_table"]
    self.yaw_angle = properties["yaw_angle"]
    self.tilt_angle = properties["tilt_angle"]
    self.tsr = properties["TSR"]

    # Vortex turbine (for induction computation) parameters
    self.R = self.rotor_diameter / 2          # vortex-model rotor radius
    self.r_hub = [0, 0, self.hub_height]      # hub position, global frame
    self.e_shaft_yaw0 = [1, 0, 0]             # shaft unit vector at zero yaw
    self.e_vert = [0, 0, 1]                   # vertical unit vector (positive-yaw axis)

    # Global coordinate-system unit vectors (3x1 column vectors) used to build
    # the transformation matrices between the vortex-cylinder frame and the
    # global frame.  [TODO ??? need to check]
    self.e_shaft_g0 = np.asarray([1, 0, 0]).reshape(3, 1)
    self.e_vert_g = np.asarray([0, 0, 1]).reshape(3, 1)
    self.e_horz_g = np.asarray([1., 0., 0.]).reshape(3, 1)
    # Transformation matrix from cylindrical to wind turbine coordinate system
    self.T_c2wt = np.asarray([[0, 0, 1, 1, 0, 0, 0, 1, 0]]).reshape(3, 3)
    self.set_yaw_angle(self.yaw_angle)
    self.update_position(self.r_hub)
    self.U0_g = np.asarray([10, 0, 0]).ravel().reshape(3, 1)
    # self.update_wind([0,0,10])
    self.name = ''
    self.r = None
    self.gamma_t = None
    # BUG FIX: the original assigned `gamma_t = None` twice; the second
    # assignment was evidently meant to initialize `gamma_l` (the longitudinal
    # vorticity sheet), which `update_loading` writes and `compute_u` tests
    # against None.
    self.gamma_l = None
    self.Gamma_r = None
    self.Lambda = np.inf
    self.Ground = False  # Ground effect will be included in calculation of induced velocity
    self.chi = None
    self.Model = 'VC'

    # initialize to an invalid value until calculated
    self.air_density = -1
    self.use_turbulence_correction = False

    # For the following parameters, use default values if not user-specified
    self.ngrid = int(properties["ngrid"]) if "ngrid" in properties else 5
    self.rloc = float(properties["rloc"]) if "rloc" in properties else 0.5
    if "use_points_on_perimeter" in properties:
        self.use_points_on_perimeter = bool(properties["use_points_on_perimeter"])
    else:
        self.use_points_on_perimeter = False

    self._initialize_turbine()

    # The indices for this Turbine instance's points from the FlowField
    # are set in `FlowField._discretize_turbine_domain` and stored
    # in this variable.
    self.flow_field_point_indices = None
# Private methods
def _initialize_turbine(self):
    """Rebuild interpolants and derived attributes from the saved parameters."""
    table = self.power_thrust_table
    speeds = table["wind_speed"]

    # Precompute the Cp/Ct interpolants over the tabulated wind speeds.
    self.fCpInterp = interp1d(speeds, table["power"], fill_value="extrapolate")
    self.fCtInterp = interp1d(speeds, table["thrust"], fill_value="extrapolate")

    # Rotor-grid constants.
    self.grid_point_count = self.ngrid * self.ngrid
    if np.sqrt(self.grid_point_count) % 1 != 0.0:
        raise ValueError("Turbine.grid_point_count must be the square of a number")

    self.reset_velocities()
    self.grid = self._create_swept_area_grid()

    # Tabulate the "inner" power (no density/turbulence corrections) and build
    # an interpolant over it for fast power evaluation.
    inner_power = np.array([self._power_inner_function(ws) for ws in speeds])
    self.powInterp = interp1d(speeds, inner_power, fill_value="extrapolate")
def _create_swept_area_grid(self):
    """
    Generate the grid of (y, z) offsets on the rotor plane.

    Builds a square ``ngrid x ngrid`` lattice spanning
    ``+/- rloc * rotor_radius`` in both directions, then keeps only the
    points inside the rotor disk (points exactly on the perimeter are kept
    only when ``use_points_on_perimeter`` is set).

    Returns:
        list(tuple): (y, z) coordinates of the retained grid points,
        relative to the rotor center.
    """
    # TODO: add validity check:
    # rotor points has a minimum in order to always include points inside
    # the disk ... 2?
    num_points = int(np.round(np.sqrt(self.grid_point_count)))
    pt = self.rloc * self.rotor_radius
    horizontal = np.linspace(-pt, pt, num_points)
    vertical = np.linspace(-pt, pt, num_points)

    # Build the full square lattice (row-major over vertical, then horizontal).
    grid = [(h, vertical[i]) for i in range(num_points) for h in horizontal]

    # Keep only the points in the swept area.  The two original comprehensions
    # differed only in `<=` vs `<`, so the boundary test is factored out here.
    if self.use_points_on_perimeter:
        inside = lambda dist: dist <= self.rotor_radius
    else:
        inside = lambda dist: dist < self.rotor_radius
    return [point for point in grid if inside(np.hypot(point[0], point[1]))]
def _power_inner_function(self, yaw_effective_velocity):
    """
    Compute turbine power at a yaw-effective wind speed, without the air
    density and turbulence correction parameters.  Used to seed the power
    interpolation table.
    """
    # Cp is evaluated at the yaw-effective velocity as well.
    cp = self._fCp(yaw_effective_velocity)
    rotor_area = np.pi * self.rotor_radius ** 2
    return 0.5 * rotor_area * cp * self.generator_efficiency * yaw_effective_velocity ** 3
def _fCp(self, at_wind_speed):
    """Interpolated power coefficient; 0.0 below the lowest tabulated speed."""
    if at_wind_speed < min(self.power_thrust_table["wind_speed"]):
        return 0.0
    cp = self.fCpInterp(at_wind_speed)
    if cp.size > 1:
        cp = cp[0]
    return float(cp)
def _fCt(self, at_wind_speed):
    """
    Interpolated thrust coefficient; 0.99 below the lowest tabulated speed,
    clamped to 0.9999 when the interpolant exceeds 1.0.
    """
    if at_wind_speed < min(self.power_thrust_table["wind_speed"]):
        return 0.99
    ct = self.fCtInterp(at_wind_speed)
    if ct.size > 1:
        ct = ct[0]
    if ct > 1.0:
        ct = 0.9999
    return float(ct)
# Public methods
def change_turbine_parameters(self, turbine_change_dict):
    """
    Change turbine parameters and re-initialize the derived attributes.

    Args:
        turbine_change_dict (dict): Parameter-name to new-value pairs.
    """
    for name, value in turbine_change_dict.items():
        self.logger.info("Setting {} to {}".format(name, value))
        setattr(self, name, value)
    self._initialize_turbine()
def calculate_swept_area_velocities(
    self, local_wind_speed, coord, x, y, z, additional_wind_speed=None
):
    """
    This method calculates and returns the wind speeds at each
    rotor swept area grid point for the turbine, sampled from
    the flow field grid by nearest-neighbor lookup.

    Args:
        local_wind_speed (np.array): The wind speed at each grid point in
            the flow field (m/s).
        coord (:py:obj:`~.utilities.Vec3`): The coordinate of the turbine.
        x (np.array): The x-coordinates of the flow field grid.
        y (np.array): The y-coordinates of the flow field grid.
        z (np.array): The z-coordinates of the flow field grid.
        additional_wind_speed (np.array, optional): A second flow-field
            array; if given, it is sampled at the same indices and the
            samples of both fields are returned as a tuple.

    Returns:
        np.array: The wind speed at each rotor grid point for the turbine
        (m/s).  When ``additional_wind_speed`` is provided, a tuple of two
        arrays (primary samples, additional samples) is returned instead.
    """
    u_at_turbine = local_wind_speed

    # TODO:
    # # PREVIOUS METHOD========================
    # # UNCOMMENT IF ANY ISSUE UNCOVERED WITH NEW MOETHOD
    # x_grid = x
    # y_grid = y
    # z_grid = z

    # yPts = np.array([point[0] for point in self.grid])
    # zPts = np.array([point[1] for point in self.grid])

    # # interpolate from the flow field to get the flow field at the grid
    # # points
    # dist = [np.sqrt((coord.x1 - x_grid)**2 \
    #     + (coord.x2 + yPts[i] - y_grid) **2 \
    #     + (self.hub_height + zPts[i] - z_grid)**2) \
    #     for i in range(len(yPts))]
    # idx = [np.where(dist[i] == np.min(dist[i])) for i in range(len(yPts))]
    # data = [np.mean(u_at_turbine[idx[i]]) for i in range(len(yPts))]
    # # PREVIOUS METHOD========================

    # Use this if no saved points (curl)
    if self.flow_field_point_indices is None:
        # # NEW METHOD========================
        # Nearest-neighbor lookup: for each rotor grid point, find the index
        # of the closest flow-field grid point.
        flow_grid_points = np.column_stack([x.flatten(), y.flatten(), z.flatten()])

        # Rotor grid points in absolute coordinates (grid offsets are relative
        # to the rotor center).
        y_array = np.array(self.grid)[:, 0] + coord.x2
        z_array = np.array(self.grid)[:, 1] + self.hub_height
        x_array = np.ones_like(y_array) * coord.x1
        grid_array = np.column_stack([x_array, y_array, z_array])

        ii = np.argmin(distance_matrix(flow_grid_points, grid_array), axis=0)
    else:
        # Indices were precomputed (see `flow_field_point_indices`).
        ii = self.flow_field_point_indices

    # return np.array(data)
    if additional_wind_speed is not None:
        return (
            np.array(u_at_turbine.flatten()[ii]),
            np.array(additional_wind_speed.flatten()[ii]),
        )
    else:
        return np.array(u_at_turbine.flatten()[ii])
def return_grid_points(self, coord):
    """
    Retrieve the x, y, and z coordinates of the rotor grid points in the
    flow-field frame.

    Args:
        coord (:py:obj:`~.utilities.Vec3`): The coordinate of the turbine.

    Returns:
        np.array, np.array, np.array:
            - x grid points on the rotor.
            - y grid points on the rotor.
            - z grid points on the rotor.
    """
    offsets = np.array(self.grid)
    y_array = offsets[:, 0] + coord.x2
    z_array = offsets[:, 1] + self.hub_height
    x_array = np.ones_like(y_array) * coord.x1
    return x_array, y_array, z_array
def update_velocities(
    self, u_wake, coord, flow_field, rotated_x, rotated_y, rotated_z
):
    """
    Refresh the rotor swept-area grid-point velocities from the flow-field
    freestream minus the current wake deficit.

    Args:
        u_wake (np.array): The wake deficit velocities at all grid points
            in the flow field (m/s).
        coord (:py:obj:`~.utilities.Vec3`): The coordinate of the turbine.
        flow_field (:py:class:`~.flow_field.FlowField`): The flow field.
        rotated_x (np.array): The x-coordinates of the flow field grid
            rotated so the new x axis is aligned with the wind direction.
        rotated_y (np.array): The y-coordinates of the flow field grid
            rotated so the new x axis is aligned with the wind direction.
        rotated_z (np.array): The z-coordinates of the flow field grid
            rotated so the new x axis is aligned with the wind direction.
    """
    # Waked local field = freestream minus the accumulated wake deficit.
    waked_field = flow_field.u_initial - u_wake
    self.velocities = self.calculate_swept_area_velocities(
        waked_field, coord, rotated_x, rotated_y, rotated_z
    )
def reset_velocities(self):
    """Zero out the velocities at the turbine's rotor swept-area grid points."""
    self.velocities = np.zeros(self.grid_point_count, dtype=float)
def set_yaw_angle(self, yaw_angle):
    """
    Set the turbine's yaw angle and update the vortex-model shaft vector.

    Args:
        yaw_angle (float): The new yaw angle (deg).

    Examples:
        To set a turbine's yaw angle:

        >>> floris.farm.turbines[0].set_yaw_angle(20.0)
    """
    self._yaw_angle = yaw_angle

    # Vortex wind turbine: keep the yaw in radians for the vortex model.
    self.yaw_pos = yaw_angle * np.pi / 180
    # Rotation matrix about the vertical axis for the new yaw position.
    cos_y = np.cos(self.yaw_pos)
    sin_y = np.sin(self.yaw_pos)
    self.T_wt2g = np.asarray([cos_y, -sin_y, 0, sin_y, cos_y, 0, 0, 0, 1]).reshape(3, 3)
    # Rotate the zero-yaw shaft vector into the new yaw position.
    self.e_shaft_g = np.dot(self.T_wt2g, self.e_shaft_g0)
def update_position(self, r_hub):
    """Store the hub position as a 3x1 column vector (global frame)."""
    self.r_hub = np.reshape(np.asarray(r_hub).ravel(), (3, 1))
def compute_induction(self, Ind_Opts, rotated_x, rotated_y, rotated_z, CT0=None):
    """
    Computes induction from the turbine as a result of the blockage effect.
    Applied to the velocity field to simulate the induction zone of a turbine.

    Args:
        Ind_Opts (dict): Induction-model options; this method reads the keys
            'induction', 'Ct_test', 'Rfact', 'GammaFact', 'Model', 'Ground'
            and 'R_far_field'.
        rotated_x (np.array): The x-coordinates of the flow field grid
            rotated so the new x axis is aligned with the wind direction.
        rotated_y (np.array): The y-coordinates of the flow field grid
            rotated so the new x axis is aligned with the wind direction.
        rotated_z (np.array): The z-coordinates of the flow field grid
            rotated so the new x axis is aligned with the wind direction.
        CT0 (float, optional): Thrust coefficient to use for the vortex
            loading; defaults to the turbine's own Ct when None.

    Returns:
        tuple(np.array, np.array, np.array): Induced velocity components
        (ux, uy, uz) on the rotated grid.

    NOTE(review): when ``Ind_Opts['induction']`` is falsy, ``ux``/``uy``/``uz``
    are never assigned and the final return raises NameError; the inline
    comment below suggests callers only invoke this with induction enabled —
    confirm before relying on that.
    """
    self.Ind_Opts = Ind_Opts
    if Ind_Opts['induction']: # Can remove (won't be called unless induction)
        if Ind_Opts['Ct_test']:
            print('Ct-test')
            # update vortex cylinder velocity and loading
            # Ct-test variant: loading is cut off at both root and tip radii.
            r_bar_cut = 0.11
            r_bar_tip = 0.9
            if CT0 is None:
                CT0 = self.Ct
            print('CT0: ', CT0)
            self.R = self.rotor_diameter/2*Ind_Opts['Rfact']
            nCyl = 1 # For now
            Lambda = np.inf
            vr_bar = np.linspace(0,1.0,100)
            Ct_AD = Ct_const_cutoff(CT0,r_bar_cut,vr_bar,r_bar_tip) # TODO change me to distributed
            gamma_t_Ct = None
            self.update_loading(r=vr_bar*self.R, VC_Ct=Ct_AD, Lambda=Lambda, nCyl=nCyl, gamma_t_Ct=gamma_t_Ct)
            # Scale the tangential vorticity sheet by the user factor.
            self.gamma_t= self.gamma_t*Ind_Opts['GammaFact']
            # Only the tangential vorticity component is evaluated here.
            root = False
            longi = False
            tang = True
            # print('.',end='')
            ux,uy,uz = self.compute_u(rotated_x,rotated_y,rotated_z,root=root,longi=longi,tang=tang, only_ind=True, no_wake=False, Decay=False, Model = Ind_Opts['Model'], ground=Ind_Opts['Ground'],R_far_field=Ind_Opts['R_far_field'])
        else:
            # update vortex cylinder velocity and loading
            # Standard variant: root-only cutoff, finite tip-speed ratio.
            r_bar_cut = 0.01
            # r_bar_cut = 0.11
            # r_bar_tip = 0.9
            # print("------Ct:", self.Ct)
            if CT0 is None:
                CT0 = self.Ct
            # print('CT0: ', CT0)
            self.R = self.rotor_diameter/2*Ind_Opts['Rfact']
            nCyl = 1 # For now
            Lambda = 30 # if >20 then no swirl
            # Lambda = np.inf
            vr_bar = np.linspace(0,1.0,100)
            Ct_AD = Ct_const_cutoff(CT0,r_bar_cut,vr_bar) # TODO change me to distributed
            # Ct_AD = Ct_const_cutoff(CT0,r_bar_cut,vr_bar,r_bar_tip) # TODO change me to distributed
            gamma_t_Ct = None
            self.update_loading(r=vr_bar*self.R, VC_Ct=Ct_AD, Lambda=Lambda, nCyl=nCyl, gamma_t_Ct=gamma_t_Ct)
            # Scale the tangential vorticity sheet by the user factor.
            self.gamma_t= self.gamma_t*Ind_Opts['GammaFact']
            # print('gamma_t: ', self.gamma_t)
            # Only the tangential vorticity component is evaluated here.
            root = False
            longi = False
            tang = True
            # print('.',end='')
            ux,uy,uz = self.compute_u(rotated_x,rotated_y,rotated_z,root=root,longi=longi,tang=tang, only_ind=True, no_wake=True, Decay=True, Model = Ind_Opts['Model'], ground=Ind_Opts['Ground'],R_far_field=Ind_Opts['R_far_field'])
    return ux,uy,uz
def update_loading(self,r=None,VC_Ct=None,Gamma=None,Lambda=None,nCyl=1,gamma_t_Ct=None):
    """
    Computes relevant parameters when the turbine loading is updated, mainly
    gamma_t, the intensity of the tangential vorticity sheet.

    Note: VC_Ct differs from Ct in that for a vortex cylinder VC_Ct is
    constant along the blade and zero at the root and the tip.

    The distribution is determined from the inputs by one of three approaches:
        1. VC_Ct(r) distribution
        2. Gamma(r) distribution
        3. gamma_t(VC_Ct(r)) function

    INPUTS:
        r: radial coordinates at which VC_Ct or Gamma are provided
        VC_Ct: local thrust coefficient (VC_Ct(r), array), or total thrust
            coefficient (CT, scalar)
        Gamma: bound circulation (Gamma(r), array), or total rotor
            circulation (Gamma_tot, scalar)
        Lambda: tip speed ratio (assumed infinite if None)
        nCyl: number of cylindrical models used in the spanwise direction
            (default is 1); the circulation (gamma_t) is determined for each
            radial cylinder
        gamma_t_Ct: function that provides gamma_t as function of VC_Ct
            (or gamma_t as function of CT)
    """
    # Update vortex cylinder average velocity at turbine.
    # NOTE(review): `average_velocity` is a property defined elsewhere in
    # this class; only its streamwise component is used here.
    self.U0_g = np.asarray([self.average_velocity,0,0]).ravel().reshape(3,1)
    U0=np.linalg.norm(self.U0_g)
    # print('Turbine Avg U:', self.average_velocity)

    # --- Reinterpolating loading to number of cylinders if needed
    if nCyl is not None:
        if nCyl==1:
            # Single cylinder placed just inside the tip; collapse the
            # distributed loading to its mean value.
            vr0= np.array([0.995*self.R])
            if VC_Ct is not None:
                VC_Ct =np.array([np.mean(VC_Ct)])
            if Gamma is not None:
                Gamma =np.array([np.mean(Gamma)])
        else:
            # Multiple cylinders: re-interpolate the loading onto evenly
            # spaced radii spanning (almost) the full blade.
            vr0= np.linspace(0.005,0.995,nCyl)*self.R
            if VC_Ct is not None:
                VC_Ct = np.interp(vr0,r,VC_Ct)
            else:
                Gamma = np.interp(vr0,r,Gamma)
        r=vr0

    # Updating Lambda (fall back to the stored tip-speed ratio).
    if Lambda is None:
        Lambda=self.Lambda
    if Lambda is None:
        raise Exception('Provide `Lambda` for update_loading. (Note: `Lambda=np.Inf` supported) ')
    Omega = Lambda*U0/self.R
    #print('U0',U0)
    #print('VC_Ct',VC_Ct)

    # Computing and storing gamma distribution and loading, using whichever
    # specification was provided (see docstring for precedence).
    if gamma_t_Ct is not None:
        if VC_Ct is None:
            raise Exception('Provide `Ct` along `gamma_t_Ct`')
        self.gamma_t = gamma_t_Ct(VC_Ct)
        self.gamma_l=None # TODO
        self.Gamma_r=None # TODO
    elif VC_Ct is not None:
        self.gamma_t,self.gamma_l,self.Gamma_r,misc=WakeVorticityFromCt(r,VC_Ct,self.R,U0,Omega)
    elif Gamma is not None:
        self.gamma_t,self.gamma_l,self.Gamma_r,misc=WakeVorticityFromGamma(r,Gamma,self.R,U0,Omega)
    else:
        raise Exception('Unknown loading spec')
    #self.gamma_t=self.gamma_t*1.06
    #print('gamma_t ',self.gamma_t)
    #print('gamma_l ',self.gamma_l)
    #print('Gamma_r ',self.Gamma_r)
    #print('Gamma_/2piR',-self.Gamma_r/(2*np.pi*self.R))
    #print(misc)
    self.Lambda=Lambda
    self.r=r
    self.VC_Ct=VC_Ct
def compute_u(self, Xg, Yg, Zg, only_ind=False, longi=False, tang=True, root=False, no_wake=False, ground=None, Decay=False, Model=None, R_far_field=6):
"""
INPUTS:
Xg, Yg, Zg: Control points in global coordinates where the flow is to be computed.
only_ind: if true, only induction is returned (without the free stream)
longi, tang, root: booleans specifying which component of vorticity is considered.
Default is `tang` only
no_wake: boolean, if true: the induced velocity in the wake is set to 0.
Typically set to true when combining with wake models.
Model : string in ['VC','VCFF','SS','VD']
'VCFF': Vortex cylinder with far-field approximation (fastest)
'VC': Vortex cylinder
'SS': Self similar model of Troldborg et al. (not good close to rotor)
'VD': Self similar model of Troldborg et al. (not good close to rotor)
"""
# --- Optional argument overriding self
if ground is None:
ground=self.Ground
if Model is None:
Model=self.Model
# Control points in "Cylinder coordinate system" (rotation only)
T_c2g=np.dot(self.T_wt2g,self.T_c2wt)
Xc,Yc,Zc = transform_T(T_c2g, Xg,Yg,Zg)
# Detecting whether our vertical convention match, and define chi
e_vert_c = np.dot(T_c2g.T , self.e_vert_g)
# if self.chi is None:
# # TODO TODO chi needs induction effect!
# self.chi= np.sign(e_vert_c.ravel()[1])* (self.yaw_wind-self.yaw_pos)
# TODO TODO chi needs induction effect!
# self.chi= np.sign(e_vert_c.ravel()[1])* (self.yaw_wind-self.yaw_pos)
# print('Chi: ', self.chi)
if self.VC_Ct > 1:
self.VC_Ct = 1
self.chi= np.sign(e_vert_c.ravel()[1])* (self.yaw_wind-self.yaw_pos) * (1+0.3*(1-np.sqrt(1-self.VC_Ct[0])))
# print('Chi_: ', self.chi)
# self.chi = self.chi*1.5
# print('Chi: ', self.chi)
if self.gamma_t is None:
raise Exception('Please set loading with `update_loading` before calling `compute_u`')
uxc = np.zeros(Xg.shape)
uyc = np.zeros(Xg.shape)
uzc = np.zeros(Xg.shape)
m=np.tan(self.chi)
# Cylinder position in "Cylinder coordinate system) (rotation only)
Xcyl, Ycyl, Zcyl = transform_T(T_c2g,np.array([self.r_hub[0]]), np.array([self.r_hub[1]]), np.array([self.r_hub[2]]))
# Translate control points such that origin is at rotor center. NOTE: not all routines use this
Xc0,Yc0,Zc0=Xc-Xcyl[0],Yc-Ycyl[0],Zc-Zcyl[0]
if ground:
# Mirror control points are two time the hub height above the cylinder
Yc0mirror=Yc0+2*Ycyl[0]
Ylist=[Yc0,Yc0mirror]
#print('>>> Ground effect',Ycyl[0])
else:
Ylist=[Yc0]
# --- Root vortex influence
if root and (self.Gamma_r is not None) and self.Gamma_r!=0:
for Y in Ylist:
if np.abs(self.chi)>1e-7:
uxc0,uyc0,uzc0 = svc_root_u(Xc0,Y,Zc0,Gamma_r=self.Gamma_r,m=m,polar_out=False)
else:
uxc0,uyc0,uzc0 = vc_root_u(Xc0,Y,Zc0,Gamma_r=self.Gamma_r,polar_out=False)
uxc += uxc0
uyc += uyc0
uzc += uzc0
if len(self.gamma_t)==1:
# --- Tangential and longi - ONE Cylinder only
for iY,Y in enumerate(Ylist):
if tang and (self.gamma_t!=0):
if np.abs(self.chi)>1e-7:
if Model =='VC':
uxc0,uyc0,uzc0 = svc_tang_u(Xc0,Y,Zc0,gamma_t=self.gamma_t,R=self.R,m=m,polar_out=False)
# print('-----------------Vortex Cylinder Skewed Model------------------')
else:
pass
# raise NotImplementedError('Model '+Model + ', with yaw.')
else:
if Model =='VC':
uxc0,uyc0,uzc0 = vc_tang_u (Xc0,Y,Zc0, gamma_t=self.gamma_t, R=self.R, polar_out=False)
elif Model =='VCFF':
uxc0,uyc0,uzc0 = vc_tang_u_doublet(Xc0,Y,Zc0, gamma_t=self.gamma_t, R=self.R, polar_out=False,r_bar_Cut=R_far_field)
elif Model =='VD':
uxc0,uyc0,uzc0 = doublet_line_u(Xc0, Y, Zc0, dmz_dz = self.gamma_t * self.R**2 * np.pi)
elif Model =='SS':
uzc0 = ss_u (Xc0, Y, Zc0, gamma_t=self.gamma_t, R=self.R)
uxc0=uzc0*0
uyc0=uzc0*0
else:
raise NotImplementedError('Model'+Model)
uxc += uxc0
uyc += uyc0
uzc += uzc0
if longi and (self.gamma_l is not None) and self.gamma_l!=0 :
if np.abs(self.chi)>1e-7:
if Model =='VC':
uxc0,uyc0,uzc0 = svc_longi_u(Xc0,Y,Zc0,gamma_l=self.gamma_l,R=self.R,m=m,polar_out=False)
else:
raise NotImplementedError('Model '+Model + ', longi component.')
else:
if Model =='VC':
uxc0,uyc0,uzc0 = vc_longi_u (Xc0,Y,Zc0,gamma_l=self.gamma_l,R=self.R ,polar_out=False)
else:
raise NotImplementedError('Model'+Model + ', longi component.')
uxc += uxc0
uyc += uyc0
uzc += uzc0
else:
# --- Tangential and longi - MULTI Cylinders
if Model =='VC':
nr = len(self.r)
nWT = 1
# Control points are directly translated by routine
gamma_t = self.gamma_t.reshape((nWT,nr))
# print('r ',self.r)
# print('gamma_t',gamma_t)
if self.gamma_l is not None:
gamma_l = self.gamma_l.reshape((nWT,nr))
vR = self.r.reshape((nWT,nr))
vm = m* np.ones((nWT,nr))
if tang:
if np.abs(self.chi)>1e-7:
uxc0,uyc0,uzc0 = svcs_tang_u(Xc,Yc,Zc,gamma_t=gamma_t,R=vR,m=vm,Xcyl=Xcyl,Ycyl=Ycyl,Zcyl=Zcyl,Ground=ground)
else:
uxc0,uyc0,uzc0 = vcs_tang_u (Xc,Yc,Zc,gamma_t=gamma_t,R=vR ,Xcyl=Xcyl,Ycyl=Ycyl,Zcyl=Zcyl, Ground=ground)
uxc += uxc0
uyc += uyc0
uzc += uzc0
if longi and (self.gamma_l is not None):
if np.abs(self.chi)>1e-7:
uxc0,uyc0,uzc0 = svcs_longi_u(Xc,Yc,Zc,gamma_l=gamma_l,R=vR,m=vm,Xcyl=Xcyl,Ycyl=Ycyl,Zcyl=Zcyl, Ground=ground)
else:
uxc0,uyc0,uzc0 = vcs_longi_u (Xc,Yc,Zc,gamma_l=gamma_l,R=vR ,Xcyl=Xcyl,Ycyl=Ycyl,Zcyl=Zcyl, Ground=ground)
uxc += uxc0
uyc += uyc0
uzc += uzc0
else:
raise NotImplementedError('Model'+Model, 'with multiple cylinders')
# if no_wake:
# # uxc[:]=0
# # uyc[:]=0
# # uzc[:]=1
# # Zero wake induction
# bDownStream=Zc0>=-0.20*self.R
# # bDownStream=Zc0>=0
# Rc = np.sqrt(Xc0**2 + Yc0**2)
# bRotorTube = Rc<self.R*1.001 # we give a margin since VD and VC have fields very dissimilar at R+/-eps
# bSelZero = np.logical_and(bRotorTube,bDownStream)
# uxc[bSelZero]=0
# uyc[bSelZero]=0
# uzc[bSelZero]=0
# Transform back to global
uxg = T_c2g[0,0]*uxc+T_c2g[0,1]*uyc+T_c2g[0,2]*uzc
uyg = T_c2g[1,0]*uxc+T_c2g[1,1]*uyc+T_c2g[1,2]*uzc
uzg = T_c2g[2,0]*uxc+T_c2g[2,1]*uyc+T_c2g[2,2]*uzc
# Decay
if Decay:
bDownStream=Xg>=(Yg-self.r_hub[1])*np.tan(-self.yaw_pos)-0.20*self.R+self.r_hub[0]
XDecay = np.ones(uxg.shape)
# XDecay[bDownStream] = np.exp(-((Xg[bDownStream]-self.r_hub[0])/(self.R*2))**2)
# XDecay[bDownStream] = np.exp(-((Xg[bDownStream]-self.r_hub[0])/(self.R*2)*self.VC_Ct)**2)
# XDecay[bDownStream] = np.exp(-(((Xg[bDownStream]-self.r_hub[0])*np.cos(-self.yaw_pos)-(Yg[bDownStream]-self.r_hub[1])*np.sin(-self.yaw_pos))/(self.R*2))**2)
XDecay[bDownStream] = np.exp(-(((Xg[bDownStream]-self.r_hub[0])*np.cos(-self.yaw_pos)-(Yg[bDownStream]-self.r_hub[1])*np.sin(-self.yaw_pos))/(self.R*2)*self.VC_Ct)**2)
uxg*=XDecay
uyg*=XDecay
uzg*=XDecay
if no_wake:
# Zero wake induction
# Remove wake downstream of turbine (include small region in front of turbine to ensure induction does not affect free stream velocity)
bDownStream=Xg>=(Yg-self.r_hub[1])*np.tan(-self.yaw_pos)-0.20*self.R+self.r_hub[0]
# Only remove wake if within vortex cylinder radius
# Rc = np.sqrt((Yg-self.r_hub[1])**2 + (Zg-self.hub_height)**2)
vortex_vector = np.sqrt((-(Zg-self.r_hub[2]))**2+((Yg-self.r_hub[1])-(Xg-self.r_hub[0])*np.tan(self.chi+self.yaw_pos))**2)
Rc = vortex_vector/np.linalg.norm(np.array([1,np.tan(self.chi+self.yaw_pos),0]))
bRotorTube = Rc<self.R*1.001 # we give a margin since VD and VC have fields very dissimilar at R+/-eps
# Check if point is both downstream and within vortex cylinder radius
bSelZero = np.logical_and(bRotorTube,bDownStream)
uxg[bSelZero]=0
uyg[bSelZero]=0
uzg[bSelZero]=0
# # Removes ground effect vortex cylinder wake
# # Remove wake downstream of turbine (include small region in front of turbine to ensure induction does not affect free stream velocity)
# bDownStream=Xg>=(Yg+self.r_hub[1])*np.tan(-self.yaw_pos)-0.20*self.R+self.r_hub[0]
# vortex_vector = np.sqrt((-(Zg+self.r_hub[2]))**2+((Yg+self.r_hub[1])-(Xg-self.r_hub[0])*np.tan(self.chi+self.yaw_pos))**2)
# Rc = vortex_vector/np.linalg.norm(np.array([1,np.tan(self.chi+self.yaw_pos),0]))
# bRotorTube = Rc<self.R*1.001 # we give a margin since VD and VC have fields very dissimilar at R+/-eps
# # Check if point is both downstream and within vortex cylinder radius
# bSelZero = np.logical_and(bRotorTube,bDownStream)
# uxg[bSelZero]=0
# uyg[bSelZero]=0
# uzg[bSelZero]=0
# Add free stream if requested
if not only_ind:
uxg += self.U0_g[0]
uyg += self.U0_g[1]
uzg += self.U0_g[2]
return uxg,uyg,uzg
def TKE_to_TI(self, turbulence_kinetic_energy):
"""
Converts a list of turbulence kinetic energy values to
turbulence intensity.
Args:
turbulence_kinetic_energy (list): Values of turbulence kinetic
energy in units of meters squared per second squared.
wind_speed (list): Measurements of wind speed in meters per second.
Returns:
list: converted turbulence intensity values expressed as a decimal
(e.g. 10%TI -> 0.10).
"""
total_turbulence_intensity = (
np.sqrt((2 / 3) * turbulence_kinetic_energy)
) / self.average_velocity
return total_turbulence_intensity
def TI_to_TKE(self):
"""
Converts TI to TKE.
Args:
wind_speed (list): Measurements of wind speed in meters per second.
Returns:
list: converted TKE values
"""
return ((self.average_velocity * self.current_turbulence_intensity) ** 2) / (2 / 3)
def u_prime(self):
"""
Converts a TKE to horizontal deviation component.
Args:
wind_speed (list): Measurements of wind speed in meters per second.
Returns:
list: converted u_prime values in meters per second
"""
tke = self.TI_to_TKE()
return np.sqrt(2 * tke)
# Getters & Setters
    @property
    def yaw_wind(self):
        """ NOTE: this is wind angle not wind direction, measured with same convention as yaw:
        - around the axis e_vert

        Signed angle between the horizontal projection of the free stream
        U0_g and the horizontal reference axis e_horz_g.
        (assumes U0_g, e_vert_g, e_horz_g are numpy column vectors, since
        `.T` and `ravel()` are used -- TODO confirm against caller)
        """
        # Horizontal component of the free stream: subtract the projection
        # onto the vertical axis.
        u_horz = self.U0_g - np.dot(self.U0_g.T,self.e_vert_g)*self.e_vert_g
        # Unit vector along the horizontal wind component.
        e_w = u_horz/np.linalg.norm(u_horz)
        # Sign of the angle from the orientation of (e_horz x U0) w.r.t. e_vert.
        sign = np.sign ( np.dot(np.cross(self.e_horz_g.T, self.U0_g.T),self.e_vert_g) )
        if sign==0:
            # Wind aligned (or anti-aligned) with e_horz: return the unsigned angle.
            yaw_wind = np.arccos(np.dot(e_w.T,self.e_horz_g))
        else:
            yaw_wind = sign * np.arccos(np.dot(e_w.T,self.e_horz_g))
        return yaw_wind.ravel()[0]
    @property
    def turbulence_parameter(self):
        """
        This property calculates and returns the turbulence correction
        parameter for the turbine, a value used to account for the
        change in power output due to the effects of turbulence.

        The correction is a Gaussian-weighted average of the power curve
        around the current rotor-averaged speed, normalized by the power at
        that speed (so 1.0 means "no correction").

        Returns:
            float: The value of the turbulence parameter.
        """
        if not self.use_turbulence_correction:
            return 1.0
        else:
            # define wind speed, ti, and power curve components
            ws = np.array(self.power_thrust_table["wind_speed"])
            cp = np.array(self.power_thrust_table["power"])
            # keep only the operating range of the power curve (non-zero cp)
            ws = ws[np.where(cp != 0)]
            ciws = ws[0]  # cut in wind speed
            cows = ws[len(ws) - 1]  # cut out wind speed
            speed = self.average_velocity
            ti = self.current_turbulence_intensity
            # outside the operating range (or degenerate inputs): no correction
            if ciws >= speed or cows <= speed or ti == 0.0 or math.isnan(speed):
                return 1.0
            else:
                # define mean and standard deviation to create normalized pdf with sum = 1
                mu = speed
                sigma = ti * mu
                # clamp the sampling window at cut-out so we never weight
                # speeds beyond the power curve
                if mu + sigma >= cows:
                    xp = np.linspace((mu - sigma), cows, 100)
                else:
                    xp = np.linspace((mu - sigma), (mu + sigma), 100)
                pdf = norm.pdf(xp, mu, sigma)
                # renormalize so the discrete weights sum to 1
                npdf = np.array(pdf) * (1 / np.sum(pdf))
                # calculate turbulence parameter (ratio of corrected power to original power)
                return np.sum([npdf[k] * self.powInterp(xp[k]) for k in range(100)]) / (
                    self.powInterp(mu)
                )
@property
def current_turbulence_intensity(self):
"""
This method returns the current turbulence intensity at
the turbine expressed as a decimal fraction.
**Note:** This is a virtual property used to "get" or "set" a value.
Args:
value (float): Value to set.
Returns:
float: Value currently set.
Examples:
To get the turbulence intensity for a turbine:
>>> current_turbulence_intensity = floris.farm.turbines[0].turbulence_intensity()
"""
return self._turbulence_intensity
    @current_turbulence_intensity.setter
    def current_turbulence_intensity(self, value):
        # Store the turbulence intensity (decimal fraction, e.g. 0.10 for 10%).
        self._turbulence_intensity = value
@property
def rotor_radius(self):
"""
This method returns the rotor radius of the turbine (m).
**Note:** This is a virtual property used to "get" a value.
Returns:
float: The rotor radius of the turbine.
Examples:
To get the rotor radius for a turbine:
>>> rotor_radius = floris.farm.turbines[0].rotor_radius()
"""
return self.rotor_diameter / 2.0
@property
def yaw_angle(self):
"""
This method gets or sets the turbine's yaw angle.
**Note:** This is a virtual property used to "get" or "set" a value.
Args:
value (float): Value to set.
Returns:
float: Value currently set.
Examples:
To set the yaw angle for each turbine in the wind farm:
>>> yaw_angles = [20.0, 10.0, 0.0]
>>> for yaw_angle, turbine in
... zip(yaw_angles, floris.farm.turbines):
... turbine.yaw_angle = yaw_angle
To get the current yaw angle for each turbine in the wind
farm:
>>> yaw_angles = []
>>> for i, turbine in enumerate(floris.farm.turbines):
... yaw_angles.append(turbine.yaw_angle())
"""
return self._yaw_angle
    @yaw_angle.setter
    def yaw_angle(self, value):
        # Store the yaw angle to be used by Cp/Ct/power calculations.
        self._yaw_angle = value
@property
def tilt_angle(self):
"""
This method gets the turbine's tilt angle.
**Note:** This is a virtual property used to "get" or "set" a value.
Args:
value (float): Value to set.
Returns:
float: Value currently set.
Examples:
To get the current tilt angle for a turbine:
>>> tilt_angle = floris.farm.turbines[0].tilt_angle()
"""
return self._tilt_angle
    @tilt_angle.setter
    def tilt_angle(self, value):
        # Store the tilt angle of the rotor.
        self._tilt_angle = value
@property
def average_velocity(self):
"""
This property calculates and returns the cube root of the
mean cubed velocity in the turbine's rotor swept area (m/s).
Returns:
float: The average velocity across a rotor.
Examples:
To get the average velocity for a turbine:
>>> avg_vel = floris.farm.turbines[0].average_velocity()
"""
# remove all invalid numbers from interpolation
data = self.velocities[np.where(~np.isnan(self.velocities))]
avg_vel = np.cbrt(np.mean(data ** 3))
if np.isnan(avg_vel):
avg_vel = 0
elif np.isinf(avg_vel):
avg_vel = 0
return avg_vel
@property
def Cp(self):
"""
This property returns the power coeffcient of a turbine.
This property returns the coefficient of power of the turbine
using the rotor swept area average velocity, interpolated from
the coefficient of power table. The average velocity is
calculated as the cube root of the mean cubed velocity in the
rotor area.
**Note:** The velocity is scalled to an effective velocity by the yaw.
Returns:
float: The power coefficient of a turbine at the current
operating conditions.
Examples:
To get the power coefficient value for a turbine:
>>> Cp = floris.farm.turbines[0].Cp()
"""
# Compute the yaw effective velocity
pW = self.pP / 3.0 # Convert from pP to pW
yaw_effective_velocity = self.average_velocity * cosd(self.yaw_angle) ** pW
return self._fCp(yaw_effective_velocity)
@property
def Ct(self):
"""
This property returns the thrust coefficient of a turbine.
This method returns the coefficient of thrust of the yawed
turbine, interpolated from the coefficient of power table,
using the rotor swept area average velocity and the turbine's
yaw angle. The average velocity is calculated as the cube root
of the mean cubed velocity in the rotor area.
Returns:
float: The thrust coefficient of a turbine at the current
operating conditions.
Examples:
To get the thrust coefficient value for a turbine:
>>> Ct = floris.farm.turbines[0].Ct()
"""
return self._fCt(self.average_velocity) * cosd(self.yaw_angle) # **self.pP
@property
def power(self):
"""
This property returns the power produced by turbine (W),
adjusted for yaw and tilt.
Returns:
float: Power of a turbine in watts.
Examples:
To get the power for a turbine:
>>> power = floris.farm.turbines[0].power()
"""
# Update to power calculation which replaces the fixed pP exponent with
# an exponent pW, that changes the effective wind speed input to the power
# calculation, rather than scaling the power. This better handles power
# loss to yaw in above rated conditions
#
# based on the paper "Optimising yaw control at wind farm level" by
# Ervin Bossanyi
# Compute the yaw effective velocity
pW = self.pP / 3.0 # Convert from pP to w
yaw_effective_velocity = self.average_velocity * cosd(self.yaw_angle) ** pW
# Now compute the power
return (
self.air_density
* self.powInterp(yaw_effective_velocity)
* self.turbulence_parameter
)
@property
def aI(self):
"""
This property returns the axial induction factor of the yawed
turbine calculated from the coefficient of thrust and the yaw
angle.
Returns:
float: Axial induction factor of a turbine.
Examples:
To get the axial induction factor for a turbine:
>>> aI = floris.farm.turbines[0].aI()
"""
return (
0.5
/ cosd(self.yaw_angle)
* (1 - np.sqrt(1 - self.Ct * cosd(self.yaw_angle)))
)
# --------------------------------------------------------------------------------}
# --- Helper functions for geometry
# --------------------------------------------------------------------------------{
def transform_T(T_a2b, Xb, Yb, Zb):
    """Apply the transpose of ``T_a2b`` to coordinates (Xb, Yb, Zb),
    i.e. transform from frame b back to frame a.

    Works elementwise, so Xb/Yb/Zb may be scalars or numpy arrays of the
    same shape.
    """
    Xa, Ya, Za = (
        T_a2b[0, col] * Xb + T_a2b[1, col] * Yb + T_a2b[2, col] * Zb
        for col in range(3)
    )
    return Xa, Ya, Za
73e9f99ef9550a3c7afd415664878ca0ae55cf8c | 147,708 | py | Python | design_utils/design.py | facebookresearch/Project_FARSI | 12b40e4f16ba7418a0f3b997ad124cdb51f4e7f4 | [
"MIT"
] | 14 | 2021-06-01T16:45:19.000Z | 2022-03-08T20:07:00.000Z | design_utils/design.py | facebookresearch/Project_FARSI | 12b40e4f16ba7418a0f3b997ad124cdb51f4e7f4 | [
"MIT"
] | null | null | null | design_utils/design.py | facebookresearch/Project_FARSI | 12b40e4f16ba7418a0f3b997ad124cdb51f4e7f4 | [
"MIT"
] | 3 | 2021-08-05T16:37:47.000Z | 2022-01-06T00:25:49.000Z | #Copyright (c) Facebook, Inc. and its affiliates.
#This source code is licensed under the MIT license found in the
#LICENSE file in the root directory of this source tree.
import _pickle as cPickle
from design_utils.components.hardware import *
from design_utils.components.workload import *
from design_utils.components.mapping import *
from design_utils.components.scheduling import *
from design_utils.components.krnel import *
from design_utils.common_design_utils import *
import collections
import datetime
from datetime import datetime
from error_handling.custom_error import *
import gc
import statistics as st
if config.use_cacti:
from misc.cacti_hndlr import cact_handlr
if config.simulation_method == "power_knobs":
from specs import database_input_powerKnobs as database_input
elif config.simulation_method == "performance":
from specs import database_input
else:
raise NameError("Simulation method unavailable")
# Records one "insanity" (the opposite of sanity, i.e. a sanity-check
# violation / flaw in the design), optionally tied to the offending task
# and/or hardware block. Task/block use the placeholder string "_" when
# not applicable.
class Insanity:
    def __init__(self, task, block, name):
        self.name = name
        self.task = task
        self.block = block

    # the problematic task
    def set_task(self, task_):
        self.task = task_

    # the problematic block
    def set_block(self, block_):
        self.block = block_

    # name of the insanity
    def set_name(self, name_):
        self.name = name_

    # build a human-readable description of the failure
    def gen_msg(self):
        parts = ["sanity check failed with: ", "insanity name:", self.name]
        if self.task != "_":
            parts.append(self.task.name)
        if self.block != "_":
            parts.append(self.block.instance_name)
        return "".join(parts)
# This class emulates a design point containing
# hardware/software, their mapping and scheduling
class ExDesignPoint:
def __init__(self, hardware_graph:HardwareGraph):
self.hardware_graph = hardware_graph # hardware graph contains the hardware blocks
# and their connections
self.PA_prop_dict = {} # PA prop is used for PA design generation
self.id = str(-1) # this means it hasn't been set
self.valid = True
self.FARSI_ex_id = str(-1)
self.PA_knob_ctr_id = str(-1)
self.check_pointed_population_generation_cnt = 0 # only for check pointing purposes, and only work if the design has been checkpointed
self.check_pointed_total_iteration_cnt = 0
def set_check_pointed_population_generation_cnt(self, generation_cnt):
self.check_pointed_population_generation_cnt = generation_cnt
def set_check_pointed_total_iteration_cnt(self, total_iteration):
self.check_pointed_total_iteration_cnt = total_iteration
def get_check_pointed_population_generation_cnt(self):
return self.check_pointed_population_generation_cnt
def get_check_pointed_total_iteration_cnt(self):
self.check_pointed_total_iteration_cnt
def eliminate_system_bus(self):
all_drams = [el for el in self.get_hardware_graph().get_blocks() if el.subtype == "dram"]
ics_with_dram = []
for dram in all_drams:
for neigh in dram.get_neighs():
if neigh.type == "ic":
ics_with_dram.append(neigh)
# can only have one ic with dram hanging from it
return len(ics_with_dram) == 1
def has_system_bus(self):
return False
# find all the drams and their ics
all_drams = [el for el in self.get_hardware_graph().get_blocks() if el.subtype == "dram"]
ics_with_dram = []
for dram in all_drams:
for neigh in dram.get_neighs():
if neigh.type == "ic":
ics_with_dram.append(neigh)
# can only have one ic with dram hanging from it
return len(ics_with_dram) == 1
def get_system_bus(self):
if not self.has_system_bus():
return None
else:
all_drams = [el for el in self.get_hardware_graph().get_blocks() if el.subtype == "dram"]
ics_with_dram = []
for dram in all_drams:
for neigh in dram.get_neighs():
if neigh.type == "ic":
neigh.set_as_system_bus()
return neigh
# get hardware blocks of a design
def get_blocks(self):
return self.hardware_graph.blocks
# get hardware blocks within a specific SOC of the design
def get_blocks_of_SOC(self,SOC_type, SOC_id):
return [block for block in self.hardware_graph.blocks if block.SOC_type == SOC_type and SOC_id == SOC_id]
# get tasks (software tasks) of the design
def get_tasks(self):
return self.hardware_graph.get_all_tasks()
def get_tasks_of_SOC(self, SOC_type, SOC_id):
return [task for task in self.get_tasks() if task.SOC_type == SOC_type and SOC_id == SOC_id]
# samples the task distribution within the hardware graph.
# used for jitter modeling.
def sample_hardware_graph(self, hw_sampling):
self.hardware_graph.sample(hw_sampling)
# get blocks that a task uses (host the task)
def get_blocks_of_task(self, task):
blocks = []
for block in self.get_blocks():
if task in block.get_tasks_of_block():
blocks.append(block)
return blocks
# if set, the design is complete and valid
def set_validity(self, validity):
self.valid = validity
def get_validity(self):
return self.valid
# delete this later. Used for debugging
def check_mem_fronts_sanity(self):
fronts_1 = sum([len(block.get_fronts("task_name_dir")) for block in self.get_blocks() if block.type == "mem"])
fronts_2 = sum(
[len(block.get_fronts("task_dir_work_ratio")) for block in self.get_blocks() if block.type == "mem"])
def check_system_ic_exist(self, block):
assert (block.type == "ic"), "should be checking this with non ic input"
system_ic_exist = False
connectd_ics = [block_ for block_ in block.get_neighs() if block_.type == "ic"]
# iterate though the connected ics, get their neighbouring ics
# and make sure there is a ic with only dram
system_ic_list = []
for neigh_ic in connectd_ics:
has_dram = len([neigh for neigh in neigh_ic.get_neighs() if neigh.subtype == "dram"]) >= 1
has_pe = len([neigh for neigh in neigh_ic.get_neighs() if neigh.type == "pe"]) >= 1
if has_dram:
if has_pe:
pass
#return False
#print(" system ic can not have a pe")
#exit(0)
else:
system_ic_list.append(neigh_ic)
if self.block_is_system_ic(block):
system_ic_list.append(block)
if len(set(system_ic_list)) > 1:
print("can only have one system ic")
exit(0)
return len(system_ic_list) == 1
def block_is_system_ic(self, block):
assert (block.type == "ic"), "should be checking this with non ic input"
# iterate though the connected ics, get their neighbouring ics
# and make sure there is a ic with only dram
system_ic_list = []
has_dram = len([neigh for neigh in block.get_neighs() if neigh.subtype == "dram"]) >= 1
has_pe = len([neigh for neigh in block.get_neighs() if neigh.type == "pe"]) >= 1
if has_dram:
if has_pe:
pass
#print(" system ic can not have a pe")
#exit(0)
else:
return True
else:
return False
return False
# sanity check the design
def sanity_check(self):
insanity_list = [] # list of Inanities
# fronts check
fronts_1 = sum([len(block.get_fronts("task_name_dir")) for block in self.get_blocks() if block.type == "mem"])
fronts_2 = sum([len(block.get_fronts("task_dir_work_ratio")) for block in self.get_blocks() if block.type== "mem"])
if not fronts_1 == fronts_2:
pre_mvd_fronts_1 = [block.get_fronts("task_name_dir") for block in self.get_blocks() if block.type == "mem"]
pre_mvd_fronts_2 = [block.get_fronts("task_dir_work_ratio") for block in self.get_blocks() if block.type == "mem"]
raise UnEqualFrontsError
# all the tasks have pe and mem
for task in self.get_tasks():
pe_blocks = self.get_blocks_of_task_by_block_type(task, "pe")
mem_blocks = self.get_blocks_of_task_by_block_type(task, "mem")
if len(pe_blocks) == 0:
print("task:" + task.name + " does not have any pes")
insanity = Insanity("_", "_", "none")
insanity.set_block("_")
insanity.set_name("no_pe")
insanity_list.append(insanity)
pe_blocks = self.get_blocks_of_task_by_block_type(task, "pe")
print(insanity.gen_msg())
raise NoPEError
#break
elif (len(mem_blocks) == 0 and not("siink" in task.name)):
print("task:" + task.name + " does not have any mems")
insanity = Insanity("_", "_", "none")
insanity.set_block("_")
insanity.set_name("no_mem")
insanity_list.append(insanity)
print(insanity.gen_msg())
mem_blocks = self.get_blocks_of_task_by_block_type(task, "mem")
raise NoMemError
#break
# every pe or memory needs to be connected to a bus
for block in self.get_blocks():
if block.type in ["pe", "mem"]:
connectd_ics = [True for block_ in block.get_neighs() if block_.type =="ic" ]
if len(connectd_ics) > 1:
print("block: " + block.instance_name + " is connected to more than one ic")
insanity = Insanity("_", "_", "multi_bus")
insanity.set_name("multi_bus")
insanity_list.append(insanity)
print(insanity.gen_msg())
raise MultiBusBlockError
#break
elif len(connectd_ics) < 1:
print("block: " + block.instance_name + " is not connected any ic")
insanity = Insanity("_", "_", "none")
insanity.set_block(block)
insanity.set_name("no_bus")
insanity_list.append(insanity)
print(insanity.gen_msg())
raise NoBusError
#break
# every bus needs to have at least one pe and mem
for block in self.get_blocks():
if block.type in ["ic"]:
connectd_pes = [True for block_ in block.get_neighs() if block_.type =="pe" ]
connectd_mems = [True for block_ in block.get_neighs() if block_.type =="mem" ]
connectd_ics = [True for block_ in block.get_neighs() if block_.type =="ic" ]
system_ic_exist = self.check_system_ic_exist(block)
if len(connectd_mems) == 0 and not system_ic_exist:
insanity = Insanity("_",block, "bus_with_no_mem")
print(insanity.gen_msg())
if self.hardware_graph.generation_mode == "user_generated":
print("deactivated Bus with No memory error, since hardware graph was directly user generated/parsed ")
else:
raise BusWithNoMemError
"""
elif len(connectd_pes) > 0 and self.block_is_system_ic(block):
insanity = Insanity("_", block, "system_ic_with_pe")
insanity_list.append(insanity)
print(insanity.gen_msg())
if self.hardware_graph.generation_mode == "user_generated":
print(
"deactivated Bus with No Bus error, since hardware graph was directly user generated/parsed ")
else:
raise SystemICWithPEException
"""
elif len(connectd_pes) == 0 and not self.block_is_system_ic(block):
insanity = Insanity("_", block, "bus_with_no_pes")
insanity_list.append(insanity)
print(insanity.gen_msg())
if self.hardware_graph.generation_mode == "user_generated":
print("deactivated Bus with No Bus error, since hardware graph was directly user generated/parsed ")
else:
raise BusWithNoPEError
# every design needs to have at least on pe, mem, and bus
block_type_count_dict = {}
block_type_count_dict["mem"] = 0
block_type_count_dict["pe"] = 0
block_type_count_dict["ic"] = 0
for block in self.get_blocks():
block_type_count_dict[block.type] +=1
for type_, count in block_type_count_dict.items():
if count < 1:
print("no block of type " + type_ + " found")
insanity = Insanity("_", "_", "none")
insanity.set_name("not_enough_ip_of_certain_type")
insanity_list.append(insanity)
print(insanity.gen_msg())
raise NotEnoughIPOfCertainType
#break
# every block should host at least one task
for block in self.get_blocks():
if block.type == "ic": # since we unload
continue
if len(block.get_tasks_of_block()) == 0:
print( "block: " + block.instance_name + " does not host any tasks")
insanity = Insanity("_", "_", "none")
insanity.set_block(block)
insanity.set_name("no_task")
insanity_list.append(insanity)
print(insanity.gen_msg())
raise BlockWithNoTaskError
# get blocks within the design (filtered by type)
def get_blocks_by_type(self, block_type):
return [block for block in self.get_blocks() if block.type == block_type]
# gets blocks for a task, and filter them based on hardware type (pe, mem, ic)
def get_blocks_of_task_by_block_type(self, task, block_type):
blocks_of_task = self.get_blocks_of_task(task)
blocks_by_type = []
for block in blocks_of_task:
if block.type == block_type:
blocks_by_type.append(block)
return blocks_by_type
def get_write_mem_tasks(self, task, mem):
# get conncted ic
ics = [el for el in mem.get_neighs() if el.type =="ic"]
assert(len(ics) <= 1), "Each memory can be only connected to one bus master"
# get the pipes
pipes = self.get_hardware_graph().get_pipes_between_two_blocks(ics[0], mem, "write")
assert(len(pipes) <= 1), "can only have one pipe (in a direction) between a memory and a ic"
# traffic
traffics = pipes[0].get_traffic()
return [trf.child for trf in traffics if trf.parent.name == task.name]
# for a specific task, find all the specific blocks of a type and their direction
def get_blocks_of_task_by_block_type_and_task_dir(self, task, block_type, task_dir=""):
assert ((block_type == "pe") != task_dir) # XORing the expression
blocks_of_task = self.get_blocks_of_task(task)
blocks_of_task_by_type = [block for block in blocks_of_task if block.type == block_type]
blocks_of_task_by_type_and_task_dir = [block for block in blocks_of_task_by_type if block.get_task_dir_by_task_name(task)[0][1] == task_dir]
return blocks_of_task_by_type_and_task_dir
# get the properties of the design. This is used for the more accurate simulation
def filter_props_by_keyword(self, knob_order, knob_values, type_name):
prop_value_dict = collections.OrderedDict()
for knob_name, knob_value in zip(knob_order, knob_values):
if type_name+"_props" in knob_name:
knob_name_refined = knob_name.split("__")[-1]
prop_value_dict[knob_name_refined] = knob_value
return prop_value_dict
def filter_auto_tune_props(self, type_name, auto_tune_props):
auto_tune_list = []
for knob_name in auto_tune_props:
if type_name+"_props" in knob_name:
knob_name_refined = knob_name.split("__")[-1]
auto_tune_list.append(knob_name_refined)
return auto_tune_list
# get id associated with a design. Each design has it's unique id.
def get_ex_id(self):
if self.id == str(-1):
print("experiments id is:" + str(self.id) + ". This means id has not been set")
exit(0)
return self.id
def update_ex_id(self, id):
self.id = id
def get_FARSI_ex_id(self):
if self.FARSI_ex_id == str(-1):
print("experiments id is:" + str(self.id) + ". This means id has not been set")
exit(0)
return self.FARSI_ex_id
def get_PA_knob_ctr_id(self):
if self.PA_knob_ctr_id == str(-1):
print("experiments id is:" + str(self.PA_knob_ctr_id) + ". This means id has not been set")
exit(0)
return self.PA_knob_ctr_id
def update_FARSI_ex_id(self, FARSI_ex_id):
self.FARSI_ex_id = FARSI_ex_id
def update_PA_knob_ctr_id(self, knob_ctr):
self.PA_knob_ctr_id = knob_ctr
def reset_PA_knobs(self, mode="batch"):
if mode == "batch":
# parse and set design props
self.PA_prop_dict = collections.OrderedDict()
# parse and set hw and update the props
for keyword in ["pe", "ic", "mem"]:
blocks_ = self.get_blocks_by_type(keyword)
for block in blocks_:
block.reset_PA_props()
# parse and set sw props
for keyword in ["sw"]:
tasks = self.get_tasks()
for task_ in tasks:
task_.reset_PA_props()
else:
print("mode:" + mode + " is not defind for apply_PA_knobs")
exit(0)
def update_PA_knobs(self, knob_values, knob_order, all_auto_tunning_knobs, mode="batch"):
if mode == "batch":
# parse and set design props
prop_value_dict = {}
prop_value_dict["ex_id"] = self.get_ex_id()
prop_value_dict["FARSI_ex_id"] = self.get_FARSI_ex_id()
prop_value_dict["PA_knob_ctr_id"] = self.get_PA_knob_ctr_id()
self.PA_prop_dict.update(prop_value_dict)
# parse and set hw and update the props
for keyword in ["pe", "ic", "mem"]:
blocks_ = self.get_blocks_by_type(keyword)
prop_value_dict = self.filter_props_by_keyword(knob_order, knob_values, keyword)
prop_auto_tuning_list = self.filter_auto_tune_props(keyword, all_auto_tunning_knobs)
for block in blocks_:
block.update_PA_props(prop_value_dict)
block.update_PA_auto_tunning_knob_list(prop_auto_tuning_list)
# parse and set sw props
for keyword in ["sw"]:
tasks = self.get_tasks()
prop_value_dict = self.filter_props_by_keyword(knob_order, knob_values, keyword)
prop_auto_tuning_list = self.filter_auto_tune_props(keyword, all_auto_tunning_knobs)
for task_ in tasks:
task_.update_PA_props(prop_value_dict)
task_.update_PA_auto_tunning_knob_list(prop_auto_tuning_list)
else:
print("mode:"+ mode +" is not defined for apply_PA_knobs")
exit(0)
# write all the props into the design file.
# design file is set in the config file (config.verification_result_file)
def dump_props(self, result_folder, mode="batch"): # batch means that all the blocks of similar type have similar props
file_name = config.verification_result_file
file_addr = os.path.join(result_folder, file_name)
if mode == "batch":
with open(file_addr, "a+") as output:
for prop_name, prop_value in self.PA_prop_dict.items():
prop_name_modified = "\"design" + "__" + prop_name +"\""
if not str(prop_value).isdigit():
prop_value = "\"" + prop_value + "\""
output.write(prop_name_modified + ": " + str(prop_value) + ",\n")
# writing the hardware props
for keyword in ["pe", "ic", "mem"]:
block = self.get_blocks_by_type(keyword)[0] # since in batch mode, the first element shares the same prop values
# as all
for prop_name, prop_value in block.PA_prop_dict.items():
prop_name_modified = "\""+ keyword+"__"+prop_name + "\""
if "ic__/Buffer/enable" in prop_name_modified: # this is just because now parsing throws an error
continue
if not str(prop_value).isdigit():
prop_value = "\"" + prop_value +"\""
output.write(prop_name_modified+": " + str(prop_value) + ",\n")
# writing the software props
for keyword in ["sw"]:
task_ = self.get_tasks()[0]
for prop_name, prop_value in task_.PA_prop_dict.items():
prop_name_modified = "\""+ keyword + "__" + prop_name +"\""
if not str(prop_value).isdigit():
prop_value = "\"" + prop_value +"\""
output.write(prop_name_modified + ": " + str(prop_value) + ",\n")
else:
print("mode:" + mode + " is not defind for apply_PA_knobs")
def get_hardware_graph(self):
return self.hardware_graph
def get_task_by_name(self, task_name):
return [task_ for task_ in self.get_tasks() if task_.name == task_name][0]
# Collection of the simulated design points.
# Note that you can query this container with the same functions as a
# SimDesignPoint (i.e., the same modules are provided). However, this
# container is not itself a single SimDesignPoint.
class SimDesignPointContainer:
    """
    Collection of simulated design points.

    Queries are answered through a representative design point (the first
    entry of design_point_list, ``dp_rep``) and a DPStatsContainer that
    statistically reduces results across the whole collection.
    """
    def __init__(self, design_point_list, database, reduction_mode="avg"):
        self.design_point_list = design_point_list
        self.reduction_mode = reduction_mode  # how to statistically reduce the results
        self.database = database  # hw/sw database
        self.dp_rep = self.design_point_list[0]  # design point representative
        self.dp_stats = DPStatsContainer(self, self.dp_rep, self.database, reduction_mode)  # design point stats
        # dp is used to fill up some default values; we use dp_rep,
        # i.e., the design point representative, for this
        self.dp = self.dp_rep
        self.move_applied = None
        self.dummy_tasks = [krnl.get_task() for krnl in self.dp.get_kernels() if (krnl.get_task()).is_task_dummy()]
        self.exploration_and_simulation_approximate_time = 0

    def get_dummy_tasks(self):
        return self.dummy_tasks

    # bootstrap the design from scratch
    def reset_design(self, workload_to_hardware_map=[], workload_to_hardware_schedule=[]):
        # NOTE(review): both arguments are currently ignored; kept for interface compatibility
        self.dp_rep.reset_design()

    def set_move_applied(self, move_applied):
        self.move_applied = move_applied

    def get_move_applied(self):
        return self.move_applied

    def add_exploration_and_simulation_approximate_time(self, time):
        # approximate because we take the entire generation time and divide it
        # by the number of iterations per iteration
        self.exploration_and_simulation_approximate_time += time

    def get_exploration_and_simulation_approximate_time(self):
        return self.exploration_and_simulation_approximate_time

    def get_dp_stats(self):
        return self.dp_stats

    # -----------------
    # getters
    # -----------------
    def get_task_graph(self):
        return self.dp_rep.get_hardware_graph().get_task_graph()

    # get the sw-to-hw mapping
    def get_workload_to_hardware_map(self):
        return self.dp_rep.get_workload_to_hardware_map()

    # get the scheduling
    def get_workload_to_hardware_schedule(self):
        # bug fix: this previously read self.dp_rp (typo), which does not
        # exist and raised AttributeError
        return self.dp_rep.get_workload_to_hardware_schedule()

    # get the kernels of the design
    # (bug fix: duplicate definitions removed; the shadowed first definition
    #  also referenced the non-existent attribute self.dp_rp)
    def get_kernels(self):
        return self.dp_rep.get_kernels()

    def get_kernel_by_task_name(self, task: "Task"):
        return self.dp_rep.get_kernel_by_task_name(task)

    # get the SOCs that the design resides in
    def get_designs_SOCs(self):
        return self.dp_rep.get_designs_SOCs()

    # get all the design points
    def get_design_point_list(self):
        return self.design_point_list

    # get the representative design point.
    def get_dp_rep(self):
        return self.dp_rep
# Container for all the design point stats.
# In order to collect profiling information, we reduce the statistics
# according to the desired reduction function.
# Reduction semantically happens at two different levels depending on the
# question that we are asking.
# Level 1 Questions: Within/intra-design questions that compare components of a
#                    single design. Example: which kernel is the hottest?
#                    To answer, reduce the results at the task/kernel level.
# Level 2 Questions: Across/inter-design questions that compare different designs.
#                    To answer, reduce the results from the end-to-end perspective, i.e.,
#                    reduce(end-to-end latency), reduce(end-to-end energy), ...
# PS: at the moment, a design here is defined as a sw/hw tuple with only the sw
#     characteristic changing.
class DPStatsContainer():
    def __init__(self, sim_dp_container, dp_rep, database, reduction_mode):
        """
        Collect and hold statistics for a container of simulated design points.

        :param sim_dp_container: SimDesignPointContainer holding the design point list
        :param dp_rep: representative design point used for single-design queries
        :param database: hw/sw database
        :param reduction_mode: how to statistically reduce values ("avg"/"min"/"max")
        """
        self.comparison_mode = "latency" # metric to compare different design points
        self.sim_dp_container = sim_dp_container
        self.design_point_list = self.sim_dp_container.design_point_list # design point container (containing list of designs)
        self.dp_rep = dp_rep #self.dp_container[0] # which design to use as representative (for plotting and so on
        self.__kernels = self.sim_dp_container.design_point_list[0].get_kernels()
        self.SOC_area_dict = defaultdict(lambda: defaultdict(dict)) # area of all blocks within each SOC
        self.SOC_area_subtype_dict = defaultdict(lambda: defaultdict(dict)) # area broken down by block subtype within each SOC
        self.system_complex_area_dict = defaultdict()
        self.SOC_metric_dict = defaultdict(lambda: defaultdict(dict))
        self.system_complex_metric_dict = defaultdict(lambda: defaultdict(dict))
        self.system_complex_area_dram_non_dram = defaultdict(lambda: defaultdict(dict))
        self.database = database # hw/sw database
        self.latency_list =[] # list of latency values associated with each design point
        self.power_list =[] # list of power values associated with each design point
        self.energy_list =[] # list of energy values associated with each design point
        self.reduction_mode = reduction_mode # how to statistically reduce the values
        # collect the data; must run after the dictionaries above are set up
        self.collect_stats()
        self.dp = self.sim_dp_container # container that has all the designs
        self.parallel_kernels = dp_rep.parallel_kernels
def get_parallel_kernels(self):
return self.parallel_kernels
# helper function to apply an operator across two dictionaries
def operate_on_two_dic_values(self,dict1, dict2, operator):
dict_res = {}
for key in list(dict2.keys()) + list(dict1.keys()):
if key in dict1.keys() and dict2.keys():
dict_res[key] = operator(dict2[key], dict1[key])
else:
if key in dict1.keys():
dict_res[key] = dict1[key]
elif key in dict2.keys():
dict_res[key] = dict2[key]
return dict_res
# operate on multiple dictionaries. The operation is determined by the operator input
def operate_on_dicionary_values(self, dictionaries, operator):
res = {}
for SOCs_latency in dictionaries:
#res = copy.deepcopy(self.operate_on_two_dic_values(res, SOCs_latency, operator))
#gc.disable()
res = cPickle.loads(cPickle.dumps(self.operate_on_two_dic_values(res, SOCs_latency, operator), -1))
#gc.enable()
return res
# reduce the (list of) values based on a statistical parameter (such as average)
def reduce(self, list_):
if self.reduction_mode == 'avg':
if isinstance(list_[0],dict):
dict_added = self.operate_on_dicionary_values(list_, operator.add)
for key,val in dict_added.items():
dict_added[key] = val/len(list_)
return dict_added
else:
return sum(list_)/len(list_)
elif self.reduction_mode == 'min':
return min(list_)
elif self.reduction_mode == 'max':
#if (len(list_) == 0):
# print("What")
return max(list_)
else:
print("reduction mode "+ self.reduction_mode + ' is not defined')
exit(0)
def get_number_blocks_of_all_sub_types(self):
subtype_cnt = []
for block in self.dp_rep.get_blocks():
if block.subtype not in subtype_cnt:
subtype_cnt[block.subtype] = 0
subtype_cnt[block.subtype] += 1
return subtype_cnt
def get_compute_system_attr(self):
ips = [el for el in self.dp_rep.get_blocks() if el.subtype == "ip"]
gpps = [el for el in self.dp_rep.get_blocks() if el.subtype == "gpp"]
# get frequency data
ips_freqs = [mem.get_block_freq() for mem in ips]
gpp_freqs = [mem.get_block_freq() for mem in gpps]
if len(ips_freqs) == 0:
ips_avg_freq = 0
else:
ips_avg_freq= sum(ips_freqs)/max(len(ips_freqs),1)
loop_itr_ratio = []
for ip in ips:
loop_itr_ratio.append(ip.get_loop_itr_cnt()/ip.get_loop_max_possible_itr_cnt())
if len(ips) == 0:
loop_itr_ratio_avg = 0
else:
loop_itr_ratio_avg = st.mean(loop_itr_ratio)
if len(ips_freqs) in [0,1]:
ips_freq_std = 0
ips_freq_coeff_var = 0
loop_itr_ratio_std = 0
loop_itr_ratio_var = 0
else:
ips_freq_std = st.stdev(ips_freqs)
ips_freq_coeff_var = st.stdev(ips_freqs)/st.mean(ips_freqs)
loop_itr_ratio_std = st.stdev(loop_itr_ratio)
loop_itr_ratio_var = st.stdev(loop_itr_ratio)/st.mean(loop_itr_ratio)
if len(gpp_freqs) == 0:
gpps_avg_freq = 0
else:
gpps_avg_freq= sum(gpp_freqs)/max(len(gpp_freqs),1)
if len(gpp_freqs + ips_freqs) in [0,1]:
pes_freq_std = 0
pes_freq_coeff_var = 0
else:
pes_freq_std = st.stdev(ips_freqs + gpp_freqs)
pes_freq_coeff_var = st.stdev(ips_freqs + gpp_freqs) / st.mean(ips_freqs + gpp_freqs)
# get area data
ips_area = [mem.get_area() for mem in ips]
gpp_area = [mem.get_area() for mem in gpps]
if len(ips_area) == 0:
ips_total_area = 0
else:
ips_total_area = sum(ips_area)
if len(ips_area) in [0,1]:
ips_area_std = 0
ips_area_coeff_var = 0
else:
ips_area_std = st.stdev(ips_area)
ips_area_coeff_var = st.stdev(ips_area) / st.mean(ips_area)
if len(gpp_area) == 0:
gpps_total_area = 0
else:
gpps_total_area = sum(gpp_area)
if len(ips_area + gpp_area) in [0,1]:
pes_area_std = 0
pes_area_coeff_var = 0
else:
pes_area_std = st.stdev(ips_area+gpp_area)
pes_area_coeff_var = st.stdev(ips_area+gpp_area)/st.mean(ips_area+gpp_area)
phase_accelerator_parallelism = {}
for phase, krnls in self.dp_rep.phase_krnl_present.items():
accelerators_in_parallel = []
for krnl in krnls:
accelerators_in_parallel.extend([blk for blk in krnl.get_blocks() if blk.subtype == "ip"])
if len(accelerators_in_parallel) == 0:
continue
phase_accelerator_parallelism[phase] = len(accelerators_in_parallel)
if len(phase_accelerator_parallelism.keys()) == 0:
avg_accel_parallelism = 0
max_accel_parallelism = 0
else:
avg_accel_parallelism = sum(list(phase_accelerator_parallelism.values()))/len(list(phase_accelerator_parallelism.values()))
max_accel_parallelism = max(list(phase_accelerator_parallelism.values()))
phase_gpp_parallelism = {}
for phase, krnls in self.dp_rep.phase_krnl_present.items():
gpps_in_parallel = []
for krnl in krnls:
gpps_in_parallel.extend([blk for blk in krnl.get_blocks() if blk.subtype == "gpp"])
if len(gpps_in_parallel) == 0:
continue
phase_gpp_parallelism[phase] = len(gpps_in_parallel)
if len(phase_gpp_parallelism.keys()) == 0:
avg_gpp_parallelism = 0
max_gpp_parallelism = 0
else:
avg_gpp_parallelism = sum(list(phase_gpp_parallelism.values()))/len(list(phase_gpp_parallelism.values()))
max_gpp_parallelism = max(list(phase_gpp_parallelism.values()))
buses = [el for el in self.dp_rep.get_blocks() if el.subtype == "ic"]
bus_neigh_count = []
for bus in buses:
pe_neighs = [neigh for neigh in bus.get_neighs() if neigh.type == "pe"]
bus_neigh_count.append(len(pe_neighs))
cluster_pe_cnt_avg = st.mean(bus_neigh_count)
if len(bus_neigh_count) in [0,1]:
cluster_pe_cnt_std = 0
cluster_pe_cnt_coeff_var = 0
else:
cluster_pe_cnt_std = st.stdev(bus_neigh_count)
cluster_pe_cnt_coeff_var = st.stdev(bus_neigh_count)/st.mean(bus_neigh_count)
return {
"avg_accel_parallelism": avg_accel_parallelism, "max_accel_parallelism":max_accel_parallelism,
"avg_gpp_parallelism": avg_gpp_parallelism, "max_gpp_parallelism": max_gpp_parallelism,
"ip_cnt":len(ips), "gpp_cnt": len(gpps),
"ips_avg_freq": ips_avg_freq, "gpps_avg_freq":gpps_avg_freq,
"ips_freq_std": ips_freq_std, "pes_freq_std": pes_freq_std,
"ips_freq_coeff_var": ips_freq_coeff_var, "pes_freq_coeff_var": pes_freq_coeff_var,
"ips_total_area": ips_total_area, "gpps_total_area":gpps_total_area,
"ips_area_std": ips_area_std, "pes_area_std": pes_area_std,
"ips_area_coeff_var": ips_area_coeff_var, "pes_area_coeff_var": pes_area_coeff_var,
"pe_total_area":ips_total_area+gpps_total_area,
"loop_itr_ratio_avg":loop_itr_ratio_avg,
"loop_itr_ratio_std":loop_itr_ratio_std,
"loop_itr_ratio_var":loop_itr_ratio_var,
"cluster_pe_cnt_avg":cluster_pe_cnt_avg,
"cluster_pe_cnt_std":cluster_pe_cnt_std,
"cluster_pe_cnt_coeff_var":cluster_pe_cnt_coeff_var
}
    def get_speedup_analysis(self,dse):
        """
        Decompose the speedup of the current best design over a maximally
        "inferior" baseline, per workload and for the full system.

        Speedups are split into customization and parallelism components,
        plus an interference-degradation factor caused by co-running
        workloads (best-design latency of a workload divided by its latency
        when singled out).

        :param dse: design-space-exploration driver; this code relies on its
                    single_out_workload, eval_design,
                    transform_to_most_inferior_design(_before_loop_unrolling)
                    methods and so_far_best_ex_dp / so_far_best_sim_dp
                    attributes
        :return: (workload_speed_up, speedup_avg) — per-workload speedup
                 component dicts and a dict of averages plus full-system
                 speedups
        """
        # lower the design
        workload_speed_up = {}
        customization_first_speed_up_list =[]
        customization_second_speed_up_list = []
        parallelism_first_speed_up_list = []
        parallelism_second_speed_up_list = []
        interference_degradation_list = []
        for workload in self.database.get_workloads_last_task().keys():
            # single out workload in the current best
            cur_best_ex_singled_out_workload,database = dse.single_out_workload(dse.so_far_best_ex_dp, self.database, workload, self.database.db_input.workload_tasks[workload])
            cur_best_sim_dp_singled_out_workload = dse.eval_design(cur_best_ex_singled_out_workload, database)
            # lower the cur best with single out
            most_infer_ex_dp = dse.transform_to_most_inferior_design(dse.so_far_best_ex_dp)
            most_infer_ex_dp_singled_out_workload, database = dse.single_out_workload(most_infer_ex_dp, self.database, workload, self.database.db_input.workload_tasks[workload])
            most_infer_sim_dp_singled_out_workload = dse.eval_design(most_infer_ex_dp_singled_out_workload,database)
            # speed ups: ratios of serial design times / latencies between the
            # inferior baseline and the current best (both singled out)
            customization_first_speed_up = most_infer_sim_dp_singled_out_workload.dp.get_serial_design_time()/cur_best_sim_dp_singled_out_workload.dp.get_serial_design_time()
            parallelism_second_speed_up = cur_best_sim_dp_singled_out_workload.dp.get_par_speedup()
            parallelism_first_speed_up = most_infer_sim_dp_singled_out_workload.dp.get_par_speedup()
            customization_second_speed_up = most_infer_sim_dp_singled_out_workload.dp_stats.get_system_complex_metric("latency")[workload]/cur_best_sim_dp_singled_out_workload.dp.get_serial_design_time()
            interference_degradation = dse.so_far_best_sim_dp.dp_stats.get_system_complex_metric("latency")[workload]/cur_best_sim_dp_singled_out_workload.dp_stats.get_system_complex_metric("latency")[workload]
            workload_speed_up[workload] = {"customization_first_speed_up":customization_first_speed_up,
                                           "parallelism_second_speed_up":parallelism_second_speed_up,
                                           "customization_second_speed_up": customization_second_speed_up,
                                           "parallelism_first_speed_up": parallelism_first_speed_up,
                                           "interference_degradation":interference_degradation}
            customization_first_speed_up_list.append(customization_first_speed_up)
            customization_second_speed_up_list.append(customization_second_speed_up)
            parallelism_first_speed_up_list.append(parallelism_first_speed_up)
            parallelism_second_speed_up_list.append(parallelism_second_speed_up)
            interference_degradation_list.append(interference_degradation)
        # for the entire design (not singled out per workload)
        most_infer_ex_dp = dse.transform_to_most_inferior_design(dse.so_far_best_ex_dp)
        most_infer_sim_dp = dse.eval_design(most_infer_ex_dp, self.database)
        most_infer_ex_before_unrolling_dp = dse.transform_to_most_inferior_design_before_loop_unrolling(dse.so_far_best_ex_dp)
        most_infer_sim_before_unrolling_dp = dse.eval_design(most_infer_ex_before_unrolling_dp, self.database)
        #customization_first_speed_up_full_system = most_infer_sim_dp.dp.get_serial_design_time()/dse.so_far_best_sim_dp.dp.get_serial_design_time()
        #parallelism_second_speed_up_full_system = dse.so_far_best_sim_dp.dp.get_par_speedup()
        #parallelism_first_speed_up_full_system = most_infer_sim_dp.dp.get_par_speedup()
        #customization_second_speed_up_full_system = max(list((most_infer_sim_dp.dp_stats.get_system_complex_metric("latency")).values()))/max(list((dse.so_far_best_sim_dp.dp_stats.get_system_complex_metric("latency")).values()))
        customization_speed_up_full_system = most_infer_sim_dp.dp.get_serial_design_time()/most_infer_sim_before_unrolling_dp.dp.get_serial_design_time()
        loop_unrolling_parallelism_speed_up_full_system = most_infer_sim_before_unrolling_dp.dp.get_serial_design_time()/dse.so_far_best_sim_dp.dp.get_serial_design_time()
        task_level_parallelism_speed_up_full_system = dse.so_far_best_sim_dp.dp.get_serial_design_time()/max(list((dse.so_far_best_sim_dp.dp_stats.get_system_complex_metric("latency")).values()))
        # averages across workloads plus the full-system numbers
        speedup_avg = {"customization_first_speed_up_avg": st.mean(customization_first_speed_up_list),
                       "parallelism_second_speed_up_avg": st.mean(parallelism_second_speed_up_list),
                       "customization_second_speed_up_avg": st.mean(customization_second_speed_up_list),
                       "parallelism_first_speed_up_avg": st.mean(parallelism_first_speed_up_list),
                       "interference_degradation_avg": st.mean(interference_degradation_list),
                       "customization_speed_up_full_system": customization_speed_up_full_system,
                       "loop_unrolling_parallelism_speed_up_full_system": loop_unrolling_parallelism_speed_up_full_system,
                       "task_level_parallelism_speed_up_full_system": task_level_parallelism_speed_up_full_system
                       }
        return workload_speed_up,speedup_avg
def get_memory_system_attr(self):
memory_system_attr = {}
local_memories = [el for el in self.dp_rep.get_blocks() if el.subtype == "sram"]
global_memories = [el for el in self.dp_rep.get_blocks() if el.subtype == "dram"]
buses = [el for el in self.dp_rep.get_blocks() if el.subtype == "ic"]
# get frequency data
local_memory_freqs = [mem.get_block_freq() for mem in local_memories]
global_memory_freqs = [mem.get_block_freq() for mem in global_memories]
if len(local_memory_freqs) == 0:
local_memory_avg_freq = 0
else:
local_memory_avg_freq= sum(local_memory_freqs)/max(len(local_memory_freqs),1)
if len(local_memory_freqs) in [0, 1]:
local_memory_freq_std = 0
local_memory_freq_coeff_var = 0
else:
local_memory_freq_std = st.stdev(local_memory_freqs)
local_memory_freq_coeff_var = st.stdev(local_memory_freqs) / st.mean(local_memory_freqs)
if len(global_memory_freqs) == 0:
global_memory_avg_freq = 0
else:
global_memory_avg_freq= sum(global_memory_freqs)/max(len(global_memory_freqs),1)
# get bus width data
local_memory_bus_widths = [mem.get_block_bus_width() for mem in local_memories]
global_memory_bus_widths = [mem.get_block_bus_width() for mem in global_memories]
if len(local_memory_bus_widths) == 0:
local_memory_avg_bus_width = 0
else:
local_memory_avg_bus_width= sum(local_memory_bus_widths)/max(len(local_memory_bus_widths),1)
if len(local_memory_bus_widths) in [0, 1]:
local_memory_bus_width_std = 0
local_memory_bus_width_coeff_var = 0
else:
local_memory_bus_width_std = st.stdev(local_memory_bus_widths)
local_memory_bus_width_coeff_var = st.stdev(local_memory_bus_widths) / st.mean(local_memory_bus_widths)
if len(global_memory_bus_widths) == 0:
global_memory_avg_bus_width = 0
else:
global_memory_avg_bus_width= sum(global_memory_bus_widths)/max(len(global_memory_bus_widths),1)
#get bytes data
local_memory_bytes = []
for mem in local_memories:
mem_bytes = max(mem.get_area_in_bytes(), config.cacti_min_memory_size_in_bytes) # to make sure we don't go smaller than cacti's minimum size
local_memory_bytes.append((math.ceil(mem_bytes / config.min_mem_size[mem.subtype])) * config.min_mem_size[mem.subtype]) # modulo calculation
if len(local_memory_bytes) == 0:
local_memory_total_bytes = 0
local_memory_bytes_avg = 0
else:
local_memory_total_bytes = sum(local_memory_bytes)
local_memory_bytes_avg = st.mean(local_memory_bytes)
if len(local_memory_bytes) in [0,1]:
local_memory_bytes_std = 0
local_memory_bytes_coeff_var = 0
else:
local_memory_bytes_std = st.stdev(local_memory_bytes)
local_memory_bytes_coeff_var = st.stdev(local_memory_bytes)/max(st.mean(local_memory_bytes),.0000000001)
global_memory_bytes = []
for mem in global_memories:
mem_bytes = max(mem.get_area_in_bytes(), config.cacti_min_memory_size_in_bytes) # to make sure we don't go smaller than cacti's minimum size
global_memory_bytes.append((math.ceil(mem_bytes / config.min_mem_size[mem.subtype])) * config.min_mem_size[mem.subtype]) # modulo calculation
if len(global_memory_bytes) == 0:
global_memory_total_bytes = 0
else:
global_memory_total_bytes = sum(global_memory_bytes)
if len(global_memory_bytes) in [0,1]:
global_memory_bytes_std = 0
global_memory_bytes_coeff_var = 0
else:
global_memory_bytes_std = st.stdev(global_memory_bytes)
global_memory_bytes_coeff_var = st.stdev(global_memory_bytes) / max(st.mean(global_memory_bytes),.00000001)
# get area data
local_memory_area = [mem.get_area() for mem in local_memories]
global_memory_area = [mem.get_area() for mem in global_memories]
if len(local_memory_area) == 0:
local_memory_total_area = 0
else:
local_memory_total_area = sum(local_memory_area)
if len(local_memory_area) in [0,1]:
local_memory_area_std = 0
local_memory_area_coeff_var = 0
else:
local_memory_area_std = st.stdev(local_memory_area)
local_memory_area_coeff_var = st.stdev(local_memory_area) / st.mean(local_memory_area)
if len(global_memory_area) == 0:
global_memory_total_area = 0
else:
global_memory_total_area = sum(global_memory_area)
# get traffic data
local_total_traffic = 0
for mem in local_memories:
block_s_krnels = self.get_krnels_of_block(mem)
for krnl in block_s_krnels:
local_total_traffic += krnl.calc_traffic_per_block(mem)
local_traffic_per_mem = {}
for mem in local_memories:
local_traffic_per_mem[mem] =0
block_s_krnels = self.get_krnels_of_block(mem)
for krnl in block_s_krnels:
local_traffic_per_mem[mem] += krnl.calc_traffic_per_block(mem)
global_total_traffic = 0
for mem in global_memories:
block_s_krnels = self.get_krnels_of_block(mem)
for krnl in block_s_krnels:
global_total_traffic += krnl.calc_traffic_per_block(mem)
local_bus_traffic = {}
for mem in local_memories:
local_traffic = 0
block_s_krnels = self.get_krnels_of_block(mem)
for krnl in block_s_krnels:
local_traffic += krnl.calc_traffic_per_block(mem)
for bus in buses:
if mem in bus.get_neighs():
if bus not in local_bus_traffic.keys():
local_bus_traffic[bus] = 0
local_bus_traffic[bus] += local_traffic
break
# get traffic reuse
local_traffic_reuse_no_read_ratio = []
local_traffic_reuse_no_read_in_bytes = []
local_traffic_reuse_no_read_in_size = []
local_traffic_reuse_with_read_ratio= []
local_traffic_reuse_with_read_in_bytes = []
local_traffic_reuse_with_read_in_size = []
mem_local_traffic = {}
for mem in local_memories:
local_traffic = 0
block_s_krnels = self.get_krnels_of_block(mem)
for krnl in block_s_krnels:
local_traffic += krnl.calc_traffic_per_block(mem)
mem_local_traffic[mem] = local_traffic
mem_bytes = max(mem.get_area_in_bytes(), config.cacti_min_memory_size_in_bytes) # to make sure we don't go smaller than cacti's minimum size
#mem_bytes_modulo = (math.ceil(mem_bytes/config.min_mem_size[mem.subtype]))*config.min_mem_size[mem.subtype] # modulo calculation
mem_size = mem.get_area()
reuse_ratio_no_read = max((local_traffic/mem_bytes)-2, 0)
local_traffic_reuse_no_read_ratio.append(reuse_ratio_no_read)
local_traffic_reuse_no_read_in_bytes.append(reuse_ratio_no_read*mem_bytes)
local_traffic_reuse_no_read_in_size.append(reuse_ratio_no_read*mem_size)
reuse_ratio_with_read = max((local_traffic/mem_bytes)-1, 0)
local_traffic_reuse_with_read_ratio.append(reuse_ratio_with_read)
local_traffic_reuse_with_read_in_bytes.append(reuse_ratio_with_read*mem_bytes)
local_traffic_reuse_with_read_in_size.append(reuse_ratio_with_read*mem_size)
if len(local_memories) == 0:
local_total_traffic_reuse_no_read_ratio = 0
local_total_traffic_reuse_no_read_in_bytes = 0
local_total_traffic_reuse_no_read_in_size = 0
local_total_traffic_reuse_with_read_ratio = 0
local_total_traffic_reuse_with_read_in_bytes = 0
local_total_traffic_reuse_with_read_in_size = 0
local_traffic_per_mem_avg = 0
else:
local_total_traffic_reuse_no_read_ratio = max((local_total_traffic/local_memory_total_bytes)-2, 0)
local_total_traffic_reuse_no_read_in_bytes = sum(local_traffic_reuse_no_read_in_bytes)
local_total_traffic_reuse_no_read_in_size = sum(local_traffic_reuse_no_read_in_size)
local_total_traffic_reuse_with_read_ratio = max((local_total_traffic/local_memory_total_bytes)-1, 0)
local_total_traffic_reuse_with_read_in_bytes = sum(local_traffic_reuse_with_read_in_bytes)
local_total_traffic_reuse_with_read_in_size = sum(local_traffic_reuse_with_read_in_size)
local_traffic_per_mem_avg = st.mean(list(local_traffic_per_mem.values()))
if len(local_bus_traffic) == 0:
local_bus_traffic_avg = 0
else:
local_bus_traffic_avg = st.mean(list(local_bus_traffic.values()))
if len(local_memories) in [0,1]:
local_traffic_per_mem_std = 0
local_traffic_per_mem_coeff_var = 0
else:
local_traffic_per_mem_std = st.stdev(list(local_traffic_per_mem.values()))
local_traffic_per_mem_coeff_var = st.stdev(list(local_traffic_per_mem.values()))/st.mean(list(local_traffic_per_mem.values()))
if len(local_bus_traffic) in [0,1]:
local_bus_traffic_std = 0
local_bus_traffic_coeff_var = 0
else:
local_bus_traffic_std = st.stdev(list(local_bus_traffic.values()))
local_bus_traffic_coeff_var = st.stdev(list(local_bus_traffic.values()))/st.mean(list(local_bus_traffic.values()))
# get traffic reuse
global_traffic_reuse_no_read_ratio= []
global_traffic_reuse_no_read_in_bytes = []
global_traffic_reuse_no_read_in_size = []
global_traffic_reuse_with_read_ratio= []
global_traffic_reuse_with_read_in_bytes = []
global_traffic_reuse_with_read_in_size = []
for mem in global_memories:
global_traffic = 0
block_s_krnels = self.get_krnels_of_block(mem)
for krnl in block_s_krnels:
global_traffic += krnl.calc_traffic_per_block(mem)
mem_bytes = max(mem.get_area_in_bytes(), config.cacti_min_memory_size_in_bytes) # to make sure we don't go smaller than cacti's minimum size
#mem_bytes_modulo = (math.ceil(mem_bytes/config.min_mem_size[mem.subtype]))*config.min_mem_size[mem.subtype] # modulo calculation
mem_size = mem.get_area()
reuse_ratio_no_read = max((global_traffic/mem_bytes)-2, 0)
global_traffic_reuse_no_read_ratio.append(reuse_ratio_no_read)
global_traffic_reuse_no_read_in_bytes.append(reuse_ratio_no_read*mem_bytes)
global_traffic_reuse_no_read_in_size.append(reuse_ratio_no_read*mem_size)
reuse_ratio_with_read = max((global_traffic/mem_bytes)-1, 0)
global_traffic_reuse_with_read_ratio.append(reuse_ratio_with_read)
global_traffic_reuse_with_read_in_bytes.append(reuse_ratio_with_read*mem_bytes)
global_traffic_reuse_with_read_in_size.append(reuse_ratio_with_read*mem_size)
if len(global_memories) == 0:
global_total_traffic_reuse_no_read_ratio = 0
global_total_traffic_reuse_no_read_in_bytes = 0
global_total_traffic_reuse_no_read_in_size = 0
global_total_traffic_reuse_with_read_ratio = 0
global_total_traffic_reuse_with_read_in_bytes = 0
global_total_traffic_reuse_with_read_in_size = 0
else:
global_total_traffic_reuse_no_read_ratio = max((global_total_traffic/global_memory_total_bytes)-2, 0)
global_total_traffic_reuse_no_read_in_bytes = sum(global_traffic_reuse_no_read_in_bytes)
global_total_traffic_reuse_no_read_in_size = sum(global_traffic_reuse_no_read_in_size)
global_total_traffic_reuse_with_read_ratio = max((global_total_traffic/global_memory_total_bytes)-1, 0)
global_total_traffic_reuse_with_read_in_bytes = sum(global_traffic_reuse_with_read_in_bytes)
global_total_traffic_reuse_with_read_in_size = sum(global_traffic_reuse_with_read_in_size)
# per cluster start
# get traffic reuse
local_traffic_reuse_no_read_in_bytes_per_cluster = {}
local_traffic_reuse_no_read_in_size_per_cluster = {}
local_traffic_reuse_with_read_ratio_per_cluster = {}
local_traffic_reuse_with_read_in_bytes_per_cluster = {}
local_traffic_reuse_with_read_in_size_per_cluster = {}
for bus in buses:
mems = [blk for blk in bus.get_neighs() if blk.subtype == "sram"]
local_traffic_reuse_no_read_in_bytes_per_cluster[bus] = 0
local_traffic_reuse_no_read_in_size_per_cluster[bus] = 0
local_traffic_reuse_with_read_in_bytes_per_cluster[bus] = 0
local_traffic_reuse_with_read_in_size_per_cluster[bus] = 0
for mem in mems:
local_traffic = 0
block_s_krnels = self.get_krnels_of_block(mem)
for krnl in block_s_krnels:
local_traffic += krnl.calc_traffic_per_block(mem)
mem_bytes = max(mem.get_area_in_bytes(), config.cacti_min_memory_size_in_bytes) # to make sure we don't go smaller than cacti's minimum size
#mem_bytes_modulo = (math.ceil(mem_bytes/config.min_mem_size[mem.subtype]))*config.min_mem_size[mem.subtype] # modulo calculation
mem_size = mem.get_area()
reuse_ratio_no_read_per_cluster = max((local_traffic/mem_bytes)-2, 0)
local_traffic_reuse_no_read_in_bytes_per_cluster[bus]+= (reuse_ratio_no_read_per_cluster*mem_bytes)
local_traffic_reuse_no_read_in_size_per_cluster[bus]+=(reuse_ratio_no_read_per_cluster*mem_size)
reuse_ratio_with_read_per_cluster = max((local_traffic/mem_bytes)-1, 0)
local_traffic_reuse_with_read_in_bytes_per_cluster[bus] += (reuse_ratio_with_read_per_cluster*mem_bytes)
local_traffic_reuse_with_read_in_size_per_cluster[bus] += (reuse_ratio_with_read_per_cluster*mem_size)
local_total_traffic_reuse_no_read_in_size_per_cluster_avg = st.mean(list(local_traffic_reuse_no_read_in_size_per_cluster.values()))
local_total_traffic_reuse_with_read_in_size_per_cluster_avg = st.mean(list(local_traffic_reuse_with_read_in_size_per_cluster.values()))
local_total_traffic_reuse_no_read_in_bytes_per_cluster_avg = st.mean(list(local_traffic_reuse_no_read_in_bytes_per_cluster.values()))
local_total_traffic_reuse_with_read_in_bytes_per_cluster_avg = st.mean(list(local_traffic_reuse_with_read_in_bytes_per_cluster.values()))
if len(buses) in [0,1]:
local_total_traffic_reuse_no_read_in_size_per_cluster_std = 0
local_total_traffic_reuse_with_read_in_size_per_cluster_std = 0
local_total_traffic_reuse_no_read_in_bytes_per_cluster_std = 0
local_total_traffic_reuse_with_read_in_bytes_per_cluster_std = 0
local_total_traffic_reuse_no_read_in_size_per_cluster_var = 0
local_total_traffic_reuse_with_read_in_size_per_cluster_var = 0
local_total_traffic_reuse_no_read_in_bytes_per_cluster_var = 0
local_total_traffic_reuse_with_read_in_bytes_per_cluster_var = 0
else:
local_total_traffic_reuse_no_read_in_size_per_cluster_std = st.stdev(
list(local_traffic_reuse_no_read_in_size_per_cluster.values()))
local_total_traffic_reuse_with_read_in_size_per_cluster_std = st.stdev(
list(local_traffic_reuse_with_read_in_size_per_cluster.values()))
local_total_traffic_reuse_no_read_in_bytes_per_cluster_std = st.stdev(
list(local_traffic_reuse_no_read_in_bytes_per_cluster.values()))
local_total_traffic_reuse_with_read_in_bytes_per_cluster_std = st.stdev(
list(local_traffic_reuse_with_read_in_bytes_per_cluster.values()))
local_total_traffic_reuse_no_read_in_size_per_cluster_var = st.stdev(list(local_traffic_reuse_no_read_in_size_per_cluster.values()))/max(st.mean(list(local_traffic_reuse_no_read_in_size_per_cluster.values())),.000001)
local_total_traffic_reuse_with_read_in_size_per_cluster_var = st.stdev(list(local_traffic_reuse_with_read_in_size_per_cluster.values()))/max(st.mean(list(local_traffic_reuse_with_read_in_size_per_cluster.values())),.0000001)
local_total_traffic_reuse_no_read_in_bytes_per_cluster_var = st.stdev(list(local_traffic_reuse_no_read_in_bytes_per_cluster.values()))/max(st.mean(list(local_traffic_reuse_no_read_in_bytes_per_cluster.values())),.000000001)
local_total_traffic_reuse_with_read_in_bytes_per_cluster_var = st.stdev(list(local_traffic_reuse_with_read_in_bytes_per_cluster.values()))/max(st.mean(list(local_traffic_reuse_with_read_in_bytes_per_cluster.values())),.00000001)
# per cluseter end
locality_in_bytes = 0
for krnl in self.__kernels:
pe = [blk for blk in krnl.get_blocks() if blk.type == "pe"][0]
mems = [blk for blk in krnl.get_blocks() if blk.type == "mem"]
for mem in mems:
path_length = len(self.dp_rep.get_hardware_graph().get_path_between_two_vertecies(pe, mem))
locality_in_bytes += krnl.calc_traffic_per_block(mem)/(path_length-2)
"""
#parallelism data
for mem in local_memories:
bal_traffic = 0
block_s_krnels = self.get_krnels_of_block(mem)
for krnl in blocks_krnels:
krnl.block_phase_read_dict[mem][self.phase_num] += read_work
"""
return {"local_total_traffic":local_total_traffic, "global_total_traffic":global_total_traffic,
"local_total_traffic_reuse_no_read_ratio": local_total_traffic_reuse_no_read_ratio, "global_total_traffic_reuse_no_read_ratio": global_total_traffic_reuse_no_read_ratio,
"local_total_traffic_reuse_no_read_in_bytes": local_total_traffic_reuse_no_read_in_bytes, "global_total_traffic_reuse_no_read_in_bytes": global_total_traffic_reuse_no_read_in_bytes,
"local_total_traffic_reuse_no_read_in_size": local_total_traffic_reuse_no_read_in_size, "global_total_traffic_reuse_no_read_in_size": global_total_traffic_reuse_no_read_in_size,
"local_total_traffic_reuse_with_read_ratio": local_total_traffic_reuse_with_read_ratio,
"global_total_traffic_reuse_with_read_ratio": global_total_traffic_reuse_with_read_ratio,
"local_total_traffic_reuse_with_read_in_bytes": local_total_traffic_reuse_with_read_in_bytes,
"global_total_traffic_reuse_with_read_in_bytes": global_total_traffic_reuse_with_read_in_bytes,
"local_total_traffic_reuse_with_read_in_size": local_total_traffic_reuse_with_read_in_size,
"global_total_traffic_reuse_with_read_in_size": global_total_traffic_reuse_with_read_in_size,
"local_total_traffic_reuse_no_read_in_bytes_per_cluster_avg": local_total_traffic_reuse_no_read_in_bytes_per_cluster_avg,
"local_total_traffic_reuse_no_read_in_bytes_per_cluster_std": local_total_traffic_reuse_no_read_in_bytes_per_cluster_std,
"local_total_traffic_reuse_no_read_in_bytes_per_cluster_var": local_total_traffic_reuse_no_read_in_bytes_per_cluster_var,
"local_total_traffic_reuse_no_read_in_size_per_cluster_avg": local_total_traffic_reuse_no_read_in_size_per_cluster_avg,
"local_total_traffic_reuse_no_read_in_size_per_cluster_std": local_total_traffic_reuse_no_read_in_size_per_cluster_std,
"local_total_traffic_reuse_no_read_in_size_per_cluster_var": local_total_traffic_reuse_no_read_in_size_per_cluster_var,
"local_total_traffic_reuse_with_read_in_bytes_per_cluster_avg": local_total_traffic_reuse_with_read_in_bytes_per_cluster_avg,
"local_total_traffic_reuse_with_read_in_bytes_per_cluster_std": local_total_traffic_reuse_with_read_in_bytes_per_cluster_std,
"local_total_traffic_reuse_with_read_in_bytes_per_cluster_var": local_total_traffic_reuse_with_read_in_bytes_per_cluster_var,
"local_total_traffic_reuse_with_read_in_size_per_cluster_avg": local_total_traffic_reuse_with_read_in_size_per_cluster_avg,
"local_total_traffic_reuse_with_read_in_size_per_cluster_std": local_total_traffic_reuse_with_read_in_size_per_cluster_std,
"local_total_traffic_reuse_with_read_in_size_per_cluster_var": local_total_traffic_reuse_with_read_in_size_per_cluster_var,
"global_memory_avg_freq": global_memory_avg_freq,
"local_memory_avg_freq": local_memory_avg_freq,
"local_memory_freq_coeff_var": local_memory_freq_coeff_var, "local_memory_freq_std": local_memory_freq_std,
"global_memory_avg_bus_width": global_memory_avg_bus_width,
"local_memory_avg_bus_width": local_memory_avg_bus_width,
"local_memory_bus_width_coeff_var": local_memory_bus_width_coeff_var,
"local_memory_bus_width_std": local_memory_bus_width_std,
"global_memory_total_area": global_memory_total_area,
"local_memory_total_area":local_memory_total_area,
"local_memory_area_coeff_var": local_memory_area_coeff_var, "local_memory_area_std": local_memory_area_std,
"global_memory_total_bytes": global_memory_total_bytes,
"local_memory_total_bytes": local_memory_total_bytes,
"local_memory_bytes_avg": local_memory_bytes_avg,
"local_memory_bytes_coeff_var": local_memory_bytes_coeff_var, "local_memory_bytes_std": local_memory_bytes_std,
"memory_total_area":global_memory_total_area+local_memory_total_area,
"local_mem_cnt":len(local_memory_freqs),
"local_memory_traffic_per_mem_avg": local_traffic_per_mem_avg,
"local_memory_traffic_per_mem_std": local_traffic_per_mem_coeff_var,
"local_memory_traffic_per_mem_coeff_var": local_traffic_per_mem_coeff_var,
"local_bus_traffic_avg": local_bus_traffic_avg,
"local_bus_traffic_std": local_bus_traffic_std,
"local_bus_traffic_coeff_var": local_bus_traffic_coeff_var,
"locality_in_bytes": locality_in_bytes
}
def get_krnels_of_block(self, block):
block_s_tasks = block.get_tasks_of_block()
block_s_krnels = []
# get krnels of block
for task in block_s_tasks:
for krnl in self.__kernels:
if krnl.get_task_name() == task.get_name():
block_s_krnels.append(krnl)
return block_s_krnels
def get_krnels_of_block_clustered_by_workload(self, block):
workload_kernels = {}
block_s_tasks = block.get_tasks_of_block()
block_s_krnels = []
# get krnels of block
for task in block_s_tasks:
for krnl in self.__kernels:
if krnl.get_task_name() == task.get_name():
workload = self.database.db_input.task_workload[krnl.get_task_name()],
if workload not in workload_kernels.keys():
workload_kernels[workload] =[]
workload_kernels[workload].append(krnl)
return workload_kernels
def get_bus_system_attr(self):
bus_system_attr = {}
# in reality there is only one system bus
for el, val in self.infer_system_bus_attr().items():
bus_system_attr[el] = val
for el, val in self.infer_local_buses_attr().items():
bus_system_attr[el] = val
return bus_system_attr
    def infer_system_bus_attr(self):
        """Infer attributes of the (single) system bus from the DRAM blocks.

        The system bus is not modelled explicitly, so its frequency/width are
        taken as the max over all DRAM blocks, and its "actual bandwidth" is
        derived from the read/write work rates of the kernels mapped onto
        those DRAMs. Returns a dict of "system_bus_*" entries; all values are
        zero when the design has no DRAM.
        """
        # has to get the max, as for now, system bus is infered and not imposed
        highest_freq = 0
        highest_width = 0
        system_mems = []
        # every DRAM block is treated as sitting on the system bus
        for block in self.dp_rep.get_blocks():
            if block.subtype == "dram":
                system_mems.append(block)
        system_mems_avg_work_rates = []
        system_mems_max_work_rates = []
        for mem in system_mems:
            block_work_phase = {}
            # per-phase aggregate rates: work divided by the phase latency
            phase_write_work_rate = {}
            phase_read_work_rate = {}
            krnls_of_block = self.get_krnls_of_block(mem)
            for krnl in krnls_of_block:
                for phase, work in krnl.block_phase_write_dict[mem].items():
                    if phase not in phase_write_work_rate.keys():
                        phase_write_work_rate[phase] = 0
                    if krnl.stats.phase_latency_dict[phase] == 0:
                        # zero-latency phase contributes no rate (avoids div-by-zero)
                        phase_write_work_rate[phase] += 0
                    else:
                        phase_write_work_rate[phase] += (work/krnl.stats.phase_latency_dict[phase])
            for krnl in krnls_of_block:
                for phase, work in krnl.block_phase_read_dict[mem].items():
                    if phase not in phase_read_work_rate.keys():
                        phase_read_work_rate[phase] = 0
                    if krnl.stats.phase_latency_dict[phase] == 0:
                        phase_read_work_rate[phase] += 0
                    else:
                        phase_read_work_rate[phase] += (work/krnl.stats.phase_latency_dict[phase])
            # NOTE(review): assumes every DRAM has at least one phase entry;
            # an entirely unused DRAM would raise ZeroDivisionError — confirm.
            avg_write_work_rate = sum(list(phase_write_work_rate.values()))/len(list(phase_write_work_rate.values()))
            avg_read_work_rate = sum(list(phase_read_work_rate.values()))/len(list(phase_read_work_rate.values()))
            max_write_work_rate = max(list(phase_write_work_rate.values()))
            max_read_work_rate = max(list(phase_read_work_rate.values()))
            system_mems_avg_work_rates.append(max(avg_read_work_rate, avg_write_work_rate))
            system_mems_max_work_rates.append(max(max_write_work_rate, max_read_work_rate))
        # there might be no system bus at the moment
        if len(system_mems) == 0:
            count = 0
            system_mem_theoretical_bandwidth = 0
            highest_width = 0
            highest_freq= 0
            system_mem_avg_work_rate = system_mem_max_work_rate = 0
        else:
            count = 1
            highest_width= max([system_mem.get_block_bus_width() for system_mem in system_mems])
            highest_freq= max([system_mem.get_block_freq() for system_mem in system_mems])
            # theoretical bandwidth = width * frequency
            system_mem_theoretical_bandwidth = highest_width*highest_freq
            system_mem_avg_work_rate = sum(system_mems_avg_work_rates)/len(system_mems_avg_work_rates)
            # average of max
            system_mem_max_work_rate = sum(system_mems_max_work_rates)/len(system_mems_max_work_rates)
        return {"system_bus_count":count, "system_bus_avg_freq":highest_freq, "system_bus_avg_bus_width":highest_width,
                "system_bus_avg_theoretical_bandwidth":system_mem_theoretical_bandwidth,
                "system_bus_avg_actual_bandwidth": system_mem_avg_work_rate,
                "system_bus_max_actual_bandwidth": system_mem_max_work_rate
                }
def infer_if_is_a_local_bus(self, block):
if block.type == "ic":
block_ic_mem_neighs = [el for el in block.get_neighs() if el.type == "mem"]
block_ic_dram_mem_neighs = [el for el in block.get_neighs() if el.subtype == "dram"]
if not len(block_ic_mem_neighs) == len(block_ic_dram_mem_neighs):
return True
return False
    # find the number of buses that do not have a DRAM connected to them.
    # Note that it would be better if the system bus were set explicitly rather than inferred.
    # TODO for later
    def infer_local_buses_attr(self):
        """Infer aggregate attributes of the local buses of the design.

        A local bus is an ic with at least one non-DRAM memory neighbour (see
        infer_if_is_a_local_bus). Returns a flat dict of "local_bus_*" and
        "local_channel_*" statistics (count, avg/std/coeff-of-variation of
        frequency, width, and observed bandwidth). Spread statistics are
        zeroed when fewer than two local buses exist; all averages are zeroed
        when there is no local bus at all.
        """
        attr_val = {}
        # get all the local buses
        local_buses = []
        for block in self.dp_rep.get_blocks():
            if self.infer_if_is_a_local_bus(block):
                local_buses.append(block)
        # get all the frequenies
        freq_list = []
        for bus in local_buses:
            freq_list.append(bus.get_block_freq())
        # get all the bus widths
        bus_width_list = []
        for bus in local_buses:
            bus_width_list.append(bus.get_block_bus_width())
        # theoretical bandwidth per bus = width * frequency
        bus_bandwidth_list = []
        for bus in local_buses:
            bus_bandwidth_list.append(bus.get_block_bus_width()*bus.get_block_freq())
        # observed work rates per bus; traffic destined for a DRAM slave is
        # excluded (that traffic is attributed to the system bus instead)
        local_buses_avg_work_rate_list = []
        local_buses_max_work_rate_list = []
        for bus in local_buses:
            work_rate = []
            for pipe_cluster in bus.get_pipe_clusters():
                pathlet_phase_work_rate = pipe_cluster.get_pathlet_phase_work_rate()
                for pathlet, phase_work_rate in pathlet_phase_work_rate.items():
                    if not pathlet.get_out_pipe().get_slave().subtype == "dram":
                        work_rate.extend(list(phase_work_rate.values()))
            local_buses_avg_work_rate_list.append(sum(work_rate)/len(work_rate))
            local_buses_max_work_rate_list.append(max(work_rate))
        # per-channel (pipe-cluster) observed work rates, same DRAM exclusion
        local_channels_avg_work_rate_list = []
        local_channels_max_work_rate_list = []
        for bus in local_buses:
            for pipe_cluster in bus.get_pipe_clusters():
                work_rate = []
                pathlet_phase_work_rate = pipe_cluster.get_pathlet_phase_work_rate()
                for pathlet, phase_work_rate in pathlet_phase_work_rate.items():
                    if not pathlet.get_out_pipe().get_slave().subtype == "dram":
                        work_rate.extend(list(phase_work_rate.values()))
                # channels with no non-DRAM traffic are skipped entirely
                if len(work_rate) == 0:
                    continue
                local_channels_avg_work_rate_list.append(sum(work_rate)/max(len(work_rate),1))
                local_channels_max_work_rate_list.append(max(work_rate))
        # count active (non-DRAM, non-empty) channels per bus
        # NOTE(review): work_rate is reset per *bus*, not per channel, so once
        # one channel has traffic every subsequent channel of the same bus is
        # counted as active — confirm this is intended.
        local_channels_cnt_per_bus = {}
        for bus in local_buses:
            local_channels_cnt_per_bus[bus] =0
            work_rate = []
            for pipe_cluster in bus.get_pipe_clusters():
                pathlet_phase_work_rate = pipe_cluster.get_pathlet_phase_work_rate()
                for pathlet, phase_work_rate in pathlet_phase_work_rate.items():
                    if not pathlet.get_out_pipe().get_slave().subtype == "dram":
                        work_rate.extend(list(phase_work_rate.values()))
                if len(work_rate) == 0:
                    continue
                local_channels_cnt_per_bus[bus] +=1
        attr_val["local_bus_count"] = len(local_buses)
        # without any local bus, every aggregate defaults to zero
        if len(local_buses) == 0:
            attr_val["avg_freq"] = 0
            attr_val["local_bus_avg_freq"] = 0
            attr_val["local_bus_avg_bus_width"] = 0
            attr_val["local_bus_avg_theoretical_bandwidth"] = 0
            attr_val["local_bus_avg_actual_bandwidth"] = 0
            attr_val["local_bus_max_actual_bandwidth"] = 0
            attr_val["local_bus_cnt"] = 0
            attr_val["local_channel_avg_actual_bandwidth"] = 0
            attr_val["local_channel_max_actual_bandwidth"] = 0
            attr_val["local_channel_count_per_bus_avg"] = 0
        else:
            attr_val["avg_freq"] = sum(freq_list) / len(freq_list)
            attr_val["local_bus_avg_freq"] = sum(freq_list) / len(freq_list)
            attr_val["local_bus_avg_bus_width"] = sum(bus_width_list)/len(freq_list)
            attr_val["local_bus_avg_theoretical_bandwidth"] = sum(bus_bandwidth_list)/len(bus_bandwidth_list)
            attr_val["local_bus_avg_actual_bandwidth"] = sum(local_buses_avg_work_rate_list)/len(local_buses_avg_work_rate_list)
            # getting average of max
            attr_val["local_bus_max_actual_bandwidth"] = sum(local_buses_max_work_rate_list)/len(local_buses_max_work_rate_list)
            attr_val["local_bus_cnt"] = len(bus_width_list)
            attr_val["local_channel_avg_actual_bandwidth"] = st.mean(local_channels_avg_work_rate_list)
            attr_val["local_channel_max_actual_bandwidth"] = st.mean(local_channels_max_work_rate_list)
            attr_val["local_channel_count_per_bus_avg"] = st.mean(list(local_channels_cnt_per_bus.values()))
        # spread statistics need at least two samples
        if len(local_buses) in [0,1]:
            attr_val["local_bus_freq_std"] = 0
            attr_val["local_bus_freq_coeff_var"] = 0
            attr_val["local_bus_bus_width_std"] = 0
            attr_val["local_bus_bus_width_coeff_var"] = 0
            attr_val["local_bus_actual_bandwidth_std"] = 0
            attr_val["local_bus_actual_bandwidth_coeff_var"] = 0
            attr_val["local_channel_actual_bandwidth_std"] = 0
            attr_val["local_channel_actual_bandwidth_coeff_var"] = 0
            attr_val["local_channel_count_per_bus_std"] = 0
            attr_val["local_channel_count_per_bus_coeff_var"] = 0
        else:
            attr_val["local_bus_freq_std"] = st.stdev(freq_list)
            attr_val["local_bus_freq_coeff_var"] = st.stdev(freq_list)/st.mean(freq_list)
            attr_val["local_bus_bus_width_std"] = st.stdev(bus_width_list)
            attr_val["local_bus_bus_width_coeff_var"] = st.stdev(bus_width_list)/st.mean(bus_width_list)
            attr_val["local_bus_actual_bandwidth_std"] = st.stdev(local_buses_avg_work_rate_list)
            attr_val["local_bus_actual_bandwidth_coeff_var"] = st.stdev(local_buses_avg_work_rate_list)/st.mean(local_buses_avg_work_rate_list)
            attr_val["local_channel_actual_bandwidth_std"] = st.stdev(local_channels_avg_work_rate_list)
            attr_val["local_channel_actual_bandwidth_coeff_var"] = st.stdev(local_channels_avg_work_rate_list)/st.mean(local_channels_avg_work_rate_list)
            attr_val["local_channel_count_per_bus_std"] = st.stdev(list(local_channels_cnt_per_bus.values()))
            attr_val["local_channel_count_per_bus_coeff_var"] = st.stdev(list(local_channels_cnt_per_bus.values()))/st.mean(list(local_channels_cnt_per_bus.values()))
        return attr_val
# iterate through all the design points and
# collect their stats
def collect_stats(self):
for type, id in self.dp_rep.get_designs_SOCs():
# level 1 reduction for intra design questions
self.intra_design_reduction(type, id)
# level 2 questions for across/inter design questions
self.inter_design_reduction(type, id)
# level 1 reduction for intra design questions
def intra_design_reduction(self, SOC_type, SOC_id):
kernel_latency_dict = {}
latency_list = []
kernel_metric_values = defaultdict(lambda: defaultdict(list))
for dp in self.sim_dp_container.design_point_list:
for kernel_ in dp.get_kernels():
for metric in config.all_metrics:
kernel_metric_values[kernel_.get_task_name()][metric].append\
(kernel_.stats.get_metric(metric))
for kernel in self.__kernels:
for metric in config.all_metrics:
kernel.stats.set_stats_directly(metric,
self.reduce(kernel_metric_values[kernel.get_task_name()][metric]))
def get_kernels(self):
return self.__kernels
# Functionality: level 2 questions for across/inter design questions
def inter_design_reduction(self, SOC_type, SOC_id):
for metric_name in config.all_metrics:
self.set_SOC_metric_value(metric_name, SOC_type, SOC_id)
self.set_system_complex_metric(metric_name) # data per System
# hot = longest latency
def get_hot_kernel_SOC(self, SOC_type, SOC_id, metric="latency", krnel_rank=0):
kernels_on_SOC = [kernel for kernel in self.__kernels if kernel.SOC_type == SOC_type and kernel.SOC_id == SOC_id]
for k in kernels_on_SOC:
if (k.stats.get_metric(metric) is None):
print("metric is " + metric)
sorted_kernels_hot_to_cold = sorted(kernels_on_SOC, key=lambda kernel: kernel.stats.get_metric(metric), reverse=True)
return sorted_kernels_hot_to_cold[krnel_rank]
# get the hot kernels if the system. Hot means the bottleneck or rather the
# most power/energy/area/performance consuming of the system. This is determined
# by the input argument metric.
# Variables:
    #          krnel_rank: rank of the kernel to pick once we have already sorted the kernels
    #                      based on hotness. 0 means the hottest; higher values mean colder ones.
    #                      We use this value to sometimes target less hot kernels when the hot
    #                      kernel can not be improved any more.
def get_hot_kernel_system_complex(self, metric="latency", krnel_rank=0):
hot_krnel_list = []
for SOC_type, SOC_id in self.get_designs_SOCs():
hot_krnel_list.append(self.get_hot_kernel_SOC(SOC_type, SOC_id, metric, krnel_rank))
return sorted(hot_krnel_list, key=lambda kernel: kernel.stats.get_metric(metric), reverse=True)[0]
# sort the blocks for a kernel based how much impact they have on a metric
def get_hot_block_of_krnel_sorted(self, krnl_task_name, metric="latency"):
# find the hottest kernel
#hot_krnel = self.get_hot_kernel_SOC(SOC_type, SOC_id, metric, krnel_rank)
krnel_of_interest = [krnel for krnel in self.__kernels if krnel.get_task_name() == krnl_task_name]
assert(len(krnel_of_interest) == 1), "can't have no krnel with this name or more than one"
krnl = krnel_of_interest[0]
# find the hot block accordingly
# TODO: this is not quit right since
# hot kernel of different designs might have different
# block bottlenecks, but here we just use the
# the block bottleneck of the representative design
# since self.__kernels are set to this designs kernels
kernel_blck_sorted : Block = krnl.stats.get_block_sorted(metric)
return kernel_blck_sorted
# -------------------------------------------
# Functionality:
# get the hot block among the blocks that a kernel resides in based how much impact they have on a metric.
# Hot means the bottleneck or rather the
# most power/energy/area/performance consuming of the system. This is determined
# by the input argument metric.
# -------------------------------------------
def get_hot_block_of_krnel(self, krnl_task_name, metric="latency"):
# find the hottest kernel
#hot_krnel = self.get_hot_kernel_SOC(SOC_type, SOC_id, metric, krnel_rank)
krnel_of_interest = [krnel for krnel in self.__kernels if krnel.get_task_name() == krnl_task_name]
assert(len(krnel_of_interest) == 1), "can't have no krnel with this name or more than one"
krnl = krnel_of_interest[0]
# find the hot block accordingly
# TODO: this is not quit right since
# hot kernel of different designs might have different
# block bottlenecks, but here we just use the
# the block bottleneck of the representative design
# since self.__kernels are set to this designs kernels
kernel_blck_bottleneck: Block = krnl.stats.get_block_bottleneck(metric)
return kernel_blck_bottleneck
# -------------------------------------------
# Functionality:
# get the hot block among the blocks of the entire SOC based on the metric and kernel rank.
# Hot means the bottleneck or rather the
# most power/energy/area/performance consuming of the system. This is determined
# by the input argument metric.
# Variables:
    #      krnel_rank: rank of the kernel to pick once we have already sorted the kernels
    #                  based on hotness. 0 means the hottest; higher values mean colder ones.
    #                  We use this value to sometimes target less hot kernels when the hot
    #                  kernel can not be improved any more.
# -------------------------------------------
def get_hot_block_SOC(self, SOC_type, SOC_id, metric="latency", krnel_rank=0):
# find the hottest kernel
hot_krnel = self.get_hot_kernel_SOC(SOC_type, SOC_id, metric, krnel_rank)
# find the hot block accordingly
# TODO: this is not quit right since
# hot kernel of different designs might have different
# block bottlenecks, but here we just use the
# the block bottleneck of the representative design
# since self.__kernels are set to this designs kernels
hot_kernel_blck_bottleneck:Block = hot_krnel.stats.get_block_bottleneck(metric)
return hot_kernel_blck_bottleneck
# corresponding block bottleneck. We need this since we make a copy of the the sim_dp,
# and hence, sim_dp and ex_dp won't be synced any more
#coress_hot_kernel_blck_bottleneck = self.find_cores_hot_kernel_blck_bottleneck(ex_dp, hot_kernel_blck_bottleneck)
#return cores_hot_kernel_blck_bottleneck
# -------------------------------------------
# Functionality:
# get the hot block among the blocks of the entire system complex based on the metric and kernel rank.
# Hot means the bottleneck or rather the
# most power/energy/area/performance consuming of the system. This is determined
# by the input argument metric.
# Variables:
    #      krnel_rank: rank of the kernel to pick once we have already sorted the kernels
    #                  based on hotness. 0 means the hottest; higher values mean colder ones.
    #                  We use this value to sometimes target less hot kernels when the hot
    #                  kernel can not be improved any more.
# -------------------------------------------
def get_hot_block_system_complex(self, metric="latency", krnel_rank=0):
hot_blck_list = []
for SOC_type, SOC_id in self.get_designs_SOCs():
hot_blck_list.append(self.get_hot_block_SOC(SOC_type, SOC_id, metric, krnel_rank))
return sorted(hot_blck_list, key=lambda blck: blck.get_metric(metric), reverse=True)[0]
# -------------------------------------------
# Functionality:
# calculating the metric (power,performance,area) value
# Variables:
# metric_type: which metric to calculate for
# SOC_type: type of the SOC, since we can accept multi SOC designs
# SOC_id: id of the SOC to target
# -------------------------------------------
def calc_SOC_metric_value(self, metric_type, SOC_type, SOC_id):
self.unreduced_results = []
# call dp_stats of each design and then reduce
for dp in self.sim_dp_container.design_point_list:
self.unreduced_results.append(dp.dp_stats.get_SOC_metric_value(metric_type, SOC_type, SOC_id))
return self.reduce(self.unreduced_results)
# -------------------------------------------
# Functionality:
# calculating the area value
# Variables:
# type: mem, ic, pe
# SOC_type: type of the SOC, since we can accept multi SOC designs
# SOC_id: id of the SOC to target
# -------------------------------------------
def calc_SOC_area_base_on_type(self, type_, SOC_type, SOC_id):
area_list = []
for dp in self.sim_dp_container.design_point_list:
area_list.append(dp.dp_stats.get_SOC_area_base_on_type(type_, SOC_type, SOC_id))
return self.reduce(area_list)
def calc_SOC_area_base_on_subtype(self, subtype_, SOC_type, SOC_id):
area_list = []
for dp in self.sim_dp_container.design_point_list:
area_list.append(dp.dp_stats.get_SOC_area_base_on_subtype(subtype_, SOC_type, SOC_id))
return self.reduce(area_list)
def set_SOC_metric_value(self,metric_type, SOC_type, SOC_id):
assert(metric_type in config.all_metrics), metric_type + " is not supported"
if metric_type == "area":
for block_type in ["mem", "ic", "pe"]:
self.SOC_area_dict[block_type][SOC_type][SOC_id] = self.calc_SOC_area_base_on_type(block_type, SOC_type, SOC_id)
for block_subtype in ["dram", "sram", "ic", "ip", "gpp"]:
self.SOC_area_subtype_dict[block_subtype][SOC_type][SOC_id] = self.calc_SOC_area_base_on_subtype(block_subtype, SOC_type, SOC_id)
self.SOC_metric_dict[metric_type][SOC_type][SOC_id] = self.calc_SOC_metric_value(metric_type, SOC_type, SOC_id)
    def set_system_complex_metric(self, metric_type):
        """Aggregate a metric over all SOCs into the system-complex dicts.

        Aggregation rule per metric: area/energy/cost -> sum; latency ->
        element-wise dict addition (per workload); power -> max. For "area"
        the per-block-type totals and the dram/non-dram split are filled too.

        :param metric_type: one of config.all_metrics.
        :raises Exception: if no aggregation rule exists for *metric_type*.
        """
        type_id_list = self.dp_rep.get_designs_SOCs()
        # only corner case is for area as
        # we want area specific even to the blocks
        if (metric_type == "area"):
            for block_type in ["pe", "mem", "ic"]:
                # NOTE(review): the outer loop over type_id_list looks redundant —
                # the inner sum already iterates all (type_, id_) pairs, so the
                # same total is recomputed len(type_id_list) times; confirm intent.
                for type_, id_ in type_id_list:
                    self.system_complex_area_dict[block_type] = sum([self.get_SOC_area_base_on_type(block_type, type_, id_)
                                                                     for type_, id_ in type_id_list])
            self.system_complex_area_dram_non_dram["non_dram"] = 0
            for block_subtype in ["sram", "ic", "gpp", "ip"]:
                # NOTE(review): same redundancy with "+=" — the non-dram total is
                # effectively multiplied by len(type_id_list); verify.
                for type_, id_ in type_id_list:
                    self.system_complex_area_dram_non_dram["non_dram"] += sum([self.get_SOC_area_base_on_subtype(block_subtype, type_, id_)
                                                                               for type_, id_ in type_id_list])
            for block_subtype in ["dram"]:
                for type_, id_ in type_id_list:
                    self.system_complex_area_dram_non_dram["dram"] = sum([self.get_SOC_area_base_on_subtype(block_subtype, type_, id_)
                                                                          for type_, id_ in type_id_list])
        if metric_type in ["area", "energy", "cost"]:
            self.system_complex_metric_dict[metric_type] = sum([self.get_SOC_metric_value(metric_type, type_, id_)
                                                                for type_, id_ in type_id_list])
        elif metric_type in ["latency"]:
            # latency values are per-workload dicts; add them element-wise
            self.system_complex_metric_dict[metric_type] = self.operate_on_dicionary_values([self.get_SOC_metric_value(metric_type, type_, id_)
                                                                                             for type_, id_ in type_id_list], operator.add)
        elif metric_type in ["power"]:
            self.system_complex_metric_dict[metric_type] = max([self.get_SOC_metric_value(metric_type, type_, id_)
                                                                for type_, id_ in type_id_list])
        else:
            raise Exception("metric_type:" + metric_type + " is not supported")
# ------------------------
# getters
# ------------------------
# sort kernels. At the moment we just sort based on latency.
def get_kernels_sort(self):
def get_kernels_sort(self):
sorted_kernels_hot_to_cold = sorted(self.__kernels, key=lambda kernel: kernel.stats.latency, reverse=True)
return sorted_kernels_hot_to_cold
# return the metric of interest for the SOC. metric_type is the metric you are interested in
def get_SOC_metric_value(self, metric_type, SOC_type, SOC_id):
return self.SOC_metric_dict[metric_type][SOC_type][SOC_id]
def get_SOC_area_base_on_type(self, block_type, SOC_type, SOC_id):
assert(block_type in ["pe", "ic", "mem"]), "block_type" + block_type + " is not supported"
return self.SOC_area_dict[block_type][SOC_type][SOC_id]
def get_SOC_area_base_on_subtype(self, block_subtype, SOC_type, SOC_id):
assert(block_subtype in ["dram", "sram", "gpp", "ip", "ic"]), "block_subtype" + block_subtype + " is not supported"
if block_subtype not in self.SOC_area_subtype_dict.keys(): # this element does not exist
return 0
return self.SOC_area_subtype_dict[block_subtype][SOC_type][SOC_id]
# return the metric of interest for the system complex. metric_type is the metric you are interested in.
# Note that system complex can contain multiple SOCs.
def get_system_complex_metric(self, metric_type):
return self.system_complex_metric_dict[metric_type]
def get_system_complex_area_stacked_dram(self):
return self.system_complex_area_dram_non_dram
# get system_complex area. type_ is selected from ("pe", "mem", "ic")
def get_system_complex_area_base_on_type(self, type_):
return self.system_complex_area_type[type_]
def get_designs_SOCs(self):
return self.dp_rep.get_designs_SOCs()
# check if dp_rep is meeting the budget
def workload_fits_budget_for_metric(self, workload, metric_name, budget_coeff):
for type, id in self.dp_rep.get_designs_SOCs():
if not all(self.fits_budget_for_metric_and_workload(type, id, metric_name, workload, 1)):
return False
return True
# check if dp_rep is meeting the budget
def workload_fits_budget(self, workload, budget_coeff):
for type, id in self.dp_rep.get_designs_SOCs():
for metric_name in self.database.get_budgetted_metric_names(type):
if not all(self.fits_budget_for_metric_and_workload(type, id, metric_name, workload, 1)):
return False
return True
# check if dp_rep is meeting the budget
def fits_budget(self, budget_coeff):
for type, id in self.dp_rep.get_designs_SOCs():
for metric_name in self.database.get_budgetted_metric_names(type):
if not all(self.fits_budget_for_metric(type, id, metric_name, 1)):
return False
return True
def fits_budget_for_metric_for_SOC(self, metric_name, budget_coeff):
for type, id in self.dp_rep.get_designs_SOCs():
if not all(self.fits_budget_for_metric(type, id, metric_name, 1)):
return False
return True
# returns a list of values
def fits_budget_for_metric_and_workload(self, type, id, metric_name, workload, budget_coeff):
dist = self.normalized_distance_for_workload(type, id, metric_name, workload)
if not isinstance(dist, list):
dist = [dist]
return [dist_el<.001 for dist_el in dist]
# returns a list of values
def fits_budget_for_metric(self, type, id, metric_name, budget_coeff):
dist = self.normalized_distance(type, id, metric_name)
if not isinstance(dist, list):
dist = [dist]
return [dist_el<.001 for dist_el in dist]
def normalized_distance_for_workload(self, type, id, metric_name, dampening_coeff=1):
if config.dram_stacked:
return self.normalized_distance_for_workload_for_stacked_dram(type, id, metric_name, dampening_coeff)
else:
return self.normalized_distance_for_workload_for_non_stacked_dram(type, id, metric_name, dampening_coeff)
# normalized the metric to the budget
def normalized_distance_for_workload_for_non_stacked_dram(self, type, id, metric_name, workload, dampening_coeff=1):
metric_val = self.get_SOC_metric_value(metric_name, type, id)
if isinstance(metric_val, dict):
value_list= []
for workload_, val in metric_val.items():
if not (workload == workload_):
continue
dict_ = self.database.get_ideal_metric_value(metric_name, type)
value_list.append((val - dict_[workload])/(dampening_coeff*dict_[workload]))
return value_list
else:
return [(metric_val - self.database.get_ideal_metric_value(metric_name, type))/ (dampening_coeff*self.database.get_ideal_metric_value(metric_name, type))]
def normalized_distance_for_workload_for_stacked_dram(self, type, id, metric_name, workload, dampening_coeff=1):
metric_val = self.get_SOC_metric_value(metric_name, type, id)
if metric_name == 'latency':
value_list= []
for workload_, val in metric_val.items():
if not (workload == workload_):
continue
dict_ = self.database.get_ideal_metric_value(metric_name, type)
value_list.append((val - dict_[workload_])/(dampening_coeff*dict_[workload_]))
return value_list
elif metric_name == "area":
# get area aggregation of all the SOC minus dram and normalize it
subtypes_no_dram = ["gpp", "ip", "ic", "sram"]
area_no_dram = 0
for el in subtypes_no_dram:
area_no_dram += self.get_SOC_area_base_on_subtype(el, type, id)
area_no_dram_norm = (area_no_dram - self.database.get_ideal_metric_value(metric_name, type))/ (dampening_coeff*self.database.get_ideal_metric_value(metric_name, type))
# get dram area and normalize it
area_dram = self.get_SOC_area_base_on_subtype("dram", type, id)
area_dram_norm = [(area_dram - self.database.get_ideal_metric_value(metric_name, type))/ (dampening_coeff*self.database.get_ideal_metric_value(metric_name, type))]
return [area_no_dram_norm, area_dram]
else:
return [(metric_val - self.database.get_ideal_metric_value(metric_name, type))/ (dampening_coeff*self.database.get_ideal_metric_value(metric_name, type))]
def normalized_distance(self, type, id, metric_name, dampening_coeff=1):
if config.dram_stacked:
return self.normalized_distance_for_stacked_dram(type, id, metric_name, dampening_coeff)
else:
return self.normalized_distance_for_non_stacked_dram(type, id, metric_name, dampening_coeff)
# normalized the metric to the budget
def normalized_distance_for_non_stacked_dram(self, type, id, metric_name, dampening_coeff=1):
metric_val = self.get_SOC_metric_value(metric_name, type, id)
if isinstance(metric_val, dict):
value_list= []
for workload, val in metric_val.items():
dict_ = self.database.get_ideal_metric_value(metric_name, type)
value_list.append((val - dict_[workload])/(dampening_coeff*dict_[workload]))
return value_list
else:
return [(metric_val - self.database.get_ideal_metric_value(metric_name, type))/ (dampening_coeff*self.database.get_ideal_metric_value(metric_name, type))]
    def normalized_distance_for_stacked_dram(self, type, id, metric_name, dampening_coeff=1):
        """Budget-normalized distance of a metric for a stacked-DRAM design.

        latency -> one normalized distance per workload;
        area    -> [non-dram norm, dram norm] (DRAM sits on its own die, so
                   it is budgeted separately from the rest of the SOC);
        other   -> single-element list with the scalar normalized distance.
        A dampening_coeff > 1 shrinks the distances (used by annealing).
        """
        metric_val = self.get_SOC_metric_value(metric_name, type, id)
        if metric_name == 'latency':
            value_list= []
            for workload, val in metric_val.items():
                # ideal (budget) latency per workload
                dict_ = self.database.get_ideal_metric_value(metric_name, type)
                value_list.append((val - dict_[workload])/(dampening_coeff*dict_[workload]))
            return value_list
        elif metric_name == "area":
            # get area aggregation of all the SOC minus dram and normalize it
            subtypes_no_dram = ["gpp", "ip", "ic", "sram"]
            area_no_dram = 0
            for el in subtypes_no_dram:
                area_no_dram += self.get_SOC_area_base_on_subtype(el, type, id)
            area_no_dram_norm = (area_no_dram - self.database.get_ideal_metric_value(metric_name, type))/ (dampening_coeff*self.database.get_ideal_metric_value(metric_name, type))
            # get dram area and normalize it
            area_dram = self.get_SOC_area_base_on_subtype("dram", type, id)
            area_dram_norm = (area_dram - self.database.get_ideal_metric_value(metric_name, type))/ (dampening_coeff*self.database.get_ideal_metric_value(metric_name, type))
            return [area_no_dram_norm, area_dram_norm]
        else:
            return [(metric_val - self.database.get_ideal_metric_value(metric_name, type))/ (dampening_coeff*self.database.get_ideal_metric_value(metric_name, type))]
# normalized to the budget
def dist_to_goal_per_metric(self, metric_name, mode):
dist_list = []
for type, id in self.dp_rep.get_designs_SOCs():
meet_the_budgets = self.fits_budget_for_metric(type, id, metric_name, 1)
for idx, meet_the_budget in enumerate(meet_the_budgets):
if meet_the_budget:
if mode == "eliminate":
dist_list.append(0.000000001)
elif mode == "dampen" and meet_the_budget:
norm_dist = [math.fabs(el) for el in
self.normalized_distance(type, id, metric_name, config.annealing_dampening_coef)]
dist_list.append(math.fabs(norm_dist[idx]))
elif mode == "simple":
norm_dist = [math.fabs(el) for el in
self.normalized_distance(type, id, metric_name, 1)]
dist_list.append(math.fabs(norm_dist[idx]))
else:
print("mode: " + mode + " is not defined for dist_to_goal_per_metric")
exit(0)
else:
norm_dist = [math.fabs(el) for el in
self.normalized_distance(type, id, metric_name, 1)]
dist_list.append(math.fabs(norm_dist[idx]))
city_dist = sum(dist_list)
return city_dist
# check if dp_rep is meeting the budget
    # modes: {"simple", "eliminate", "dampen"}.
    # simple: just calculates the city distance
    # eliminate: eliminates the metrics that have already met the budget
    # dampen: dampens the impact of the metrics that have already met the budget
def dist_to_goal(self, metrics_to_look_into=["all"], mode="simple"): # mode simple, just calculate
if metrics_to_look_into == ["all"]:
metrics_to_look_into = self.database.get_budgetted_metric_names_all_SOCs() + self.database.get_other_metric_names_all_SOCs() # which metrics to use for distance calculation
dist_list = []
for metric_name in metrics_to_look_into:
dist_list.append(self.dist_to_goal_per_metric(metric_name, mode))
city_dist = sum(dist_list) # we use city distance to allow for probability prioritizing
return city_dist
# todo: change, right now it only uses the reduce value
def __lt__(self, other):
comp_list = []
for metric in config.objectives:
comp_list.append(self.get_system_complex_metric(metric) < other.get_system_complex_metric(metric))
return all(comp_list)
# todo: change, right now it only uses the reduce value
def __gt__(self, other):
comp_list = []
for metric in config.objectives:
comp_list.append(self.get_system_complex_metric(metric) > other.get_system_complex_metric(metric))
return all(comp_list)
# This module emulates the simulated design point.
# It contains the information for the simulation of a design point
class SimDesignPoint(ExDesignPoint):
    """The simulated design point.

    Holds the hardware graph plus the workload-to-hardware mapping/schedule,
    and every piece of bookkeeping the simulation fills in: per-phase energy
    and latency, per-block work/utilization, kernels, and exploration
    counters.  Also wraps CACTI (when config.use_cacti) to correct memory/IC
    energy and area numbers.

    Fixes relative to the original: removed the duplicate trailing
    definitions of get_kernels()/get_workload_to_hardware_map(); replaced the
    `[[]]*3` initializer (which aliased one shared list across three
    attributes) with three distinct lists; replaced mutable `[]` default
    arguments with None sentinels (both spots only truth-tested the value,
    so behavior is unchanged).
    """
    def __init__(self, hardware_graph, workload_to_hardware_map=None, workload_to_hardware_schedule=None):
        # primitive variables (populated through reset_design below)
        self.__workload_to_hardware_map:WorkloadToHardwareMap = None
        self.__workload_to_hardware_schedule:WorkloadToPEBlockSchedule = None
        self.hardware_graph = hardware_graph  # all the hardware blocks + their topology (how they are connected)
        self.__hardware, self.__workload, self.__kernels = [], [], []
        # bootstrap the design and its stats
        self.reset_design(workload_to_hardware_map, workload_to_hardware_schedule)

        self.SOC_phase_energy_dict = defaultdict(dict)  # energy associated with each phase
        self.phase_latency_dict = {}  # duration (time) for each phase.
        self.dp_stats = None  # design point statistics
        self.block_phase_work_dict = {}  # work done by the block as the system goes through different phases
        self.block_phase_utilization_dict = {}  # utilization of the block as the system goes through different phases
        self.pipe_cluster_path_phase_work_rate_dict = {}
        self.parallel_kernels = {}
        self.krnl_phase_present = {}  # for each kernel, the phases it is present in
        self.krnl_phase_present_operating_state = {}
        self.phase_krnl_present = {}  # inverse map: for each phase, the kernels present

        self.iteration_number = 0  # the iteration which the simulation is done
        self.population_observed_number = 0
        self.population_generated_number = 0
        self.depth_number = 0  # the depth (within one iteration) which the simulation is done
        self.simulation_time = 0  # how long did it take to do the simulation
        self.serial_design_time = 0
        self.par_speedup_time = 0

        if config.use_cacti:
            # CACTI handle used to correct memory/IC energy and area estimates
            self.cacti_hndlr = cact_handlr.CactiHndlr(config.cact_bin_addr, config.cacti_param_addr,
                                                      config.cacti_data_log_file, config.cacti_input_col_order,
                                                      config.cacti_output_col_order)

        for block in self.get_blocks():
            self.block_phase_work_dict[block] = {}
            self.block_phase_utilization_dict[block] = {}

    # --- simple setters/getters for simulation/exploration counters ---
    def set_serial_design_time(self, serial_design_time):
        self.serial_design_time = serial_design_time

    def get_serial_design_time(self):
        return self.serial_design_time

    def set_par_speedup(self, speedup):
        self.par_speedup_time = speedup

    def get_par_speedup(self):
        return self.par_speedup_time

    def set_simulation_time(self, simulation_time):
        self.simulation_time = simulation_time

    def get_simulation_time(self):
        return self.simulation_time

    def set_population_generation_cnt(self, generation_cnt):
        self.population_generation_cnt = generation_cnt

    def set_total_iteration_cnt(self, total_iteration):
        self.total_iteration_cnt = total_iteration

    def set_population_observed_number(self, population_observed_number):
        self.population_observed_number = population_observed_number

    def set_population_generated_number(self, population_generated_number):
        self.population_generated_number = population_generated_number

    def set_depth_number(self, depth_number):
        self.depth_number = depth_number

    def get_depth_number(self):
        return self.depth_number

    def get_population_generation_cnt(self):
        return self.population_generation_cnt

    def get_total_iteration_cnt(self):
        return self.total_iteration_cnt

    def get_population_observed_number(self):
        return self.population_observed_number

    def get_population_generated_number(self):
        return self.population_generated_number

    def get_tasks_parallel_task_dynamically(self, task):
        """Names of the tasks whose kernels share at least one simulation phase
        with `task`'s kernel (i.e., tasks that actually ran in parallel)."""
        if task.is_task_dummy():
            return []
        krnl = self.get_kernel_by_task_name(task)
        phases_present = self.krnl_phase_present[krnl]
        parallel_krnls = []
        for phase_ in phases_present:
            parallel_krnls.extend(self.phase_krnl_present[phase_])
        # get rid of duplicates and of the task itself
        parallel_tasks = set([el.get_task_name() for el in set(parallel_krnls) if not(task.get_name() == el.get_task_name())])
        return list(parallel_tasks)

    def get_tasks_using_the_different_pipe_cluster(self, task, block):
        """Names of the tasks on `block` that share no pipe cluster with `task`."""
        task_pipe_clusters = block.get_pipe_clusters_of_task(task)
        tasks_of_block = block.get_tasks_of_block()
        results = []
        for task_ in tasks_of_block:
            if task == task_:
                continue
            task__pipe_clusters = block.get_pipe_clusters_of_task(task_)
            # disjoint cluster sets <=> the set difference removes nothing
            if len(list(set(task_pipe_clusters) - set(task__pipe_clusters))) == len(task_pipe_clusters):
                results.append(task_.get_name())
        return results

    # Log the BW data about all the connections in the system
    def dump_mem_bus_connection_bw(self, result_folder):
        """Append a CSV of the peak bandwidth of every ic-ic / pe-bus / bus-mem
        connection to bus_mem_connection_max_bw.txt inside result_folder.

        NOTE(review): the file is opened in append mode, so the header row is
        re-written on every call -- confirm downstream readers tolerate this.
        """
        file_name = "bus_mem_connection_max_bw.txt"
        file_addr = os.path.join(result_folder, file_name)
        buses = self.get_blocks_by_type("ic")

        with open(file_addr, "a+") as output:
            output.write("MasterInstance" + "," + "SlaveInstance" + "," + "bus_bandwidth" + "," + "mode" + "\n")
            for bus in buses:
                connectd_pes = [block_ for block_ in bus.get_neighs() if block_.type == "pe"]  # PEs connected to bus
                connectd_mems = [block_ for block_ in bus.get_neighs() if block_.type == "mem"]  # memories connected to the bus
                connectd_ics = [block_ for block_ in bus.get_neighs() if block_.type == "ic"]
                for ic in connectd_ics:
                    for mode in ["read", "write"]:
                        output.write(ic.instance_name + "," + bus.instance_name + "," +
                                     str(ic.peak_work_rate) + "," + mode + "\n")
                for pe in connectd_pes:
                    for mode in ["read", "write"]:
                        output.write(pe.instance_name + "," + bus.instance_name + "," +
                                     str(bus.peak_work_rate) + "," + mode + "\n")
                for mem in connectd_mems:
                    for mode in ["read", "write"]:
                        output.write(bus.instance_name + "," + mem.instance_name + "," +
                                     str(mem.peak_work_rate) + "," + mode + "\n")

    # -----------------------------------------
    # CACTI handling functions
    # -----------------------------------------
    def FARSI_to_cacti_mem_type_converter(self, mem_subtype):
        """Map a FARSI memory subtype name to CACTI's memory-type name.
        NOTE(review): implicitly returns None for any other subtype."""
        if mem_subtype == "dram":
            return "main memory"
        elif mem_subtype == "sram":
            return "ram"

    def FARSI_to_cacti_cell_type_converter(self, mem_subtype):
        """Map a FARSI memory subtype name to CACTI's cell-type name."""
        if mem_subtype == "dram":
            #return "lp-dram"
            return "comm-dram"
        elif mem_subtype == "sram":
            return "itrs-lop"

    def run_and_collect_cacti_data(self, blk, database):
        """Run CACTI for a memory block and return
        (read_energy_per_byte, write_energy_per_byte, area), scaled by the
        technology-node scaling factors from the database (if provided), and
        cache the result in the CACTI data container."""
        tech_node = {}
        tech_node["energy"] = 1
        tech_node["area"] = 1
        sw_hw_database_population = database.db_input.sw_hw_database_population
        if "misc_knobs" in sw_hw_database_population.keys():
            misc_knobs = sw_hw_database_population["misc_knobs"]
            if "tech_node_SF" in misc_knobs.keys():
                tech_node = misc_knobs["tech_node_SF"]
        # NOTE(review): the scaling below indexes tech_node["energy"]["non_gpp"]
        # and tech_node["area"]["mem"], which only works when tech_node_SF was
        # supplied as nested dicts; the scalar defaults above would raise -- confirm.

        if not blk.type == "mem":
            print("Only memory blocks supported in CACTI")
            exit(0)

        # prime cacti
        mem_bytes = max(blk.get_area_in_bytes(), config.cacti_min_memory_size_in_bytes)
        subtype = blk.subtype
        # round up to a multiple of the minimum memory size
        mem_bytes = (math.ceil(mem_bytes/config.min_mem_size[subtype]))*config.min_mem_size[subtype]
        mem_subtype = self.FARSI_to_cacti_mem_type_converter(subtype)
        cell_type = self.FARSI_to_cacti_cell_type_converter(subtype)
        self.cacti_hndlr.set_cur_mem_type(mem_subtype)
        self.cacti_hndlr.set_cur_mem_size(mem_bytes)
        self.cacti_hndlr.set_cur_cell_type(cell_type)

        # run cacti
        try:
            cacti_area_energy_results = self.cacti_hndlr.collect_cati_data()
        except Exception as e:
            print("Using cacti, the following memory config tried and failed")
            print(self.cacti_hndlr.get_config())
            raise e

        # nJ -> J, and per-16-byte access -> per byte
        read_energy_per_byte = float(cacti_area_energy_results['Dynamic read energy (nJ)']) * (10 ** -9) / 16
        write_energy_per_byte = float(cacti_area_energy_results['Dynamic write energy (nJ)']) * (10 ** -9) / 16
        area = float(cacti_area_energy_results['Area (mm2)']) * (10 ** -6)  # mm2 -> m2

        read_energy_per_byte *= tech_node["energy"]["non_gpp"]
        write_energy_per_byte *= tech_node["energy"]["non_gpp"]
        area *= tech_node["area"]["mem"]

        # cache the values so identical configs are not re-run
        self.cacti_hndlr.cacti_data_container.insert(list(zip(config.cacti_input_col_order +
                                                              config.cacti_output_col_order,
                                                              [mem_subtype, mem_bytes, read_energy_per_byte, write_energy_per_byte, area])))
        return read_energy_per_byte, write_energy_per_byte, area

    def collect_cacti_data(self, blk, database):
        """Return (read_energy_per_byte, write_energy_per_byte, area, area_per_byte)
        for a block, using cached CACTI results when available and running
        CACTI otherwise.  ICs get zero energy/area (area_per_byte of 1)."""
        if blk.type == "ic":
            return 0, 0, 0, 1
        elif blk.type == "mem":
            # don't go below cacti's minimum supported memory size
            mem_bytes = max(blk.get_area_in_bytes(), config.cacti_min_memory_size_in_bytes)
            mem_subtype = self.FARSI_to_cacti_mem_type_converter(blk.subtype)
            # round up to a multiple of the minimum memory size
            mem_bytes = (math.ceil(mem_bytes / config.min_mem_size[blk.subtype])) * config.min_mem_size[blk.subtype]
            found_results, read_energy_per_byte, write_energy_per_byte, area = \
                self.cacti_hndlr.cacti_data_container.find(list(zip(config.cacti_input_col_order, [mem_subtype, mem_bytes])))
            if not found_results:
                read_energy_per_byte, write_energy_per_byte, area = self.run_and_collect_cacti_data(blk, database)
            area_per_byte = area/mem_bytes
            return read_energy_per_byte, write_energy_per_byte, area, area_per_byte

    def cacti_update_energy_area_of_kernel(self, krnl, database):
        """Recompute a kernel's per-block/per-phase energy and area using CACTI
        numbers for its memory/IC blocks, then refresh the kernel's aggregates."""
        # iterate through block/phases, collect data and insert them
        blk_area_dict = {}
        for blk, phase_metric in krnl.block_phase_energy_dict.items():
            # only memory and ic blocks are corrected by CACTI
            if blk.type not in ["mem", "ic"]:
                blk_area_dict[blk] = krnl.stats.get_block_area()[blk]
                continue
            read_energy_per_byte, write_energy_per_byte, area, area_per_byte = self.collect_cacti_data(blk, database)
            for phase, metric in phase_metric.items():
                # energy = bytes read/written in the phase * per-byte energy
                krnl.block_phase_energy_dict[blk][phase] = krnl.block_phase_read_dict[blk][
                                                               phase] * read_energy_per_byte
                krnl.block_phase_energy_dict[blk][phase] += krnl.block_phase_write_dict[blk][
                                                                phase] * write_energy_per_byte
                krnl.block_phase_area_dict[blk][phase] = area
            blk_area_dict[blk] = area

        # re-aggregate per-phase energy/area from the corrected per-block values
        krnl.stats.phase_energy_dict = krnl.aggregate_energy_of_for_every_phase()
        krnl.stats.phase_area_dict = krnl.aggregate_area_of_for_every_phase()
        krnl.stats.set_block_area(blk_area_dict)
        krnl.stats.set_stats()  # do not call set_stats directly, as it repopulates without cacti
        return "_"

    def cacti_update_area_of_block(self, block, database):
        """Overwrite a memory/IC block's area with the CACTI estimate.
        (At the moment only area is set up; energy is handled per kernel.)"""
        if block.type not in ["mem", "ic"]:
            return
        read_energy_per_byte, write_energy_per_byte, area, area_per_byte = self.collect_cacti_data(block, database)
        block.set_area_directly(area)

    def cacti_update_energy_area_of_design(self):
        """Rebuild the design's per-SOC, per-phase energy from the (already
        CACTI-corrected) kernel energies."""
        # resetting all first
        for soc, phase_value in self.SOC_phase_energy_dict.items():
            for phase, value in self.SOC_phase_energy_dict[soc].items():
                self.SOC_phase_energy_dict[soc][phase] = 0
        # iterate through SOCs and accumulate kernel energy per phase
        for soc, phase_value in self.SOC_phase_energy_dict.items():
            for phase, value in self.SOC_phase_energy_dict[soc].items():
                SOC_type = soc[0]
                SOC_id = soc[1]
                for kernel in self.get_kernels():
                    if kernel.SOC_type == SOC_type and kernel.SOC_id == SOC_id:
                        if phase in kernel.stats.phase_energy_dict.keys():
                            self.SOC_phase_energy_dict[(SOC_type, SOC_id)][phase] += kernel.stats.phase_energy_dict[phase]

    def correct_power_area_with_cacti(self, database):
        """Correct the whole design's power/area with CACTI, in three stages."""
        # (1) fix kernel energy first
        for krnl in self.__kernels:
            self.cacti_update_energy_area_of_kernel(krnl, database)
        # (2) fix the blocks' area
        for block in self.get_blocks():
            self.cacti_update_area_of_block(block, database)
        # (3) update/fix the entire design accordingly
        self.cacti_update_energy_area_of_design()

    def get_hardware_graph(self):
        return self.hardware_graph

    def collect_dp_stats(self, database):
        """Profile this design point (populates self.dp_stats)."""
        self.dp_stats = DPStats(self, database)

    def get_designs_SOCs(self):
        """Distinct (SOC_type, SOC_id) pairs used by this design's blocks."""
        blocks = self.get_workload_to_hardware_map().get_blocks()
        designs_SOCs = []
        for block in blocks:
            if (block.SOC_type, block.SOC_id) not in designs_SOCs:
                designs_SOCs.append((block.SOC_type, block.SOC_id))
        return designs_SOCs

    def get_blocks(self):
        """All hardware blocks of the design (used for idle power calculations)."""
        return self.get_workload_to_hardware_map().get_blocks()

    def reset_design_wrapper(self):
        """Clear the simulation stats so the design can be simulated again
        (used for changing power-knobs).  The map/schedule are retained since
        they will be the same and cannot be recovered otherwise."""
        self.SOC_phase_energy_dict = defaultdict(dict)
        self.phase_latency_dict = {}
        self.dp_stats = None

    def reset_design(self, workload_to_hardware_map=None, workload_to_hardware_schedule=None):
        """Bootstrap the design from scratch: (re)install the mapping/schedule
        when provided, and rebuild the kernel list from the mapping."""
        def update_kernels(self_):
            # one kernel per task-to-blocks mapping entry
            self_.__kernels = []
            for task_to_blocks_map in self.__workload_to_hardware_map.tasks_to_blocks_map_list:
                task = task_to_blocks_map.task
                self_.__kernels.append(Kernel(self_.__workload_to_hardware_map.get_by_task(task)))
        if workload_to_hardware_map:
            self.__workload_to_hardware_map = workload_to_hardware_map
        if workload_to_hardware_schedule:
            self.__workload_to_hardware_schedule = workload_to_hardware_schedule
        update_kernels(self)

    def get_workload_to_hardware_map(self):
        return self.__workload_to_hardware_map

    def get_workload_to_hardware_schedule(self):
        return self.__workload_to_hardware_schedule

    def get_kernels(self):
        return self.__kernels

    def get_kernel_by_task_name(self, task:Task):
        """The (first) kernel whose task name matches `task`'s name."""
        return list(filter(lambda kernel: task.name == kernel.task_name, self.get_kernels()))[0]
# design point statistics (stats). This class contains the profiling information for a simulated design.
# Note that the difference between system complex and SOC is that a system complex can contain multiple SOCs.
class DPStats:
    def __init__(self, sim_dp: SimDesignPoint, database):
        """Profile the given simulated design point against the database
        (budgets/ideal values); all metrics are collected on construction."""
        self.comparison_mode = "latency"  # metric to compare designs against one another
        self.dp = sim_dp  # simulated design point object
        self.__kernels = self.dp.get_kernels()  # design kernels
        self.SOC_area_dict = defaultdict(lambda: defaultdict(dict))  # area per block type (pe/mem/ic) per SOC
        self.SOC_area_subtype_dict = defaultdict(lambda: defaultdict(dict))  # area per block subtype (sram/dram/...) per SOC
        self.system_complex_area_dict = defaultdict()  # system complex area values (for memory, PEs, buses)
        self.power_duration_list = defaultdict(lambda: defaultdict(dict))  # (power, duration) samples per SOC
        self.SOC_metric_dict = defaultdict(lambda: defaultdict(dict))  # dictionary containing various metrics for the SOC
        self.system_complex_metric_dict = defaultdict(lambda: defaultdict(dict))  # dictionary containing the system complex metrics
        self.database = database
        # per-pipe-cluster pathlet work rates and latencies, copied from the hardware graph
        self.pipe_cluster_pathlet_phase_work_rate_dict = {}
        for pipe_cluster in self.dp.get_hardware_graph().get_pipe_clusters():
            self.pipe_cluster_pathlet_phase_work_rate_dict[pipe_cluster] = pipe_cluster.get_pathlet_phase_work_rate()
        self.pipe_cluster_pathlet_phase_latency_dict = {}
        for pipe_cluster in self.dp.get_hardware_graph().get_pipe_clusters():
            self.pipe_cluster_pathlet_phase_latency_dict[pipe_cluster] = pipe_cluster.get_pathlet_phase_latency()
        use_slack_management_estimation = config.use_slack_management_estimation
        # collect the data
        self.collect_stats(use_slack_management_estimation)
# write the results into a file
    def dump_stats(self, des_folder, mode="light_weight"):
        """Write the profiled metrics (latency/energy/power/area plus topology
        counts) to the verification results file in `des_folder`.

        NOTE(review): `mode` is currently unused here; the JSON object is
        opened with "{" but not closed in this method, and the file is
        rewritten ("w+") once per SOC of the design -- confirm intended.
        """
        file_name = config.verification_result_file
        file_addr = os.path.join(des_folder, file_name)

        for type, id in self.dp.get_designs_SOCs():
            ic_count = len(self.dp.get_workload_to_hardware_map().get_blocks_by_type("ic"))
            mem_count = len(self.dp.get_workload_to_hardware_map().get_blocks_by_type("mem"))
            pe_count = len(self.dp.get_workload_to_hardware_map().get_blocks_by_type("pe"))
            with open(file_addr, "w+") as output:
                routing_complexity = self.dp.get_hardware_graph().get_routing_complexity()
                simple_topology = self.dp.get_hardware_graph().get_simplified_topology_code()
                # simplified topology code is "_"-separated counts: bus, mem, pe
                blk_cnt = sum([int(el) for el in simple_topology.split("_")])
                bus_cnt = [int(el) for el in simple_topology.split("_")][0]
                mem_cnt = [int(el) for el in simple_topology.split("_")][1]
                pe_cnt = [int(el) for el in simple_topology.split("_")][2]
                task_cnt = len(list(self.dp.krnl_phase_present.keys()))
                channel_cnt = self.dp.get_hardware_graph().get_number_of_channels()

                output.write("{\n")
                # latency: worst workload completion time; the rest are system-complex aggregates
                output.write("\"FARSI_predicted_latency\": "+ str(max(list(self.get_system_complex_metric("latency").values()))) +",\n")
                output.write("\"FARSI_predicted_energy\": "+ str(self.get_system_complex_metric("energy")) +",\n")
                output.write("\"FARSI_predicted_power\": "+ str(self.get_system_complex_metric("power")) +",\n")
                output.write("\"FARSI_predicted_area\": "+ str(self.get_system_complex_metric("area")) +",\n")
                #output.write("\"config_code\": "+ str(ic_count) + str(mem_count) + str(pe_count)+",\n")
                #output.write("\"config_code\": "+ self.dp.get_hardware_graph().get_config_code() +",\n")
                output.write("\"simplified_topology_code\": "+ self.dp.get_hardware_graph().get_simplified_topology_code() +",\n")
                output.write("\"blk_cnt\": "+ str(blk_cnt) +",\n")
                output.write("\"pe_cnt\": "+ str(pe_cnt) +",\n")
                output.write("\"mem_cnt\": "+ str(mem_cnt) +",\n")
                output.write("\"bus_cnt\": "+ str(bus_cnt) +",\n")
                output.write("\"task_cnt\": "+ str(task_cnt) +",\n")
                output.write("\"routing_complexity\": "+ str(routing_complexity) +",\n")
                output.write("\"channel_cnt\": "+ str(channel_cnt) +",\n")
                output.write("\"FARSI simulation time\": " + str(self.dp.get_simulation_time()) + ",\n")
# Function: profile the simulated design, collecting information about
# latency, power, area, and phasal behavior
# This is called within the constructor
    def collect_stats(self, use_slack_management_estimation=False):
        """Profile the simulated design: populate per-SOC and system-complex
        metrics (latency, power, area, and the phasal behavior behind them);
        optionally re-estimate assuming slack management is applied.
        Called from the constructor."""
        for type, id in self.dp.get_designs_SOCs():
            for metric_name in config.all_metrics:
                self.set_SOC_metric_value(metric_name, type, id)  # data per SoC
                self.set_system_complex_metric(metric_name)  # data per System

        # estimate the behavior if slack management applied
        for type, id in self.dp.get_designs_SOCs():
            if use_slack_management_estimation:
                values_changed = self.apply_slack_management_estimation_improved(type, id)
                if values_changed:
                    # SOC power/latency were adjusted in place; refresh the system aggregates
                    for type, id in self.dp.get_designs_SOCs():
                        for metric_name in config.all_metrics:
                            self.set_system_complex_metric(metric_name)  # data per System
# Functionality:
# Hot means the bottleneck or rather the
# most power/energy/area/performance consuming of the system. This is determined
# by the input argument metric.
def get_hot_kernel_SOC(self, SOC_type, SOC_id, metric="latency", krnel_rank=0):
kernels_on_SOC = [kernel for kernel in self.__kernels if kernel.SOC_type == SOC_type and kernel.SOC_id == SOC_id]
sorted_kernels_hot_to_cold = sorted(kernels_on_SOC, key=lambda kernel: kernel.stats.get_metric(metric), reverse=True)
return sorted_kernels_hot_to_cold[krnel_rank]
# Hot means the bottleneck or rather the
# most power/energy/area/performance consuming of the system. This is determined
# by the input argument metric.
def get_hot_kernel_system_complex(self, metric="latency", krnel_rank=0):
hot_krnel_list = []
for SOC_type, SOC_id in self.get_designs_SOCs():
hot_krnel_list.append(self.get_hot_kernel_SOC(SOC_type, SOC_id, metric, krnel_rank))
return sorted(hot_krnel_list, key=lambda kernel: kernel.stats.get_metric(metric), reverse=True)[0]
# Hot means the bottleneck or rather the
# most power/energy/area/performance consuming of the system. This is determined
# by the input argument metric.
def get_hot_block_SOC(self, SOC_type, SOC_id, metric="latency", krnel_rank=0):
# find the hottest kernel
hot_krnel = self.get_hot_kernel_SOC(SOC_type, SOC_id, metric, krnel_rank)
hot_kernel_blck_bottleneck:Block = hot_krnel.stats.get_block_bottleneck(metric)
# corresponding block bottleneck. We need this since we make a copy of the the sim_dp,
# and hence, sim_dp and ex_dp won't be synced any more
return hot_kernel_blck_bottleneck
# get hot blocks of the system
# Hot means the bottleneck or rather the
# most power/energy/area/performance consuming of the system. This is determined
# by the input argument metric.
# krnel_rank is the rank of the kernel to pick from once the kernels are sorted. 0 means the highest.
# This variable is used to unstuck the heuristic when necessary (e.g., when for example the hottest kernel
# modification is not helping the design, we move on to the second hottest)
def get_hot_block_system_complex(self, metric="latency", krnel_rank=0):
hot_blck_list = []
for SOC_type, SOC_id in self.get_designs_SOCs():
hot_blck_list.append(self.get_hot_block_SOC(SOC_type, SOC_id, metric, krnel_rank))
return sorted(hot_blck_list, key=lambda kernel: kernel.stats.get_metric(metric), reverse=True)[0]
# get kernels sorted based on latency
def get_kernels_sort(self):
sorted_kernels_hot_to_cold = sorted(self.__kernels, key=lambda kernel: kernel.stats.latency, reverse=True)
return sorted_kernels_hot_to_cold
# -----------------------------------------
# Calculate profiling information per SOC
# -----------------------------------------
def calc_SOC_latency(self, SOC_type, SOC_id):
kernels_on_SOC = [kernel for kernel in self.__kernels if kernel.SOC_type == SOC_type and kernel.SOC_id == SOC_id]
workload_latency_dict = {}
for workload, last_task in self.database.get_workloads_last_task().items():
kernel = self.dp.get_kernel_by_task_name(self.dp.get_task_by_name(last_task))
workload_latency_dict[workload] = kernel.get_completion_time() #kernel.stats.latency + kernel.starting_time
return workload_latency_dict
# calculate SOC energy
def calc_SOC_energy(self, SOC_type, SOC_id):
phase_total_energy = {}
return sum(list(self.dp.SOC_phase_energy_dict[(SOC_type, SOC_id)].values()))
# if estimate_slack_management_effect is set to true,
# we estimate what will happen if we can introduce slacks in order to reduce power
    def calc_SOC_power(self, SOC_type, SOC_id, estimate_slack_management_effect=False):
        """Peak power of the SOC.

        Phase energies and latencies are sliced into windows (PWP); power is
        energy/duration per window, and the maximum over all windows is
        returned.  Also records (power, duration) pairs in
        self.power_duration_list for the slack-management estimation.

        NOTE(review): the local `power_duration_list` only collects (0, 0)
        entries in the zero-duration branch and is never read -- dead code;
        `estimate_slack_management_effect` is unused here -- confirm.
        """
        self.power_duration_list[SOC_type][SOC_id] = []
        # phases sorted by phase id so energy and latency windows line up
        sorted_listified_phase_latency_dict = sorted(self.dp.phase_latency_dict.items(), key=operator.itemgetter(0))
        sorted_latencys = [latency for phase,latency in sorted_listified_phase_latency_dict]
        sorted_phase_latency_dict = collections.OrderedDict(sorted_listified_phase_latency_dict)

        # get the energy first
        SOC_phase_energy_dict = self.dp.SOC_phase_energy_dict[(SOC_type, SOC_id)]
        sorted_listified_phase_energy_dict = sorted(SOC_phase_energy_dict.items(), key=operator.itemgetter(0))
        sorted_phase_energy_dict = collections.OrderedDict(sorted_listified_phase_energy_dict)

        # convert to power by slicing the time with the smallest duration within which power should be
        # calculated with (PWP)
        phase_bounds_lists = slice_phases_with_PWP(sorted_phase_latency_dict)
        power_list = []  # list of power values collected based on the power collection freq
        power_duration_list = []
        for lower_bnd, upper_bnd in phase_bounds_lists:
            if sum(sorted_latencys[lower_bnd:upper_bnd])>0:
                # window power = window energy / window duration
                power_this_phase = sum(list(sorted_phase_energy_dict.values())[lower_bnd:upper_bnd])/sum(sorted_latencys[lower_bnd:upper_bnd])
                power_list.append(power_this_phase)
                self.power_duration_list[SOC_type][SOC_id].append((power_this_phase, sum(sorted_latencys[lower_bnd:upper_bnd])))
            else:
                power_list.append(0)
                power_duration_list.append((0,0))

        power = max(power_list)
        return power
# estimate what happens if we can manage slack by optimizing kernel scheduling.
# note that this is just a estimation. Actually scheduling needs to be applied to get exact numbers.
# note that if we apply slack, the comparison with the higher fidelity simulation
# will be considerably hurt (since the higher fidelity simulation doesn't typically apply the slack
# management)
    def apply_slack_management_estimation_improved(self, SOC_type, SOC_id):
        """Estimate the effect of slack management on power.

        While latency-budget slack remains and power is over budget, stretch
        the highest-power window in .0005 steps (same energy over a longer
        duration -> lower power), then write the adjusted power/latency into
        SOC_metric_dict.  Returns True if any value was changed.

        Note: this is only an estimate; actual scheduling must be applied to
        get exact numbers, and applying it hurts the comparison against the
        higher-fidelity simulation (which typically does not manage slack).
        """
        power_duration_list = self.power_duration_list[SOC_type][SOC_id]
        # relax power if possible
        total_latency = sum([duration for power, duration in power_duration_list])
        slack = self.database.get_budget("latency", "glass") - total_latency
        power_duration_recalculated = copy.deepcopy(power_duration_list)
        values_changed = False  # indicating whether slack was used to modify any value
        while slack > 0 and (self.fits_budget_per_metric(SOC_type, SOC_id, "latency", 1) and
                             not self.fits_budget_per_metric(SOC_type, SOC_id, "power", 1)):
            # pick the window with the highest power and stretch it
            power_duration_sorted = sorted(power_duration_recalculated, key=lambda x: x[0])
            idx = power_duration_recalculated.index(power_duration_sorted[-1])
            power, duration = power_duration_recalculated[idx]
            slack_used = min(.0005, slack)
            slack = slack - slack_used
            duration_with_slack = duration + slack_used
            # energy is conserved: power * duration stays constant
            power_duration_recalculated[idx] = ((power * duration) / duration_with_slack, duration_with_slack)
            values_changed = True
        power = max([power for power, duration in power_duration_recalculated])
        self.SOC_metric_dict["power"][SOC_type][SOC_id] = power
        self.SOC_metric_dict["latency"][SOC_type][SOC_id] = sum([duration for power, duration in power_duration_recalculated])
        return values_changed
# get total area of an soc (type is not supported yet)
def calc_SOC_area_base_on_type(self, type_, SOC_type, SOC_id):
blocks = self.dp.get_workload_to_hardware_map().get_blocks()
total_area= sum([block.get_area() for block in blocks if block.SOC_type == SOC_type
and block.SOC_id == SOC_id and block.type == type_])
return total_area
# get total area of an soc (type is not supported yet)
def calc_SOC_area_base_on_subtype(self, subtype_, SOC_type, SOC_id):
blocks = self.dp.get_workload_to_hardware_map().get_blocks()
total_area = 0
for block in blocks:
if block.SOC_type == SOC_type and block.SOC_id == SOC_id and block.subtype == subtype_:
total_area += block.get_area()
return total_area
# get total area of an soc
# Variables:
# SOC_type:the type of SOC you need information for
# SOC_id: id of the SOC you are interested in
def calc_SOC_area(self, SOC_type, SOC_id):
blocks = self.dp.get_workload_to_hardware_map().get_blocks()
# note: we can't use phase_area_dict for this, since:
# 1. we double count the statically 2. if a memory is shared, we would be double counting it
total_area= sum([block.get_area() for block in blocks if block.SOC_type == SOC_type and block.SOC_id == SOC_id])
return total_area
# the cost model associated with a PE.
# This will help us calculate the financial cost
# of using a specific PE
def PE_cost_model(self, task_normalized_work, block_type, model_type="linear"):
if model_type == "linear":
return task_normalized_work*self.database.db_input.porting_effort[block_type]
else:
print("this cost model is not defined")
exit(0)
# the cost model associated with a MEM.
# This will help us calculate the financial cost
# of using a specific MEM
def MEM_cost_model(self, task_normalized_work, block, block_type, model_type="linear"):
if model_type == "linear":
return task_normalized_work*self.database.db_input.porting_effort[block_type]*block.get_num_of_banks()
else:
print("this cost model is not defined")
exit(0)
# the cost model associated with a IC.
# This will help us calculate the financial cost
# of using a specific IC.
def IC_cost_model(self, task_normalized_work, block, block_type, model_type="linear"):
if model_type == "linear":
return task_normalized_work*self.database.db_input.porting_effort[block_type]
else:
print("this cost model is not defined")
exit(0)
# calculate the development cost of an SOC
    def calc_SOC_dev_cost(self, SOC_type, SOC_id):
        """Development-cost estimate of the SOC: per-block porting costs
        (work normalized to the simplest task's work) plus a wiring/sharing
        term based on PE<->MEM path lengths."""
        blocks = self.dp.get_workload_to_hardware_map().get_blocks()
        all_kernels = self.get_kernels_sort()
        # find the simplest task's work (simple = task with the least amount of work)
        krnl_work_list = []  # contains the list of works associated with different kernels (excluding dummy tasks)
        for krnl in all_kernels:
            krnl_task = krnl.get_task()
            if not krnl_task.is_task_dummy():
                krnl_work_list.append(krnl_task.get_self_task_work())
        simplest_task_work = min(krnl_work_list)

        num_of_tasks = len(all_kernels)  # NOTE(review): unused local -- confirm
        dev_cost = 0
        # iterate through each block and add the cost
        for block in blocks:
            if block.type == "pe" :
                # for IPs
                if block.subtype == "ip":
                    tasks = block.get_tasks_of_block()
                    task_work = max([task.get_self_task_work() for task in tasks])  # use max incase multiple task are mapped
                    task_normalized_work = task_work/simplest_task_work
                    dev_cost += self.PE_cost_model(task_normalized_work, "ip")
                # for GPPS
                elif block.subtype == "gpp":
                    # for DSPs (identified by "G3" in the instance name)
                    if "G3" in block.instance_name:
                        for task in block.get_tasks_of_block():
                            task_work = task.get_self_task_work()
                            task_normalized_work = task_work/simplest_task_work
                            dev_cost += self.PE_cost_model(task_normalized_work, "dsp")
                    # for ARM
                    elif "A53" in block.instance_name or "ARM" in block.instance_name:
                        for task in block.get_tasks_of_block():
                            task_work = task.get_self_task_work()
                            task_normalized_work = task_work/simplest_task_work
                            dev_cost += self.PE_cost_model(task_normalized_work, "arm")
                    else:
                        print("cost model for this GPP is not defined")
                        exit(0)
            elif block.type == "mem":
                task_normalized_work = 1  # treat it as the simplest task work
                dev_cost += self.MEM_cost_model(task_normalized_work, block, "mem")
            elif block.type == "ic":
                task_normalized_work = 1  # treat it as the simplest task work
                # NOTE(review): passes "mem" (not "ic") as the porting-effort
                # block_type for an ic block -- confirm intended
                dev_cost += self.IC_cost_model(task_normalized_work, block, "mem")
            else:
                print("cost model for ip" + block.instance_name + " is not defined")
                exit(0)

        # wiring/sharing term: for every PE-MEM pair that shares tasks,
        # cost grows with path length * number of shared tasks
        pes = [blk for blk in blocks if blk.type == "pe"]
        mems = [blk for blk in blocks if blk.type == "mem"]
        for pe in pes:
            pe_tasks = [el.get_name() for el in pe.get_tasks_of_block()]
            for mem in mems:
                mem_tasks = [el.get_name() for el in mem.get_tasks_of_block()]
                # |pe_tasks| - |pe_tasks \ mem_tasks| == number of shared task names
                task_share_cnt = len(pe_tasks) - len(list(set(pe_tasks) - set(mem_tasks)))
                if task_share_cnt == 0:  # this condition to avoid finding paths between vertecies, which is pretty comp intensive
                    continue
                path_length = len(self.dp.get_hardware_graph().get_path_between_two_vertecies(pe, mem))
                #path_length = len(self.dp.get_hardware_graph().get_shortest_path(pe, mem, [], []))
                # NOTE(review): `effort` is computed but unused; the .1 factor
                # below is hard-coded -- confirm
                effort = self.database.db_input.porting_effort["ic"]/10
                dev_cost += (path_length*task_share_cnt)*.1

        return dev_cost
# pb_type: processing block type
def get_SOC_s_specific_area(self, SOC_type, SOC_id, pb_type):
assert(pb_type in ["pe", "ic", "mem"]) , "block type " + pb_type + " is not supported"
return self.SOC_area_dict[pb_type][SOC_type][SOC_id]
# --------
# setters
# --------
def set_SOC_metric_value(self,metric_type, SOC_type, SOC_id):
if metric_type == "area":
self.SOC_metric_dict[metric_type][SOC_type][SOC_id] = self.calc_SOC_area(SOC_type, SOC_id)
for block_type in ["pe", "mem", "ic"]:
self.SOC_area_dict[block_type][SOC_type][SOC_id] = self.calc_SOC_area_base_on_type(block_type, SOC_type, SOC_id)
for block_subtype in ["sram", "dram", "ic", "gpp", "ip"]:
self.SOC_area_subtype_dict[block_subtype][SOC_type][SOC_id] = self.calc_SOC_area_base_on_subtype(block_subtype, SOC_type, SOC_id)
elif metric_type == "cost":
#self.SOC_metric_dict[metric_type][SOC_type][SOC_id] = self.calc_SOC_area(SOC_type, SOC_id)
self.SOC_metric_dict[metric_type][SOC_type][SOC_id] = self.calc_SOC_dev_cost(SOC_type, SOC_id)
elif metric_type == "energy":
self.SOC_metric_dict[metric_type][SOC_type][SOC_id] = self.calc_SOC_energy(SOC_type, SOC_id)
elif metric_type == "power" :
self.SOC_metric_dict[metric_type][SOC_type][SOC_id] = self.calc_SOC_power(SOC_type, SOC_id)
elif metric_type == "latency":
self.SOC_metric_dict[metric_type][SOC_type][SOC_id] = self.calc_SOC_latency(SOC_type, SOC_id)
else:
raise Exception("metric_type:" + metric_type + " is not supported")
# helper function to apply an operator across two dictionaries
def operate_on_two_dic_values(self,dict1, dict2, operator):
dict_res = {}
for key in list(dict2.keys()) + list(dict1.keys()):
if key in dict1.keys() and dict2.keys():
dict_res[key] = operator(dict2[key], dict1[key])
else:
if key in dict1.keys():
dict_res[key] = dict1[key]
elif key in dict2.keys():
dict_res[key] = dict2[key]
return dict_res
def operate_on_dicionary_values(self, dictionaries, operator):
res = {}
for SOCs_latency in dictionaries:
#res = copy.deepcopy(self.operate_on_two_dic_values(res, SOCs_latency, operator))
#gc.disable()
res = cPickle.loads(cPickle.dumps(self.operate_on_two_dic_values(res, SOCs_latency, operator), -1))
#gc.enable()
return res
# set the metric (power, area, ...) for the entire system complex
def set_system_complex_metric(self, metric_type):
type_id_list = self.dp.get_designs_SOCs()
# the only spatial scenario is area
if metric_type == "area":
for block_type in ["pe", "mem", "ic"]:
for type_, id_ in type_id_list:
self.system_complex_area_dict[block_type] = sum([self.get_SOC_area_base_on_type(block_type, type_, id_)
for type_, id_ in type_id_list])
if metric_type in ["area", "energy", "cost"]:
self.system_complex_metric_dict[metric_type] = sum([self.get_SOC_metric_value(metric_type, type_, id_)
for type_, id_ in type_id_list])
elif metric_type in ["latency"]:
self.system_complex_metric_dict[metric_type] = self.operate_on_dicionary_values([self.get_SOC_metric_value(metric_type, type_, id_)
for type_, id_ in type_id_list], operator.add)
#return res
#self.system_complex_metric_dict[metric_type] = sum([self.get_SOC_metric_value(metric_type, type_, id_)
# for type_, id_ in type_id_list])
elif metric_type in ["power"]:
self.system_complex_metric_dict[metric_type] = max([self.get_SOC_metric_value(metric_type, type_, id_)
for type_, id_ in type_id_list])
else:
raise Exception("metric_type:" + metric_type + " is not supported")
# --------
# getters
# --------
def get_SOC_metric_value(self, metric_type, SOC_type, SOC_id):
assert(metric_type in config.all_metrics), metric_type + " not supported"
return self.SOC_metric_dict[metric_type][SOC_type][SOC_id]
def get_SOC_area_base_on_type(self, block_type, SOC_type, SOC_id):
assert(block_type in ["pe", "ic", "mem"]), "block_type" + block_type + " is not supported"
return self.SOC_area_dict[block_type][SOC_type][SOC_id]
def get_SOC_area_base_on_subtype(self, block_subtype, SOC_type, SOC_id):
assert(block_subtype in ["dram", "sram", "ic", "gpp", "ip"]), "block_subtype" + block_subtype + " is not supported"
return self.SOC_area_subtype_dict[block_subtype][SOC_type][SOC_id]
# get the simulation progress
def get_SOC_s_latency_sim_progress(self, SOC_type, SOC_id, progress_metrics):
kernels_on_SOC = [kernel for kernel in self.__kernels if kernel.SOC_type == SOC_type and kernel.SOC_id == SOC_id]
kernel_metric_value = {}
for kernel in kernels_on_SOC:
kernel_metric_value[kernel] = []
for metric in progress_metrics:
for kernel in kernels_on_SOC:
if metric == "latency":
kernel_metric_value[kernel].append((kernel.starting_time*10**3, kernel.stats.latency*10**3, kernel.stats.latency*10**3, "ms"))
return kernel_metric_value
# get the simulation progress
def get_SOC_s_latency_sim_progress(self, SOC_type, SOC_id, metric):
kernels_on_SOC = [kernel for kernel in self.__kernels if kernel.SOC_type == SOC_type and kernel.SOC_id == SOC_id]
kernel_metric_value = {}
for kernel in kernels_on_SOC:
kernel_metric_value[kernel] = []
for kernel in kernels_on_SOC:
if metric == "latency":
kernel_metric_value[kernel].append((kernel.starting_time*10**3, kernel.stats.latency*10**3, kernel.stats.latency*10**3, "ms"))
elif metric == "bytes":
kernel_metric_value[kernel].append((kernel.starting_time * 10 ** 3, kernel.stats.latency * 10 ** 3,
kernel.stats.latency* 10 ** 3, "bytes"))
return kernel_metric_value
def get_sim_progress(self, metric="latency"):
#for phase, krnls in self.dp.phase_krnl_present.items():
# accelerators_in_parallel = []
if metric == "latency":
return [self.get_SOC_s_latency_sim_progress(type, id, metric) for type, id in self.dp.get_designs_SOCs()]
if metric == "bytes":
pass
# returns the latency associated with the phases of the system execution
def get_phase_latency(self, SOC_type=1, SOC_id=1):
return self.dp.phase_latency_dict
# get utilization associated with the phases of the execution
def get_SOC_s_sim_utilization(self, SOC_type, SOC_id):
return self.dp.block_phase_utilization_dict
def get_SOC_s_pipe_cluster_pathlet_phase_work_rate(self, SOC_type, SOC_id):
return self.pipe_cluster_pathlet_phase_work_rate_dict
def get_SOC_s_pipe_cluster_pathlet_phase_latency(self, SOC_type, SOC_id):
return self.pipe_cluster_pathlet_phase_latency_dict
def get_SOC_s_pipe_cluster_path_phase_latency(self, SOC_type, SOC_id):
return self.pipe_cluster_pathlet_phase_latency_dict
# get work associated with the phases of the execution
def get_SOC_s_sim_work(self, SOC_type, SOC_id):
return self.dp.block_phase_work_dict
# get total (consider all SoCs') system metrics
def get_system_complex_metric(self, metric_type):
assert(metric_type in config.all_metrics), metric_type + " not supported"
assert(not (self.system_complex_metric_dict[metric_type] == -1)), metric_type + "not calculated"
return self.system_complex_metric_dict[metric_type]
# check if dp_rep is meeting the budget
def fits_budget(self, budget_coeff):
for type, id in self.dp.get_designs_SOCs():
for metric_name in self.database.get_budgetted_metric_names(type):
if not self.fits_budget_for_metric(type, id, metric_name):
return False
return True
def fits_budget_per_metric(self, metric_name, budget_coeff):
for type, id in self.dp.get_designs_SOCs():
if not self.fits_budget_for_metric(type, id, metric_name):
return False
return True
# whether the design fits the budget for the metric argument specified
# type, and id specify the relevant parameters of the SOC
# ignore budget_coeff for now
def fits_budget_for_metric(self, type, id, metric_name, budget_coeff):
return self.normalized_distance(type, id, metric_name) < .001
def __lt__(self, other):
comp_list = []
for metric in config.objectives:
comp_list.append(self.get_system_complex_metric(metric) < other.get_system_complex_metric(metric))
return all(comp_list)
def __gt__(self, other):
comp_list = []
for metric in config.objectives:
comp_list.append(self.get_system_complex_metric(metric) > other.get_system_complex_metric(metric))
return all(comp_list) | 51.700385 | 240 | 0.657114 |
73ea184bd7e584949bf21cf38f1acc7c8a33420e | 1,073 | py | Python | habitat/datasets/registration.py | Ram81/habitat-imitation-baselines | c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505 | [
"MIT"
] | null | null | null | habitat/datasets/registration.py | Ram81/habitat-imitation-baselines | c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505 | [
"MIT"
] | null | null | null | habitat/datasets/registration.py | Ram81/habitat-imitation-baselines | c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from habitat.core.logging import logger
from habitat.core.registry import registry
from habitat.datasets.eqa import _try_register_mp3d_eqa_dataset
from habitat.datasets.object_nav import _try_register_objectnavdatasetv1
from habitat.datasets.pointnav import _try_register_pointnavdatasetv1
from habitat.datasets.vln import _try_register_r2r_vln_dataset
from habitat.datasets.pickplace import _try_register_pickplace_dataset
def make_dataset(id_dataset, **kwargs):
logger.info("Initializing dataset {}".format(id_dataset))
_dataset = registry.get_dataset(id_dataset)
assert _dataset is not None, "Could not find dataset {}".format(id_dataset)
return _dataset(**kwargs) # type: ignore
_try_register_objectnavdatasetv1()
_try_register_mp3d_eqa_dataset()
_try_register_pointnavdatasetv1()
_try_register_r2r_vln_dataset()
_try_register_pickplace_dataset()
| 37 | 79 | 0.825722 |
73ea7f5545f778bbefba83755e34695bd03c4bcf | 1,047 | py | Python | oscar/lib/python2.7/site-packages/django/conf/locale/es/formats.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/django/conf/locale/es/formats.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/django/conf/locale/es/formats.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y \a \l\a\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
# '31/12/2009', '31/12/09'
'%d/%m/%Y', '%d/%m/%y'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S',
'%d/%m/%Y %H:%M:%S.%f',
'%d/%m/%Y %H:%M',
'%d/%m/%y %H:%M:%S',
'%d/%m/%y %H:%M:%S.%f',
'%d/%m/%y %H:%M',
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| 30.794118 | 78 | 0.617956 |
73eac4a64b972c95b90219e7fe27b15c3929ed92 | 2,525 | py | Python | torchdata/datapipes/iter/util/hashchecker.py | Nayef211/data | 66b7ac07f75c45f1cc6aed71423fdb5d29a9648f | [
"BSD-3-Clause"
] | null | null | null | torchdata/datapipes/iter/util/hashchecker.py | Nayef211/data | 66b7ac07f75c45f1cc6aed71423fdb5d29a9648f | [
"BSD-3-Clause"
] | null | null | null | torchdata/datapipes/iter/util/hashchecker.py | Nayef211/data | 66b7ac07f75c45f1cc6aed71423fdb5d29a9648f | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
import hashlib
from torch.utils.data import IterDataPipe, functional_datapipe
@functional_datapipe("check_hash")
class HashCheckerIterDataPipe(IterDataPipe):
r"""
Iterable DataPipe that computes and checks the hash of each file, from an input
DataPipe of tuples of file name and data stream. If the hashes match the given hash
in the dictionary, it yields a tuple of file name and stream. Otherwise, it raises an error.
Args:
source_datapipe: a DataPipe with tuples of file name and data stream
hash_dict: a Dict that maps file names to their corresponding hashes
hash_type: the type of hash function to apply
rewind: rewind the stream after using the stream to compute the hash (this
does not work with non-seekable stream, e.g. HTTP)
Usage: dp = dp.check_hash({'train.py':'0d8b94d9fa9fb1ad89b9e3da9e1521495dca558fc5213b0fd7fd7b71c23f9921'})
"""
def __init__(self, source_datapipe, hash_dict, hash_type="sha256", rewind=True):
self.source_datapipe = source_datapipe
self.hash_dict = hash_dict
self.hash_type = hash_type
self.rewind = rewind
if self.hash_type not in ["sha256", "md5"]:
raise ValueError("Invalid hash_type requested, should be one of {}".format(["sha256", "md5"]))
def __iter__(self):
for file_name, stream in self.source_datapipe:
if self.hash_type == "sha256":
hash_func = hashlib.sha256()
else:
hash_func = hashlib.md5()
while True:
# Read by chunk to avoid filling memory
chunk = stream.read(1024 ** 2)
if not chunk:
break
hash_func.update(chunk)
# TODO(VitalyFedyunin): this will not work (or work crappy for non-seekable steams like http)
if self.rewind:
stream.seek(0)
if file_name not in self.hash_dict:
raise RuntimeError("Unspecified hash for file {}".format(file_name))
if hash_func.hexdigest() != self.hash_dict[file_name]:
raise RuntimeError(
"The hash {} of {} does not match. Delete the file manually and retry.".format(
hash_func.hexdigest(), file_name
)
)
yield (file_name, stream)
def __len__(self):
return len(self.source_datapipe)
| 38.257576 | 110 | 0.623762 |
73ead015bc1fd3d5738e3e0737aae5b68a27e1e5 | 2,228 | py | Python | services/web/server/tests/integration/fixtures/docker_registry.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | services/web/server/tests/integration/fixtures/docker_registry.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | 2 | 2018-05-13T09:10:57.000Z | 2019-03-06T08:10:40.000Z | services/web/server/tests/integration/fixtures/docker_registry.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | # pylint:disable=wildcard-import
# pylint:disable=unused-import
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
import docker
import pytest
import tenacity
@pytest.fixture(scope="session")
def docker_registry():
# run the registry outside of the stack
docker_client = docker.from_env()
container = docker_client.containers.run("registry:2", ports={"5000":"5000"}, restart_policy={"Name":"always"}, detach=True)
host = "127.0.0.1"
port = 5000
url = "{host}:{port}".format(host=host, port=port)
# Wait until we can connect
assert _wait_till_registry_is_responsive(url)
# test the registry
docker_client = docker.from_env()
# get the hello world example from docker hub
hello_world_image = docker_client.images.pull("hello-world","latest")
# login to private registry
docker_client.login(registry=url, username="simcore")
# tag the image
repo = url + "/hello-world:dev"
assert hello_world_image.tag(repo) == True
# push the image to the private registry
docker_client.images.push(repo)
# wipe the images
docker_client.images.remove(image="hello-world:latest")
docker_client.images.remove(image=hello_world_image.id)
# pull the image from the private registry
private_image = docker_client.images.pull(repo)
docker_client.images.remove(image=private_image.id)
yield url
container.stop()
@tenacity.retry(wait=tenacity.wait_fixed(1), stop=tenacity.stop_after_delay(60))
def _wait_till_registry_is_responsive(url):
docker_client = docker.from_env()
docker_client.login(registry=url, username="simcore")
return True
#pull from itisfoundation/sleeper and push into local registry
@pytest.fixture(scope="session")
def sleeper_service(docker_registry):
client = docker.from_env()
image = client.images.pull("itisfoundation/sleeper", tag="1.0.0")
assert not image is None
repo = "{}/simcore/services/comp/itis/sleeper:1.0.0".format(docker_registry)
assert image.tag(repo) == True
client.images.push(repo)
image = client.images.pull(repo)
assert image
yield repo | 36.52459 | 129 | 0.710503 |
73eadc293ab984e3be57de562cacb75d0b432974 | 343 | py | Python | tklife/skel/common_widgets.py | Aesonus/TkLife | 8e8f585be7f522134b9a5746b22185c2394d3d8d | [
"MIT"
] | null | null | null | tklife/skel/common_widgets.py | Aesonus/TkLife | 8e8f585be7f522134b9a5746b22185c2394d3d8d | [
"MIT"
] | 4 | 2021-05-02T17:33:19.000Z | 2021-05-16T18:53:51.000Z | tklife/skel/common_widgets.py | Aesonus/TkLife | 8e8f585be7f522134b9a5746b22185c2394d3d8d | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import ttk
from . import Skeleton
__all__ = ['SkelMain', 'SkelFrame', 'SkelLabelFrame', 'SkelToplevel']
class SkelMain(Skeleton, tk.Tk):
pass
class SkelFrame(Skeleton, ttk.Frame):
pass
class SkelLabelFrame(Skeleton, ttk.Labelframe):
pass
class SkelToplevel(Skeleton, tk.Toplevel):
pass
| 17.15 | 69 | 0.731778 |
73eaf39693ffbcbd2a663d07c85eeca2b4806492 | 72,741 | py | Python | Internet-Worm/venv_macos/lib/python3.8/site-packages/fontTools/feaLib/builder.py | Qiaozhi94/Python-Projects | aefc6cf49c1f4f2cc9beba8dbe80cfa826ba75c4 | [
"MIT"
] | 2 | 2021-04-07T16:47:04.000Z | 2022-01-15T04:01:01.000Z | app/src/main/python/fontTools/feaLib/builder.py | KhunHtetzNaing/ColorFontTool | c447d5377c6c53e85731d65248ebb781f786276b | [
"MIT"
] | null | null | null | app/src/main/python/fontTools/feaLib/builder.py | KhunHtetzNaing/ColorFontTool | c447d5377c6c53e85731d65248ebb781f786276b | [
"MIT"
] | null | null | null | from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from fontTools.misc.textTools import binary2num, safeEval
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.parser import Parser
from fontTools.feaLib.ast import FeatureFile
from fontTools.otlLib import builder as otl
from fontTools.otlLib.maxContextCalc import maxCtxFont
from fontTools.ttLib import newTable, getTableModule
from fontTools.ttLib.tables import otBase, otTables
from collections import defaultdict, OrderedDict
import itertools
import logging
log = logging.getLogger(__name__)
def addOpenTypeFeatures(font, featurefile, tables=None):
    """Compile OpenType layout features from ``featurefile`` into ``font``.

    Any features currently present in the font are replaced.

    Args:
        font (feaLib.ttLib.TTFont): The font object.
        featurefile: Either a path or file object (in which case we
            parse it into an AST), or a pre-parsed AST instance.
        tables: If passed, restrict the set of affected tables to those in the
            list.
    """
    Builder(font, featurefile).build(tables=tables)
def addOpenTypeFeaturesFromString(font, features, filename=None, tables=None):
    """Compile OpenType layout features given as a string into ``font``.

    Any features currently present in the font are replaced.

    Args:
        font (feaLib.ttLib.TTFont): The font object.
        features: A string containing feature code.
        filename: The directory containing ``filename`` is used as the root of
            relative ``include()`` paths; if ``None`` is provided, the current
            directory is assumed.
        tables: If passed, restrict the set of affected tables to those in the
            list.
    """
    feature_stream = UnicodeIO(tounicode(features))
    if filename:
        # The stream's name is what the parser uses to resolve relative
        # include() paths.
        feature_stream.name = filename
    addOpenTypeFeatures(font, feature_stream, tables=tables)
class Builder(object):
supportedTables = frozenset(Tag(tag) for tag in [
"BASE",
"GDEF",
"GPOS",
"GSUB",
"OS/2",
"head",
"hhea",
"name",
"vhea",
])
    def __init__(self, font, featurefile):
        """Create a Builder that will compile ``featurefile`` into ``font``.

        Args:
            font: A ``TTFont`` instance that receives the compiled tables.
            featurefile: Either a path or file object (parsed lazily in
                ``build()``), or a pre-parsed ``FeatureFile`` AST instance.
        """
        self.font = font
        # 'featurefile' can be either a path or file object (in which case we
        # parse it into an AST), or a pre-parsed AST instance
        if isinstance(featurefile, FeatureFile):
            self.parseTree, self.file = featurefile, None
        else:
            self.parseTree, self.file = None, featurefile
        self.glyphMap = font.getReverseGlyphMap()
        # parser/lookup state while walking the feature-file AST
        self.default_language_systems_ = set()
        self.script_ = None
        self.lookupflag_ = 0
        self.lookupflag_markFilterSet_ = None
        self.language_systems = set()
        self.seen_non_DFLT_script_ = False
        self.named_lookups_ = {}
        self.cur_lookup_ = None
        self.cur_lookup_name_ = None
        self.cur_feature_name_ = None
        self.lookups_ = []
        self.features_ = {}  # ('latn', 'DEU ', 'smcp') --> [LookupBuilder*]
        self.required_features_ = {}  # ('latn', 'DEU ') --> 'scmp'
        # for feature 'aalt'
        self.aalt_features_ = []  # [(location, featureName)*], for 'aalt'
        self.aalt_location_ = None
        self.aalt_alternates_ = {}
        # for 'featureNames'
        self.featureNames_ = set()
        self.featureNames_ids_ = {}
        # for 'cvParameters'
        self.cv_parameters_ = set()
        self.cv_parameters_ids_ = {}
        self.cv_num_named_params_ = {}
        self.cv_characters_ = defaultdict(list)
        # for feature 'size'
        self.size_parameters_ = None
        # for table 'head'
        self.fontRevision_ = None  # 2.71
        # for table 'name'
        self.names_ = []
        # for table 'BASE'
        self.base_horiz_axis_ = None
        self.base_vert_axis_ = None
        # for table 'GDEF'
        self.attachPoints_ = {}  # "a" --> {3, 7}
        self.ligCaretCoords_ = {}  # "f_f_i" --> {300, 600}
        self.ligCaretPoints_ = {}  # "f_f_i" --> {3, 7}
        self.glyphClassDefs_ = {}  # "fi" --> (2, (file, line, column))
        self.markAttach_ = {}  # "acute" --> (4, (file, line, column))
        self.markAttachClassID_ = {}  # frozenset({"acute", "grave"}) --> 4
        self.markFilterSets_ = {}  # frozenset({"acute", "grave"}) --> 4
        # for table 'OS/2'
        self.os2_ = {}
        # for table 'hhea'
        self.hhea_ = {}
        # for table 'vhea'
        self.vhea_ = {}
    def build(self, tables=None):
        """Run the compilation and write the resulting tables into the font.

        Parses the feature file (if it was not passed in pre-parsed), walks
        the AST (which populates this builder's state via the callbacks), and
        then builds each requested table.

        Args:
            tables: Optional iterable restricting which tables are built;
                defaults to all supported tables.

        Raises:
            NotImplementedError: If ``tables`` names an unsupported table.
        """
        if self.parseTree is None:
            self.parseTree = Parser(self.file, self.glyphMap).parse()
        self.parseTree.build(self)
        # by default, build all the supported tables
        if tables is None:
            tables = self.supportedTables
        else:
            tables = frozenset(tables)
            unsupported = tables - self.supportedTables
            if unsupported:
                unsupported_string = ", ".join(sorted(unsupported))
                raise NotImplementedError(
                    "The following tables were requested but are unsupported: "
                    f"{unsupported_string}."
                )
        if "GSUB" in tables:
            self.build_feature_aalt_()
        if "head" in tables:
            self.build_head()
        if "hhea" in tables:
            self.build_hhea()
        if "vhea" in tables:
            self.build_vhea()
        if "name" in tables:
            self.build_name()
        if "OS/2" in tables:
            self.build_OS_2()
        for tag in ('GPOS', 'GSUB'):
            if tag not in tables:
                continue
            table = self.makeTable(tag)
            # Only keep the table if it actually contains something; an
            # existing-but-now-empty table is dropped from the font.
            if (table.ScriptList.ScriptCount > 0 or
                    table.FeatureList.FeatureCount > 0 or
                    table.LookupList.LookupCount > 0):
                fontTable = self.font[tag] = newTable(tag)
                fontTable.table = table
            elif tag in self.font:
                del self.font[tag]
        if (any(tag in self.font for tag in ("GPOS", "GSUB")) and
                "OS/2" in self.font):
            self.font["OS/2"].usMaxContext = maxCtxFont(self.font)
        if "GDEF" in tables:
            gdef = self.buildGDEF()
            if gdef:
                self.font["GDEF"] = gdef
            elif "GDEF" in self.font:
                del self.font["GDEF"]
        if "BASE" in tables:
            base = self.buildBASE()
            if base:
                self.font["BASE"] = base
            elif "BASE" in self.font:
                del self.font["BASE"]
def get_chained_lookup_(self, location, builder_class):
result = builder_class(self.font, location)
result.lookupflag = self.lookupflag_
result.markFilterSet = self.lookupflag_markFilterSet_
self.lookups_.append(result)
return result
def add_lookup_to_feature_(self, lookup, feature_name):
for script, lang in self.language_systems:
key = (script, lang, feature_name)
self.features_.setdefault(key, []).append(lookup)
def get_lookup_(self, location, builder_class):
if (self.cur_lookup_ and
type(self.cur_lookup_) == builder_class and
self.cur_lookup_.lookupflag == self.lookupflag_ and
self.cur_lookup_.markFilterSet ==
self.lookupflag_markFilterSet_):
return self.cur_lookup_
if self.cur_lookup_name_ and self.cur_lookup_:
raise FeatureLibError(
"Within a named lookup block, all rules must be of "
"the same lookup type and flag", location)
self.cur_lookup_ = builder_class(self.font, location)
self.cur_lookup_.lookupflag = self.lookupflag_
self.cur_lookup_.markFilterSet = self.lookupflag_markFilterSet_
self.lookups_.append(self.cur_lookup_)
if self.cur_lookup_name_:
# We are starting a lookup rule inside a named lookup block.
self.named_lookups_[self.cur_lookup_name_] = self.cur_lookup_
if self.cur_feature_name_:
# We are starting a lookup rule inside a feature. This includes
# lookup rules inside named lookups inside features.
self.add_lookup_to_feature_(self.cur_lookup_,
self.cur_feature_name_)
return self.cur_lookup_
    def build_feature_aalt_(self):
        """Synthesize the 'aalt' (Access All Alternates) feature.

        Gathers alternate glyphs both from explicit ``aalt`` substitutions
        (``aalt_alternates_``) and from the lookups of every feature named
        in ``aalt_features_``, then rebuilds 'aalt' as a single-substitution
        lookup (glyphs with exactly one alternate) plus an
        alternate-substitution lookup (glyphs with several alternates).
        Any previously registered 'aalt' lookups are discarded first.
        """
        if not self.aalt_features_ and not self.aalt_alternates_:
            return
        alternates = {g: set(a) for g, a in self.aalt_alternates_.items()}
        for location, name in self.aalt_features_ + [(None, "aalt")]:
            feature = [(script, lang, feature, lookups)
                       for (script, lang, feature), lookups
                       in self.features_.items()
                       if feature == name]
            # "aalt" does not have to specify its own lookups, but it might.
            if not feature and name != "aalt":
                raise FeatureLibError("Feature %s has not been defined" % name,
                                      location)
            for script, lang, feature, lookups in feature:
                for lookuplist in lookups:
                    if not isinstance(lookuplist, list):
                        lookuplist = [lookuplist]
                    for lookup in lookuplist:
                        for glyph, alts in lookup.getAlternateGlyphs().items():
                            alternates.setdefault(glyph, set()).update(alts)
        single = {glyph: list(repl)[0] for glyph, repl in alternates.items()
                  if len(repl) == 1}
        # TODO: Figure out the glyph alternate ordering used by makeotf.
        # https://github.com/fonttools/fonttools/issues/836
        multi = {glyph: sorted(repl, key=self.font.getGlyphID)
                 for glyph, repl in alternates.items()
                 if len(repl) > 1}
        if not single and not multi:
            return
        # Drop any previously registered 'aalt' lookups; they are replaced
        # by the synthesized ones below.
        self.features_ = {(script, lang, feature): lookups
                          for (script, lang, feature), lookups
                          in self.features_.items()
                          if feature != "aalt"}
        old_lookups = self.lookups_
        self.lookups_ = []
        self.start_feature(self.aalt_location_, "aalt")
        # NOTE(review): 'location' below is the loop variable leaked from the
        # for-loop above (always None at this point, since the last entry is
        # (None, "aalt")); self.aalt_location_ looks intended -- confirm
        # before changing.
        if single:
            single_lookup = self.get_lookup_(location, SingleSubstBuilder)
            single_lookup.mapping = single
        if multi:
            multi_lookup = self.get_lookup_(location, AlternateSubstBuilder)
            multi_lookup.alternates = multi
        self.end_feature()
        self.lookups_.extend(old_lookups)
def build_head(self):
if not self.fontRevision_:
return
table = self.font.get("head")
if not table: # this only happens for unit tests
table = self.font["head"] = newTable("head")
table.decompile(b"\0" * 54, self.font)
table.tableVersion = 1.0
table.created = table.modified = 3406620153 # 2011-12-13 11:22:33
table.fontRevision = self.fontRevision_
def build_hhea(self):
if not self.hhea_:
return
table = self.font.get("hhea")
if not table: # this only happens for unit tests
table = self.font["hhea"] = newTable("hhea")
table.decompile(b"\0" * 36, self.font)
table.tableVersion = 0x00010000
if "caretoffset" in self.hhea_:
table.caretOffset = self.hhea_["caretoffset"]
if "ascender" in self.hhea_:
table.ascent = self.hhea_["ascender"]
if "descender" in self.hhea_:
table.descent = self.hhea_["descender"]
if "linegap" in self.hhea_:
table.lineGap = self.hhea_["linegap"]
def build_vhea(self):
if not self.vhea_:
return
table = self.font.get("vhea")
if not table: # this only happens for unit tests
table = self.font["vhea"] = newTable("vhea")
table.decompile(b"\0" * 36, self.font)
table.tableVersion = 0x00011000
if "verttypoascender" in self.vhea_:
table.ascent = self.vhea_["verttypoascender"]
if "verttypodescender" in self.vhea_:
table.descent = self.vhea_["verttypodescender"]
if "verttypolinegap" in self.vhea_:
table.lineGap = self.vhea_["verttypolinegap"]
def get_user_name_id(self, table):
# Try to find first unused font-specific name id
nameIDs = [name.nameID for name in table.names]
for user_name_id in range(256, 32767):
if user_name_id not in nameIDs:
return user_name_id
    def buildFeatureParams(self, tag):
        """Build the FeatureParams subtable for feature ``tag``.

        Handles three kinds of parameters: the 'size' feature
        (FeatureParamsSize), stylistic sets registered via ``featureNames``
        blocks (FeatureParamsStylisticSet), and character variants registered
        via ``cvParameters`` blocks (FeatureParamsCharacterVariants).

        Returns:
            The populated FeatureParams object, or None if ``tag`` carries
            no parameters.
        """
        params = None
        if tag == "size":
            params = otTables.FeatureParamsSize()
            params.DesignSize, params.SubfamilyID, params.RangeStart, \
                params.RangeEnd = self.size_parameters_
            if tag in self.featureNames_ids_:
                params.SubfamilyNameID = self.featureNames_ids_[tag]
            else:
                params.SubfamilyNameID = 0
        elif tag in self.featureNames_:
            if not self.featureNames_ids_:
                # name table wasn't selected among the tables to build; skip
                pass
            else:
                assert tag in self.featureNames_ids_
                params = otTables.FeatureParamsStylisticSet()
                params.Version = 0
                params.UINameID = self.featureNames_ids_[tag]
        elif tag in self.cv_parameters_:
            params = otTables.FeatureParamsCharacterVariants()
            params.Format = 0
            # Name IDs default to 0 ("no name") when the corresponding
            # cvParameters sub-block was not given.
            params.FeatUILabelNameID = self.cv_parameters_ids_.get(
                (tag, 'FeatUILabelNameID'), 0)
            params.FeatUITooltipTextNameID = self.cv_parameters_ids_.get(
                (tag, 'FeatUITooltipTextNameID'), 0)
            params.SampleTextNameID = self.cv_parameters_ids_.get(
                (tag, 'SampleTextNameID'), 0)
            params.NumNamedParameters = self.cv_num_named_params_.get(tag, 0)
            params.FirstParamUILabelNameID = self.cv_parameters_ids_.get(
                (tag, 'ParamUILabelNameID_0'), 0)
            params.CharCount = len(self.cv_characters_[tag])
            params.Character = self.cv_characters_[tag]
        return params
    def build_name(self):
        """Write the name records collected from the feature file into the
        font's ``name`` table.

        Records queued by ``featureNames``/``cvParameters`` blocks carry a
        feature tag (or a ``(tag, block name)`` tuple) in place of a numeric
        name ID; a free user name ID (256+) is allocated for each on first
        use and remembered in ``featureNames_ids_``/``cv_parameters_ids_``
        so that ``buildFeatureParams`` can reference it later.
        """
        if not self.names_:
            return
        table = self.font.get("name")
        if not table:  # this only happens for unit tests
            table = self.font["name"] = newTable("name")
            table.names = []
        for name in self.names_:
            nameID, platformID, platEncID, langID, string = name
            # For featureNames block, nameID is 'feature tag'
            # For cvParameters blocks, nameID is ('feature tag', 'block name')
            if not isinstance(nameID, int):
                tag = nameID
                if tag in self.featureNames_:
                    if tag not in self.featureNames_ids_:
                        self.featureNames_ids_[tag] = self.get_user_name_id(table)
                        assert self.featureNames_ids_[tag] is not None
                    nameID = self.featureNames_ids_[tag]
                elif tag[0] in self.cv_parameters_:
                    if tag not in self.cv_parameters_ids_:
                        self.cv_parameters_ids_[tag] = self.get_user_name_id(table)
                        assert self.cv_parameters_ids_[tag] is not None
                    nameID = self.cv_parameters_ids_[tag]
            table.setName(string, nameID, platformID, platEncID, langID)
    def build_OS_2(self):
        """Copy the values collected from the feature file's ``OS/2`` block
        into the font's ``OS/2`` table.

        The table version is raised as needed: codepage ranges require
        version 1, x-height/cap-height version 2, and optical point sizes
        version 5. Fields introduced by a version bump that were not set
        explicitly are zero-filled via ``checkattr``.
        """
        if not self.os2_:
            return
        table = self.font.get("OS/2")
        if not table:  # this only happens for unit tests
            table = self.font["OS/2"] = newTable("OS/2")
            data = b"\0" * sstruct.calcsize(getTableModule("OS/2").OS2_format_0)
            table.decompile(data, self.font)
        version = 0
        if "fstype" in self.os2_:
            table.fsType = self.os2_["fstype"]
        if "panose" in self.os2_:
            panose = getTableModule("OS/2").Panose()
            panose.bFamilyType, panose.bSerifStyle, panose.bWeight,\
                panose.bProportion, panose.bContrast, panose.bStrokeVariation,\
                panose.bArmStyle, panose.bLetterForm, panose.bMidline, \
                panose.bXHeight = self.os2_["panose"]
            table.panose = panose
        if "typoascender" in self.os2_:
            table.sTypoAscender = self.os2_["typoascender"]
        if "typodescender" in self.os2_:
            table.sTypoDescender = self.os2_["typodescender"]
        if "typolinegap" in self.os2_:
            table.sTypoLineGap = self.os2_["typolinegap"]
        if "winascent" in self.os2_:
            table.usWinAscent = self.os2_["winascent"]
        if "windescent" in self.os2_:
            table.usWinDescent = self.os2_["windescent"]
        if "vendor" in self.os2_:
            table.achVendID = safeEval("'''" + self.os2_["vendor"] + "'''")
        if "weightclass" in self.os2_:
            table.usWeightClass = self.os2_["weightclass"]
        if "widthclass" in self.os2_:
            table.usWidthClass = self.os2_["widthclass"]
        if "unicoderange" in self.os2_:
            table.setUnicodeRanges(self.os2_["unicoderange"])
        if "codepagerange" in self.os2_:
            pages = self.build_codepages_(self.os2_["codepagerange"])
            table.ulCodePageRange1, table.ulCodePageRange2 = pages
            version = 1
        if "xheight" in self.os2_:
            table.sxHeight = self.os2_["xheight"]
            version = 2
        if "capheight" in self.os2_:
            table.sCapHeight = self.os2_["capheight"]
            version = 2
        if "loweropsize" in self.os2_:
            table.usLowerOpticalPointSize = self.os2_["loweropsize"]
            version = 5
        if "upperopsize" in self.os2_:
            table.usUpperOpticalPointSize = self.os2_["upperopsize"]
            version = 5
        def checkattr(table, attrs):
            # Zero-fill any version-gated field not set explicitly above.
            for attr in attrs:
                if not hasattr(table, attr):
                    setattr(table, attr, 0)
        table.version = max(version, table.version)
        # this only happens for unit tests
        if version >= 1:
            checkattr(table, ("ulCodePageRange1", "ulCodePageRange2"))
        if version >= 2:
            checkattr(table, ("sxHeight", "sCapHeight", "usDefaultChar",
                              "usBreakChar", "usMaxContext"))
        if version >= 5:
            checkattr(table, ("usLowerOpticalPointSize",
                              "usUpperOpticalPointSize"))
def build_codepages_(self, pages):
    """Convert Windows codepage numbers into the two 32-bit words of
    the OS/2 ``ulCodePageRange1``/``ulCodePageRange2`` bitmask.

    Unknown codepage numbers are silently ignored, matching the
    original behavior.

    Returns a list ``[ulCodePageRange1, ulCodePageRange2]``.
    """
    # OS/2 specification bit assignments for supported codepages.
    pages2bits = {
        1252: 0, 1250: 1, 1251: 2, 1253: 3, 1254: 4, 1255: 5, 1256: 6,
        1257: 7, 1258: 8, 874: 16, 932: 17, 936: 18, 949: 19, 950: 20,
        1361: 21, 869: 48, 866: 49, 865: 50, 864: 51, 863: 52, 862: 53,
        861: 54, 860: 55, 857: 56, 855: 57, 852: 58, 775: 59, 737: 60,
        708: 61, 850: 62, 437: 63,
    }
    # Set bits directly instead of building "0"/"1" strings and parsing
    # them back with binary2num: bits 0-31 go into the first word,
    # bits 32-63 into the second.
    words = [0, 0]
    for page in pages:
        bit = pages2bits.get(page)
        if bit is not None:
            words[bit // 32] |= 1 << (bit % 32)
    return words
def buildBASE(self):
    """Compile the BASE table, or return None if no baseline data exists."""
    if not (self.base_horiz_axis_ or self.base_vert_axis_):
        return None
    base = otTables.BASE()
    base.Version = 0x00010000
    base.HorizAxis = self.buildBASEAxis(self.base_horiz_axis_)
    base.VertAxis = self.buildBASEAxis(self.base_vert_axis_)
    result = newTable("BASE")
    result.table = base
    return result
def buildBASEAxis(self, axis):
    """Build a BASE Axis subtable from ``(baseline_tags, script_records)``.

    *axis* is falsy when no data was collected for that direction;
    returns None implicitly in that case.  Each script record is a
    tuple of (script tag, default baseline tag, coordinates).
    """
    if not axis:
        return
    bases, scripts = axis
    axis = otTables.Axis()
    axis.BaseTagList = otTables.BaseTagList()
    axis.BaseTagList.BaselineTag = bases
    axis.BaseTagList.BaseTagCount = len(bases)
    axis.BaseScriptList = otTables.BaseScriptList()
    axis.BaseScriptList.BaseScriptRecord = []
    axis.BaseScriptList.BaseScriptCount = len(scripts)
    for script in sorted(scripts):
        record = otTables.BaseScriptRecord()
        record.BaseScriptTag = script[0]
        record.BaseScript = otTables.BaseScript()
        record.BaseScript.BaseLangSysCount = 0
        record.BaseScript.BaseValues = otTables.BaseValues()
        # DefaultIndex points into the BaselineTag list.
        record.BaseScript.BaseValues.DefaultIndex = bases.index(script[1])
        record.BaseScript.BaseValues.BaseCoord = []
        record.BaseScript.BaseValues.BaseCoordCount = len(script[2])
        for c in script[2]:
            coord = otTables.BaseCoord()
            coord.Format = 1
            coord.Coordinate = c
            record.BaseScript.BaseValues.BaseCoord.append(coord)
        axis.BaseScriptList.BaseScriptRecord.append(record)
    return axis
def buildGDEF(self):
    """Compile the GDEF table; return None if it would be entirely empty."""
    gdef = otTables.GDEF()
    gdef.GlyphClassDef = self.buildGDEFGlyphClassDef_()
    gdef.AttachList = otl.buildAttachList(self.attachPoints_, self.glyphMap)
    gdef.LigCaretList = otl.buildLigCaretList(
        self.ligCaretCoords_, self.ligCaretPoints_, self.glyphMap)
    gdef.MarkAttachClassDef = self.buildGDEFMarkAttachClassDef_()
    gdef.MarkGlyphSetsDef = self.buildGDEFMarkGlyphSetsDef_()
    # Mark glyph sets are only representable in GDEF version 1.2.
    gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef else 0x00010000
    subtables = (gdef.GlyphClassDef, gdef.AttachList, gdef.LigCaretList,
                 gdef.MarkAttachClassDef, gdef.MarkGlyphSetsDef)
    if not any(subtables):
        return None
    result = newTable("GDEF")
    result.table = gdef
    return result
def buildGDEFGlyphClassDef_(self):
    """Build the GDEF GlyphClassDef subtable, or None if empty.

    Explicit ``GlyphClassDef`` statements take precedence; otherwise
    classes are inferred from the lookups, and every glyph appearing
    in a mark class definition is assigned class 3 (mark).
    """
    if self.glyphClassDefs_:
        classes = {g: c for (g, (c, _)) in self.glyphClassDefs_.items()}
    else:
        classes = {}
        for lookup in self.lookups_:
            classes.update(lookup.inferGlyphClasses())
        for markClass in self.parseTree.markClasses.values():
            for markClassDef in markClass.definitions:
                for glyph in markClassDef.glyphSet():
                    classes[glyph] = 3
    if classes:
        result = otTables.GlyphClassDef()
        result.classDefs = classes
        return result
    else:
        return None
def buildGDEFMarkAttachClassDef_(self):
    """Build GDEF MarkAttachClassDef from collected mark-attachment classes."""
    classes = {glyph: cls for glyph, (cls, _) in self.markAttach_.items()}
    if not classes:
        return None
    markAttachClassDef = otTables.MarkAttachClassDef()
    markAttachClassDef.classDefs = classes
    return markAttachClassDef
def buildGDEFMarkGlyphSetsDef_(self):
    """Build GDEF MarkGlyphSetsDef, ordering glyph sets by their set id."""
    ordered = sorted(self.markFilterSets_.items(), key=lambda item: item[1])
    sets = [glyphs for glyphs, _ in ordered]
    return otl.buildMarkGlyphSetsDef(sets, self.glyphMap)
def buildLookups_(self, tag):
    """Build the lookups that belong to *tag* ('GPOS' or 'GSUB').

    Assigns a fresh 0-based ``lookup_index`` to every matching lookup
    (and None to all others), then returns the compiled lookups in
    index order.
    """
    assert tag in ('GPOS', 'GSUB'), tag
    for lookup in self.lookups_:
        lookup.lookup_index = None
    selected = [l for l in self.lookups_ if l.table == tag]
    for index, lookup in enumerate(selected):
        lookup.lookup_index = index
    return [lookup.build() for lookup in selected]
def makeTable(self, tag):
    """Compile the GSUB or GPOS table (*tag*) from the collected features.

    Builds the LookupList first, then deduplicates features by
    (tag, lookup indices), and finally assembles the ScriptList with
    per-language feature and required-feature indices.
    """
    table = getattr(otTables, tag, None)()
    table.Version = 0x00010000
    table.ScriptList = otTables.ScriptList()
    table.ScriptList.ScriptRecord = []
    table.FeatureList = otTables.FeatureList()
    table.FeatureList.FeatureRecord = []
    table.LookupList = otTables.LookupList()
    table.LookupList.Lookup = self.buildLookups_(tag)

    # Build a table for mapping (tag, lookup_indices) to feature_index.
    # For example, ('liga', (2,3,7)) --> 23.
    feature_indices = {}
    required_feature_indices = {}  # ('latn', 'DEU') --> 23
    scripts = {}  # 'latn' --> {'DEU': [23, 24]} for feature #23,24
    # Sort the feature table by feature tag:
    # https://github.com/fonttools/fonttools/issues/568
    sortFeatureTag = lambda f: (f[0][2], f[0][1], f[0][0], f[1])
    for key, lookups in sorted(self.features_.items(), key=sortFeatureTag):
        script, lang, feature_tag = key
        # l.lookup_index will be None when a lookup is not needed
        # for the table under construction. For example, substitution
        # rules will have no lookup_index while building GPOS tables.
        lookup_indices = tuple([l.lookup_index for l in lookups
                                if l.lookup_index is not None])
        # A GPOS 'size' feature is kept even when it has no lookups.
        size_feature = (tag == "GPOS" and feature_tag == "size")
        if len(lookup_indices) == 0 and not size_feature:
            continue
        feature_key = (feature_tag, lookup_indices)
        feature_index = feature_indices.get(feature_key)
        if feature_index is None:
            feature_index = len(table.FeatureList.FeatureRecord)
            frec = otTables.FeatureRecord()
            frec.FeatureTag = feature_tag
            frec.Feature = otTables.Feature()
            frec.Feature.FeatureParams = self.buildFeatureParams(
                feature_tag)
            frec.Feature.LookupListIndex = list(lookup_indices)
            frec.Feature.LookupCount = len(lookup_indices)
            table.FeatureList.FeatureRecord.append(frec)
            feature_indices[feature_key] = feature_index
        scripts.setdefault(script, {}).setdefault(lang, []).append(
            feature_index)
        if self.required_features_.get((script, lang)) == feature_tag:
            required_feature_indices[(script, lang)] = feature_index

    # Build ScriptList.
    for script, lang_features in sorted(scripts.items()):
        srec = otTables.ScriptRecord()
        srec.ScriptTag = script
        srec.Script = otTables.Script()
        srec.Script.DefaultLangSys = None
        srec.Script.LangSysRecord = []
        for lang, feature_indices in sorted(lang_features.items()):
            langrec = otTables.LangSysRecord()
            langrec.LangSys = otTables.LangSys()
            langrec.LangSys.LookupOrder = None
            req_feature_index = \
                required_feature_indices.get((script, lang))
            if req_feature_index is None:
                # 0xFFFF marks "no required feature".
                langrec.LangSys.ReqFeatureIndex = 0xFFFF
            else:
                langrec.LangSys.ReqFeatureIndex = req_feature_index
            # The required feature is not repeated in FeatureIndex.
            langrec.LangSys.FeatureIndex = [i for i in feature_indices
                                            if i != req_feature_index]
            langrec.LangSys.FeatureCount = \
                len(langrec.LangSys.FeatureIndex)
            if lang == "dflt":
                srec.Script.DefaultLangSys = langrec.LangSys
            else:
                langrec.LangSysTag = lang
                srec.Script.LangSysRecord.append(langrec)
        srec.Script.LangSysCount = len(srec.Script.LangSysRecord)
        table.ScriptList.ScriptRecord.append(srec)

    table.ScriptList.ScriptCount = len(table.ScriptList.ScriptRecord)
    table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord)
    table.LookupList.LookupCount = len(table.LookupList.Lookup)
    return table
def add_language_system(self, location, script, language):
    """Register a ``languagesystem`` statement.

    Enforces the ordering rules of the OpenType Feature File
    Specification, section 4.b.i: "DFLT dflt" must come first, and all
    "DFLT" systems must precede non-"DFLT" ones.
    """
    if script == "DFLT":
        if language == "dflt" and self.default_language_systems_:
            raise FeatureLibError(
                'If "languagesystem DFLT dflt" is present, it must be '
                'the first of the languagesystem statements', location)
        if self.seen_non_DFLT_script_:
            raise FeatureLibError(
                'languagesystems using the "DFLT" script tag must '
                "precede all other languagesystems",
                location
            )
    else:
        self.seen_non_DFLT_script_ = True
    key = (script, language)
    if key in self.default_language_systems_:
        raise FeatureLibError(
            '"languagesystem %s %s" has already been specified' %
            (script.strip(), language.strip()), location)
    self.default_language_systems_.add(key)
def get_default_language_systems_(self):
    """Return the language systems currently in effect, as a frozenset.

    OpenType Feature File specification, 4.b.i: if no
    ``languagesystem`` statement is present, the implementation must
    behave exactly as though ``languagesystem DFLT dflt;`` appeared at
    the beginning of the feature file.
    """
    if not self.default_language_systems_:
        return frozenset({('DFLT', 'dflt')})
    return frozenset(self.default_language_systems_)
def start_feature(self, location, name):
    """Begin a ``feature`` block: reset all per-feature builder state."""
    self.cur_feature_name_ = name
    self.cur_lookup_ = None
    self.script_ = 'DFLT'
    self.language_systems = self.get_default_language_systems_()
    self.lookupflag_, self.lookupflag_markFilterSet_ = 0, None
    # Remember where the 'aalt' feature block starts.
    if name == "aalt":
        self.aalt_location_ = location
def end_feature(self):
    """Close the current ``feature`` block and clear its state."""
    assert self.cur_feature_name_ is not None
    self.cur_feature_name_ = None
    self.language_systems = None
    self.cur_lookup_ = None
    self.lookupflag_, self.lookupflag_markFilterSet_ = 0, None
def start_lookup_block(self, location, name):
    """Begin a named ``lookup`` block.

    Raises FeatureLibError on redefinition or when inside 'aalt'.
    """
    if name in self.named_lookups_:
        raise FeatureLibError(
            'Lookup "%s" has already been defined' % name, location)
    if self.cur_feature_name_ == "aalt":
        raise FeatureLibError(
            "Lookup blocks cannot be placed inside 'aalt' features; "
            "move it out, and then refer to it with a lookup statement",
            location)
    self.cur_lookup_name_ = name
    self.named_lookups_[name] = None
    self.cur_lookup_ = None
    # A standalone lookup block (outside any feature) resets flag state.
    if self.cur_feature_name_ is None:
        self.lookupflag_ = 0
        self.lookupflag_markFilterSet_ = None
def end_lookup_block(self):
    """Close the current named ``lookup`` block."""
    assert self.cur_lookup_name_ is not None
    self.cur_lookup_name_ = None
    self.cur_lookup_ = None
    # A standalone lookup block (outside any feature) resets flag state.
    if self.cur_feature_name_ is None:
        self.lookupflag_ = 0
        self.lookupflag_markFilterSet_ = None
def add_lookup_call(self, lookup_name):
    """Attach a previously defined named lookup to the current feature."""
    assert lookup_name in self.named_lookups_, lookup_name
    self.cur_lookup_ = None
    self.add_lookup_to_feature_(self.named_lookups_[lookup_name],
                                self.cur_feature_name_)
def set_font_revision(self, location, revision):
    """Record the font revision value from a ``FontRevision`` statement."""
    self.fontRevision_ = revision
def set_language(self, location, language, include_default, required):
    """Handle a ``language`` statement within the current feature.

    *include_default* controls whether rules already registered for
    the current script's 'dflt' language carry over to *language*.
    *required* marks the current feature as the required feature of
    (script, language).
    """
    assert(len(language) == 4)
    if self.cur_feature_name_ in ('aalt', 'size'):
        raise FeatureLibError(
            "Language statements are not allowed "
            "within \"feature %s\"" % self.cur_feature_name_, location)
    if self.cur_feature_name_ is None:
        raise FeatureLibError(
            "Language statements are not allowed "
            "within standalone lookup blocks", location)
    self.cur_lookup_ = None
    key = (self.script_, language, self.cur_feature_name_)
    # Copy (not alias) the default-language lookups when inheriting.
    lookups = self.features_.get((key[0], 'dflt', key[2]))
    if (language == 'dflt' or include_default) and lookups:
        self.features_[key] = lookups[:]
    else:
        self.features_[key] = []
    self.language_systems = frozenset([(self.script_, language)])
    if required:
        key = (self.script_, language)
        if key in self.required_features_:
            raise FeatureLibError(
                "Language %s (script %s) has already "
                "specified feature %s as its required feature" % (
                    language.strip(), self.script_.strip(),
                    self.required_features_[key].strip()),
                location)
        self.required_features_[key] = self.cur_feature_name_
def getMarkAttachClass_(self, location, glyphs):
    """Return the MarkAttachmentType class id for *glyphs*, allocating
    a new one if this glyph set has not been seen before.

    Class ids start at 1.  A glyph may belong to at most one class;
    conflicts raise FeatureLibError.
    """
    glyphs = frozenset(glyphs)
    cached = self.markAttachClassID_.get(glyphs)
    if cached is not None:
        return cached
    new_id = len(self.markAttachClassID_) + 1
    self.markAttachClassID_[glyphs] = new_id
    for glyph in glyphs:
        if glyph in self.markAttach_:
            _, loc = self.markAttach_[glyph]
            raise FeatureLibError(
                "Glyph %s already has been assigned "
                "a MarkAttachmentType at %s:%d:%d" % (
                    glyph, loc[0], loc[1], loc[2]),
                location)
        self.markAttach_[glyph] = (new_id, location)
    return new_id
def getMarkFilterSet_(self, location, glyphs):
    """Return the 0-based mark filtering set id for *glyphs*,
    allocating a new id if this glyph set is new."""
    key = frozenset(glyphs)
    if key in self.markFilterSets_:
        return self.markFilterSets_[key]
    new_id = len(self.markFilterSets_)
    self.markFilterSets_[key] = new_id
    return new_id
def set_lookup_flag(self, location, value, markAttach, markFilter):
    """Set the active lookup flag, folding in mark-attachment and
    mark-filtering-set information."""
    flag = value & 0xFF  # low byte holds the standard flag bits
    if markAttach:
        # The MarkAttachmentType class id occupies the high byte.
        flag |= self.getMarkAttachClass_(location, markAttach) << 8
    if markFilter:
        # 0x10 = UseMarkFilteringSet; the set id is stored separately.
        self.lookupflag_markFilterSet_ = \
            self.getMarkFilterSet_(location, markFilter)
        flag |= 0x10
    else:
        self.lookupflag_markFilterSet_ = None
    self.lookupflag_ = flag
def set_script(self, location, script):
    """Handle a ``script`` statement within the current feature."""
    if self.cur_feature_name_ in ('aalt', 'size'):
        raise FeatureLibError(
            "Script statements are not allowed "
            "within \"feature %s\"" % self.cur_feature_name_, location)
    if self.cur_feature_name_ is None:
        raise FeatureLibError(
            "Script statements are not allowed "
            "within standalone lookup blocks", location)
    if self.language_systems == {(script, 'dflt')}:
        # Nothing to do.
        return
    self.cur_lookup_ = None
    self.script_ = script
    self.lookupflag_ = 0
    self.lookupflag_markFilterSet_ = None
    # A script statement implicitly selects "language dflt".
    self.set_language(location, "dflt",
                      include_default=True, required=False)
def find_lookup_builders_(self, lookups):
    """Helper for building chain contextual substitutions.

    Given a list of lists of lookup-name nodes, finds the
    LookupBuilder for each name.  A ``None`` entry in *lookups*
    stays ``None`` in the result.
    """
    return [
        None if lookuplist is None
        else [self.named_lookups_.get(node.name) for node in lookuplist]
        for lookuplist in lookups
    ]
def add_attach_points(self, location, glyphs, contourPoints):
    """Accumulate GDEF attachment contour points for each glyph."""
    for glyph in glyphs:
        points = self.attachPoints_.setdefault(glyph, set())
        points.update(contourPoints)
def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups):
    """Add a chained contextual positioning rule (GPOS type 8)."""
    lookup = self.get_lookup_(location, ChainContextPosBuilder)
    lookup.rules.append((prefix, glyphs, suffix,
                         self.find_lookup_builders_(lookups)))
def add_chain_context_subst(self, location,
                            prefix, glyphs, suffix, lookups):
    """Add a chained contextual substitution rule (GSUB type 6)."""
    lookup = self.get_lookup_(location, ChainContextSubstBuilder)
    lookup.substitutions.append((prefix, glyphs, suffix,
                                 self.find_lookup_builders_(lookups)))
def add_alternate_subst(self, location,
                        prefix, glyph, suffix, replacement):
    """Add an alternate substitution rule (GSUB type 3).

    Inside 'aalt', alternates are only accumulated for later
    resolution.  A context (prefix/suffix) wraps the rule in a
    chaining lookup.
    """
    if self.cur_feature_name_ == "aalt":
        alts = self.aalt_alternates_.setdefault(glyph, set())
        alts.update(replacement)
        return
    if prefix or suffix:
        chain = self.get_lookup_(location, ChainContextSubstBuilder)
        lookup = self.get_chained_lookup_(location, AlternateSubstBuilder)
        chain.substitutions.append((prefix, [{glyph}], suffix, [lookup]))
    else:
        lookup = self.get_lookup_(location, AlternateSubstBuilder)
    if glyph in lookup.alternates:
        raise FeatureLibError(
            'Already defined alternates for glyph "%s"' % glyph,
            location)
    lookup.alternates[glyph] = replacement
def add_feature_reference(self, location, featureName):
    """Handle a ``feature <tag>;`` reference, valid only inside 'aalt'."""
    if self.cur_feature_name_ != "aalt":
        raise FeatureLibError(
            'Feature references are only allowed inside "feature aalt"',
            location)
    self.aalt_features_.append((location, featureName))
def add_featureName(self, tag):
    """Register *tag* in the set of features that define names."""
    self.featureNames_.add(tag)
def add_cv_parameter(self, tag):
    """Register *tag* in the set of features with cvParameters blocks."""
    self.cv_parameters_.add(tag)
def add_to_cv_num_named_params(self, tag):
    """Adds new items to ``self.cv_num_named_params_``
    or increments the count of existing items."""
    count = self.cv_num_named_params_.get(tag, 0)
    self.cv_num_named_params_[tag] = count + 1
def add_cv_character(self, character, tag):
    """Append a character entry for the cvParameters block of *tag*."""
    self.cv_characters_[tag].append(character)
def set_base_axis(self, bases, scripts, vertical):
    """Store BASE table axis data for the horizontal or vertical axis."""
    attr = "base_vert_axis_" if vertical else "base_horiz_axis_"
    setattr(self, attr, (bases, scripts))
def set_size_parameters(self, location, DesignSize, SubfamilyID,
                        RangeStart, RangeEnd):
    """Store the ``parameters`` statement of a 'size' feature."""
    if self.cur_feature_name_ != 'size':
        raise FeatureLibError(
            "Parameters statements are not allowed "
            "within \"feature %s\"" % self.cur_feature_name_, location)
    self.size_parameters_ = [DesignSize, SubfamilyID, RangeStart, RangeEnd]
    # Register the feature even if it ends up with no lookups.
    for script, lang in self.language_systems:
        key = (script, lang, self.cur_feature_name_)
        self.features_.setdefault(key, [])
def add_ligature_subst(self, location,
                       prefix, glyphs, suffix, replacement, forceChain):
    """Add a ligature substitution rule (GSUB type 4)."""
    if prefix or suffix or forceChain:
        chain = self.get_lookup_(location, ChainContextSubstBuilder)
        lookup = self.get_chained_lookup_(location, LigatureSubstBuilder)
        chain.substitutions.append((prefix, glyphs, suffix, [lookup]))
    else:
        lookup = self.get_lookup_(location, LigatureSubstBuilder)

    # OpenType feature file syntax, section 5.d, "Ligature substitution":
    # "Since the OpenType specification does not allow ligature
    # substitutions to be specified on target sequences that contain
    # glyph classes, the implementation software will enumerate
    # all specific glyph sequences if glyph classes are detected"
    for g in sorted(itertools.product(*glyphs)):
        lookup.ligatures[g] = replacement
def add_multiple_subst(self, location,
                       prefix, glyph, suffix, replacements, forceChain=False):
    """Add a one-to-many substitution rule (GSUB type 2)."""
    if prefix or suffix or forceChain:
        chain = self.get_lookup_(location, ChainContextSubstBuilder)
        sub = self.get_chained_lookup_(location, MultipleSubstBuilder)
        sub.mapping[glyph] = replacements
        chain.substitutions.append((prefix, [{glyph}], suffix, [sub]))
        return
    lookup = self.get_lookup_(location, MultipleSubstBuilder)
    if glyph in lookup.mapping:
        if replacements == lookup.mapping[glyph]:
            # Identical duplicate rules are tolerated with a log message.
            log.info(
                'Removing duplicate multiple substitution from glyph'
                ' "%s" to %s%s',
                glyph, replacements,
                ' at {}:{}:{}'.format(*location) if location else '',
            )
        else:
            raise FeatureLibError(
                'Already defined substitution for glyph "%s"' % glyph,
                location)
    lookup.mapping[glyph] = replacements
def add_reverse_chain_single_subst(self, location, old_prefix,
                                   old_suffix, mapping):
    """Add a reverse chaining single substitution rule (GSUB type 8)."""
    lookup = self.get_lookup_(location, ReverseChainSingleSubstBuilder)
    lookup.substitutions.append((old_prefix, old_suffix, mapping))
def add_single_subst(self, location, prefix, suffix, mapping, forceChain):
    """Add single substitution rules (GSUB type 1) for *mapping*.

    Inside 'aalt' the pairs only accumulate alternates.  A context
    (or *forceChain*) routes the rules through a chaining lookup.
    """
    if self.cur_feature_name_ == "aalt":
        for (from_glyph, to_glyph) in mapping.items():
            alts = self.aalt_alternates_.setdefault(from_glyph, set())
            alts.add(to_glyph)
        return
    if prefix or suffix or forceChain:
        self.add_single_subst_chained_(location, prefix, suffix, mapping)
        return
    lookup = self.get_lookup_(location, SingleSubstBuilder)
    for (from_glyph, to_glyph) in mapping.items():
        if from_glyph in lookup.mapping:
            if to_glyph == lookup.mapping[from_glyph]:
                # Identical duplicates are tolerated with a log message.
                log.info(
                    'Removing duplicate single substitution from glyph'
                    ' "%s" to "%s" at %s:%i:%i',
                    from_glyph, to_glyph, *location,
                )
            else:
                raise FeatureLibError(
                    'Already defined rule for replacing glyph "%s" by "%s"' %
                    (from_glyph, lookup.mapping[from_glyph]),
                    location)
        lookup.mapping[from_glyph] = to_glyph
def add_single_subst_chained_(self, location, prefix, suffix, mapping):
    """Add a contextual single substitution, reusing a chainable
    single-subst lookup when one is compatible.

    # https://github.com/fonttools/fonttools/issues/512
    """
    chain = self.get_lookup_(location, ChainContextSubstBuilder)
    sub = chain.find_chainable_single_subst(set(mapping.keys()))
    if sub is None:
        sub = self.get_chained_lookup_(location, SingleSubstBuilder)
    sub.mapping.update(mapping)
    chain.substitutions.append((prefix, [mapping.keys()], suffix, [sub]))
def add_cursive_pos(self, location, glyphclass, entryAnchor, exitAnchor):
    """Add a cursive attachment rule (GPOS type 3)."""
    lookup = self.get_lookup_(location, CursivePosBuilder)
    lookup.add_attachment(
        location, glyphclass,
        makeOpenTypeAnchor(entryAnchor),
        makeOpenTypeAnchor(exitAnchor))
def add_marks_(self, location, lookupBuilder, marks):
    """Helper for add_mark_{base,liga,mark}_pos.

    Registers each mark glyph of every mark class in *marks* on
    *lookupBuilder*, raising if a glyph appears in two classes.
    """
    for _, markClass in marks:
        for markClassDef in markClass.definitions:
            for mark in markClassDef.glyphs.glyphSet():
                if mark not in lookupBuilder.marks:
                    otMarkAnchor = makeOpenTypeAnchor(markClassDef.anchor)
                    lookupBuilder.marks[mark] = (
                        markClass.name, otMarkAnchor)
                else:
                    existingMarkClass = lookupBuilder.marks[mark][0]
                    if markClass.name != existingMarkClass:
                        raise FeatureLibError(
                            "Glyph %s cannot be in both @%s and @%s" % (
                                mark, existingMarkClass, markClass.name),
                            location)
def add_mark_base_pos(self, location, bases, marks):
    """Add a mark-to-base attachment rule (GPOS type 4)."""
    builder = self.get_lookup_(location, MarkBasePosBuilder)
    self.add_marks_(location, builder, marks)
    for baseAnchor, markClass in marks:
        otBaseAnchor = makeOpenTypeAnchor(baseAnchor)
        for base in bases:
            builder.bases.setdefault(base, {})[markClass.name] = (
                otBaseAnchor)
def add_mark_lig_pos(self, location, ligatures, components):
    """Add a mark-to-ligature attachment rule (GPOS type 5).

    *components* holds one marks list per ligature component.
    """
    builder = self.get_lookup_(location, MarkLigPosBuilder)
    componentAnchors = []
    for marks in components:
        anchors = {}
        self.add_marks_(location, builder, marks)
        for ligAnchor, markClass in marks:
            anchors[markClass.name] = makeOpenTypeAnchor(ligAnchor)
        componentAnchors.append(anchors)
    for glyph in ligatures:
        builder.ligatures[glyph] = componentAnchors
def add_mark_mark_pos(self, location, baseMarks, marks):
    """Add a mark-to-mark attachment rule (GPOS type 6)."""
    builder = self.get_lookup_(location, MarkMarkPosBuilder)
    self.add_marks_(location, builder, marks)
    for baseAnchor, markClass in marks:
        otBaseAnchor = makeOpenTypeAnchor(baseAnchor)
        for baseMark in baseMarks:
            builder.baseMarks.setdefault(baseMark, {})[markClass.name] = (
                otBaseAnchor)
def add_class_pair_pos(self, location, glyphclass1, value1,
                       glyphclass2, value2):
    """Add a class-based pair positioning rule (GPOS type 2)."""
    lookup = self.get_lookup_(location, PairPosBuilder)
    lookup.addClassPair(location, glyphclass1, value1, glyphclass2, value2)
def add_subtable_break(self, location):
    """Forward a ``subtable;`` statement to the current lookup."""
    self.cur_lookup_.add_subtable_break(location)
def add_specific_pair_pos(self, location, glyph1, value1, glyph2, value2):
    """Add a glyph-specific pair positioning rule (GPOS type 2)."""
    lookup = self.get_lookup_(location, PairPosBuilder)
    lookup.addGlyphPair(location, glyph1, value1, glyph2, value2)
def add_single_pos(self, location, prefix, suffix, pos, forceChain):
    """Add single positioning rules (GPOS type 1) for (glyphs, value)
    pairs in *pos*; contextual rules go through a chaining lookup."""
    if prefix or suffix or forceChain:
        self.add_single_pos_chained_(location, prefix, suffix, pos)
    else:
        lookup = self.get_lookup_(location, SinglePosBuilder)
        for glyphs, value in pos:
            for glyph in glyphs:
                lookup.add_pos(location, glyph, value)
def add_single_pos_chained_(self, location, prefix, suffix, pos):
    """Add a contextual single positioning rule, reusing compatible
    chainable single-pos lookups where possible.

    # https://github.com/fonttools/fonttools/issues/514
    """
    chain = self.get_lookup_(location, ChainContextPosBuilder)
    targets = []
    for _, _, _, lookups in chain.rules:
        targets.extend(lookups)
    subs = []
    for glyphs, value in pos:
        if value is None:
            # Positions without a value record get no lookup reference.
            subs.append(None)
            continue
        otValue, _ = makeOpenTypeValueRecord(value, pairPosContext=False)
        sub = chain.find_chainable_single_pos(targets, glyphs, otValue)
        if sub is None:
            sub = self.get_chained_lookup_(location, SinglePosBuilder)
            targets.append(sub)
        for glyph in glyphs:
            sub.add_pos(location, glyph, value)
        subs.append(sub)
    assert len(pos) == len(subs), (pos, subs)
    chain.rules.append(
        (prefix, [g for g, v in pos], suffix, subs))
def setGlyphClass_(self, location, glyph, glyphClass):
    """Record the GDEF class of *glyph*, rejecting conflicting
    reassignment to a different class."""
    previous = self.glyphClassDefs_.get(glyph)
    if previous is not None:
        prevClass, prevLocation = previous
        if prevClass and prevClass != glyphClass:
            raise FeatureLibError(
                "Glyph %s was assigned to a different class at %s:%s:%s" %
                (glyph, prevLocation[0], prevLocation[1], prevLocation[2]),
                location)
    self.glyphClassDefs_[glyph] = (glyphClass, location)
def add_glyphClassDef(self, location, baseGlyphs, ligatureGlyphs,
                      markGlyphs, componentGlyphs):
    """Process a GDEF GlyphClassDef statement.

    Classes: 1 = base, 2 = ligature, 3 = mark, 4 = component.
    """
    classGroups = (baseGlyphs, ligatureGlyphs, markGlyphs, componentGlyphs)
    for glyphClass, glyphs in enumerate(classGroups, 1):
        for glyph in glyphs:
            self.setGlyphClass_(location, glyph, glyphClass)
def add_ligatureCaretByIndex_(self, location, glyphs, carets):
    """Record contour-point ligature carets; the first definition per
    glyph wins."""
    for glyph in glyphs:
        self.ligCaretPoints_.setdefault(glyph, carets)
def add_ligatureCaretByPos_(self, location, glyphs, carets):
    """Record coordinate-based ligature carets; the first definition
    per glyph wins."""
    for glyph in glyphs:
        self.ligCaretCoords_.setdefault(glyph, carets)
def add_name_record(self, location, nameID, platformID, platEncID,
                    langID, string):
    """Queue a name table record to be added when building."""
    self.names_.append([nameID, platformID, platEncID, langID, string])
def add_os2_field(self, key, value):
    """Record one OS/2 table field override."""
    self.os2_[key] = value
def add_hhea_field(self, key, value):
    """Record one hhea table field override."""
    self.hhea_[key] = value
def add_vhea_field(self, key, value):
    """Record one vhea table field override."""
    self.vhea_[key] = value
def makeOpenTypeAnchor(anchor):
    """ast.Anchor --> otTables.Anchor (None passes through)."""
    if anchor is None:
        return None
    deviceX = (otl.buildDevice(dict(anchor.xDeviceTable))
               if anchor.xDeviceTable is not None else None)
    deviceY = (otl.buildDevice(dict(anchor.yDeviceTable))
               if anchor.yDeviceTable is not None else None)
    return otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint,
                           deviceX, deviceY)
# Map from feature-file value record attribute names (first letter
# lower-cased, e.g. "xAdvance") to (otBase field name, isDevice) pairs,
# skipping reserved slots of the value record format.
_VALUEREC_ATTRS = {
    name[0].lower() + name[1:]: (name, isDevice)
    for _, name, isDevice, _ in otBase.valueRecordFormat
    if not name.startswith("Reserved")
}
def makeOpenTypeValueRecord(v, pairPosContext):
    """ast.ValueRecord --> (otBase.ValueRecord, int ValueFormat)"""
    if not v:
        return None, 0
    vr = {}
    for astName, (otName, isDevice) in _VALUEREC_ATTRS.items():
        val = getattr(v, astName, None)
        if val:
            vr[otName] = otl.buildDevice(dict(val)) if isDevice else val
    # In a pair positioning context an otherwise empty record still gets
    # an explicit zero advance.
    if pairPosContext and not vr:
        vr = {"YAdvance": 0} if v.vertical else {"XAdvance": 0}
    valRec = otl.buildValue(vr)
    return valRec, valRec.getFormat()
class LookupBuilder(object):
    """Base class for builders that each compile one OpenType lookup."""

    # Sentinel used inside rule/mapping containers to force a new subtable.
    SUBTABLE_BREAK_ = "SUBTABLE_BREAK"

    def __init__(self, font, location, table, lookup_type):
        self.font = font
        self.glyphMap = font.getReverseGlyphMap()
        self.location = location
        self.table, self.lookup_type = table, lookup_type
        self.lookupflag = 0
        self.markFilterSet = None
        self.lookup_index = None  # assigned when making final tables
        assert table in ('GPOS', 'GSUB')

    def equals(self, other):
        """True if *other* is the same kind of lookup with the same flags."""
        return (isinstance(other, self.__class__) and
                self.table == other.table and
                self.lookupflag == other.lookupflag and
                self.markFilterSet == other.markFilterSet)

    def inferGlyphClasses(self):
        """Infers glyph glasses for the GDEF table, such as {"cedilla":3}."""
        return {}

    def getAlternateGlyphs(self):
        """Helper for building 'aalt' features."""
        return {}

    def buildLookup_(self, subtables):
        """Wrap *subtables* in a Lookup carrying this builder's flags."""
        return otl.buildLookup(subtables, self.lookupflag, self.markFilterSet)

    def buildMarkClasses_(self, marks):
        """{"cedilla": ("BOTTOM", ast.Anchor), ...} --> {"BOTTOM":0, "TOP":1}

        Helper for MarkBasePostBuilder, MarkLigPosBuilder, and
        MarkMarkPosBuilder. Seems to return the same numeric IDs
        for mark classes as the AFDKO makeotf tool.
        """
        ids = {}
        for mark in sorted(marks.keys(), key=self.font.getGlyphID):
            markClassName, _markAnchor = marks[mark]
            if markClassName not in ids:
                ids[markClassName] = len(ids)
        return ids

    def setBacktrackCoverage_(self, prefix, subtable):
        """Fill Backtrack coverage; stored in reverse glyph order."""
        subtable.BacktrackGlyphCount = len(prefix)
        subtable.BacktrackCoverage = []
        for p in reversed(prefix):
            coverage = otl.buildCoverage(p, self.glyphMap)
            subtable.BacktrackCoverage.append(coverage)

    def setLookAheadCoverage_(self, suffix, subtable):
        """Fill LookAhead coverage from *suffix*."""
        subtable.LookAheadGlyphCount = len(suffix)
        subtable.LookAheadCoverage = []
        for s in suffix:
            coverage = otl.buildCoverage(s, self.glyphMap)
            subtable.LookAheadCoverage.append(coverage)

    def setInputCoverage_(self, glyphs, subtable):
        """Fill Input coverage from *glyphs*."""
        subtable.InputGlyphCount = len(glyphs)
        subtable.InputCoverage = []
        for g in glyphs:
            coverage = otl.buildCoverage(g, self.glyphMap)
            subtable.InputCoverage.append(coverage)

    def build_subst_subtables(self, mapping, klass):
        """Split *mapping* into per-subtable dicts at SUBTABLE_BREAK
        markers and build each with *klass*."""
        substitutions = [{}]
        for key in mapping:
            if key[0] == self.SUBTABLE_BREAK_:
                substitutions.append({})
            else:
                substitutions[-1][key] = mapping[key]
        subtables = [klass(s) for s in substitutions]
        return subtables

    def add_subtable_break(self, location):
        """Default: warn that this lookup type ignores 'subtable'."""
        log.warning(FeatureLibError(
            'unsupported "subtable" statement for lookup type',
            location
        ))
class AlternateSubstBuilder(LookupBuilder):
    """Builds GSUB lookup type 3 (alternate substitution)."""

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GSUB', 3)
        # glyph --> set/list of alternate glyphs
        self.alternates = OrderedDict()

    def equals(self, other):
        return (LookupBuilder.equals(self, other) and
                self.alternates == other.alternates)

    def build(self):
        """Compile the accumulated alternates into a Lookup."""
        subtables = self.build_subst_subtables(self.alternates,
                                               otl.buildAlternateSubstSubtable)
        return self.buildLookup_(subtables)

    def getAlternateGlyphs(self):
        return self.alternates

    def add_subtable_break(self, location):
        self.alternates[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class ChainContextPosBuilder(LookupBuilder):
    """Builds GPOS lookup type 8 (chained contextual positioning)."""

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GPOS', 8)
        self.rules = []  # (prefix, input, suffix, lookups)

    def equals(self, other):
        return (LookupBuilder.equals(self, other) and
                self.rules == other.rules)

    def build(self):
        """Compile the rules into format-3 ChainContextPos subtables."""
        subtables = []
        for (prefix, glyphs, suffix, lookups) in self.rules:
            if prefix == self.SUBTABLE_BREAK_:
                continue
            st = otTables.ChainContextPos()
            subtables.append(st)
            st.Format = 3
            self.setBacktrackCoverage_(prefix, st)
            self.setLookAheadCoverage_(suffix, st)
            self.setInputCoverage_(glyphs, st)
            st.PosCount = 0
            st.PosLookupRecord = []
            for sequenceIndex, lookupList in enumerate(lookups):
                if lookupList is not None:
                    if not isinstance(lookupList, list):
                        # Can happen with synthesised lookups
                        lookupList = [lookupList]
                    for l in lookupList:
                        st.PosCount += 1
                        if l.lookup_index is None:
                            raise FeatureLibError('Missing index of the specified '
                                'lookup, might be a substitution lookup',
                                self.location)
                        rec = otTables.PosLookupRecord()
                        rec.SequenceIndex = sequenceIndex
                        rec.LookupListIndex = l.lookup_index
                        st.PosLookupRecord.append(rec)
        return self.buildLookup_(subtables)

    def find_chainable_single_pos(self, lookups, glyphs, value):
        """Helper for add_single_pos_chained_()"""
        res = None
        # Walk from the newest lookup backwards, stopping at a break.
        for lookup in lookups[::-1]:
            if lookup == self.SUBTABLE_BREAK_:
                return res
            if isinstance(lookup, SinglePosBuilder) and \
                    all(lookup.can_add(glyph, value) for glyph in glyphs):
                res = lookup
        return res

    def add_subtable_break(self, location):
        self.rules.append((self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_,
                           self.SUBTABLE_BREAK_, [self.SUBTABLE_BREAK_]))
class ChainContextSubstBuilder(LookupBuilder):
    """Builds GSUB lookup type 6 (chained contextual substitution)."""

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GSUB', 6)
        self.substitutions = []  # (prefix, input, suffix, lookups)

    def equals(self, other):
        return (LookupBuilder.equals(self, other) and
                self.substitutions == other.substitutions)

    def build(self):
        """Compile the rules into format-3 ChainContextSubst subtables."""
        subtables = []
        for (prefix, input, suffix, lookups) in self.substitutions:
            if prefix == self.SUBTABLE_BREAK_:
                continue
            st = otTables.ChainContextSubst()
            subtables.append(st)
            st.Format = 3
            self.setBacktrackCoverage_(prefix, st)
            self.setLookAheadCoverage_(suffix, st)
            self.setInputCoverage_(input, st)
            st.SubstCount = 0
            st.SubstLookupRecord = []
            for sequenceIndex, lookupList in enumerate(lookups):
                if lookupList is not None:
                    if not isinstance(lookupList, list):
                        # Can happen with synthesised lookups
                        lookupList = [lookupList]
                    for l in lookupList:
                        st.SubstCount += 1
                        if l.lookup_index is None:
                            raise FeatureLibError('Missing index of the specified '
                                'lookup, might be a positioning lookup',
                                self.location)
                        rec = otTables.SubstLookupRecord()
                        rec.SequenceIndex = sequenceIndex
                        rec.LookupListIndex = l.lookup_index
                        st.SubstLookupRecord.append(rec)
        return self.buildLookup_(subtables)

    def getAlternateGlyphs(self):
        """Collect alternates from all referenced lookups (for 'aalt')."""
        result = {}
        for (_, _, _, lookuplist) in self.substitutions:
            if lookuplist == self.SUBTABLE_BREAK_:
                continue
            for lookups in lookuplist:
                if not isinstance(lookups, list):
                    lookups = [lookups]
                for lookup in lookups:
                    if lookup is not None:
                        alts = lookup.getAlternateGlyphs()
                        for glyph, replacements in alts.items():
                            result.setdefault(glyph, set()).update(replacements)
        return result

    def find_chainable_single_subst(self, glyphs):
        """Helper for add_single_subst_chained_()"""
        res = None
        # Walk newest-first; a subtable break stops the search.
        for _, _, _, substitutions in self.substitutions[::-1]:
            if substitutions == self.SUBTABLE_BREAK_:
                return res
            for sub in substitutions:
                if (isinstance(sub, SingleSubstBuilder) and
                        not any(g in glyphs for g in sub.mapping.keys())):
                    res = sub
        return res

    def add_subtable_break(self, location):
        self.substitutions.append((self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_,
                                   self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_))
class LigatureSubstBuilder(LookupBuilder):
    """Builds GSUB lookup type 4 (ligature substitution)."""

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GSUB', 4)
        self.ligatures = OrderedDict()  # {('f','f','i'): 'f_f_i'}

    def equals(self, other):
        return (LookupBuilder.equals(self, other) and
                self.ligatures == other.ligatures)

    def build(self):
        """Compile the accumulated ligatures into a Lookup."""
        subtables = self.build_subst_subtables(self.ligatures,
                                               otl.buildLigatureSubstSubtable)
        return self.buildLookup_(subtables)

    def add_subtable_break(self, location):
        self.ligatures[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class MultipleSubstBuilder(LookupBuilder):
    """Builds GSUB lookup type 2 (one-to-many substitution)."""

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GSUB', 2)
        # glyph --> sequence of replacement glyphs
        self.mapping = OrderedDict()

    def equals(self, other):
        return (LookupBuilder.equals(self, other) and
                self.mapping == other.mapping)

    def build(self):
        """Compile the accumulated mapping into a Lookup."""
        subtables = self.build_subst_subtables(self.mapping,
                                               otl.buildMultipleSubstSubtable)
        return self.buildLookup_(subtables)

    def add_subtable_break(self, location):
        self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class CursivePosBuilder(LookupBuilder):
    """Builds a GPOS LookupType 3 (cursive attachment) lookup.

    ``self.attachments`` maps a glyph name to its
    ``(entry_anchor, exit_anchor)`` pair.
    """
    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GPOS', 3)
        self.attachments = {}
    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.attachments == other.attachments
    def add_attachment(self, location, glyphs, entryAnchor, exitAnchor):
        """Attach the same entry/exit anchor pair to every glyph in *glyphs*."""
        pair = (entryAnchor, exitAnchor)
        for glyph in glyphs:
            self.attachments[glyph] = pair
    def build(self):
        subtable = otl.buildCursivePosSubtable(self.attachments, self.glyphMap)
        return self.buildLookup_([subtable])
class MarkBasePosBuilder(LookupBuilder):
    """Builds a GPOS LookupType 4 (mark-to-base attachment) lookup.

    ``self.marks`` maps a mark glyph to (mark class name, anchor);
    ``self.bases`` maps a base glyph to {mark class name: anchor}.
    """
    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GPOS', 4)
        self.marks = {}  # glyphName -> (markClassName, anchor)
        self.bases = {}  # glyphName -> {markClassName: anchor}
    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.marks == other.marks and self.bases == other.bases
    def inferGlyphClasses(self):
        """Bases get GDEF class 1, marks class 3; on overlap the mark wins."""
        classes = {glyph: 1 for glyph in self.bases}
        for glyph in self.marks:
            classes[glyph] = 3
        return classes
    def build(self):
        markClasses = self.buildMarkClasses_(self.marks)
        # Translate mark class names into their numeric class ids.
        marks = {}
        for mark, (mc, anchor) in self.marks.items():
            marks[mark] = (markClasses[mc], anchor)
        bases = {
            glyph: {markClasses[mc]: anchor for mc, anchor in anchors.items()}
            for glyph, anchors in self.bases.items()
        }
        subtables = otl.buildMarkBasePos(marks, bases, self.glyphMap)
        return self.buildLookup_(subtables)
class MarkLigPosBuilder(LookupBuilder):
    """Builds a GPOS LookupType 5 (mark-to-ligature attachment) lookup.

    ``self.marks`` maps a mark glyph to (mark class name, anchor);
    ``self.ligatures`` maps a ligature glyph to a list holding one
    {mark class name: anchor} dict per ligature component.
    """
    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GPOS', 5)
        self.marks = {}  # glyphName -> (markClassName, anchor)
        self.ligatures = {}  # glyphName -> [{markClassName: anchor}, ...]
    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.marks == other.marks and self.ligatures == other.ligatures
    def inferGlyphClasses(self):
        """Ligatures get GDEF class 2, marks class 3; marks win on overlap."""
        classes = {glyph: 2 for glyph in self.ligatures}
        for glyph in self.marks:
            classes[glyph] = 3
        return classes
    def build(self):
        markClasses = self.buildMarkClasses_(self.marks)
        marks = {}
        for mark, (mc, anchor) in self.marks.items():
            marks[mark] = (markClasses[mc], anchor)
        # Re-key each component's anchors by numeric mark class id.
        ligs = {
            lig: [{markClasses[mc]: a for mc, a in comp.items()}
                  for comp in components]
            for lig, components in self.ligatures.items()
        }
        subtables = otl.buildMarkLigPos(marks, ligs, self.glyphMap)
        return self.buildLookup_(subtables)
class MarkMarkPosBuilder(LookupBuilder):
    """Builds a GPOS LookupType 6 (mark-to-mark attachment) lookup.

    ``self.marks`` maps an attaching mark glyph to (markClassName, anchor);
    ``self.baseMarks`` maps a base mark glyph to {markClassName: anchor}.
    """
    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GPOS', 6)
        self.marks = {}  # glyphName -> (markClassName, anchor)
        self.baseMarks = {}  # glyphName -> {markClassName: anchor}
    def equals(self, other):
        return (LookupBuilder.equals(self, other) and
                self.marks == other.marks and
                self.baseMarks == other.baseMarks)
    def inferGlyphClasses(self):
        # Both sides of a mark-to-mark attachment are GDEF class 3 (mark).
        result = {glyph: 3 for glyph in self.baseMarks}
        result.update({glyph: 3 for glyph in self.marks})
        return result
    def build(self):
        """Assemble the MarkMarkPos subtable directly from otTables fields."""
        markClasses = self.buildMarkClasses_(self.marks)
        # Mark class names ordered by their numeric class id.
        markClassList = sorted(markClasses.keys(), key=markClasses.get)
        marks = {mark: (markClasses[mc], anchor)
                 for mark, (mc, anchor) in self.marks.items()}
        st = otTables.MarkMarkPos()
        st.Format = 1
        st.ClassCount = len(markClasses)
        st.Mark1Coverage = otl.buildCoverage(marks, self.glyphMap)
        st.Mark2Coverage = otl.buildCoverage(self.baseMarks, self.glyphMap)
        st.Mark1Array = otl.buildMarkArray(marks, self.glyphMap)
        st.Mark2Array = otTables.Mark2Array()
        st.Mark2Array.Mark2Count = len(st.Mark2Coverage.glyphs)
        st.Mark2Array.Mark2Record = []
        for base in st.Mark2Coverage.glyphs:
            # One anchor slot per mark class; missing classes become None.
            anchors = [self.baseMarks[base].get(mc) for mc in markClassList]
            st.Mark2Array.Mark2Record.append(otl.buildMark2Record(anchors))
        return self.buildLookup_([st])
class ReverseChainSingleSubstBuilder(LookupBuilder):
    """Builds a GSUB LookupType 8 (reverse chaining contextual single
    substitution) lookup; every rule becomes its own subtable.
    """
    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GSUB', 8)
        self.substitutions = []  # (prefix, suffix, mapping)
    def equals(self, other):
        return (LookupBuilder.equals(self, other) and
                self.substitutions == other.substitutions)
    def build(self):
        subtables = []
        for prefix, suffix, mapping in self.substitutions:
            st = otTables.ReverseChainSingleSubst()
            st.Format = 1
            self.setBacktrackCoverage_(prefix, st)
            self.setLookAheadCoverage_(suffix, st)
            st.Coverage = otl.buildCoverage(mapping.keys(), self.glyphMap)
            st.GlyphCount = len(mapping)
            # Substitute glyphs must be listed in coverage (glyph) order.
            st.Substitute = [mapping[g] for g in st.Coverage.glyphs]
            subtables.append(st)
        return self.buildLookup_(subtables)
    def add_subtable_break(self, location):
        # Nothing to do here, each substitution is in its own subtable.
        pass
class SingleSubstBuilder(LookupBuilder):
    """Builds a GSUB LookupType 1 (single substitution) lookup.

    ``self.mapping`` maps each input glyph name to its replacement glyph,
    preserving rule order.
    """
    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GSUB', 1)
        self.mapping = OrderedDict()
    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.mapping == other.mapping
    def build(self):
        st = self.build_subst_subtables(self.mapping,
                                        otl.buildSingleSubstSubtable)
        return self.buildLookup_(st)
    def getAlternateGlyphs(self):
        """Each input glyph has exactly one alternate: its replacement."""
        return {glyph: {repl} for glyph, repl in self.mapping.items()}
    def add_subtable_break(self, location):
        # Sentinel entry marking the start of a new subtable.
        self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class ClassPairPosSubtableBuilder(object):
    """Accumulates class-based pair-positioning rules into one or more
    PairPos class (format 2) subtables.

    Pairs keep merging into the current subtable while both glyph classes
    can still be added to its class definitions; otherwise (or after an
    explicit subtable break) the pending state is flushed and a fresh
    subtable is started.
    """
    def __init__(self, builder, valueFormat1, valueFormat2):
        # builder: the owning PairPosBuilder; supplies glyphMap at flush time.
        self.builder_ = builder
        self.classDef1_, self.classDef2_ = None, None
        self.values_ = {}  # (glyphclass1, glyphclass2) --> (value1, value2)
        self.valueFormat1_, self.valueFormat2_ = valueFormat1, valueFormat2
        self.forceSubtableBreak_ = False
        self.subtables_ = []
    def addPair(self, gc1, value1, gc2, value2):
        """Add one class pair; opens a new subtable when it cannot merge."""
        mergeable = (not self.forceSubtableBreak_ and
                     self.classDef1_ is not None and
                     self.classDef1_.canAdd(gc1) and
                     self.classDef2_ is not None and
                     self.classDef2_.canAdd(gc2))
        if not mergeable:
            # Emit what we have so far and reset the per-subtable state.
            self.flush_()
            self.classDef1_ = otl.ClassDefBuilder(useClass0=True)
            self.classDef2_ = otl.ClassDefBuilder(useClass0=False)
            self.values_ = {}
        self.classDef1_.add(gc1)
        self.classDef2_.add(gc2)
        self.values_[(gc1, gc2)] = (value1, value2)
    def addSubtableBreak(self):
        """Force the next addPair() call to start a new subtable."""
        self.forceSubtableBreak_ = True
    def subtables(self):
        """Flush pending pairs and return every subtable built so far."""
        self.flush_()
        return self.subtables_
    def flush_(self):
        # Nothing accumulated yet: nothing to emit.
        if self.classDef1_ is None or self.classDef2_ is None:
            return
        st = otl.buildPairPosClassesSubtable(self.values_,
                                             self.builder_.glyphMap)
        # A subtable with empty coverage positions nothing; drop it.
        if st.Coverage is None:
            return
        self.subtables_.append(st)
        self.forceSubtableBreak_ = False
class PairPosBuilder(LookupBuilder):
    """Builds a GPOS LookupType 2 (pair adjustment) lookup out of both
    class-based pairs and specific glyph pairs.
    """
    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GPOS', 2)
        self.pairs = []  # [(gc1, value1, gc2, value2)*]
        self.glyphPairs = {}  # (glyph1, glyph2) --> (value1, value2)
        self.locations = {}  # (gc1, gc2) --> (filepath, line, column)
    def addClassPair(self, location, glyphclass1, value1, glyphclass2, value2):
        """Queue a class-based pair rule; it is resolved later in build()."""
        self.pairs.append((glyphclass1, value1, glyphclass2, value2))
    def addGlyphPair(self, location, glyph1, value1, glyph2, value2):
        """Register a specific glyph pair; the first definition wins."""
        key = (glyph1, glyph2)
        oldValue = self.glyphPairs.get(key, None)
        if oldValue is not None:
            # the Feature File spec explicitly allows specific pairs generated
            # by an 'enum' rule to be overridden by preceding single pairs
            otherLoc = self.locations[key]
            log.debug(
                'Already defined position for pair %s %s at %s:%d:%d; '
                'choosing the first value',
                glyph1, glyph2, otherLoc[0], otherLoc[1], otherLoc[2])
        else:
            val1, _ = makeOpenTypeValueRecord(value1, pairPosContext=True)
            val2, _ = makeOpenTypeValueRecord(value2, pairPosContext=True)
            self.glyphPairs[key] = (val1, val2)
            self.locations[key] = location
    def add_subtable_break(self, location):
        # A row of sentinel values marks the break in the pairs stream.
        self.pairs.append((self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_,
                           self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_))
    def equals(self, other):
        return (LookupBuilder.equals(self, other) and
                self.glyphPairs == other.glyphPairs and
                self.pairs == other.pairs)
    def build(self):
        """Build subtables: specific glyph pairs first, then class pairs
        grouped by their (valueFormat1, valueFormat2) combination."""
        builders = {}
        builder = None
        for glyphclass1, value1, glyphclass2, value2 in self.pairs:
            if glyphclass1 is self.SUBTABLE_BREAK_:
                # NOTE(review): the break is applied only to the most recently
                # used class-pair builder, not to all of them — presumably
                # intentional, but worth confirming against feaLib behavior.
                if builder is not None:
                    builder.addSubtableBreak()
                continue
            val1, valFormat1 = makeOpenTypeValueRecord(
                value1, pairPosContext=True)
            val2, valFormat2 = makeOpenTypeValueRecord(
                value2, pairPosContext=True)
            # One accumulator per value-format combination.
            builder = builders.get((valFormat1, valFormat2))
            if builder is None:
                builder = ClassPairPosSubtableBuilder(
                    self, valFormat1, valFormat2)
                builders[(valFormat1, valFormat2)] = builder
            builder.addPair(glyphclass1, val1, glyphclass2, val2)
        subtables = []
        if self.glyphPairs:
            subtables.extend(
                otl.buildPairPosGlyphs(self.glyphPairs, self.glyphMap))
        # Deterministic output order: sort by value-format key.
        for key in sorted(builders.keys()):
            subtables.extend(builders[key].subtables())
        return self.buildLookup_(subtables)
class SinglePosBuilder(LookupBuilder):
    """Builds a GPOS LookupType 1 (single adjustment) lookup.

    ``self.mapping`` maps a glyph name to its otTables.ValueRecord, while
    ``self.locations`` remembers where each glyph's rule was defined so
    conflicting redefinitions can be reported with a source position.
    """
    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, 'GPOS', 1)
        self.locations = {}  # glyph -> (filename, line, column)
        self.mapping = {}  # glyph -> otTables.ValueRecord
    def add_pos(self, location, glyph, valueRecord):
        """Register a value record for *glyph*; conflicting redefinitions
        raise FeatureLibError."""
        otValueRecord, _ = makeOpenTypeValueRecord(
            valueRecord, pairPosContext=False)
        if not self.can_add(glyph, otValueRecord):
            otherLoc = self.locations[glyph]
            raise FeatureLibError(
                'Already defined different position for glyph "%s" at %s:%d:%d'
                % (glyph, otherLoc[0], otherLoc[1], otherLoc[2]),
                location)
        if otValueRecord:  # empty value records are not stored
            self.mapping[glyph] = otValueRecord
        self.locations[glyph] = location
    def can_add(self, glyph, value):
        """True unless *glyph* already maps to a different value record."""
        assert isinstance(value, otl.ValueRecord)
        existing = self.mapping.get(glyph)
        return existing is None or existing == value
    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.mapping == other.mapping
    def build(self):
        subtables = otl.buildSinglePos(self.mapping, self.glyphMap)
        return self.buildLookup_(subtables)
| 41.974033 | 83 | 0.597517 |
73eb4e26e5da71474e4b9d58474e2d22b89e338c | 754 | py | Python | appengine/findit/model/test/entity_util_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/findit/model/test/entity_util_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/findit/model/test/entity_util_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import ndb
from gae_libs.testcase import TestCase
from model import entity_util
class TestModel(ndb.Model):
  """Ndb model to assist unit tests for this class."""
  # Single string field; gives the test entity some queryable state.
  name = ndb.StringProperty()
class ModelUtilTest(TestCase):
  """Exercises entity_util.GetEntityFromUrlsafeKey."""
  def testGetEntityFromUrlsafeKey(self):
    # Invalid keys must yield None instead of raising.
    self.assertEqual(None, entity_util.GetEntityFromUrlsafeKey(None))
    self.assertEqual(None, entity_util.GetEntityFromUrlsafeKey('notvalid'))
    # A stored entity's urlsafe key must resolve back to that entity.
    model = TestModel(name='name')
    model.put()
    k = model.key.urlsafe()
    self.assertEqual(model, entity_util.GetEntityFromUrlsafeKey(k))
| 29 | 75 | 0.762599 |
73eb5246426fa6ef12cfa493b18fbae112df49e2 | 2,863 | py | Python | pytorch_src/cnn_context_classifier.py | seraphinatarrant/plan-write-revise-demo | b394657b1bd1d6978ea31d6ac5bad6f3a4b6e1da | [
"MIT"
] | 30 | 2019-04-14T03:30:10.000Z | 2021-06-23T18:49:54.000Z | pytorch_src/cnn_context_classifier.py | seraphinatarrant/plan-write-revise-demo | b394657b1bd1d6978ea31d6ac5bad6f3a4b6e1da | [
"MIT"
] | 2 | 2019-08-20T19:00:53.000Z | 2022-03-04T12:16:06.000Z | pytorch_src/cnn_context_classifier.py | seraphinatarrant/plan-write-revise-demo | b394657b1bd1d6978ea31d6ac5bad6f3a4b6e1da | [
"MIT"
] | 5 | 2019-06-26T10:29:01.000Z | 2020-09-16T06:03:13.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
class CNNContextClassifier(nn.Module):
    """Scores (context, ending) pairs with two depthwise 1-D CNNs.

    A context sequence and candidate endings are each embedded, convolved,
    and max-pooled over time; the element-wise product of the two pooled
    vectors feeds a linear layer emitting one score per ending.
    """
    def __init__(self, vocab_size, embedding_dim, hidden_dim,
                 filter_size, dropout_rate, embed_mat=None, fix_embeddings=False):
        # NOTE(review): hidden_dim is stored but never used by any layer.
        super(CNNContextClassifier, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        if embed_mat is not None:
            # Initialize from a pretrained embedding matrix, optionally frozen.
            self.word_embeds.weight.data = embed_mat
            if fix_embeddings:
                self.word_embeds.weight.requires_grad=False
        # Depthwise (groups == channels) 1-D convolutions over the time axis.
        self.context_conv = nn.Conv1d(self.embedding_dim, self.embedding_dim,
            filter_size, stride=1, padding=int((filter_size-1)/2),
            groups=self.embedding_dim) # else groups=1
        self.ending_conv = nn.Conv1d(self.embedding_dim, self.embedding_dim,
            filter_size, stride=1, padding=int((filter_size-1)/2),
            groups=self.embedding_dim) # else groups=1
        self.fc = nn.Linear(self.embedding_dim, 1)
        self.drop = nn.Dropout(dropout_rate)
    def embed_seq(self, vec):
        """Embed a [length, batch] index tensor and move channels first."""
        vec1 = self.word_embeds(vec.transpose(0, 1).contiguous())
        vec_tr = vec1.transpose(1, 2).contiguous()
        return vec_tr # dim [batch_size, embed_dim, length]
    # Input dimensions:
    #   context: Tensor dim [seq_len, batch_size].
    #   endings: tuple of Tensors -
    #            (dim [end_seq_len*, batch_size or num_endings] - endings,
    #             dim [batch_size or num_endings] - batch lengths).
    # Training: num_endings = 1; decoding: batch_size = 1.
    def forward(self, context, endings, itos=None):
        """Return one unnormalized score per ending (1-D tensor)."""
        ends = endings[0]
        ends_ls = endings[1]  # lengths; unused here — TODO confirm intent
        cont_seq_len, batch_size = context.size()
        end_seq_len = ends.size()[0]
        end = ends.view(end_seq_len, -1)
        end_batch_size = end.size()[1]
        # Decode mode: a single context scored against several endings.
        decode_mode = (batch_size == 1 and end_batch_size > 1)
        if not decode_mode:
            assert batch_size == end_batch_size
        maxpool = nn.MaxPool1d(cont_seq_len) # define layer for context length
        # NOTE(review): no ReLU on the context branch, unlike the ending
        # branch below — confirm the asymmetry is intended.
        context_convol = self.context_conv(self.embed_seq(context))
        context_pooled = maxpool(context_convol).view(batch_size, self.embedding_dim)
        maxpool_end = nn.MaxPool1d(end_seq_len)
        end_conv = F.relu(self.ending_conv(self.embed_seq(end)))
        end_pooled = maxpool_end(end_conv).view(end_batch_size, self.embedding_dim)
        if decode_mode:
            # Broadcast the single context vector across all endings.
            context_pooled = context_pooled.expand(end_batch_size, self.embedding_dim).contiguous()
        pooled = context_pooled * end_pooled
        dropped = self.drop(pooled)
        final = self.fc(dropped).view(-1)
        return final
| 38.689189 | 99 | 0.654907 |
73eb5d9f60b3e2efb39a861f0850c7a4cbdfd3b9 | 273 | py | Python | countdown.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | [
"MIT"
] | null | null | null | countdown.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | [
"MIT"
] | null | null | null | countdown.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | [
"MIT"
] | null | null | null | def countdown_for(start=10):
for i in reversed(range(1, start + 1)):
print(i)
print('time is up')
def countdown_recursive(start=10):
    """Recursively print start, start-1, ..., 1, then 'time is up'."""
    if start <= 0:
        print('time is up')
        return
    print(start)
    countdown_recursive(start - 1)
pass | 21 | 43 | 0.589744 |
73eb66bcf44da862411b6f08b72de56709543835 | 888 | py | Python | Leetcode/Majority Element I/boyer-moore-majority-vote-1.py | vedant-jad99/GeeksForGeeks-DSA-Workshop-Complete-Codes | 35cee8317c05b36864a992789c741554205b3919 | [
"MIT"
] | 1 | 2021-02-11T14:54:34.000Z | 2021-02-11T14:54:34.000Z | Leetcode/Majority Element I/boyer-moore-majority-vote-1.py | vedant-jad99/GeeksForGeeks-DSA-Workshop-Complete-Codes | 35cee8317c05b36864a992789c741554205b3919 | [
"MIT"
] | null | null | null | Leetcode/Majority Element I/boyer-moore-majority-vote-1.py | vedant-jad99/GeeksForGeeks-DSA-Workshop-Complete-Codes | 35cee8317c05b36864a992789c741554205b3919 | [
"MIT"
] | null | null | null | '''
Given an array nums of size n, return the majority element.
The majority element is the element that appears more than ⌊n / 2⌋ times.You may assume that the majority element always exists in the array.
Example:
Input - [2,2,1,1,1,2,2]
Output - 2
Explanation - 2 occurs more than 3 times
Constraints:
Time complexity - O(n)
Space complexity - O(1)
n == nums.length
1 <= n <= 5 * 10^4
-2^31 <= nums[i] <= 2^31 - 1
'''
def majorityElement(nums):
count = 0
m = 0
for i in nums:
if count == 0:
m = i
count += 1
elif m == i:
count += 1
else:
count -= 1
return m
if __name__ == "__main__":
nums = input()
nums = [int(i.strip()) for i in nums[1:-1].split(',')]
print(majorityElement(nums))
| 24 | 141 | 0.513514 |
73ec05a726706dd9c0c3e74d677862b0366cb38b | 997 | py | Python | aamm/__init__.py | rshipp/appassure-mount-manager | 8caace9e023f2d8f2ceaa48153410ece07958fd8 | [
"BSD-3-Clause"
] | null | null | null | aamm/__init__.py | rshipp/appassure-mount-manager | 8caace9e023f2d8f2ceaa48153410ece07958fd8 | [
"BSD-3-Clause"
] | null | null | null | aamm/__init__.py | rshipp/appassure-mount-manager | 8caace9e023f2d8f2ceaa48153410ece07958fd8 | [
"BSD-3-Clause"
] | null | null | null | from pyramid.config import Configurator
from pyramid.renderers import JSON
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.include('pyramid_chameleon')
config.add_renderer('prettyjson', JSON(indent=4))
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('home', '/')
config.add_route('api', '/api')
config.add_route('machine_api', '/api/{machine}')
config.add_route('task_api', '/api/tasks/{task_id}')
config.add_route('machine_view', '/{machine}/{machine_name}')
config.add_route('mount_do',
'/{machine}/{machine_name}/{point_id}/{volume_ids}')
config.add_route('dismount_do', '/{machine}/{machine_name}/dismount')
config.add_view('aamm.views.notfound',
renderer='aamm:templates/404.pt',
context='pyramid.exceptions.NotFound')
config.scan()
return config.make_wsgi_app()
| 39.88 | 73 | 0.687061 |
73ec3c2688f32ea255f81d80547bee80fdfb91a2 | 2,985 | py | Python | loaner/web_app/backend/models/bigquery_row_model_test.py | riking/loaner | c131ee3ed543c018a0e039ae437fb8e83a912177 | [
"Apache-2.0"
] | null | null | null | loaner/web_app/backend/models/bigquery_row_model_test.py | riking/loaner | c131ee3ed543c018a0e039ae437fb8e83a912177 | [
"Apache-2.0"
] | null | null | null | loaner/web_app/backend/models/bigquery_row_model_test.py | riking/loaner | c131ee3ed543c018a0e039ae437fb8e83a912177 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.models.bigquery_row_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import mock
from loaner.web_app.backend.models import bigquery_row_model
from loaner.web_app.backend.models import shelf_model
from loaner.web_app.backend.testing import loanertest
class BigQueryRowModelTest(loanertest.TestCase):
"""Tests for BigQueryModel class."""
def setUp(self):
super(BigQueryRowModelTest, self).setUp()
test_shelf = shelf_model.Shelf(
friendly_name='Test', location='Here', capacity=16)
test_shelf.put()
self.test_shelf = test_shelf
mock_bigquery = mock.patch.object(
bigquery_row_model, 'bigquery', autospec=True)
self.addCleanup(mock_bigquery.stop)
self.mock_bigquery = mock_bigquery.start()
self.mock_bigquery_client = mock.Mock()
self.mock_bigquery.BigQueryClient.return_value = self.mock_bigquery_client
def test_add(self):
test_row = bigquery_row_model.BigQueryRow.add(
self.test_shelf, datetime.datetime.utcnow(),
'test@{}'.format(loanertest.USER_DOMAIN),
'test', 'This is a test')
retrieved_row = test_row.key.get()
self.assertEqual(retrieved_row.ndb_key, self.test_shelf.key)
def test_fetch_unstreamed_rows(self):
test_row = bigquery_row_model.BigQueryRow.add(
self.test_shelf, datetime.datetime.utcnow(),
'test@{}'.format(loanertest.USER_DOMAIN),
'test', 'This is a test')
self.assertLen(bigquery_row_model.BigQueryRow.fetch_unstreamed_rows(), 1)
test_row.streamed = True
test_row.put()
self.assertLen(bigquery_row_model.BigQueryRow.fetch_unstreamed_rows(), 0)
def test_stream(self):
test_row = bigquery_row_model.BigQueryRow.add(
self.test_shelf, datetime.datetime.utcnow(),
'test@{}'.format(loanertest.USER_DOMAIN),
'test', 'This is a test')
test_row_dict = test_row.to_json_dict()
expected_bq_row = (
test_row_dict['ndb_key'], test_row_dict['timestamp'],
'test@{}'.format(loanertest.USER_DOMAIN), 'test', 'This is a test',
test_row_dict['entity'])
test_row.stream()
self.mock_bigquery_client.stream_row.assert_called_once_with(
'Shelf', expected_bq_row)
self.assertTrue(test_row.streamed)
if __name__ == '__main__':
loanertest.main()
| 33.920455 | 78 | 0.733333 |
73eca54320938bf572f6faa6555e7aee7bb7a770 | 5,201 | py | Python | server/ldapauth.py | scieloorg/graph-data-experiment | fc108de204dfdfb48a9b32eff79476311eeb2c19 | [
"BSD-2-Clause"
] | 1 | 2019-04-18T18:48:25.000Z | 2019-04-18T18:48:25.000Z | server/ldapauth.py | scieloorg/graph-data-experiment | fc108de204dfdfb48a9b32eff79476311eeb2c19 | [
"BSD-2-Clause"
] | 6 | 2021-05-10T16:10:56.000Z | 2022-02-26T18:18:18.000Z | server/ldapauth.py | scieloorg/graph-data-experiment | fc108de204dfdfb48a9b32eff79476311eeb2c19 | [
"BSD-2-Clause"
] | null | null | null | from contextlib import asynccontextmanager
import re
from urllib.parse import parse_qs, unquote, urlparse
import bonsai
LDAP_DEFAULT_USER_FIELD = "sAMAccountName"
LDAP_DEFAULT_SEARCH_TEMPLATE = "(&(objectClass=person)({user_field}={0}))"
# Avoid bug described in https://github.com/noirello/bonsai/issues/25
bonsai.set_connect_async(False)
def ldap_escape_dn(value):
"""Escape a value in a distinguished name
to perform an LDAP bind (RFC4514)."""
return re.sub(r'[,\\\0#+<>;"=]|^ | $', r"\\\g<0>", value)
def ldap_escape_query(value):
"""Escape a value in an LDAP search query string (RFC4515)."""
return re.sub(r"[*\\\0)(]", r"\\\g<0>", value)
class LDAPError(Exception): pass
class LDAPUserNotFound(LDAPError): pass
class LDAPBindError(LDAPError): pass
class LDAPInvalidCredentials(LDAPBindError): pass
class LDAPInvalidAdminCredentials(LDAPBindError): pass
class LDAPAuth:
"""LDAP connection authenticator.
The DSN string should have this format::
ldaps://<DN>:<PASS>@<HOST>/<SEARCH_DN>?<QUERY>#<SEARCH_TEMPLATE>
Given that query string ``<QUERY>`` is like:
user_field=<USER_FIELD>
Where everything else that looks like XML tags
are fields to be replaced.
The distinguished name ``<DN>`` and password ``<PASS>``
are the ones of an "administrator",
required in order to search a user distinguished name
from its "identity" filled in a ``<USER_FIELD>`` in LDAP.
The ``<HOST>`` is simply the host name for the connection,
which will happen in the port 636, with SSL.
To avoid SSL,
you can use ``ldap'' as the scheme/protocol instead of ``ldaps'',
but that's not recommended.
The default ``<USER_FIELD>`` is ``{LDAP_DEFAULT_USER_FIELD}``.
It's used by the default ``<SEARCH_TEMPLATE>'',
``{LDAP_DEFAULT_SEARCH_TEMPLATE}''.
This template, if customized,
can access any data in the query string.
"""
def __init__(self, dsn):
parsed = urlparse(dsn)
self.url = f"{parsed.scheme}://{parsed.hostname}"
self.search_dn = unquote(parsed.path)[1:] # Strip leading "/"
self.admin_dn = unquote(parsed.username)
self.admin_pass = unquote(parsed.password)
qs = parse_qs(parsed.query, keep_blank_values=True)
self.query_dict = {
"user_field": LDAP_DEFAULT_USER_FIELD,
**{k: v[0] for k, v in qs.items()},
}
self.search_template = unquote(parsed.fragment) \
or LDAP_DEFAULT_SEARCH_TEMPLATE
@asynccontextmanager
async def bind(self, dn, password):
"""Asynchronous context manager for the LDAP BIND operation,
yielding the open connection (``bonsai.Connection`` instance)
or raising an ``LDAPInvalidCredentials``.
"""
client = bonsai.LDAPClient(self.url)
client.set_cert_policy("allow") # TODO: Add certificate
client.set_credentials("SIMPLE", user=dn, password=password)
try:
async with client.connect(is_async=True) as conn:
yield conn
except bonsai.AuthenticationError as exc:
raise LDAPInvalidCredentials from exc
async def get_user_data(self, user, *, attrs=("dn",)):
"""Get the user data in LDAP using the admin credentials.
The output is a ``bonsai.LDAPEntry`` object, whose keys
are ``"dn"`` and all attributes in the ``attrs`` iterable
(set ``attrs=[]`` or ``None`` to get all non-empty attributes).
This method might raise ``LDAPUserNotFound``
or ``LDAPInvalidAdminCredentials``.
"""
try:
async with self.bind(self.admin_dn, self.admin_pass) as conn:
search_result = await conn.search(
self.search_dn,
bonsai.LDAPSearchScope.ONELEVEL,
self.search_template.format(ldap_escape_query(user),
**self.query_dict),
attrlist=None if attrs is None else list(attrs),
)
except LDAPInvalidCredentials as exc:
raise LDAPInvalidAdminCredentials from exc.__cause__
if not search_result:
raise LDAPUserNotFound
return search_result[0]
async def authenticate(self, user, password, **kwargs):
"""Authenticate the user in LDAP returning his/her/its data
as a ``bonsai.LDAPEntry`` object (which inherits from dict).
See the ``get_user_data`` method
for more information about the parameters.
Raises
------
LDAPUserNotFound
The search performed with the administrator account
can't find the user.
LDAPInvalidCredentials
The user was found, but the password is wrong.
LDAPInvalidAdminCredentials
No user search was performed, as the administrator account
DN and/or password is wrong.
"""
user_data = await self.get_user_data(user, **kwargs)
dn = str(user_data["dn"])
async with self.bind(dn, password):
return user_data
LDAPAuth.__doc__ = LDAPAuth.__doc__.format(**globals())
| 37.15 | 74 | 0.6393 |
73ecd779bfd969453218564c26e0f2e52d929575 | 2,860 | py | Python | cassandra_nodetool/tests/conftest.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2020-08-08T02:01:01.000Z | 2020-08-08T02:01:01.000Z | cassandra_nodetool/tests/conftest.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2018-08-15T05:50:17.000Z | 2018-08-15T05:50:17.000Z | cassandra_nodetool/tests/conftest.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2019-03-06T14:30:52.000Z | 2019-03-06T14:30:52.000Z | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import subprocess
import stat
import os
import logging
import pytest
import time
import common
log = logging.getLogger(__file__)
def wait_on_docker_logs(container_name, max_wait, sentences):
args = [
'docker',
'logs',
container_name
]
log.info("Waiting for {} to come up".format(container_name))
for _ in range(max_wait):
out = subprocess.check_output(args)
if any(s in out for s in sentences):
log.info('{} is up!'.format(container_name))
return True
time.sleep(1)
log.info(out)
return False
def get_container_ip(container_id_or_name):
"""
Get a docker container's IP address from its id or name
"""
args = [
'docker', 'inspect',
'-f', '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}', container_id_or_name
]
return subprocess.check_output(args).strip()
@pytest.fixture(scope="session")
def cassandra_cluster():
"""
Start the cassandra cluster with required configuration
"""
env = os.environ
env['CONTAINER_PORT'] = common.PORT
# We need to restrict permission on the password file
os.chmod(os.path.join(common.HERE, 'compose', 'jmxremote.password'), stat.S_IRUSR)
docker_compose_args = [
"docker-compose",
"-f", os.path.join(common.HERE, 'compose', 'docker-compose.yaml')
]
subprocess.check_call(docker_compose_args + ["up", "-d", common.CASSANDRA_CONTAINER_NAME])
# wait for the cluster to be up before yielding
if not wait_on_docker_logs(
common.CASSANDRA_CONTAINER_NAME,
20,
['Listening for thrift clients', "Created default superuser role 'cassandra'"]
):
raise Exception("Cassandra cluster dd-test-cassandra boot timed out!")
cassandra_seed = get_container_ip("{}".format(common.CASSANDRA_CONTAINER_NAME))
env['CASSANDRA_SEEDS'] = cassandra_seed
subprocess.check_call(docker_compose_args + ["up", "-d", common.CASSANDRA_CONTAINER_NAME_2])
if not wait_on_docker_logs(
common.CASSANDRA_CONTAINER_NAME_2,
50,
['Listening for thrift clients', 'Not starting RPC server as requested']
):
raise Exception("Cassandra cluster {} boot timed out!".format(common.CASSANDRA_CONTAINER_NAME_2))
subprocess.check_call([
"docker",
"exec", common.CASSANDRA_CONTAINER_NAME,
"cqlsh",
"-e", "CREATE KEYSPACE test WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor':2}"
])
yield
subprocess.check_call(docker_compose_args + ["down"])
@pytest.fixture
def aggregator():
from datadog_checks.stubs import aggregator
aggregator.reset()
return aggregator
| 28.888889 | 105 | 0.664685 |
73ecf600501d86d925a898524b8864a671472d55 | 1,056 | py | Python | utils.py | FinchZHU/ultra-thin-PRM | 1cdfd1c521f4420164ea55ff8f940fa6d29eb7ac | [
"MIT"
] | 58 | 2019-07-26T17:40:25.000Z | 2021-08-13T02:59:23.000Z | utils.py | FinchZHU/ultra-thin-PRM | 1cdfd1c521f4420164ea55ff8f940fa6d29eb7ac | [
"MIT"
] | 11 | 2019-07-29T16:10:02.000Z | 2021-06-05T13:52:50.000Z | utils.py | FinchZHU/ultra-thin-PRM | 1cdfd1c521f4420164ea55ff8f940fa6d29eb7ac | [
"MIT"
] | 19 | 2019-07-26T17:39:57.000Z | 2021-08-16T07:36:21.000Z | import base64
# from typing import Tuple, List, Union, Dict, Iterable
import numpy as np
# import torch.nn as nn
def rle_encode(mask: np.ndarray) -> dict:
"""Perform Run-Length Encoding (RLE) on a binary mask.
"""
assert mask.dtype == bool and mask.ndim == 2, 'RLE encoding requires a binary mask (dtype=bool).'
pixels = mask.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return dict(data=base64.b64encode(runs.astype(np.uint32).tobytes()).decode('utf-8'), shape=mask.shape)
def rle_decode(rle: dict) -> np.ndarray:
"""Decode a Run-Length Encoding (RLE).
"""
runs = np.frombuffer(base64.b64decode(rle['data']), np.uint32)
shape = rle['shape']
starts, lengths = [np.asarray(x, dtype=int) for x in (runs[0:][::2], runs[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape)
| 33 | 106 | 0.624053 |
73ed04b840b231e10b628a0d184724b873f5d43f | 851 | py | Python | client/verta/verta/_swagger/_public/modeldb/model/ModeldbUpdateExperimentNameOrDescription.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 835 | 2017-02-08T20:14:24.000Z | 2020-03-12T17:37:49.000Z | client/verta/verta/_swagger/_public/modeldb/model/ModeldbUpdateExperimentNameOrDescription.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 651 | 2019-04-18T12:55:07.000Z | 2022-03-31T23:45:09.000Z | client/verta/verta/_swagger/_public/modeldb/model/ModeldbUpdateExperimentNameOrDescription.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 170 | 2017-02-13T14:49:22.000Z | 2020-02-19T17:59:12.000Z | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbUpdateExperimentNameOrDescription(BaseType):
def __init__(self, id=None, name=None, description=None):
required = {
"id": False,
"name": False,
"description": False,
}
self.id = id
self.name = name
self.description = description
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
tmp = d.get('id', None)
if tmp is not None:
d['id'] = tmp
tmp = d.get('name', None)
if tmp is not None:
d['name'] = tmp
tmp = d.get('description', None)
if tmp is not None:
d['description'] = tmp
return ModeldbUpdateExperimentNameOrDescription(**d)
| 23.638889 | 62 | 0.618096 |
73ed1c00261ef469147298dfc8130b56df8ae2c4 | 1,090 | py | Python | experiment_process.py | naummo/swarm_maze_opencl_solver | 1047e1293e90f484ccc4ff77cfe61196fb7cbbc6 | [
"MIT"
] | null | null | null | experiment_process.py | naummo/swarm_maze_opencl_solver | 1047e1293e90f484ccc4ff77cfe61196fb7cbbc6 | [
"MIT"
] | null | null | null | experiment_process.py | naummo/swarm_maze_opencl_solver | 1047e1293e90f484ccc4ff77cfe61196fb7cbbc6 | [
"MIT"
] | null | null | null | """
experiment_process.py contains some post-processing functionality.
Not used anymore.
"""
import csv
import os
import numpy as np
import configs as cfg
if not os.path.exists(cfg.reporting_dir):
os.makedirs(cfg.reporting_dir)
# Collisions
results = [0 for i in range(cfg.seconds)]
for filename in os.listdir(cfg.reporting_dir):
if "collisions.csv" in filename:
csvfile = open(os.path.join(cfg.reporting_dir, filename))
csvreader = csv.reader(csvfile, delimiter=',', lineterminator='\n',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in csvreader:
for value in row:
if value != '':
results[int(np.trunc(float(value) / cfg.framespersecond))] += 1
csvfile.close()
print(results)
csvfile = open(os.path.join(".", "collisions_total.csv"), 'w')
csvwriter = csv.writer(csvfile, delimiter=',', lineterminator='\n',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow([(str(val) if val != 0 else "") for val in results])
csvfile.close()
| 32.058824 | 83 | 0.641284 |
73ed4f710a7fb8c83b94a5edd8eddbe280fd00ea | 2,703 | py | Python | src/onecontainer_cloud_tool/utils.py | intel/oneContainer-Cloud-Tool | 843fd4f6aa2e168ac7a7b544e05b92a7495e1121 | [
"BSD-3-Clause"
] | null | null | null | src/onecontainer_cloud_tool/utils.py | intel/oneContainer-Cloud-Tool | 843fd4f6aa2e168ac7a7b544e05b92a7495e1121 | [
"BSD-3-Clause"
] | null | null | null | src/onecontainer_cloud_tool/utils.py | intel/oneContainer-Cloud-Tool | 843fd4f6aa2e168ac7a7b544e05b92a7495e1121 | [
"BSD-3-Clause"
] | 3 | 2021-01-04T10:29:16.000Z | 2022-02-19T00:07:15.000Z | """common utilities."""
import datetime
import math
from pathlib import Path
import uuid
import os
import sys
from Cryptodome.PublicKey import RSA
from onecontainer_cloud_tool.logger import logger
def timestamp():
"""timestamp."""
return math.floor(datetime.datetime.now().timestamp())
def uuid_str():
"""get a uuid string."""
str(uuid.uuid4())
def isfile(file_str):
"""check if file exists."""
return Path(file_str).is_file()
def check_config_exists(config):
"""check if config file exists."""
if not isfile(config.CONFIG_FILE):
raise FileNotFoundError
def remove_config_if_empty(ini_conf, config):
try:
if(len(ini_conf.sections())==0):
os.remove(config.CONFIG_FILE)
return True
except FileNotFoundError:
logger.warning("config file not removed, file not found")
return False
class SSHkeys:
"""generate ephemerial private and public keys."""
def __init__(self):
self.private_key_path = Path(Path.home().resolve() / ".ssh"/ "occt_private_key.pem")
self.private_key = None
self.public_key = None
if isfile(self.private_key_path):
self.read_keys()
else:
self.generate_ssh_keys()
def read_keys(self):
"""read keys from file."""
try:
with open(self.private_key_path, "r") as fh:
self.private_key = fh.read()
self.set_public_key()
except IOError as err:
logger.error(err)
def generate_ssh_keys(self):
"""generate new ssh keys."""
key_pair = RSA.generate(4096)
try:
# write key to current directory the user is in
with open(self.private_key_path, "w") as fh:
private_key = key_pair.export_key().decode()
fh.write(private_key)
logger.debug(f"private_key.pem written to {self.private_key_path}")
self.private_key = private_key
self.set_public_key()
os.chmod(self.private_key_path, 0o600)
except IOError as err:
logger.error(err)
def delete_ssh_keys(self):
"""delete ssh keys used to access resource."""
logger.debug(f"remove ssh keys. in path: {self.private_key_path}")
os.remove(self.private_key_path)
def set_public_key(self):
if self.private_key is not None:
key_pair = RSA.import_key(self.private_key)
self.public_key = (
key_pair.public_key().export_key(format="OpenSSH").decode()
)
else:
logger.error("private key not found, exiting...")
sys.exit(1)
| 28.452632 | 92 | 0.610803 |
73ed7a4f9ecbe2874d723c546ddf57c5209ce152 | 9,726 | py | Python | benchmark/validate_on_lfw.py | jw-pyo/coral_utils | 715b38aacf9563fd45366f46f04f27658680852e | [
"MIT"
] | 2 | 2020-01-24T16:51:40.000Z | 2020-01-24T16:51:57.000Z | benchmark/validate_on_lfw.py | jw-pyo/coral_utils | 715b38aacf9563fd45366f46f04f27658680852e | [
"MIT"
] | null | null | null | benchmark/validate_on_lfw.py | jw-pyo/coral_utils | 715b38aacf9563fd45366f46f04f27658680852e | [
"MIT"
] | null | null | null | """Validate a face recognizer on the "Labeled Faces in the Wild" dataset (http://vis-www.cs.umass.edu/lfw/).
Embeddings are calculated using the pairs from http://vis-www.cs.umass.edu/lfw/pairs.txt and the ROC curve
is calculated and plotted. Both the model metagraph and the model parameters need to exist
in the same directory, and the metagraph should have the extension '.meta'.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import tensorflow as tf
import numpy as np
import argparse
#import facenet
#import lfw
import os
import sys
from PIL import Image
import ast
#from tensorflow.python.ops import data_flow_ops
"""
# scipy module cannot be surpported
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate
"""
from custom.facenet_tpu import FacenetEngine
from benchmark import lfw
RANDOM_ROTATE = 1
RANDOM_CROP = 2
RANDOM_FLIP = 4
FIXED_STANDARDIZATION = 8
FLIP = 16
def main(args):
# Read the file containing the pairs used for testing
print(args)
pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
# Get the paths for the corresponding images
paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
#image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
#labels_placeholder = tf.placeholder(tf.int64, shape=(None,1), name='labels')
batch_size = args.lfw_batch_size
#control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
#phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
nrof_preprocess_threads = 4
image_size = (args.image_size, args.image_size)
# Load the model
facenet = FacenetEngine(args.model, None)
# Get output tensor
#embedding is the list of embedding vector for training images
#TODO: change the dummy vector into real embedding
#embeddings = [facenet.GetEmbeddingVector(Image.open(path)) for path in paths]
embeddings=None
if args.generate_emb_file:
emb_filename = "emb_{}.txt".format(args.lfw_dir.split("/")[-2])
print("emb_file is {}".format(emb_filename))
emb_file = open(emb_filename, "a")
emb_file.write("[")
for i, path in enumerate(paths):
inf_time,_,emb = facenet.GetEmbeddingVector(Image.open(path))
emb_file.write("[")
for j, elem in enumerate(emb):
if j == len(emb) - 1:
emb_file.write(str(elem))
else:
emb_file.write(str(elem)+", ")
if i == len(paths) - 1:
emb_file.write(str(emb)+"]\n")
else:
emb_file.write(str(emb)+"],\n")
print("time, index: ",str(inf_time)+"ms", str(i))
emb_file.write("]")
emb_file.close()
emb_file = open(emb_filename, "r")
embeddings = ast.literal_eval(emb_file.read())
emb_file.close()
else:
embeddings = []
for i, path in enumerate(paths):
inf_time,_,emb = facenet.GetEmbeddingVector(Image.open(path))
embeddings.append(emb)
print("time, index: ",str(inf_time)+"ms", str(i))
#embeddings = [np.zeros(128, ) for _ in paths]
evaluate(facenet, embeddings, paths, actual_issame, batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean, args.use_flipped_images, args.use_fixed_image_standardization)
def evaluate(engine, embeddings, image_paths, actual_issame, batch_size, nrof_folds, distance_metric, subtract_mean, use_flipped_images, use_fixed_image_standardization):
"""
#TODO:cannot work Flip mode now
"""
# Run forward pass to calculate embeddings
print('Runnning forward pass on LFW images')
# Enqueue one epoch of image paths and labels
nrof_embeddings = len(actual_issame)*2 # nrof_pairs * nrof_images_per_pair
nrof_flips = 2 if use_flipped_images else 1
nrof_images = nrof_embeddings * nrof_flips
labels_array = np.expand_dims(np.arange(0,nrof_images),1)
image_paths_array = np.expand_dims(np.repeat(np.array(image_paths),nrof_flips),1)
control_array = np.zeros_like(labels_array, np.int64)
if use_fixed_image_standardization:
control_array += np.ones_like(labels_array)*FIXED_STANDARDIZATION
if use_flipped_images:
# Flip every second image
control_array += (labels_array % 2)*FLIP
##sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
embedding_size = int(engine.get_all_output_tensors_sizes()[0]) # 128 or 512
assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
nrof_batches = nrof_images // batch_size
emb_array = np.zeros((nrof_images, embedding_size))
lab_array = np.zeros((nrof_images, ))
for i in range(nrof_batches):
##feed_dict = {phase_train_placeholder:False, batch_size_placeholder:batch_size}
emb, lab = np.asarray(embeddings[i*batch_size:(i+1)*batch_size]), np.asarray(labels_array[i*batch_size:(i+1)*batch_size])
lab_array[lab] = lab
emb_array[lab, :] = emb.reshape([emb.shape[0], 1, emb.shape[1]])
if i % 10 == 9:
print('.', end='')
sys.stdout.flush()
print('')
embeddings = np.zeros((nrof_embeddings, embedding_size*nrof_flips))
if use_flipped_images:
# Concatenate embeddings for flipped and non flipped version of the images
embeddings[:,:embedding_size] = emb_array[0::2,:]
embeddings[:,embedding_size:] = emb_array[1::2,:]
else:
embeddings = emb_array
assert np.array_equal(lab_array, np.arange(nrof_images))==True, 'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
print("embeddings shape: {}".format(embeddings.shape))
tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds, distance_metric=distance_metric, subtract_mean=subtract_mean)
print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
"""
Using scipy: cannot be supported
auc = metrics.auc(fpr, tpr)
print('Area Under Curve (AUC): %1.3f' % auc)
eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
print('Equal Error Rate (EER): %1.3f' % eer)
"""
def parse_arguments(argv):
parser = argparse.ArgumentParser()
#==================
#
# ==NON-DEFAULT====
#
#==================
parser.add_argument('--lfw_dir', type=str,
help='Path to the data directory containing aligned LFW face patches.')
parser.add_argument('--model', type=str,
help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')
parser.add_argument('--lfw_batch_size', type=int,
help='Number of images to process in a batch in the LFW test set.', default=100)
parser.add_argument('--generate_emb_file',
help='True if you want to generate embedding file. ex) If you want to validate tflite model, you use this option to dump ev files from tflite model', type=bool)
#==================
#
# =====DEFAULT=====
#
#==================
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--lfw_pairs', type=str,
help='The file containing the pairs to use for validation.', default='pairs.txt')
parser.add_argument('--lfw_nrof_folds', type=int,
help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
parser.add_argument('--distance_metric', type=int,
help='Distance metric 0:euclidian, 1:cosine similarity.', default=0)
parser.add_argument('--use_flipped_images',
help='Concatenates embeddings for the image and its horizontally flipped counterpart.', action='store_true')
parser.add_argument('--subtract_mean',
help='Subtract feature mean before calculating distance.', action='store_true')
parser.add_argument('--use_fixed_image_standardization',
help='Performs fixed standardization of images.', action='store_true')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| 45.027778 | 193 | 0.687847 |
73ed96950f8b5821bfbbbb2adfda92d15f20964f | 8,600 | py | Python | lib/models/pose_mobilenetv2.py | CHUNYUWANG/imu-human-pose-pytorch | f4813336571789f46eabdfb520e7ed5b20ac04ea | [
"MIT"
] | 72 | 2020-03-26T13:26:39.000Z | 2022-03-16T08:45:34.000Z | lib/models/pose_mobilenetv2.py | zhezh/imu-human-pose-pytorch | f4813336571789f46eabdfb520e7ed5b20ac04ea | [
"MIT"
] | 10 | 2020-04-05T07:17:49.000Z | 2022-03-04T05:32:12.000Z | lib/models/pose_mobilenetv2.py | CHUNYUWANG/imu-human-pose-pytorch | f4813336571789f46eabdfb520e7ed5b20ac04ea | [
"MIT"
] | 13 | 2020-04-12T20:33:38.000Z | 2022-02-17T11:23:13.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import torch
import torch.nn as nn
import math
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def conv_3x3_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True))
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True))
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.identity = stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
nn.Conv2d(
hidden_dim,
hidden_dim,
3,
stride,
1,
groups=hidden_dim,
bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
nn.Conv2d(
hidden_dim,
hidden_dim,
3,
stride,
1,
groups=hidden_dim,
bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.identity:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, width_mult=1.):
super(MobileNetV2, self).__init__()
# setting of inverted residual blocks
self.cfgs = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
input_channel = int(32 * width_mult)
layers = [conv_3x3_bn(3, input_channel, 2)]
block = InvertedResidual
for t, c, n, s in self.cfgs:
output_channel = int(c * width_mult)
layers.append(block(input_channel, output_channel, s, t))
input_channel = output_channel
for i in range(1, n):
layers.append(block(input_channel, output_channel, 1, t))
input_channel = output_channel
self.features = nn.Sequential(*layers)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class PoseMobileNetV2(nn.Module):
def __init__(self, cfg, **kwargs):
self.inplanes = 320
self.deconv_with_bias = cfg.POSE_RESNET.DECONV_WITH_BIAS
super(PoseMobileNetV2, self).__init__()
self.mobilenetv2 = MobileNetV2()
self.deconv_layers = self._make_deconv_layer(
cfg.POSE_RESNET.NUM_DECONV_LAYERS,
cfg.POSE_RESNET.NUM_DECONV_FILTERS,
cfg.POSE_RESNET.NUM_DECONV_KERNELS,
)
self.final_layer = nn.Conv2d(
in_channels=cfg.POSE_RESNET.NUM_DECONV_FILTERS[-1],
out_channels=cfg.NETWORK.NUM_JOINTS,
kernel_size=cfg.POSE_RESNET.FINAL_CONV_KERNEL,
stride=1,
padding=1 if cfg.POSE_RESNET.FINAL_CONV_KERNEL == 3 else 0)
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
assert num_layers == len(num_filters), \
'ERROR: num_deconv_layers is different len(num_deconv_filters)'
assert num_layers == len(num_kernels), \
'ERROR: num_deconv_layers is different len(num_deconv_filters)'
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
layers.append(
nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias))
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.mobilenetv2(x)
x = self.deconv_layers(x)
x_final_feature = x
x = self.final_layer(x)
return x, x_final_feature
def init_weights(self, pretrained=''):
if os.path.isfile(pretrained):
pretrained_state_dict = torch.load(pretrained)
logger.info('=> loading pretrained model {}'.format(pretrained))
self.load_state_dict(pretrained_state_dict, strict=False)
logger.info('=> init deconv weights from normal distribution')
for name, m in self.deconv_layers.named_modules():
if isinstance(m, nn.ConvTranspose2d):
logger.info(
'=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
logger.info('=> init {}.weight as 1'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
logger.info('=> init final conv weights from normal distribution')
for m in self.final_layer.modules():
if isinstance(m, nn.Conv2d):
logger.info(
'=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
nn.init.constant_(m.bias, 0)
else:
logger.info('=> init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
def get_pose_net(cfg, is_train, **kwargs):
model = PoseMobileNetV2(cfg, **kwargs)
if is_train:
model.init_weights(cfg.NETWORK.PRETRAINED)
return model
| 34.95935 | 78 | 0.535116 |
73ede7b8b5373995fa8018c5efa802caa27e7384 | 87,582 | py | Python | tfoptflow/model_pwcnet.py | mf-zhang/tfoptflow | d27b249fcbae2d1f4373ef4ba16cb0c89890058e | [
"MIT"
] | null | null | null | tfoptflow/model_pwcnet.py | mf-zhang/tfoptflow | d27b249fcbae2d1f4373ef4ba16cb0c89890058e | [
"MIT"
] | null | null | null | tfoptflow/model_pwcnet.py | mf-zhang/tfoptflow | d27b249fcbae2d1f4373ef4ba16cb0c89890058e | [
"MIT"
] | null | null | null | """
model_pwcnet.py
PWC-Net model class.
Written by Phil Ferriere
Licensed under the MIT License (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function
import time
import datetime
import warnings
import numpy as np
import pandas as pd
import tensorflow as tf
from tqdm import trange
from tensorflow.contrib.mixed_precision import LossScaleOptimizer, FixedLossScaleManager
from model_base import ModelBase
from optflow import flow_write, flow_write_as_png, flow_mag_stats
from losses import pwcnet_loss
from logger import OptFlowTBLogger
from multi_gpus import assign_to_device, average_gradients
from core_warp import dense_image_warp
from core_costvol import cost_volume
from utils import tf_where
# Debug switch: when True, compare against the reference PWC-Net implementation
# (see the commented-out `from ref_model import PWCNet` import below) -- TODO confirm usage
_DEBUG_USE_REF_IMPL = False

# Default options for training from scratch (multiscale loss, S_long multistep schedule)
_DEFAULT_PWCNET_TRAIN_OPTIONS = {
    'verbose': False,  # print progress/config information while building and training
    'ckpt_dir': './ckpts_trained/',  # where training checkpoints are stored
    'max_to_keep': 10,  # max number of checkpoints kept on disk
    'x_dtype': tf.float32,  # image pairs input type
    'x_shape': [2, 384, 448, 3],  # image pairs input shape [2, H, W, 3]
    'y_dtype': tf.float32,  # u,v flows output type
    'y_shape': [384, 448, 2],  # u,v flows output shape [H, W, 2]
    'train_mode': 'train',  # in ['train', 'fine-tune']
    'adapt_info': None,  # if predicted flows are padded by the model, crop them back to this size
    'sparse_gt_flow': False,  # if gt flows are sparse (KITTI), only compute average EPE where gt flows aren't (0., 0.)
    # Logging/Snapshot params
    'display_step': 100,  # show progress every 100 training batches
    'snapshot_step': 1000,  # save trained model every 1000 training batches
    'val_step': 1000,  # test trained model on validation split every 1000 training batches
    'val_batch_size': -1,  # use -1 for the entire validation split, or set a number of val samples (0 disables it)
    # None or in ['top_flow', 'pyramid']; runs trained model on batch_size random val images, logs results
    'tb_val_imgs': 'pyramid',
    # None or in ['top_flow', 'pyramid']; runs trained model on batch_size random test images, logs results
    'tb_test_imgs': None,
    # Multi-GPU config
    # list devices on which to run the model's train ops (can be more than one GPU)
    'gpu_devices': ['/device:GPU:0', '/device:GPU:1'],
    # controller device to put the model's variables on (usually, /cpu:0 or /gpu:0 -> try both!)
    'controller': '/device:CPU:0',
    # Training config and hyper-params
    'use_tf_data': True,  # Set to True to get data from tf.data.Dataset; otherwise, use feed_dict with numpy
    'use_mixed_precision': False,  # Set to True to use mixed precision training (fp16 inputs)
    'loss_scaler': 128.,  # Loss scaler (only used in mixed precision training)
    'batch_size': 8,
    'lr_policy': 'multisteps',  # choose between None, 'multisteps', and 'cyclic'; adjust the max_steps below too
    # Multistep lr schedule (the S_long schedule from "FlowNet 2.0", halving lr at each boundary)
    'init_lr': 1e-04,  # initial learning rate
    'max_steps': 1200000,  # max number of training iterations (i.e., batches to run)
    'lr_boundaries': [400000, 600000, 800000, 1000000, 1200000],  # step schedule boundaries
    'lr_values': [0.0001, 5e-05, 2.5e-05, 1.25e-05, 6.25e-06, 3.125e-06],  # step schedule values
    # Cyclic lr schedule
    'cyclic_lr_max': 5e-04,  # max bound, anything higher will generate NaNs on `FlyingChairs+FlyingThings3DHalfRes` mix
    'cyclic_lr_base': 1e-05,  # min bound
    'cyclic_lr_stepsize': 20000,  # step schedule values
    # 'max_steps': 200000,  # max number of training iterations (shorter run used with the cyclic schedule)
    # Loss functions hyper-params
    'loss_fn': 'loss_multiscale',  # See 'Implementation details' on page 5 of ref PDF
    'alphas': [0.32, 0.08, 0.02, 0.01, 0.005, 0.0025],  # See 'Implementation details' on page 5 of ref PDF
    'gamma': 0.0004,  # See 'Implementation details' on page 5 of ref PDF
    'q': 1.,  # See 'Implementation details' on page 5 of ref PDF
    'epsilon': 0.,  # See 'Implementation details' on page 5 of ref PDF
    # Model hyper-params
    'pyr_lvls': 6,  # number of feature levels in the flow pyramid
    'flow_pred_lvl': 2,  # which level to upsample to generate the final optical flow prediction
    'search_range': 4,  # cost volume search range
    # if True, use model with dense connections (4705064 params w/o, 9374274 params with (no residual conn.))
    'use_dense_cx': False,
    # if True, use model with residual connections (4705064 params w/o, 6774064 params with (+2069000) (no dense conn.))
    'use_res_cx': False,
}
# Default options for fine-tuning a previously trained checkpoint (robust loss, S_fine multistep schedule)
_DEFAULT_PWCNET_FINETUNE_OPTIONS = {
    'verbose': False,  # print progress/config information while building and fine-tuning
    'ckpt_path': './ckpts_trained/pwcnet.ckpt',  # original checkpoint to finetune
    'ckpt_dir': './ckpts_finetuned/',  # where finetuning checkpoints are stored
    'max_to_keep': 10,  # max number of checkpoints kept on disk
    'x_dtype': tf.float32,  # image pairs input type
    'x_shape': [2, 384, 768, 3],  # image pairs input shape [2, H, W, 3]
    'y_dtype': tf.float32,  # u,v flows output type
    'y_shape': [384, 768, 2],  # u,v flows output shape [H, W, 2]
    'train_mode': 'fine-tune',  # in ['train', 'fine-tune']
    'adapt_info': None,  # if predicted flows are padded by the model, crop them back to this size
    'sparse_gt_flow': False,  # if gt flows are sparse (KITTI), only compute average EPE where gt flows aren't (0., 0.)
    # Logging/Snapshot params
    'display_step': 100,  # show progress every 100 training batches
    'snapshot_step': 1000,  # save trained model every 1000 training batches
    'val_step': 1000,  # test trained model on validation split every 1000 training batches
    'val_batch_size': -1,  # use -1 for the entire validation split, or set a number of val samples (0 disables it)
    'tb_val_imgs': 'top_flow',  # None, 'top_flow', or 'pyramid'; runs model on batch_size val images, logs results
    'tb_test_imgs': None,  # None, 'top_flow', or 'pyramid'; runs trained model on batch_size test images, logs results
    # Multi-GPU config
    # list devices on which to run the model's train ops (can be more than one GPU)
    'gpu_devices': ['/device:GPU:0', '/device:GPU:1'],
    # controller device to put the model's variables on (usually, /cpu:0 or /gpu:0 -> try both!)
    'controller': '/device:CPU:0',
    # Training config and hyper-params
    'use_tf_data': True,  # Set to True to get data from tf.data.Dataset; otherwise, use feed_dict with numpy
    'use_mixed_precision': False,  # Set to True to use mixed precision training (fp16 inputs)
    'loss_scaler': 128.,  # Loss scaler (only used in mixed precision training)
    'batch_size': 4,
    'lr_policy': 'multisteps',  # choose between None, 'multisteps', and 'cyclic'; adjust the max_steps below too
    # Multistep lr schedule (the S_fine schedule, halving lr at each boundary)
    'init_lr': 1e-05,  # initial learning rate
    'max_steps': 500000,  # max number of training iterations (i.e., batches to run)
    'lr_boundaries': [200000, 300000, 400000, 500000],  # step schedule boundaries
    'lr_values': [1e-05, 5e-06, 2.5e-06, 1.25e-06, 6.25e-07],  # step schedule values
    # Cyclic lr schedule
    'cyclic_lr_max': 2e-05,  # maximum bound
    'cyclic_lr_base': 1e-06,  # min bound
    'cyclic_lr_stepsize': 20000,  # step schedule values
    # 'max_steps': 200000,  # max number of training iterations (shorter run used with the cyclic schedule)
    # Loss functions hyper-params
    'loss_fn': 'loss_robust',  # 'loss_robust' doesn't really work; the loss goes down but the EPE doesn't
    'alphas': [0.32, 0.08, 0.02, 0.01, 0.005],  # See 'Implementation details' on page 5 of ref PDF
    'gamma': 0.0004,  # See 'Implementation details' on page 5 of ref PDF
    'q': 0.4,  # See 'Implementation details' on page 5 of ref PDF
    'epsilon': 0.01,  # See 'Implementation details' on page 5 of ref PDF
    # Model hyper-params
    'pyr_lvls': 6,  # number of feature levels in the flow pyramid
    'flow_pred_lvl': 2,  # which level to upsample to generate the final optical flow prediction
    'search_range': 4,  # cost volume search range
    # if True, use model with dense connections (4705064 params w/o, 9374274 params with (no residual conn.))
    'use_dense_cx': False,
    # if True, use model with residual connections (4705064 params w/o, 6774064 params with (+2069000) (no dense conn.))
    'use_res_cx': False,
}
# Default options for evaluating a trained checkpoint on a validation split (flexible input sizes)
_DEFAULT_PWCNET_VAL_OPTIONS = {
    'verbose': False,  # print progress/config information while building and evaluating
    'ckpt_path': './ckpts_trained/pwcnet.ckpt',  # trained checkpoint to evaluate
    'x_dtype': tf.float32,  # image pairs input type
    'x_shape': [2, None, None, 3],  # image pairs input shape [2, H, W, 3] (None -> any H, W)
    'y_dtype': tf.float32,  # u,v flows output type
    'y_shape': [None, None, 2],  # u,v flows output shape [H, W, 2] (None -> any H, W)
    'adapt_info': None,  # if predicted flows are padded by the model, crop them back to this size
    'sparse_gt_flow': False,  # if gt flows are sparse (KITTI), only compute average EPE where gt flows aren't (0., 0.)
    # Multi-GPU config
    # list devices on which to run the model's train ops (can be more than one GPU)
    'gpu_devices': ['/device:GPU:0', '/device:GPU:1'],
    # controller device to put the model's variables on (usually, /cpu:0 or /gpu:0 -> try both!)
    'controller': '/device:CPU:0',
    # Eval config and hyper-params
    'batch_size': 1,
    'use_tf_data': True,  # Set to True to get data from tf.data.Dataset; otherwise, use feed_dict with numpy
    'use_mixed_precision': False,  # Set to True to use fp16 inputs
    # Model hyper-params (must match the checkpoint being evaluated)
    'pyr_lvls': 6,  # number of feature levels in the flow pyramid
    'flow_pred_lvl': 2,  # which level to upsample to generate the final optical flow prediction
    'search_range': 4,  # cost volume search range
    # if True, use model with dense connections (4705064 params w/o, 9374274 params with (no residual conn.))
    'use_dense_cx': False,
    # if True, use model with residual connections (4705064 params w/o, 6774064 params with (+2069000) (no dense conn.))
    'use_res_cx': False,
}
# Default options for running inference with a trained checkpoint (no ground-truth flows needed)
_DEFAULT_PWCNET_TEST_OPTIONS = {
    'verbose': False,  # print progress/config information while building and predicting
    'ckpt_path': './ckpts_trained/pwcnet.ckpt',  # trained checkpoint to run inference with
    'x_dtype': tf.float32,  # image pairs input type
    'x_shape': [2, None, None, 3],  # image pairs input shape (None -> any H, W)
    'y_dtype': tf.float32,  # u,v flows output type
    'y_shape': [None, None, 2],  # u,v flows output shape (None -> any H, W)
    # Multi-GPU config
    # list devices on which to run the model's train ops (can be more than one GPU)
    'gpu_devices': ['/device:GPU:0', '/device:GPU:1'],
    # controller device to put the model's variables on (usually, /cpu:0 or /gpu:0 -> try both!)
    'controller': '/device:CPU:0',
    # Eval config and hyper-params
    'batch_size': 1,
    'use_tf_data': True,  # Set to True to get data from tf.data.Dataset; otherwise, use feed_dict with numpy
    'use_mixed_precision': False,  # Set to True to use fp16 inputs
    # Model hyper-params (must match the checkpoint being run)
    'pyr_lvls': 6,  # number of feature levels in the flow pyramid
    'flow_pred_lvl': 2,  # which level to upsample to generate the final optical flow prediction
    'search_range': 4,  # cost volume search range
    # if True, use model with dense connections (4705064 params w/o, 9374274 params with (no residual conn.))
    'use_dense_cx': False,
    # if True, use model with residual connections (4705064 params w/o, 6774064 params with (+2069000) (no dense conn.))
    'use_res_cx': False,
}
# from ref_model import PWCNet
class ModelPWCNet(ModelBase):
def __init__(self, name='pwcnet', mode='train', session=None, options=_DEFAULT_PWCNET_TEST_OPTIONS, dataset=None):
"""Initialize the ModelPWCNet object
Args:
name: Model name
mode: Possible values: 'train', 'val', 'test'
session: optional TF session
options: see _DEFAULT_PWCNET_TRAIN_OPTIONS comments
dataset: Dataset loader
Training Ref:
Per page 4 of paper, section "Training loss," the loss function used in regular training mode is the same as
the one used in Dosovitskiy et al's "FlowNet: Learning optical flow with convolutional networks" paper
(multiscale training loss). For fine-tuning, the loss function used is described at the top of page 5
(robust training loss).
Per page 5 of paper, section "Implementation details," the trade-off weight gamma in the regularization term
is usually set to 0.0004.
Per page 5 of paper, section "Implementation details," we first train the models using the FlyingChairs
dataset using the S<sub>long</sub> learning rate schedule introduced in E. Ilg et al.'s "FlowNet 2.0:
Evolution of optical flow estimation with deep networks", starting from 0.0001 and reducing the learning
rate by half at 0.4M, 0.6M, 0.8M, and 1M iterations. The data augmentation scheme is the same as in that
paper. We crop 448 × 384 patches during data augmentation and use a batch size of 8. We then fine-tune the
models on the FlyingThings3D dataset using the S<sub>fine</sub> schedule while excluding image pairs with
extreme motion (magnitude larger than 1000 pixels). The cropped image size is 768 × 384 and the batch size
is 4. Finally, we finetune the models using the Sintel and KITTI training set as detailed in section "4.1.
Main results".
"""
super().__init__(name, mode, session, options)
self.ds = dataset
# self.adapt_infos = []
# self.unique_y_shapes = []
###
# Model mgmt
###
def build_model(self):
"""Build model
Called by the base class when building the TF graph to setup the list of output tensors
"""
if self.opts['verbose']:
print("Building model...")
assert(self.num_gpus <= 1)
# Build the backbone neural nets and collect the output tensors
with tf.device(self.opts['controller']):
self.flow_pred_tnsr, self.flow_pyr_tnsr = self.nn(self.x_tnsr)
if self.opts['verbose']:
print("... model built.")
    def build_model_towers(self):
        """Build model towers. A tower is the name used to describe a copy of the model on a device.

        Called by the base class when building the TF graph to setup the list of output tensors.
        One replica of the network is built per device in opts['gpu_devices']; losses, metrics
        and gradients are computed per tower, then the gradients are averaged and applied on
        the controller device. Sets self.optim_op, self.loss_op, self.metric_op and (from
        tower 0) self.flow_pred_tnsr / self.flow_pyr_tnsr used for online evaluation.
        """
        if self.opts['verbose']:
            print("Building model towers...")
        # Setup a learning rate training schedule
        self.setup_lr_sched()
        # Instantiate an optimizer
        # see https://stackoverflow.com/questions/42064941/tensorflow-float16-support-is-broken
        # for float32 epsilon=1e-08, for float16 use epsilon=1e-4
        epsilon = 1e-08 if self.opts['use_mixed_precision'] is False else 1e-4
        assert (self.opts['train_mode'] in ['train', 'fine-tune'])
        if self.opts['loss_fn'] == 'loss_multiscale':
            self.optim = tf.train.AdamOptimizer(self.lr, epsilon=epsilon)
        else:
            self.optim = tf.train.ProximalGradientDescentOptimizer(self.lr)
        # Keep track of the gradients and losses per tower
        tower_grads, losses, metrics = [], [], []
        # Get the current variable scope so we can reuse all variables we need once we get
        # to the next iteration of the for loop below
        with tf.variable_scope(tf.get_variable_scope()) as outer_scope:
            for n, ops_device in enumerate(self.opts['gpu_devices']):
                print(f" Building tower_{n}...")
                # Use the assign_to_device function to ensure that variables are created on the controller.
                with tf.device(assign_to_device(ops_device, self.opts['controller'])), tf.name_scope(f'tower_{n}'):
                    # Get a slice of the input batch and groundtruth label
                    x_tnsr = self.x_tnsr[n * self.opts['batch_size']:(n + 1) * self.opts['batch_size'], :]
                    y_tnsr = self.y_tnsr[n * self.opts['batch_size']:(n + 1) * self.opts['batch_size'], :]
                    # Build the model for that slice
                    flow_pred_tnsr, flow_pyr_tnsr = self.nn(x_tnsr)
                    # The first tower is also the model we will use to perform online evaluation
                    if n == 0:
                        self.flow_pred_tnsr, self.flow_pyr_tnsr = flow_pred_tnsr, flow_pyr_tnsr
                    # Compute the loss for this tower, with regularization term if requested
                    loss_unreg = pwcnet_loss(y_tnsr, flow_pyr_tnsr, self.opts)
                    if self.opts['gamma'] == 0.:
                        loss = loss_unreg
                    else:
                        # L2 weight decay scaled by the trade-off weight gamma
                        loss_reg = self.opts['gamma'] * \
                            tf.reduce_sum([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
                        loss = loss_unreg + loss_reg
                    # Evaluate model performance on this tower (mean endpoint error)
                    metrics.append(tf.reduce_mean(tf.norm(y_tnsr - flow_pred_tnsr, ord=2, axis=3)))
                    # Compute the gradients for this tower, but don't apply them yet
                    with tf.name_scope("compute_gradients"):
                        # The function compute_gradients() returns a list of (gradient, variable) pairs
                        if self.opts['use_mixed_precision'] is True:
                            # Scale the loss up before computing fp16 gradients to avoid underflow
                            grads, vars = zip(*self.optim.compute_gradients(loss * self.opts['loss_scaler']))
                            # Return the gradients (now float32) to the correct exponent and keep them in check
                            grads = [grad / self.opts['loss_scaler'] for grad in grads]
                            grads, _ = tf.clip_by_global_norm(grads, 5.0)
                            tower_grads.append(zip(grads, vars))
                        else:
                            grad_and_vars = self.optim.compute_gradients(loss)
                            tower_grads.append(grad_and_vars)
                    losses.append(loss)
                # After the first iteration, we want to reuse the variables.
                outer_scope.reuse_variables()
                print(f" ...tower_{n} built.")
        # Apply the gradients on the controlling device
        with tf.name_scope("apply_gradients"), tf.device(self.opts['controller']):
            # Note that what we are doing here mathematically is equivalent to returning the average loss over the
            # towers and compute the gradients relative to that. Unfortunately, this would place all gradient
            # computations on one device, which is why we had to compute the gradients above per tower and need to
            # average them here. The function average_gradients() takes the list of (gradient, variable) lists
            # and turns it into a single (gradient, variables) list.
            avg_grads_op = average_gradients(tower_grads)
            self.optim_op = self.optim.apply_gradients(avg_grads_op, self.g_step_op)
        self.loss_op = tf.reduce_mean(losses)
        self.metric_op = tf.reduce_mean(metrics)
        if self.opts['verbose']:
            print("... model towers built.")
def set_output_tnsrs(self):
"""Initialize output tensors
"""
if self.mode in ['train_noval', 'train_with_val']:
# self.y_hat_train_tnsr = [self.loss_op, self.metric_op, self.optim_op, self.g_step_inc_op]
self.y_hat_train_tnsr = [self.loss_op, self.metric_op, self.optim_op]
if self.mode == 'train_with_val':
# In online evaluation mode, we only care about the average loss and metric for the batch:
self.y_hat_val_tnsr = [self.loss_op, self.metric_op]
if self.mode in ['val', 'val_notrain']:
# In offline evaluation mode, we only care about the individual predictions and metrics:
self.y_hat_val_tnsr = [self.flow_pred_tnsr, self.metric_op]
# if self.opts['sparse_gt_flow'] is True:
# # Find the location of the zerod-out flows in the gt
# zeros_loc = tf.logical_and(tf.equal(self.y_tnsr[:, :, :, 0], 0.0), tf.equal(self.y_tnsr[:, :, :, 1], 0.0))
# zeros_loc = tf.expand_dims(zeros_loc, -1)
#
# # Zero out flow predictions at the same location so we only compute the EPE at the sparse flow points
# sparse_flow_pred_tnsr = tf_where(zeros_loc, tf.zeros_like(self.flow_pred_tnsr), self.flow_pred_tnsr)
#
# self.y_hat_val_tnsr = [sparse_flow_pred_tnsr, self.metric_op]
self.y_hat_test_tnsr = [self.flow_pred_tnsr, self.flow_pyr_tnsr]
###
# Sample mgmt
###
def adapt_x(self, x):
"""Preprocess the input samples to adapt them to the network's requirements
Here, x, is the actual data, not the x TF tensor.
Args:
x: input samples in list[(2,H,W,3)] or (N,2,H,W,3) np array form
Returns:
Samples ready to be given to the network (w. same shape as x)
Also, return adaptation info in (N,2,H,W,3) format
"""
# Ensure we're dealing with RGB image pairs
assert (isinstance(x, np.ndarray) or isinstance(x, list))
if isinstance(x, np.ndarray):
assert (len(x.shape) == 5)
assert (x.shape[1] == 2 and x.shape[4] == 3)
else:
assert (len(x[0].shape) == 4)
assert (x[0].shape[0] == 2 or x[0].shape[3] == 3)
# Bring image range from 0..255 to 0..1 and use floats (also, list[(2,H,W,3)] -> (batch_size,2,H,W,3))
if self.opts['use_mixed_precision'] is True:
x_adapt = np.array(x, dtype=np.float16) if isinstance(x, list) else x.astype(np.float16)
else:
x_adapt = np.array(x, dtype=np.float32) if isinstance(x, list) else x.astype(np.float32)
x_adapt /= 255.
# Make sure the image dimensions are multiples of 2**pyramid_levels, pad them if they're not
_, pad_h = divmod(x_adapt.shape[2], 2**self.opts['pyr_lvls'])
if pad_h != 0:
pad_h = 2 ** self.opts['pyr_lvls'] - pad_h
_, pad_w = divmod(x_adapt.shape[3], 2**self.opts['pyr_lvls'])
if pad_w != 0:
pad_w = 2 ** self.opts['pyr_lvls'] - pad_w
x_adapt_info = None
if pad_h != 0 or pad_w != 0:
padding = [(0, 0), (0, 0), (0, pad_h), (0, pad_w), (0, 0)]
x_adapt_info = x_adapt.shape # Save original shape
x_adapt = np.pad(x_adapt, padding, mode='constant', constant_values=0.)
return x_adapt, x_adapt_info
def adapt_y(self, y):
"""Preprocess the labels to adapt them to the loss computation requirements of the network
Here, y, is the actual data, not the y TF tensor.
Args:
y: labels in list[(H,W,2)] or (N,H,W,2) np array form
Returns:
Labels ready to be used by the network's loss function (w. same shape as y)
Also, return adaptation info in (N,H,W,2) format
"""
# Ensure we're dealing with u,v flows
assert (isinstance(y, np.ndarray) or isinstance(y, list))
if isinstance(y, np.ndarray):
assert (len(y.shape) == 4)
assert (y.shape[3] == 2)
else:
assert (len(y[0].shape) == 3)
assert (y[0].shape[2] == 2)
y_adapt = np.array(y, dtype=np.float32) if isinstance(y, list) else y # list[(H,W,2)] -> (batch_size,H,W,2)
# Make sure the flow dimensions are multiples of 2**pyramid_levels, pad them if they're not
_, pad_h = divmod(y.shape[1], 2**self.opts['pyr_lvls'])
if pad_h != 0:
pad_h = 2 ** self.opts['pyr_lvls'] - pad_h
_, pad_w = divmod(y.shape[2], 2**self.opts['pyr_lvls'])
if pad_w != 0:
pad_w = 2 ** self.opts['pyr_lvls'] - pad_w
y_adapt_info = None
if pad_h != 0 or pad_w != 0:
padding = [(0, 0), (0, pad_h), (0, pad_w), (0, 0)]
y_adapt_info = y_adapt.shape # Save original shape
y_adapt = np.pad(y_adapt, padding, mode='constant', constant_values=0.)
# if y_adapt_info is not None and not y_adapt_info in self.adapt_infos: self.adapt_infos.append(y_adapt_info)
# if not y.shape in self.unique_y_shapes: self.unique_y_shapes.append(y.shape)
return y_adapt, y_adapt_info
def postproc_y_hat_test(self, y_hat, adapt_info=None):
"""Postprocess the results coming from the network during the test mode.
Here, y_hat, is the actual data, not the y_hat TF tensor. Override as necessary.
Args:
y_hat: predictions, see set_output_tnsrs() for details
adapt_info: adaptation information in (N,H,W,2) format
Returns:
Postprocessed labels
"""
assert (isinstance(y_hat, list) and len(y_hat) == 2)
# Have the samples been padded to fit the network's requirements? If so, crop flows back to original size.
pred_flows = y_hat[0]
if adapt_info is not None:
pred_flows = pred_flows[:, 0:adapt_info[1], 0:adapt_info[2], :]
# Individuate flows of the flow pyramid (at this point, they are still batched)
pyramids = y_hat[1]
pred_flows_pyramid = []
for idx in range(len(pred_flows)):
pyramid = []
for lvl in range(self.opts['pyr_lvls'] - self.opts['flow_pred_lvl'] + 1):
pyramid.append(pyramids[lvl][idx])
pred_flows_pyramid.append(pyramid)
return pred_flows, pred_flows_pyramid
def postproc_y_hat_train(self, y_hat, adapt_info=None):
"""Postprocess the results coming from the network during training.
Here, y_hat, is the actual data, not the y_hat TF tensor. Override as necessary.
Args:
y_hat: losses and metrics, see set_output_tnsrs() for details
adapt_info: adaptation information in (N,H,W,2) format
Returns:
Batch loss and metric
"""
assert (isinstance(y_hat, list) and len(y_hat) == 3)
return y_hat[0], y_hat[1]
def postproc_y_hat_val(self, y_hat, adapt_info=None):
"""Postprocess the results coming from the network during validation.
Here, y_hat, is the actual data, not the y_hat TF tensor. Override as necessary.
Args:
y_hat: batch loss and metric, or predicted flows and metrics, see set_output_tnsrs() for details
adapt_info: adaptation information in (N,H,W,2) format
Returns:
Either, batch loss and metric
Or, predicted flows and metrics
"""
if self.mode in ['train_noval', 'train_with_val']:
# In online evaluation mode, we only care about the average loss and metric for the batch:
assert (isinstance(y_hat, list) and len(y_hat) == 2)
return y_hat[0], y_hat[1]
if self.mode in ['val', 'val_notrain']:
# Have the samples been padded to fit the network's requirements? If so, crop flows back to original size.
pred_flows = y_hat[0]
if adapt_info is not None:
pred_flows = pred_flows[:, 0:adapt_info[1], 0:adapt_info[2], :]
return pred_flows, y_hat[1]
###
# Training helpers
###
def setup_loss_ops(self):
"""Setup loss computations. See pwcnet_loss() function for unregularized loss implementation details.
"""
# Setup unregularized loss
loss_unreg = pwcnet_loss(self.y_tnsr, self.flow_pyr_tnsr, self.opts)
# Add regularization term
if self.opts['gamma'] == 0.:
self.loss_op = loss_unreg
else:
loss_reg = self.opts['gamma'] * tf.reduce_sum([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
self.loss_op = loss_unreg + loss_reg
def setup_optim_op(self):
"""Select the Adam optimizer, define the optimization process.
"""
# Instantiate optimizer
# see https://stackoverflow.com/questions/42064941/tensorflow-float16-support-is-broken
# for float32 epsilon=1e-08, for float16 use epsilon=1e-4
epsilon = 1e-08 if self.opts['use_mixed_precision'] is False else 1e-4
if self.opts['loss_fn'] == 'loss_multiscale':
self.optim = tf.train.AdamOptimizer(self.lr, epsilon=epsilon)
else:
self.optim = tf.train.ProximalGradientDescentOptimizer(self.lr)
if self.opts['use_mixed_precision'] is True:
# Choose a loss scale manager which decides how to pick the right loss scale throughout the training process.
loss_scale_mgr = FixedLossScaleManager(self.opts['loss_scaler'])
# Wrap the original optimizer in a LossScaleOptimizer
self.optim = LossScaleOptimizer(self.optim, loss_scale_mgr)
# zmf: deal with NaN
# Let minimize() take care of both computing the gradients and applying them to the model variables
# self.optim_op = self.optim.minimize(self.loss_op, self.g_step_op, tf.trainable_variables())
grads_and_vars = self.optim.compute_gradients(self.loss_op, var_list=tf.trainable_variables())
if tf.is_nan(grads_and_vars[0]) == True:
grads_and_vars_ = [(tf.where(tf.is_nan(grad), tf.zeros_like(grad), grad), val) for grad, val in grads_and_vars]
elif tf.is_nan(grads_and_vars[1]) == True:
grads_and_vars_ = [(tf.where(tf.is_nan(grad), tf.zeros_like(grad), grad), val) for grad, val in grads_and_vars]
else:
grads_and_vars_ = grads_and_vars
self.optim_op = self.optim.apply_gradients(grads_and_vars_, global_step=self.g_step_op, name=None)
else:
# Let minimize() take care of both computing the gradients and applying them to the model variables
# self.optim_op = self.optim.minimize(self.loss_op, self.g_step_op, tf.trainable_variables())
grads_and_vars = self.optim.compute_gradients(self.loss_op, var_list=tf.trainable_variables())
if tf.is_nan(grads_and_vars[0]) == True:
grads_and_vars_ = [(tf.where(tf.is_nan(grad), tf.zeros_like(grad), grad), val) for grad, val in grads_and_vars]
elif tf.is_nan(grads_and_vars[1]) == True:
grads_and_vars_ = [(tf.where(tf.is_nan(grad), tf.zeros_like(grad), grad), val) for grad, val in grads_and_vars]
else:
grads_and_vars_ = grads_and_vars
# fmz
self.optim_op = self.optim.apply_gradients(grads_and_vars_, global_step=self.g_step_op, name=None)
def config_train_ops(self):
"""Configure training ops.
Called by the base class when building the TF graph to setup all the training ops, including:
- setting up loss computations,
- setting up metrics computations,
- creating a learning rate training schedule,
- selecting an optimizer,
- creating lists of output tensors.
"""
assert (self.opts['train_mode'] in ['train', 'fine-tune'])
if self.opts['verbose']:
print("Configuring training ops...")
# Setup loss computations
self.setup_loss_ops()
# Setup metrics computations
self.setup_metrics_ops()
# Setup a learning rate training schedule
self.setup_lr_sched()
# Setup optimizer computations
self.setup_optim_op()
if self.opts['verbose']:
print("... training ops configured.")
def config_loggers(self):
"""Configure train logger and, optionally, val logger. Here add a logger for test images, if requested.
"""
super().config_loggers()
if self.opts['tb_test_imgs'] is True:
self.tb_test = OptFlowTBLogger(self.opts['ckpt_dir'], 'test')
    def train(self):
        """Training loop

        Runs up to opts['max_steps'] optimization steps inside this model's graph:
        fetches a training batch (via tf.data or a numpy feed_dict), runs the
        loss/metric/optim ops, periodically logs progress to Tensorboard, evaluates on
        the validation split, logs sample predictions, and saves checkpoints ranked by
        the latest EPE.
        """
        with self.graph.as_default():
            # Reset step counter
            if self.opts['train_mode'] == 'fine-tune':
                # Fine-tuning restarts the global step counter from zero
                step = 1
                self.sess.run(self.g_step_op.assign(0))
                if self.opts['verbose']:
                    print("Start finetuning...")
            else:
                if self.last_ckpt is not None:
                    # Resume from the global step stored in the restored checkpoint
                    step = self.g_step_op.eval(session=self.sess) + 1
                    if self.opts['verbose']:
                        print(f"Resume training from step {step}...")
                else:
                    step = 1
                    if self.opts['verbose']:
                        print("Start training from scratch...")
            # Get batch sizes
            batch_size = self.opts['batch_size']
            val_batch_size = self.opts['val_batch_size']
            if self.mode == 'train_noval':
                warnings.warn("Setting val_batch_size=0 because dataset is in 'train_noval' mode")
                val_batch_size = 0
            if val_batch_size == -1:
                # -1 means "validate on the entire validation split"
                val_batch_size = self.ds.val_size
            # Init batch progress trackers
            train_loss, train_epe, duration = [], [], []
            ranking_value = 0
            # Only load Tensorboard validation/test images once
            if self.opts['tb_val_imgs'] is not None:
                tb_val_loaded = False
            if self.opts['tb_test_imgs'] is not None:
                tb_test_loaded = False
            # Use feed_dict from np or with tf.data.Dataset?
            if self.opts['use_tf_data'] is True:
                # Create tf.data.Dataset managers
                train_tf_ds = self.ds.get_tf_ds(batch_size, self.num_gpus, split='train', sess=self.sess)
                val_tf_ds = self.ds.get_tf_ds(batch_size, self.num_gpus, split='val', sess=self.sess)
                # Ops for initializing the two different iterators
                train_next_batch = train_tf_ds.make_one_shot_iterator().get_next()
                val_next_batch = val_tf_ds.make_one_shot_iterator().get_next()
            while step < self.opts['max_steps'] + 1:
                # Get a batch of samples and make them conform to the network's requirements
                # x: [batch_size*num_gpus,2,H,W,3] uint8 y: [batch_size*num_gpus,H,W,2] float32
                # x_adapt: [batch_size,2,H,W,3] float32 y_adapt: [batch_size,H,W,2] float32
                if self.opts['use_tf_data'] is True:
                    x, y, _ = self.sess.run(train_next_batch)
                    # print(x.shape,y.shape,x.mean()) # (8, 2, 256, 448, 3) (8, 256, 448, 2) 135.9569324311756
                else:
                    x, y, _ = self.ds.next_batch(batch_size * self.num_gpus, split='train')
                x_adapt, _ = self.adapt_x(x)
                y_adapt, _ = self.adapt_y(y)
                # print(x_adapt.shape,y_adapt.shape,x_adapt.mean()) # (8, 2, 256, 448, 3) (8, 256, 448, 2) 0.5331643
                # Run the samples through the network (loss, error rate, and optim ops (backprop))
                feed_dict = {self.x_tnsr: x_adapt, self.y_tnsr: y_adapt}
                start_time = time.time()
                y_hat = self.sess.run(self.y_hat_train_tnsr, feed_dict=feed_dict)
                duration.append(time.time() - start_time)
                loss, epe = self.postproc_y_hat_train(y_hat)  # y_hat: [107.0802, 5.8556495, None]
                # if self.num_gpus == 1:  # Single-GPU case
                # else:  # Multi-CPU case
                train_loss.append(loss), train_epe.append(epe)
                # Show training progress
                if step % self.opts['display_step'] == 0:
                    # Send results to tensorboard
                    loss, epe = np.mean(train_loss), np.mean(train_epe)
                    ranking_value = epe  # checkpoints saved below are ranked by the latest EPE
                    self.tb_train.log_scalar("losses/loss", loss, step)
                    self.tb_train.log_scalar("metrics/epe", epe, step)
                    lr = self.lr.eval(session=self.sess)
                    self.tb_train.log_scalar("optim/lr", lr, step)
                    # Print results, if requested
                    if self.opts['verbose']:
                        sec_per_step = np.mean(duration)
                        samples_per_step = batch_size * self.num_gpus
                        samples_per_sec = samples_per_step / sec_per_step
                        eta = round((self.opts['max_steps'] - step) * sec_per_step)
                        ts = time.strftime("%Y-%m-%d %H:%M:%S")
                        status = f"{ts} Iter {self.g_step_op.eval(session=self.sess)}" \
                                 f" [Train]: loss={loss:.2f}, epe={epe:.2f}, lr={lr:.6f}," \
                                 f" samples/sec={samples_per_sec:.1f}, sec/step={sec_per_step:.3f}," \
                                 f" eta={datetime.timedelta(seconds=eta)}"
                        print(status)
                    # Reset batch progress trackers
                    train_loss, train_epe, duration = [], [], []
                # Show progress on validation ds, if requested
                if val_batch_size > 0 and step % self.opts['val_step'] == 0:
                    val_loss, val_epe = [], []
                    rounds, _ = divmod(val_batch_size, batch_size * self.num_gpus)
                    for _round in range(rounds):
                        if self.opts['use_tf_data'] is True:
                            # NOTE(review): this unpacks 4 values while the train branch above
                            # unpacks 3 — confirm the 'val' tf.data split really yields 4-tuples
                            x, y, _, _ = self.sess.run(val_next_batch)
                        else:
                            # Get a batch of val samples and make them conform to the network's requirements
                            x, y, _ = self.ds.next_batch(batch_size * self.num_gpus, split='val')
                        # x: [batch_size * self.num_gpus,2,H,W,3] uint8 y: [batch_size,H,W,2] float32
                        x_adapt, _ = self.adapt_x(x)
                        y_adapt, _ = self.adapt_y(y)
                        # x_adapt: [batch_size * self.num_gpus,2,H,W,3] float32 y_adapt: [batch_size,H,W,2] float32
                        # Run the val samples through the network (loss and error rate ops)
                        feed_dict = {self.x_tnsr: x_adapt, self.y_tnsr: y_adapt}
                        y_hat = self.sess.run(self.y_hat_val_tnsr, feed_dict=feed_dict)
                        loss, epe = self.postproc_y_hat_val(y_hat)
                        val_loss.append(loss), val_epe.append(epe)
                    # Send the results to tensorboard
                    loss, epe = np.mean(val_loss), np.mean(val_epe)
                    ranking_value = epe
                    self.tb_val.log_scalar("losses/loss", loss, step)
                    self.tb_val.log_scalar("metrics/epe", epe, step)
                    # Print results, if requested
                    if self.opts['verbose']:
                        ts = time.strftime("%Y-%m-%d %H:%M:%S")
                        status = f"{ts} Iter {self.g_step_op.eval(session=self.sess)} [Val]: loss={loss:.2f}, epe={epe:.2f}"
                        print(status)
                # Save a checkpoint every snapshot_step
                if step % self.opts['snapshot_step'] == 0 or step == self.opts['max_steps']:
                    # Log evolution of test images to Tensorboard, if requested
                    # NOTE(review): config_loggers() only creates self.tb_test when
                    # opts['tb_test_imgs'] is True, yet this branch runs whenever the option is
                    # not None ('top_flow'/other) — confirm the option values line up
                    if self.opts['tb_test_imgs'] is not None:
                        # Get a batch of test samples and make them conform to the network's requirements
                        if tb_test_loaded is False:
                            x_tb_test, IDs_tb_test = self.ds.get_samples(
                                batch_size * self.num_gpus, split='test', simple_IDs=True)
                            x_tb_test_adapt, _ = self.adapt_x(x_tb_test)
                            # IDs_tb_test = self.ds.simplify_IDs(x_IDs)
                            tb_test_loaded = True
                        # Run the test samples through the network
                        feed_dict = {self.x_tnsr: x_tb_test_adapt}
                        y_hat = self.sess.run(self.y_hat_test_tnsr, feed_dict=feed_dict)
                        pred_flows, pred_flows_pyr = self.postproc_y_hat_test(y_hat)
                        # Only show batch_size results, no matter what the GPU count is
                        pred_flows, pred_flows_pyr = pred_flows[0:batch_size], pred_flows_pyr[0:batch_size]
                        # Send the results to tensorboard
                        if self.opts['tb_test_imgs'] == 'top_flow':
                            self.tb_test.log_imgs_w_flows('test/{}_flows', x_tb_test, None, 0, pred_flows,
                                                          None, step, IDs_tb_test)
                        else:
                            self.tb_test.log_imgs_w_flows('test/{}_flows_pyr', x_tb_test, pred_flows_pyr,
                                                          self.opts['pyr_lvls'] - self.opts['flow_pred_lvl'], pred_flows,
                                                          None, step, IDs_tb_test)
                    # Log evolution of val images, if requested
                    if self.opts['tb_val_imgs'] is not None:
                        # Get a batch of val samples and make them conform to the network's requirements
                        if tb_val_loaded is False:
                            x_tb_val, y_tb_val, IDs_tb_val = self.ds.get_samples(
                                batch_size * self.num_gpus, split='val', simple_IDs=True)
                            x_tb_val_adapt, _ = self.adapt_x(x_tb_val)
                            # IDs_tb_val = self.ds.simplify_IDs(x_IDs)
                            tb_val_loaded = True
                        # Run the val samples through the network (top flow and pyramid)
                        feed_dict = {self.x_tnsr: x_tb_val_adapt}
                        y_hat = self.sess.run(self.y_hat_test_tnsr, feed_dict=feed_dict)
                        pred_flows, pred_flows_pyr = self.postproc_y_hat_test(y_hat)
                        # Only show batch_size results, no matter what the GPU count is
                        x_tb_val, y_tb_val = x_tb_val[0:batch_size], y_tb_val[0:batch_size]
                        IDs_tb_val = IDs_tb_val[0:batch_size]
                        pred_flows, pred_flows_pyr = pred_flows[0:batch_size], pred_flows_pyr[0:batch_size]
                        # Send the results to tensorboard
                        if self.opts['tb_val_imgs'] == 'top_flow':
                            self.tb_val.log_imgs_w_flows('val/{}_flows', x_tb_val, None, 0, pred_flows,
                                                         y_tb_val, step, IDs_tb_val)
                        else:
                            self.tb_val.log_imgs_w_flows('val/{}_flows_pyr', x_tb_val[0:batch_size], pred_flows_pyr,
                                                         self.opts['pyr_lvls'] - self.opts['flow_pred_lvl'], pred_flows,
                                                         y_tb_val, step, IDs_tb_val)
                    # Save model
                    self.save_ckpt(ranking_value)
                step += 1
        if self.opts['verbose']:
            print("... done training.")
###
# Evaluation helpers
###
def setup_metrics_ops(self):
"""Setup metrics computations. Use the endpoint error metric to track progress.
Note that, if the label flows come back from the network padded, it isn't a fair assessment of the performance
of the model if we also measure the EPE in the padded area. This area is to be cropped out before returning
the predicted flows to the caller, so exclude that area when computing the performance metric.
"""
# Have the samples been padded to the nn's requirements? If so, crop flows back to original size.
y_tnsr, flow_pred_tnsr = self.y_tnsr, self.flow_pred_tnsr
if self.opts['adapt_info'] is not None: # (1,436,1024,2)
y_tnsr = y_tnsr[:, 0:self.opts['adapt_info'][1], 0:self.opts['adapt_info'][2], :]
flow_pred_tnsr = flow_pred_tnsr[:, 0:self.opts['adapt_info'][1], 0:self.opts['adapt_info'][2], :]
if self.opts['sparse_gt_flow'] is True:
# Find the location of the zerod-out flows in the gt
zeros_loc = tf.logical_and(tf.equal(y_tnsr[:, :, :, 0], 0.0), tf.equal(y_tnsr[:, :, :, 1], 0.0))
zeros_loc = tf.expand_dims(zeros_loc, -1)
# Zero out flow predictions at the same location so we only compute the EPE at the sparse flow points
flow_pred_tnsr = tf_where(zeros_loc, tf.zeros_like(flow_pred_tnsr), flow_pred_tnsr)
if self.mode in ['train_noval', 'train_with_val']:
# In online evaluation mode, we only care about the average loss and metric for the batch:
self.metric_op = tf.reduce_mean(tf.norm(y_tnsr - flow_pred_tnsr, ord=2, axis=3))
if self.mode in ['val', 'val_notrain']:
# In offline evaluation mode, we actually care about each individual prediction and metric -> axis=(1, 2)
self.metric_op = tf.reduce_mean(tf.norm(y_tnsr - flow_pred_tnsr, ord=2, axis=3), axis=(1, 2))
    def eval(self, metric_name=None, save_preds=False):
        """Evaluation loop. Test the trained model on the validation split of the dataset.

        Args:
            metric_name: column name used for the metric in the returned dataframe
                (defaults to 'Score')
            save_preds: if True, the predictions are saved to disk
        Returns:
            Average score for the entire dataset, average per-sample inference duration,
            and a pandas df with individual scores for further error analysis
        """
        with self.graph.as_default():
            # Use feed_dict from np or with tf.data.Dataset?
            batch_size = self.opts['batch_size']
            if self.opts['use_tf_data'] is True:
                # Create tf.data.Dataset manager
                tf_ds = self.ds.get_tf_ds(batch_size=batch_size, split='val', sess=self.sess)
                # Ops for initializing the iterator
                next_batch = tf_ds.make_one_shot_iterator().get_next()
            # Store results in a dataframe
            if metric_name is None:
                metric_name = 'Score'
            df = pd.DataFrame(columns=['ID', metric_name, 'Duration', 'Avg_Flow_Mag', 'Max_Flow_Mag'])
            # Chunk dataset
            rounds, rounds_left = divmod(self.ds.val_size, batch_size)
            if rounds_left:
                rounds += 1
            # Loop through samples and track their model performance
            desc = f'Measuring {metric_name} and saving preds' if save_preds else f'Measuring {metric_name}'
            idx = 0
            for _round in trange(rounds, ascii=True, ncols=100, desc=desc):
                # Fetch and adapt sample
                if self.opts['use_tf_data'] is True:
                    x, y, y_hat_paths, IDs = self.sess.run(next_batch)
                    # tf.data yields bytes; convert paths and IDs back to str
                    y_hat_paths = [y_hat_path.decode() for y_hat_path in y_hat_paths]
                    IDs = [ID.decode() for ID in IDs]
                else:
                    # Get a batch of samples and make them conform to the network's requirements
                    x, y, y_hat_paths, IDs = self.ds.next_batch(batch_size, split='val_with_pred_paths')
                # x: [batch_size * self.num_gpus,2,H,W,3] uint8 y: [batch_size,H,W,2] float32
                x_adapt, _ = self.adapt_x(x)
                y_adapt, y_adapt_info = self.adapt_y(y)
                # x_adapt: [batch_size * self.num_gpus,2,H,W,3] float32 y_adapt: [batch_size,H,W,2] float32
                # Run the sample through the network (metric op)
                feed_dict = {self.x_tnsr: x_adapt, self.y_tnsr: y_adapt}
                start_time = time.time()
                y_hat = self.sess.run(self.y_hat_val_tnsr, feed_dict=feed_dict)
                duration = time.time() - start_time
                y_hats, metrics = self.postproc_y_hat_val(y_hat, y_adapt_info)
                # Save the individual results in df
                duration /= batch_size  # amortize the batch inference time per sample
                for y_hat, metric, y_hat_path, ID in zip(y_hats, metrics, y_hat_paths, IDs):
                    _, flow_mag_avg, flow_mag_max = flow_mag_stats(y_hat)
                    df.loc[idx] = (ID, metric, duration, flow_mag_avg, flow_mag_max)
                    if save_preds:
                        flow_write(y_hat, y_hat_path)
                        info=f"{metric_name}={metric:.2f}"
                        flow_write_as_png(y_hat, y_hat_path.replace('.flo', '.png'), info=info)
                    idx += 1
            # Compute stats
            avg_metric, avg_duration = df.loc[:, metric_name].mean(), df.loc[:, 'Duration'].mean()
            # print(self.unique_y_shapes)
            return avg_metric, avg_duration, df
###
# Inference helpers
###
    def predict(self, return_preds=False, save_preds=True):
        """Inference loop. Run the trained model on the test split of the dataset.

        The data samples are provided by the OpticalFlowDataset object associated with this ModelPWCNet instance.
        To predict flows for image pairs not provided by such object, use predict_from_img_pairs() instead.
        Args:
            return_preds: if True, the predictions are returned to the caller in list([2, H, W, 3]) format.
            save_preds: if True, the predictions are saved to disk in .flo and .png format
        Returns:
            if return_preds is True, the predictions and their IDs are returned (might require a lot of RAM...)
            if return_preds is False, return None
        """
        with self.graph.as_default():
            # Use feed_dict from np or with tf.data.Dataset?
            batch_size = self.opts['batch_size']
            if self.opts['use_tf_data'] is True:
                # Create tf.data.Dataset manager
                tf_ds = self.ds.get_tf_ds(batch_size=batch_size, split='test', sess=self.sess)
                # Ops for initializing the iterator
                next_batch = tf_ds.make_one_shot_iterator().get_next()
            # Chunk dataset
            rounds, rounds_left = divmod(self.ds.tst_size, batch_size)
            if rounds_left:
                rounds += 1
            # Loop through input samples and run inference on them
            if return_preds is True:
                preds, ids = [], []
            desc = f'Predicting flows and saving preds' if save_preds else f'Predicting flows'
            for _round in trange(rounds, ascii=True, ncols=100, desc=desc):
                # Fetch and adapt sample
                if self.opts['use_tf_data'] is True:
                    x, y_hat_paths, IDs = self.sess.run(next_batch)
                    # tf.data yields bytes; convert paths and IDs back to str
                    y_hat_paths = [y_hat_path.decode() for y_hat_path in y_hat_paths]
                    IDs = [ID.decode() for ID in IDs]
                else:
                    # Get a batch of samples and make them conform to the network's requirements
                    x, y_hat_paths, IDs = self.ds.next_batch(batch_size, split='test_with_pred_paths')
                # x: [batch_size,2,H,W,3] uint8; x_adapt: [batch_size,2,H,W,3] float32
                x_adapt, x_adapt_info = self.adapt_x(x)
                if x_adapt_info is not None:
                    # Labels carry the same (H,W) padding as the inputs but have 2 channels (u,v)
                    y_adapt_info = (x_adapt_info[0], x_adapt_info[2], x_adapt_info[3], 2)
                else:
                    y_adapt_info = None
                # Run the sample through the network
                feed_dict = {self.x_tnsr: x_adapt}
                y_hat = self.sess.run(self.y_hat_test_tnsr, feed_dict=feed_dict)
                y_hats, _ = self.postproc_y_hat_test(y_hat, y_adapt_info)
                # Save the predicted flows to disk, if requested
                for y_hat, y_hat_path, ID in zip(y_hats, y_hat_paths, IDs):
                    if return_preds is True:
                        preds.append(y_hat)
                        ids.append(ID)
                    if save_preds is True:
                        flow_write(y_hat, y_hat_path)
                        flow_write_as_png(y_hat, y_hat_path.replace('.flo', '.png'))
            # The last round may have gone past the end of the split; trim to tst_size
            if return_preds is True:
                return preds[0:self.ds.tst_size], ids[0:self.ds.tst_size]
            else:
                return None
    def predict_from_img_pairs(self, img_pairs, batch_size=1, verbose=False):
        """Inference loop. Run inference on a list of image pairs.
        Args:
            img_pairs: list of image pairs/tuples in list((img_1, img_2),...,(img_n, img_nplusone)) format.
            batch_size: size of the batch to process (all images must have the same dimension, if batch_size>1)
            verbose: if True, show progress bar
        Returns:
            Predicted flows in list format (one np.ndarray per input pair, in input order)
        """
        with self.graph.as_default():
            # Chunk image pair list
            # NOTE(review): the batch_size argument is immediately shadowed by the model
            # option below, so the parameter is effectively ignored -- confirm whether the
            # graph was built for a fixed batch size before honoring the argument instead.
            batch_size = self.opts['batch_size']
            test_size = len(img_pairs)
            rounds, rounds_left = divmod(test_size, batch_size)
            if rounds_left:
                rounds += 1
            # Loop through input samples and run inference on them
            preds, test_ptr = [], 0
            rng = trange(rounds, ascii=True, ncols=100, desc='Predicting flows') if verbose else range(rounds)
            for _round in rng:
                # In batch mode, make sure to wrap around if there aren't enough input samples to process;
                # the wrapped duplicates are dropped by the final slice below.
                if test_ptr + batch_size < test_size:
                    new_ptr = test_ptr + batch_size
                    indices = list(range(test_ptr, test_ptr + batch_size))
                else:
                    new_ptr = (test_ptr + batch_size) % test_size
                    indices = list(range(test_ptr, test_size)) + list(range(0, new_ptr))
                test_ptr = new_ptr
                # Repackage input image pairs as np.ndarray
                x = np.array([img_pairs[idx] for idx in indices])
                # Make input samples conform to the network's requirements
                # x: [batch_size,2,H,W,3] uint8; x_adapt: [batch_size,2,H,W,3] float32
                x_adapt, x_adapt_info = self.adapt_x(x)
                if x_adapt_info is not None:
                    # Flow output has 2 channels; reuse the adapted H/W bookkeeping from the input
                    y_adapt_info = (x_adapt_info[0], x_adapt_info[2], x_adapt_info[3], 2)
                else:
                    y_adapt_info = None
                # Run the adapted samples through the network
                feed_dict = {self.x_tnsr: x_adapt}
                y_hat = self.sess.run(self.y_hat_test_tnsr, feed_dict=feed_dict)
                y_hats, _ = self.postproc_y_hat_test(y_hat, y_adapt_info)
                # Return flat list of predicted labels
                for y_hat in y_hats:
                    preds.append(y_hat)
            # Drop any wrap-around duplicates introduced by the last partial batch
            return preds[0:test_size]
###
# PWC-Net pyramid helpers
###
def extract_features(self, x_tnsr, name='featpyr'):
"""Extract pyramid of features
Args:
x_tnsr: Input tensor (input pair of images in [batch_size, 2, H, W, 3] format)
name: Variable scope name
Returns:
c1, c2: Feature pyramids
Ref:
Per page 3 of paper, section "Feature pyramid extractor," given two input images I1 and I2, we generate
L-level pyramids of feature representations, with the bottom (zeroth) level being the input images,
i.e., Ct<sup>0</sup> = It. To generate feature representation at the l-th layer, Ct<sup>l</sup>, we use
layers of convolutional filters to downsample the features at the (l−1)th pyramid level, Ct<sup>l-1</sup>,
by a factor of 2. From the first to the sixth levels, the number of feature channels are respectively
16, 32, 64, 96, 128, and 196. Also see page 15 of paper for a rendering of the network architecture.
Per page 15, individual images of the image pair are encoded using the same Siamese network. Each
convolution is followed by a leaky ReLU unit. The convolutional layer and the x2 downsampling layer at
each level is implemented using a single convolutional layer with a stride of 2.
Note that Figure 4 on page 15 differs from the PyTorch implementation in two ways:
- It's missing a convolution layer at the end of each conv block
- It shows a number of filters of 192 (instead of 196) at the end of the last conv block
Ref PyTorch code:
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
[...]
self.conv1a = conv(3, 16, kernel_size=3, stride=2)
self.conv1aa = conv(16, 16, kernel_size=3, stride=1)
self.conv1b = conv(16, 16, kernel_size=3, stride=1)
self.conv2a = conv(16, 32, kernel_size=3, stride=2)
self.conv2aa = conv(32, 32, kernel_size=3, stride=1)
self.conv2b = conv(32, 32, kernel_size=3, stride=1)
self.conv3a = conv(32, 64, kernel_size=3, stride=2)
self.conv3aa = conv(64, 64, kernel_size=3, stride=1)
self.conv3b = conv(64, 64, kernel_size=3, stride=1)
self.conv4a = conv(64, 96, kernel_size=3, stride=2)
self.conv4aa = conv(96, 96, kernel_size=3, stride=1)
self.conv4b = conv(96, 96, kernel_size=3, stride=1)
self.conv5a = conv(96, 128, kernel_size=3, stride=2)
self.conv5aa = conv(128,128, kernel_size=3, stride=1)
self.conv5b = conv(128,128, kernel_size=3, stride=1)
self.conv6aa = conv(128,196, kernel_size=3, stride=2)
self.conv6a = conv(196,196, kernel_size=3, stride=1)
self.conv6b = conv(196,196, kernel_size=3, stride=1)
[...]
c11 = self.conv1b(self.conv1aa(self.conv1a(im1))) # Higher-res
c21 = self.conv1b(self.conv1aa(self.conv1a(im2)))
c12 = self.conv2b(self.conv2aa(self.conv2a(c11)))
c22 = self.conv2b(self.conv2aa(self.conv2a(c21)))
c13 = self.conv3b(self.conv3aa(self.conv3a(c12)))
c23 = self.conv3b(self.conv3aa(self.conv3a(c22)))
c14 = self.conv4b(self.conv4aa(self.conv4a(c13)))
c24 = self.conv4b(self.conv4aa(self.conv4a(c23)))
c15 = self.conv5b(self.conv5aa(self.conv5a(c14)))
c25 = self.conv5b(self.conv5aa(self.conv5a(c24)))
c16 = self.conv6b(self.conv6a(self.conv6aa(c15)))
c26 = self.conv6b(self.conv6a(self.conv6aa(c25))) # Lower-res
Ref Caffee code:
https://github.com/NVlabs/PWC-Net/blob/438ca897ae77e08f419ddce5f0d7fa63b0a27a77/Caffe/model/train.prototxt#L314-L1141
"""
assert(1 <= self.opts['pyr_lvls'] <= 6)
if self.dbg:
print(f"Building feature pyramids (c11,c21) ... (c1{self.opts['pyr_lvls']},c2{self.opts['pyr_lvls']})")
# Make the feature pyramids 1-based for better readability down the line
num_chann = [None, 16, 32, 64, 96, 128, 196]
c1, c2 = [None], [None]
init = tf.keras.initializers.he_normal()
with tf.variable_scope(name):
for pyr, x, reuse, name in zip([c1, c2], [x_tnsr[:, 0], x_tnsr[:, 1]], [None, True], ['c1', 'c2']):
# pyr = c1 == [None], x = x_tnsr[:, 0] 1st frame [batch_size,H,W,3], reuse = None, name = 'c1'
# pyr = c2 == [None], x = x_tnsr[:, 1] 2nd frame [batch_size,H,W,3], reuse = True, name = 'c2'
for lvl in range(1, self.opts['pyr_lvls'] + 1): # 123456
# tf.layers.conv2d(inputs, filters, kernel_size, strides=(1, 1), padding='valid', ... , name, reuse)
# reuse is set to True because we want to learn a single set of weights for the pyramid
# reuse: Boolean, whether to reuse the weights of a previous layer by the same name.
# kernel_initializer = 'he_normal' or tf.keras.initializers.he_normal(seed=None)
f = num_chann[lvl]
x = tf.layers.conv2d(inputs=x, filters=f, kernel_size=3, strides=2, padding='same', kernel_initializer=init, name=f'conv{lvl}a', reuse=reuse)
x = tf.nn.leaky_relu(features=x, alpha=0.1) # , name=f'relu{lvl+1}a') # default alpha is 0.2 for TF
x = tf.layers.conv2d(inputs=x, filters=f, kernel_size=3, strides=1, padding='same', kernel_initializer=init, name=f'conv{lvl}aa', reuse=reuse)
x = tf.nn.leaky_relu(features=x, alpha=0.1) # , name=f'relu{lvl+1}aa')
x = tf.layers.conv2d(inputs=x, filters=f, kernel_size=3, strides=1, padding='same', kernel_initializer=init, name=f'conv{lvl}b', reuse=reuse)
x = tf.nn.leaky_relu(features=x, alpha=0.1, name=f'{name}{lvl}')
pyr.append(x)
return c1, c2
###
# PWC-Net warping helpers
###
def warp(self, c2, sc_up_flow, lvl, name='warp'):
"""Warp a level of Image1's feature pyramid using the upsampled flow at level+1 of Image2's pyramid.
Args:
c2: The level of the feature pyramid of Image2 to warp
sc_up_flow: Scaled and upsampled estimated optical flow (from Image1 to Image2) used for warping
lvl: Index of that level
name: Op scope name
Ref:
Per page 4 of paper, section "Warping layer," at the l-th level, we warp features of the second image toward
the first image using the x2 upsampled flow from the l+1th level:
C1w<sup>l</sup>(x) = C2<sup>l</sup>(x + Up2(w<sup>l+1</sup>)(x))
where x is the pixel index and the upsampled flow Up2(w<sup>l+1</sup>) is set to be zero at the top level.
We use bilinear interpolation to implement the warping operation and compute the gradients to the input
CNN features and flow for backpropagation according to E. Ilg's FlowNet 2.0 paper.
For non-translational motion, warping can compensate for some geometric distortions and put image patches
at the right scale.
Per page 3 of paper, section "3. Approach," the warping and cost volume layers have no learnable parameters
and, hence, reduce the model size.
Ref PyTorch code:
# warp an image/tensor (im2) back to im1, according to the optical flow
# x: [B, C, H, W] (im2)
# flo: [B, 2, H, W] flow
def warp(self, x, flo):
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W).view(1,-1).repeat(H,1)
yy = torch.arange(0, H).view(-1,1).repeat(1,W)
xx = xx.view(1,1,H,W).repeat(B,1,1,1)
yy = yy.view(1,1,H,W).repeat(B,1,1,1)
grid = torch.cat((xx,yy),1).float()
if x.is_cuda:
grid = grid.cuda()
vgrid = Variable(grid) + flo
# scale grid to [-1,1]
vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:]/max(W-1,1)-1.0
vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:]/max(H-1,1)-1.0
vgrid = vgrid.permute(0,2,3,1)
output = nn.functional.grid_sample(x, vgrid)
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
mask[mask<0.9999] = 0
mask[mask>0] = 1
return output*mask
[...]
warp5 = self.warp(c25, up_flow6*0.625)
warp4 = self.warp(c24, up_flow5*1.25)
warp3 = self.warp(c23, up_flow4*2.5)
warp2 = self.warp(c22, up_flow3*5.0)
Ref TF documentation:
tf.contrib.image.dense_image_warp(image, flow, name='dense_image_warp')
https://www.tensorflow.org/api_docs/python/tf/contrib/image/dense_image_warp
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/image/python/kernel_tests/dense_image_warp_test.py
Other implementations:
https://github.com/bryanyzhu/deepOF/blob/master/flyingChairsWrapFlow.py
https://github.com/bryanyzhu/deepOF/blob/master/ucf101wrapFlow.py
https://github.com/rajat95/Optical-Flow-Warping-Tensorflow/blob/master/warp.py
"""
op_name = f'{name}{lvl}'
if self.dbg:
msg = f'Adding {op_name} with inputs {c2.op.name} and {sc_up_flow.op.name}'
print(msg)
with tf.name_scope(name):
return dense_image_warp(c2, sc_up_flow, name=op_name)
def deconv(self, x, lvl, name='up_flow'):
"""Upsample, not using a bilinear filter, but rather learn the weights of a conv2d_transpose op filters.
Args:
x: Level features or flow to upsample
lvl: Index of that level
name: Op scope name
Ref PyTorch code:
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
return nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride, padding, bias=True)
[...]
self.deconv6 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
self.upfeat6 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
...
self.deconv5 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
self.upfeat5 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
...
self.deconv4 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
self.upfeat4 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
...
self.deconv3 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
self.upfeat3 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
...
self.deconv2 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
[...]
up_flow6 = self.deconv6(flow6)
up_feat6 = self.upfeat6(x)
...
up_flow5 = self.deconv5(flow5)
up_feat5 = self.upfeat5(x)
...
up_flow4 = self.deconv4(flow4)
up_feat4 = self.upfeat4(x)
...
up_flow3 = self.deconv3(flow3)
up_feat3 = self.upfeat3(x)
"""
op_name = f'{name}{lvl}'
if self.dbg:
print(f'Adding {op_name} with input {x.op.name}')
with tf.variable_scope('upsample'):
# tf.layers.conv2d_transpose(inputs, filters, kernel_size, strides=(1, 1), padding='valid', ... , name)
return tf.layers.conv2d_transpose(x, 2, 4, 2, 'same', name=op_name)
###
# Cost Volume helpers
###
def corr(self, c1, warp, lvl, name='corr'):
"""Build cost volume for associating a pixel from Image1 with its corresponding pixels in Image2.
Args:
c1: The level of the feature pyramid of Image1
warp: The warped level of the feature pyramid of image22
lvl: Index of that level
name: Op scope name
Ref:
Per page 3 of paper, section "Cost Volume," a cost volume stores the data matching costs for associating
a pixel from Image1 with its corresponding pixels in Image2. Most traditional optical flow techniques build
the full cost volume at a single scale, which is both computationally expensive and memory intensive. By
contrast, PWC-Net constructs a partial cost volume at multiple pyramid levels.
The matching cost is implemented as the correlation between features of the first image and warped features
of the second image:
CV<sup>l</sup>(x1,x2) = (C1<sup>l</sup>(x1))<sup>T</sup> . Cw<sup>l</sup>(x2) / N
where where T is the transpose operator and N is the length of the column vector C1<sup>l</sup>(x1).
For an L-level pyramid, we only need to compute a partial cost volume with a limited search range of d
pixels. A one-pixel motion at the top level corresponds to 2**(L−1) pixels at the full resolution images.
Thus we can set d to be small, e.g. d=4. The dimension of the 3D cost volume is d**2 × Hl × Wl, where Hl
and Wl denote the height and width of the L-th pyramid level, respectively.
Per page 3 of paper, section "3. Approach," the warping and cost volume layers have no learnable parameters
and, hence, reduce the model size.
Per page 5 of paper, section "Implementation details," we use a search range of 4 pixels to compute the
cost volume at each level.
Ref PyTorch code:
from correlation_package.modules.corr import Correlation
self.corr = Correlation(pad_size=md, kernel_size=1, max_displacement=4, stride1=1, stride2=1, corr_multiply=1)
[...]
corr6 = self.corr(c16, c26)
corr6 = self.leakyRELU(corr6)
...
corr5 = self.corr(c15, warp5)
corr5 = self.leakyRELU(corr5)
...
corr4 = self.corr(c14, warp4)
corr4 = self.leakyRELU(corr4)
...
corr3 = self.corr(c13, warp3)
corr3 = self.leakyRELU(corr3)
...
corr2 = self.corr(c12, warp2)
corr2 = self.leakyRELU(corr2)
"""
op_name = f'corr{lvl}'
if self.dbg:
print(f'Adding {op_name} with inputs {c1.op.name} and {warp.op.name}')
with tf.name_scope(name):
return cost_volume(c1, warp, self.opts['search_range'], op_name)
###
# Optical flow estimator helpers
###
def predict_flow(self, corr, c1, up_flow, up_feat, lvl, name='predict_flow'):
"""Estimate optical flow.
Args:
corr: The cost volume at level lvl
c1: The level of the feature pyramid of Image1
up_flow: An upsampled version of the predicted flow from the previous level
up_feat: An upsampled version of the features that were used to generate the flow prediction
lvl: Index of the level
name: Op scope name
Args:
upfeat: The features used to generate the predicted flow
flow: The predicted flow
Ref:
Per page 4 of paper, section "Optical flow estimator," the optical flow estimator is a multi-layer CNN. Its
input are the cost volume, features of the first image, and upsampled optical flow and its output is the
flow w<sup>l</sup> at the l-th level. The numbers of feature channels at each convolutional layers are
respectively 128, 128, 96, 64, and 32, which are kept fixed at all pyramid levels. The estimators at
different levels have their own parameters instead of sharing the same parameters. This estimation process
is repeated until the desired level, l0.
Per page 5 of paper, section "Implementation details," we use a 7-level pyramid and set l0 to be 2, i.e.,
our model outputs a quarter resolution optical flow and uses bilinear interpolation to obtain the
full-resolution optical flow.
The estimator architecture can be enhanced with DenseNet connections. The inputs to every convolutional
layer are the output of and the input to its previous layer. DenseNet has more direct connections than
traditional layers and leads to significant improvement in image classification.
Note that we do not use DenseNet connections in this implementation because a) they increase the size of the
model, and, b) per page 7 of paper, section "Optical flow estimator," removing the DenseNet connections
results in higher training error but lower validation errors when the model is trained on FlyingChairs
(that being said, after the model is fine-tuned on FlyingThings3D, DenseNet leads to lower errors).
Ref PyTorch code:
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.LeakyReLU(0.1))
def predict_flow(in_planes):
return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True)
[...]
nd = (2*md+1)**2
dd = np.cumsum([128,128,96,64,32])
od = nd
self.conv6_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv6_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
self.conv6_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
self.conv6_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
self.conv6_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
self.predict_flow6 = predict_flow(od+dd[4])
[...]
od = nd+128+4
self.conv5_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv5_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
self.conv5_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
self.conv5_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
self.conv5_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
self.predict_flow5 = predict_flow(od+dd[4])
[...]
od = nd+96+4
self.conv4_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv4_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
self.conv4_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
self.conv4_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
self.conv4_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
self.predict_flow4 = predict_flow(od+dd[4])
[...]
od = nd+64+4
self.conv3_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv3_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
self.conv3_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
self.conv3_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
self.conv3_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
self.predict_flow3 = predict_flow(od+dd[4])
[...]
od = nd+32+4
self.conv2_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv2_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
self.conv2_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
self.conv2_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
self.conv2_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
self.predict_flow2 = predict_flow(od+dd[4])
[...]
self.dc_conv1 = conv(od+dd[4], 128, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
self.dc_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
self.dc_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
self.dc_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
self.dc_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc_conv7 = predict_flow(32)
[...]
x = torch.cat((self.conv6_0(corr6), corr6),1)
x = torch.cat((self.conv6_1(x), x),1)
x = torch.cat((self.conv6_2(x), x),1)
x = torch.cat((self.conv6_3(x), x),1)
x = torch.cat((self.conv6_4(x), x),1)
flow6 = self.predict_flow6(x)
...
x = torch.cat((corr5, c15, up_flow6, up_feat6), 1)
x = torch.cat((self.conv5_0(x), x),1)
x = torch.cat((self.conv5_1(x), x),1)
x = torch.cat((self.conv5_2(x), x),1)
x = torch.cat((self.conv5_3(x), x),1)
x = torch.cat((self.conv5_4(x), x),1)
flow5 = self.predict_flow5(x)
...
x = torch.cat((corr4, c14, up_flow5, up_feat5), 1)
x = torch.cat((self.conv4_0(x), x),1)
x = torch.cat((self.conv4_1(x), x),1)
x = torch.cat((self.conv4_2(x), x),1)
x = torch.cat((self.conv4_3(x), x),1)
x = torch.cat((self.conv4_4(x), x),1)
flow4 = self.predict_flow4(x)
...
x = torch.cat((corr3, c13, up_flow4, up_feat4), 1)
x = torch.cat((self.conv3_0(x), x),1)
x = torch.cat((self.conv3_1(x), x),1)
x = torch.cat((self.conv3_2(x), x),1)
x = torch.cat((self.conv3_3(x), x),1)
x = torch.cat((self.conv3_4(x), x),1)
flow3 = self.predict_flow3(x)
...
x = torch.cat((corr2, c12, up_flow3, up_feat3), 1)
x = torch.cat((self.conv2_0(x), x),1)
x = torch.cat((self.conv2_1(x), x),1)
x = torch.cat((self.conv2_2(x), x),1)
x = torch.cat((self.conv2_3(x), x),1)
x = torch.cat((self.conv2_4(x), x),1)
flow2 = self.predict_flow2(x)
"""
op_name = f'flow{lvl}'
init = tf.keras.initializers.he_normal()
with tf.variable_scope(name):
if c1 is None and up_flow is None and up_feat is None:
if self.dbg:
print(f'Adding {op_name} with input {corr.op.name}')
x = corr
else:
if self.dbg:
msg = f'Adding {op_name} with inputs {corr.op.name}, {c1.op.name}, {up_flow.op.name}, {up_feat.op.name}'
print(msg)
x = tf.concat([corr, c1, up_flow, up_feat], axis=3)
conv = tf.layers.conv2d(x, 128, 3, 1, 'same', kernel_initializer=init, name=f'conv{lvl}_0')
act = tf.nn.leaky_relu(conv, alpha=0.1) # default alpha is 0.2 for TF
x = tf.concat([act, x], axis=3) if self.opts['use_dense_cx'] else act
conv = tf.layers.conv2d(x, 128, 3, 1, 'same', kernel_initializer=init, name=f'conv{lvl}_1')
act = tf.nn.leaky_relu(conv, alpha=0.1)
x = tf.concat([act, x], axis=3) if self.opts['use_dense_cx'] else act
conv = tf.layers.conv2d(x, 96, 3, 1, 'same', kernel_initializer=init, name=f'conv{lvl}_2')
act = tf.nn.leaky_relu(conv, alpha=0.1)
x = tf.concat([act, x], axis=3) if self.opts['use_dense_cx'] else act
conv = tf.layers.conv2d(x, 64, 3, 1, 'same', kernel_initializer=init, name=f'conv{lvl}_3')
act = tf.nn.leaky_relu(conv, alpha=0.1)
x = tf.concat([act, x], axis=3) if self.opts['use_dense_cx'] else act
conv = tf.layers.conv2d(x, 32, 3, 1, 'same', kernel_initializer=init, name=f'conv{lvl}_4')
act = tf.nn.leaky_relu(conv, alpha=0.1) # will also be used as an input by the context network
upfeat = tf.concat([act, x], axis=3, name=f'upfeat{lvl}') if self.opts['use_dense_cx'] else act
flow = tf.layers.conv2d(upfeat, 2, 3, 1, 'same', name=op_name)
return upfeat, flow
###
# PWC-Net context network helpers
###
def refine_flow(self, feat, flow, lvl, name='ctxt'):
"""Post-ptrocess the estimated optical flow using a "context" nn.
Args:
feat: Features of the second-to-last layer from the optical flow estimator
flow: Estimated flow to refine
lvl: Index of the level
name: Op scope name
Ref:
Per page 4 of paper, section "Context network," traditional flow methods often use contextual information
to post-process the flow. Thus we employ a sub-network, called the context network, to effectively enlarge
the receptive field size of each output unit at the desired pyramid level. It takes the estimated flow and
features of the second last layer from the optical flow estimator and outputs a refined flow.
The context network is a feed-forward CNN and its design is based on dilated convolutions. It consists of
7 convolutional layers. The spatial kernel for each convolutional layer is 3×3. These layers have different
dilation constants. A convolutional layer with a dilation constant k means that an input unit to a filter
in the layer are k-unit apart from the other input units to the filter in the layer, both in vertical and
horizontal directions. Convolutional layers with large dilation constants enlarge the receptive field of
each output unit without incurring a large computational burden. From bottom to top, the dilation constants
are 1, 2, 4, 8, 16, 1, and 1.
Ref PyTorch code:
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.LeakyReLU(0.1))
def predict_flow(in_planes):
return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True)
[...]
self.dc_conv1 = conv(od+dd[4], 128, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
self.dc_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
self.dc_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
self.dc_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
self.dc_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc_conv7 = predict_flow(32)
[...]
x = torch.cat((corr2, c12, up_flow3, up_feat3), 1)
x = torch.cat((self.conv2_0(x), x),1)
x = torch.cat((self.conv2_1(x), x),1)
x = torch.cat((self.conv2_2(x), x),1)
x = torch.cat((self.conv2_3(x), x),1)
x = torch.cat((self.conv2_4(x), x),1)
flow2 = self.predict_flow2(x)
x = self.dc_conv4(self.dc_conv3(self.dc_conv2(self.dc_conv1(x))))
flow2 += self.dc_conv7(self.dc_conv6(self.dc_conv5(x)))
"""
op_name = f'refined_flow{lvl}'
if self.dbg:
print(f'Adding {op_name} sum of dc_convs_chain({feat.op.name}) with {flow.op.name}')
init = tf.keras.initializers.he_normal()
with tf.variable_scope(name):
x = tf.layers.conv2d(feat, 128, 3, 1, 'same', dilation_rate=1, kernel_initializer=init, name=f'dc_conv{lvl}1')
x = tf.nn.leaky_relu(x, alpha=0.1) # default alpha is 0.2 for TF
x = tf.layers.conv2d(x, 128, 3, 1, 'same', dilation_rate=2, kernel_initializer=init, name=f'dc_conv{lvl}2')
x = tf.nn.leaky_relu(x, alpha=0.1)
x = tf.layers.conv2d(x, 128, 3, 1, 'same', dilation_rate=4, kernel_initializer=init, name=f'dc_conv{lvl}3')
x = tf.nn.leaky_relu(x, alpha=0.1)
x = tf.layers.conv2d(x, 96, 3, 1, 'same', dilation_rate=8, kernel_initializer=init, name=f'dc_conv{lvl}4')
x = tf.nn.leaky_relu(x, alpha=0.1)
x = tf.layers.conv2d(x, 64, 3, 1, 'same', dilation_rate=16, kernel_initializer=init, name=f'dc_conv{lvl}5')
x = tf.nn.leaky_relu(x, alpha=0.1)
x = tf.layers.conv2d(x, 32, 3, 1, 'same', dilation_rate=1, kernel_initializer=init, name=f'dc_conv{lvl}6')
x = tf.nn.leaky_relu(x, alpha=0.1)
x = tf.layers.conv2d(x, 2, 3, 1, 'same', dilation_rate=1, kernel_initializer=init, name=f'dc_conv{lvl}7')
return tf.add(flow, x, name=op_name)
###
# PWC-Net nn builder
###
    def nn(self, x_tnsr, name='pwcnet'):
        """Defines and connects the backbone neural nets
        Args:
            x_tnsr: TF placeholder that contains the input frame pairs in [batch_size, 2, H, W, 3] format
            name: Name of the nn
        Returns:
            flow_pred: Final predicted flow, bilinearly upsampled to the input image size
            flow_pyr: List of the intermediate (and final) per-level flow predictions, coarsest first
        Ref:
            RE: the scaling of the upsampled estimated optical flow, per page 5, section "Implementation details," we
            do not further scale the supervision signal at each level, the same as the FlowNet paper. As a result, we
            need to scale the upsampled flow at each pyramid level for the warping layer. For example, at the second
            level, we scale the upsampled flow from the third level by a factor of 5 (=20/4) before warping features
            of the second image.
        Based on:
            - https://github.com/daigo0927/PWC-Net_tf/blob/master/model.py
            Written by Daigo Hirooka, Copyright (c) 2018 Daigo Hirooka
            MIT License
        """
        with tf.variable_scope(name):
            # Extract pyramids of CNN features from both input images (1-based lists)
            c1, c2 = self.extract_features(x_tnsr)
            flow_pyr = []
            # Coarse-to-fine: from the coarsest level down to the prediction level,
            # e.g. pyr_lvls=6, flow_pred_lvl=2 gives lvl = 6, 5, 4, 3, 2.
            # NOTE(review): assumes flow_pred_lvl <= pyr_lvls; otherwise the loop body
            # never runs and flow_pred would be unbound -- confirm against the options.
            for lvl in range(self.opts['pyr_lvls'], self.opts['flow_pred_lvl'] - 1, -1):
                if lvl == self.opts['pyr_lvls']:  # first (coarsest) iteration: no coarser flow exists yet
                    # Compute the cost volume directly between the two feature pyramids
                    corr = self.corr(c1[lvl], c2[lvl], lvl)
                    # Estimate the optical flow (no c1/up_flow/up_feat inputs at this level)
                    upfeat, flow = self.predict_flow(corr, None, None, None, lvl)
                else:
                    # Warp Image2's feature level toward Image1 using the scaled, upsampled flow;
                    # up_flow/up_feat were set at the end of the previous (coarser) iteration
                    scaler = 20. / 2**lvl  # scaler values are 0.625, 1.25, 2.5, 5.0
                    warp = self.warp(c2[lvl], up_flow * scaler, lvl)
                    # Compute the cost volume against the warped features
                    corr = self.corr(c1[lvl], warp, lvl)
                    # Estimate the optical flow
                    upfeat, flow = self.predict_flow(corr, c1[lvl], up_flow, up_feat, lvl)
                # Dynamic spatial size of this level (needed for the final upsampling)
                _, lvl_height, lvl_width, _ = tf.unstack(tf.shape(c1[lvl]))
                if lvl != self.opts['flow_pred_lvl']:  # not final iteration
                    if self.opts['use_res_cx']:
                        flow = self.refine_flow(upfeat, flow, lvl)
                    # Upsample predicted flow and the features used to compute predicted flow
                    flow_pyr.append(flow)
                    up_flow = self.deconv(flow, lvl, 'up_flow')
                    up_feat = self.deconv(upfeat, lvl, 'up_feat')
                else:
                    # Refine the final predicted flow with the context network
                    flow = self.refine_flow(upfeat, flow, lvl)
                    flow_pyr.append(flow)
                    # Upsample the predicted flow (final output) to match the size of the images;
                    # the flow values are scaled by the same factor as the spatial upsampling
                    scaler = 2**self.opts['flow_pred_lvl']
                    if self.dbg:
                        print(f'Upsampling {flow.op.name} by {scaler} in each dimension.')
                    size = (lvl_height * scaler, lvl_width * scaler)
                    flow_pred = tf.image.resize_bilinear(flow, size, name="flow_pred") * scaler
                    break
            return flow_pred, flow_pyr
| 54.096356 | 162 | 0.593638 |
73ee18b3fa5432773dd7b9ae20f4030af799d424 | 140 | py | Python | src/typeDefs/bay.py | nagasudhirpulla/wrldc_codebook | 8fbc795074e16e2012b29ae875b99aa721a7f021 | [
"MIT"
] | null | null | null | src/typeDefs/bay.py | nagasudhirpulla/wrldc_codebook | 8fbc795074e16e2012b29ae875b99aa721a7f021 | [
"MIT"
] | 21 | 2021-01-08T18:03:32.000Z | 2021-02-02T16:17:34.000Z | src/typeDefs/bay.py | nagasudhirpulla/wrldc_codebook | 8fbc795074e16e2012b29ae875b99aa721a7f021 | [
"MIT"
] | null | null | null | from src.typeDefs.element import IElement
class IBay(IElement):
    """Typed record describing a bay, extending the base element fields."""
    bayNumber: str
    stationName: str
    bayType: str
    voltage: str
| 15.555556 | 41 | 0.707143 |
73ee26638f821fa91e901fa44be61af5dd314396 | 4,649 | py | Python | tests/python/pants_test/backend/jvm/tasks/test_jvm_platform_analysis_integration.py | ghthor/pants | 450de702414f87f563081ddefaefd8a554de07a3 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/jvm/tasks/test_jvm_platform_analysis_integration.py | ghthor/pants | 450de702414f87f563081ddefaefd8a554de07a3 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/jvm/tasks/test_jvm_platform_analysis_integration.py | ghthor/pants | 450de702414f87f563081ddefaefd8a554de07a3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from contextlib import contextmanager
from textwrap import dedent
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class JvmPlatformAnalysisIntegrationTest(PantsRunIntegrationTest):
"""Make sure jvm-platform-analysis runs properly, especially with respect to caching behavior."""
class JavaSandbox(object):
"""Testing sandbox for making temporary java_library targets."""
def __init__(self, test, workdir, javadir):
self.javadir = javadir
self.workdir = workdir
self.test = test
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
@property
def build_file_path(self):
return os.path.join(self.javadir, 'BUILD')
def write_build_file(self, contents):
with open(self.build_file_path, 'w') as f:
f.write(contents)
def spec(self, name):
return '{}:{}'.format(self.javadir, name)
def clean_all(self):
return self.test.run_pants_with_workdir(['clean-all'], workdir=self.workdir)
def jvm_platform_validate(self, *targets):
return self.test.run_pants_with_workdir(['jvm-platform-validate', '--check=fatal']
+ map(self.spec, targets),
workdir=self.workdir)
@contextmanager
def setup_sandbox(self):
with temporary_dir('.') as sourcedir:
with self.temporary_workdir() as workdir:
javadir = os.path.join(sourcedir, 'src', 'java')
os.makedirs(javadir)
yield self.JavaSandbox(self, workdir, javadir)
@property
def _good_one_two(self):
return dedent("""
java_library(name='one',
platform='1.7',
)
java_library(name='two',
platform='1.8',
)
""")
@property
def _bad_one_two(self):
return dedent("""
java_library(name='one',
platform='1.7',
dependencies=[':two'],
)
java_library(name='two',
platform='1.8',
)
""")
def test_good_targets_works_fresh(self):
with self.setup_sandbox() as sandbox:
sandbox.write_build_file(self._good_one_two)
self.assert_success(sandbox.clean_all())
self.assert_success(sandbox.jvm_platform_validate('one', 'two'))
def test_bad_targets_fails_fresh(self):
with self.setup_sandbox() as sandbox:
sandbox.write_build_file(self._bad_one_two)
self.assert_success(sandbox.clean_all())
self.assert_failure(sandbox.jvm_platform_validate('one', 'two'))
def test_good_then_bad(self):
with self.setup_sandbox() as sandbox:
sandbox.write_build_file(self._good_one_two)
self.assert_success(sandbox.clean_all())
self.assert_success(sandbox.jvm_platform_validate('one', 'two'))
sandbox.write_build_file(self._bad_one_two)
self.assert_failure(sandbox.jvm_platform_validate('one', 'two'))
def test_bad_then_good(self):
    # A previous failure must not poison a subsequently-fixed BUILD file.
    with self.setup_sandbox() as sandbox:
        sandbox.write_build_file(self._bad_one_two)
        self.assert_success(sandbox.clean_all())
        self.assert_failure(sandbox.jvm_platform_validate('one', 'two'))
        sandbox.write_build_file(self._good_one_two)
        self.assert_success(sandbox.jvm_platform_validate('one', 'two'))
def test_good_caching(self):
    # Make sure targets are cached after a good run.
    with self.setup_sandbox() as sandbox:
        sandbox.write_build_file(self._good_one_two)
        self.assert_success(sandbox.clean_all())
        first_run = sandbox.jvm_platform_validate('one', 'two')
        self.assert_success(first_run)
        self.assertIn('Invalidated 2 targets', first_run.stdout_data)
        # Second run should hit the cache: nothing gets invalidated.
        second_run = sandbox.jvm_platform_validate('one', 'two')
        self.assert_success(second_run)
        self.assertNotIn('Invalidated 2 targets', second_run.stdout_data)
def test_bad_caching(self):
    # Make sure targets aren't cached after a bad run.
    with self.setup_sandbox() as sandbox:
        sandbox.write_build_file(self._bad_one_two)
        self.assert_success(sandbox.clean_all())
        first_run = sandbox.jvm_platform_validate('one', 'two')
        self.assert_failure(first_run)
        self.assertIn('Invalidated 2 targets', first_run.stdout_data)
        # A failed validation must not be cached: both runs re-invalidate.
        second_run = sandbox.jvm_platform_validate('one', 'two')
        self.assert_failure(second_run)
        self.assertIn('Invalidated 2 targets', second_run.stdout_data)
| 35.48855 | 99 | 0.697139 |
73ee2ef8531de75a1cad0058cfade27b3af3e718 | 96,293 | py | Python | qiskit/circuit/quantumcircuit.py | nkanazawa1989/qiskit-terra | 88267be7228e5d09533e4e2d8bbccfcafa6f2e8c | [
"Apache-2.0"
] | 1 | 2019-06-04T12:23:36.000Z | 2019-06-04T12:23:36.000Z | qiskit/circuit/quantumcircuit.py | nkanazawa1989/qiskit-terra | 88267be7228e5d09533e4e2d8bbccfcafa6f2e8c | [
"Apache-2.0"
] | 35 | 2019-03-07T02:09:22.000Z | 2022-03-22T19:55:15.000Z | qiskit/circuit/quantumcircuit.py | nkanazawa1989/qiskit-terra | 88267be7228e5d09533e4e2d8bbccfcafa6f2e8c | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Quantum circuit object."""
import copy
import itertools
import sys
import warnings
import numbers
import multiprocessing as mp
from collections import OrderedDict, defaultdict
import numpy as np
from qiskit.exceptions import QiskitError
from qiskit.util import is_main_process
from qiskit.circuit.instruction import Instruction
from qiskit.circuit.gate import Gate
from qiskit.qasm.qasm import Qasm
from qiskit.circuit.exceptions import CircuitError
from .parameterexpression import ParameterExpression
from .quantumregister import QuantumRegister, Qubit, AncillaRegister
from .classicalregister import ClassicalRegister, Clbit
from .parametertable import ParameterTable
from .parametervector import ParameterVector
from .instructionset import InstructionSet
from .register import Register
from .bit import Bit
from .quantumcircuitdata import QuantumCircuitData
# Optional dependency: pygments enables syntax-highlighted OpenQASM output.
# The broad except is deliberate — any import failure simply disables
# highlighting rather than breaking circuit construction.
try:
    import pygments
    from pygments.formatters import Terminal256Formatter  # pylint: disable=no-name-in-module
    from qiskit.qasm.pygments import OpenQASMLexer  # pylint: disable=ungrouped-imports
    from qiskit.qasm.pygments import QasmTerminalStyle  # pylint: disable=ungrouped-imports
    HAS_PYGMENTS = True
except Exception:  # pylint: disable=broad-except
    HAS_PYGMENTS = False
class QuantumCircuit:
"""Create a new circuit.
A circuit is a list of instructions bound to some registers.
Args:
regs: list(:class:`Register`) or list(``int``) The registers to be
included in the circuit.
* If a list of :class:`Register` objects, represents the :class:`QuantumRegister`
and/or :class:`ClassicalRegister` objects to include in the circuit.
For example:
* ``QuantumCircuit(QuantumRegister(4))``
* ``QuantumCircuit(QuantumRegister(4), ClassicalRegister(3))``
* ``QuantumCircuit(QuantumRegister(4, 'qr0'), QuantumRegister(2, 'qr1'))``
* If a list of ``int``, the amount of qubits and/or classical bits to include in
the circuit. It can either be a single int for just the number of quantum bits,
or 2 ints for the number of quantum bits and classical bits, respectively.
For example:
* ``QuantumCircuit(4) # A QuantumCircuit with 4 qubits``
* ``QuantumCircuit(4, 3) # A QuantumCircuit with 4 qubits and 3 classical bits``
name (str): the name of the quantum circuit. If not set, an
automatically generated string will be assigned.
global_phase (float): The global phase of the circuit in radians.
Raises:
CircuitError: if the circuit name, if given, is not valid.
Examples:
Construct a simple Bell state circuit.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
qc.draw()
Construct a 5-qubit GHZ circuit.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(5)
qc.h(0)
qc.cx(0, range(1, 5))
qc.measure_all()
Construct a 4-qubit Berstein-Vazirani circuit using registers.
.. jupyter-execute::
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
qr = QuantumRegister(3, 'q')
anc = QuantumRegister(1, 'ancilla')
cr = ClassicalRegister(3, 'c')
qc = QuantumCircuit(qr, anc, cr)
qc.x(anc[0])
qc.h(anc[0])
qc.h(qr[0:3])
qc.cx(qr[0:3], anc[0])
qc.h(qr[0:3])
qc.barrier(qr)
qc.measure(qr, cr)
qc.draw()
"""
# Running count of QuantumCircuit instances, used for auto-generated names.
instances = 0
# Prefix used when auto-generating a circuit name.
prefix = 'circuit'

# Class variable OPENQASM header
header = "OPENQASM 2.0;"
# Standard OpenQASM 2.0 include line emitted at the top of qasm() output.
extension_lib = "include \"qelib1.inc\";"
def __init__(self, *regs, name=None, global_phase=0):
    """Initialize the circuit's registers, data list, and bookkeeping state.

    Args:
        *regs: ``Register`` objects, or ints castable to register sizes.
        name (str): optional circuit name; auto-generated when ``None``.
        global_phase (float): global phase of the circuit in radians.

    Raises:
        CircuitError: if ``regs`` is invalid or ``name`` is not a string.
    """
    if any([not isinstance(reg, (QuantumRegister, ClassicalRegister)) for reg in regs]):
        # Mixed/non-register args: try to interpret every arg as a bit count.
        try:
            regs = tuple(int(reg) for reg in regs)
        except Exception:
            # Fixed message: the old concatenation was missing a space and
            # rendered as "...castable to an int(...)".
            raise CircuitError("Circuit args must be Registers or be castable to an int "
                               "(%s '%s' was provided)"
                               % ([type(reg).__name__ for reg in regs], regs))

    if name is None:
        name = self.cls_prefix() + str(self.cls_instances())
        # Avoid name collisions between circuits built in forked subprocesses.
        if sys.platform != "win32" and not is_main_process():
            name += '-{}'.format(mp.current_process().pid)
    self._increment_instances()

    if not isinstance(name, str):
        raise CircuitError("The circuit name should be a string "
                           "(or None to auto-generate a name).")

    self.name = name

    # Data contains a list of instructions and their contexts,
    # in the order they were applied.
    self._data = []

    # This is a map of registers bound to this circuit, by name.
    self.qregs = []
    self.cregs = []
    self._qubits = []
    self._clbits = []
    self._ancillas = []
    self._calibrations = defaultdict(dict)
    self.add_register(*regs)

    # Parameter table tracks instructions with variable parameters.
    self._parameter_table = ParameterTable()

    self._layout = None
    # Assign through the property so the phase is normalized/validated.
    self._global_phase = 0
    self.global_phase = global_phase
@property
def data(self):
    """Return the circuit data (instructions and context).

    Returns:
        QuantumCircuitData: a list-like object containing the tuples for the circuit's data.

        Each tuple is in the format ``(instruction, qargs, cargs)``, where instruction is an
        Instruction (or subclass) object, qargs is a list of Qubit objects, and cargs is a
        list of Clbit objects.
    """
    # QuantumCircuitData wraps this circuit: it is a live view, not a copy.
    return QuantumCircuitData(self)
@property
def calibrations(self):
    """Return calibration dictionary.

    The custom pulse definition of a given gate is of the form
    ``{'gate_name': {(qubits, params): schedule}}``.
    """
    # Shallow-copy into a plain dict so callers cannot add keys to the
    # internal defaultdict by mere lookup.
    return dict(self._calibrations)
@data.setter
def data(self, data_input):
    """Sets the circuit data from a list of instructions and context.

    Args:
        data_input (list): A list of instructions with context
            in the format (instruction, qargs, cargs), where Instruction
            is an Instruction (or subclass) object, qargs is a list of
            Qubit objects, and cargs is a list of Clbit objects.
    """

    # If data_input is QuantumCircuitData(self), clearing self._data
    # below will also empty data_input, so make a shallow copy first.
    data_input = data_input.copy()
    self._data = []
    # Re-appending each instruction rebuilds the parameter table from scratch.
    self._parameter_table = ParameterTable()

    for inst, qargs, cargs in data_input:
        self.append(inst, qargs, cargs)
def __str__(self):
    # Render the circuit with the ASCII-art ('text') drawer.
    return str(self.draw(output='text'))
def __eq__(self, other):
    # Equality is structural: both circuits are converted to DAG form and
    # compared there. NOTE(review): the precise semantics (e.g. treatment of
    # op ordering on disjoint wires) are defined by DAGCircuit.__eq__ — see there.
    if not isinstance(other, QuantumCircuit):
        return False

    # TODO: remove the DAG from this function
    from qiskit.converters import circuit_to_dag
    return circuit_to_dag(self) == circuit_to_dag(other)
@classmethod
def _increment_instances(cls):
    # Bump the class-wide instance counter (feeds auto-generated names).
    cls.instances += 1
@classmethod
def cls_instances(cls):
    """Return the current number of instances of this class,
    useful for auto naming."""
    return cls.instances
@classmethod
def cls_prefix(cls):
    """Return the prefix to use for auto naming."""
    return cls.prefix
def has_register(self, register):
    """Test whether this circuit contains the given register.

    Args:
        register (Register): a quantum or classical register.

    Returns:
        bool: True if the register is contained in this circuit.
    """
    # Dispatch on the register kind, then check membership in the
    # corresponding register list; anything else is not contained.
    if isinstance(register, QuantumRegister):
        return register in self.qregs
    if isinstance(register, ClassicalRegister):
        return register in self.cregs
    return False
def mirror(self):
    """DEPRECATED: use circuit.reverse_ops().

    Returns:
        QuantumCircuit: the reversed circuit.
    """
    # Thin deprecated alias: warn, then delegate to reverse_ops().
    warnings.warn('circuit.mirror() is deprecated. Use circuit.reverse_ops() to '
                  'reverse the order of gates.', DeprecationWarning)
    return self.reverse_ops()
def reverse_ops(self):
    """Reverse the circuit by reversing the order of instructions.

    This is done by recursively reversing all instructions.
    It does not invert (adjoint) any gate.

    Returns:
        QuantumCircuit: the reversed circuit.

    Examples:

        input:
             ┌───┐
        q_0: ┤ H ├─────■──────
             └───┘┌────┴─────┐
        q_1: ─────┤ RX(1.57) ├
                  └──────────┘

        output:
                         ┌───┐
        q_0: ─────■──────┤ H ├
             ┌────┴─────┐└───┘
        q_1: ┤ RX(1.57) ├─────
             └──────────┘
    """
    reverse_circ = QuantumCircuit(*self.qregs, *self.cregs,
                                  name=self.name + '_reverse')

    # _append skips broadcasting/validation; safe here because qargs/cargs
    # come straight from an already-valid circuit.
    for inst, qargs, cargs in reversed(self.data):
        reverse_circ._append(inst.reverse_ops(), qargs, cargs)
    return reverse_circ
def reverse_bits(self):
    """Return a circuit with the opposite order of wires.

    The circuit is "vertically" flipped. If a circuit is
    defined over multiple registers, the resulting circuit will have
    the same registers but with their order flipped.

    This method is useful for converting a circuit written in little-endian
    convention to the big-endian equivalent, and vice versa.

    Returns:
        QuantumCircuit: the circuit with reversed bit order.

    Examples:

        input:
             ┌───┐
        q_0: ┤ H ├─────■──────
             └───┘┌────┴─────┐
        q_1: ─────┤ RX(1.57) ├
                  └──────────┘

        output:
                  ┌──────────┐
        q_0: ─────┤ RX(1.57) ├
             ┌───┐└────┬─────┘
        q_1: ┤ H ├─────■──────
             └───┘
    """
    circ = QuantumCircuit(*reversed(self.qregs), *reversed(self.cregs),
                          name=self.name)
    num_qubits = self.num_qubits
    num_clbits = self.num_clbits
    old_qubits = self.qubits
    old_clbits = self.clbits
    new_qubits = circ.qubits
    new_clbits = circ.clbits

    for inst, qargs, cargs in self.data:
        # Map each bit at position i onto position (size - i - 1) in the
        # mirrored bit ordering of the new circuit.
        new_qargs = [new_qubits[num_qubits - old_qubits.index(q) - 1] for q in qargs]
        new_cargs = [new_clbits[num_clbits - old_clbits.index(c) - 1] for c in cargs]
        circ._append(inst, new_qargs, new_cargs)
    return circ
def inverse(self):
    """Invert (take adjoint of) this circuit.

    This is done by recursively inverting all gates.

    Returns:
        QuantumCircuit: the inverted circuit

    Raises:
        CircuitError: if the circuit cannot be inverted.

    Examples:

        input:
             ┌───┐
        q_0: ┤ H ├─────■──────
             └───┘┌────┴─────┐
        q_1: ─────┤ RX(1.57) ├
                  └──────────┘

        output:
                          ┌───┐
        q_0: ──────■──────┤ H ├
             ┌─────┴─────┐└───┘
        q_1: ┤ RX(-1.57) ├─────
             └───────────┘
    """
    # The adjoint negates the global phase and applies the instruction
    # inverses in reverse order.
    inverse_circ = QuantumCircuit(*self.qregs, *self.cregs,
                                  name=self.name + '_dg', global_phase=-self.global_phase)

    for inst, qargs, cargs in reversed(self._data):
        inverse_circ._append(inst.inverse(), qargs, cargs)
    return inverse_circ
def repeat(self, reps):
    """Repeat this circuit ``reps`` times.

    Args:
        reps (int): How often this circuit should be repeated.

    Returns:
        QuantumCircuit: A circuit containing ``reps`` repetitions of this circuit.
    """
    repeated_circ = QuantumCircuit(*self.qregs, *self.cregs,
                                   name=self.name + '**{}'.format(reps),
                                   global_phase=reps * self.global_phase)

    # benefit of appending instructions: decomposing shows the subparts, i.e. the power
    # is actually `reps` times this circuit, and it is currently much faster than `compose`.
    if reps > 0:
        try:  # try to append as gate if possible to not disallow to_gate
            inst = self.to_gate()
        except QiskitError:
            # Fall back for circuits containing non-unitary operations.
            inst = self.to_instruction()
        for _ in range(reps):
            repeated_circ._append(inst, self.qubits, self.clbits)

    return repeated_circ
def power(self, power, matrix_power=False):
    """Raise this circuit to the power of ``power``.

    If ``power`` is a positive integer and ``matrix_power`` is ``False``, this implementation
    defaults to calling ``repeat``. Otherwise, if the circuit is unitary, the matrix is
    computed to calculate the matrix power.

    Args:
        power (int): The power to raise this circuit to.
        matrix_power (bool): If True, the circuit is converted to a matrix and then the
            matrix power is computed. If False, and ``power`` is a positive integer,
            the implementation defaults to ``repeat``.

    Raises:
        CircuitError: If the circuit needs to be converted to a gate but it is not unitary.

    Returns:
        QuantumCircuit: A circuit implementing this circuit raised to the power of ``power``.
    """
    # Non-negative integer powers are implemented structurally by repetition.
    if power >= 0 and isinstance(power, numbers.Integral) and not matrix_power:
        return self.repeat(power)

    # attempt conversion to gate
    if self.parameters:
        raise CircuitError('Cannot raise a parameterized circuit to a non-positive power '
                           'or matrix-power, please bind the free parameters: '
                           '{}'.format(self.parameters))

    try:
        gate = self.to_gate()
    except QiskitError as ex:
        # Chain the original exception so the root cause stays visible.
        raise CircuitError('The circuit contains non-unitary operations and cannot be '
                           'controlled. Note that no qiskit.circuit.Instruction objects may '
                           'be in the circuit for this operation.') from ex

    power_circuit = QuantumCircuit(*self.qregs, *self.cregs)
    power_circuit.append(gate.power(power), list(range(gate.num_qubits)))
    return power_circuit
def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):
    """Control this circuit on ``num_ctrl_qubits`` qubits.

    Args:
        num_ctrl_qubits (int): The number of control qubits.
        label (str): An optional label to give the controlled operation for visualization.
        ctrl_state (str or int): The control state in decimal or as a bitstring
            (e.g. '111'). If None, use ``2**num_ctrl_qubits - 1``.

    Returns:
        QuantumCircuit: The controlled version of this circuit.

    Raises:
        CircuitError: If the circuit contains a non-unitary operation and cannot be controlled.
    """
    # Only unitary circuits can be controlled, so gate conversion must succeed.
    try:
        gate = self.to_gate()
    except QiskitError:
        # NOTE(review): consider `raise ... from` to preserve the traceback.
        raise CircuitError('The circuit contains non-unitary operations and cannot be '
                           'controlled. Note that no qiskit.circuit.Instruction objects may '
                           'be in the circuit for this operation.')

    controlled_gate = gate.control(num_ctrl_qubits, label, ctrl_state)
    # The control qubits are prepended as a fresh register.
    control_qreg = QuantumRegister(num_ctrl_qubits)
    controlled_circ = QuantumCircuit(control_qreg, *self.qregs,
                                     name='c_{}'.format(self.name))
    controlled_circ.append(controlled_gate, controlled_circ.qubits)

    return controlled_circ
def combine(self, rhs):
    """Append rhs to self if self contains compatible registers.

    Two circuits are compatible if they contain the same registers
    or if they contain different registers with unique names. The
    returned circuit will contain all unique registers between both
    circuits.

    Return self + rhs as a new object.

    Args:
        rhs (QuantumCircuit): The quantum circuit to append to the right hand side.

    Returns:
        QuantumCircuit: Returns a new QuantumCircuit object

    Raises:
        QiskitError: if the rhs circuit is not compatible
    """
    # Check registers in LHS are compatible with RHS
    self._check_compatible_regs(rhs)

    # Make new circuit with combined registers
    # (deepcopy so the new circuit does not share register objects' state)
    combined_qregs = copy.deepcopy(self.qregs)
    combined_cregs = copy.deepcopy(self.cregs)

    for element in rhs.qregs:
        if element not in self.qregs:
            combined_qregs.append(element)

    for element in rhs.cregs:
        if element not in self.cregs:
            combined_cregs.append(element)
    circuit = QuantumCircuit(*combined_qregs, *combined_cregs)
    for instruction_context in itertools.chain(self.data, rhs.data):
        circuit._append(*instruction_context)
    # Global phases are additive under composition.
    circuit.global_phase = self.global_phase + rhs.global_phase
    return circuit
def extend(self, rhs):
    """Append QuantumCircuit to the right hand side if it contains compatible registers.

    Two circuits are compatible if they contain the same registers
    or if they contain different registers with unique names. The
    returned circuit will contain all unique registers between both
    circuits.

    Modify and return self.

    Args:
        rhs (QuantumCircuit): The quantum circuit to append to the right hand side.

    Returns:
        QuantumCircuit: Returns this QuantumCircuit object (which has been modified)

    Raises:
        QiskitError: if the rhs circuit is not compatible
    """
    # Check registers in LHS are compatible with RHS
    self._check_compatible_regs(rhs)

    # Add new registers
    for element in rhs.qregs:
        if element not in self.qregs:
            self.qregs.append(element)

    for element in rhs.cregs:
        if element not in self.cregs:
            self.cregs.append(element)

    # Copy the circuit data if rhs and self are the same, otherwise the data of rhs is
    # appended to both self and rhs resulting in an infinite loop
    data = rhs.data.copy() if rhs is self else rhs.data

    # Add new gates
    for instruction_context in data:
        self._append(*instruction_context)
    self.global_phase += rhs.global_phase
    return self
def compose(self, other, qubits=None, clbits=None, front=False, inplace=False):
    """Compose circuit with ``other`` circuit or instruction, optionally permuting wires.

    ``other`` can be narrower or of equal width to ``self``.

    Args:
        other (qiskit.circuit.Instruction or QuantumCircuit or BaseOperator):
            (sub)circuit to compose onto self.
        qubits (list[Qubit|int]): qubits of self to compose onto.
        clbits (list[Clbit|int]): clbits of self to compose onto.
        front (bool): If True, front composition will be performed (not implemented yet).
        inplace (bool): If True, modify the object. Otherwise return composed circuit.

    Returns:
        QuantumCircuit: the composed circuit (returns None if inplace==True).

    Raises:
        CircuitError: if composing on the front.
        QiskitError: if ``other`` is wider or there are duplicate edge mappings.

    Examples:

        >>> lhs.compose(rhs, qubits=[3, 2], inplace=True)

        .. parsed-literal::

                        ┌───┐                   ┌─────┐                ┌───┐
            lqr_1_0: ───┤ H ├───    rqr_0: ──■──┤ Tdg ├    lqr_1_0: ───┤ H ├───────────────
                        ├───┤              ┌─┴─┐└─────┘                ├───┤
            lqr_1_1: ───┤ X ├───    rqr_1: ┤ X ├───────    lqr_1_1: ───┤ X ├───────────────
                     ┌──┴───┴──┐           └───┘                    ┌──┴───┴──┐┌───┐
            lqr_1_2: ┤ U1(0.1) ├  +                     =  lqr_1_2: ┤ U1(0.1) ├┤ X ├───────
                     └─────────┘                                    └─────────┘└─┬─┘┌─────┐
            lqr_2_0: ─────■─────                           lqr_2_0: ─────■───────■──┤ Tdg ├
                        ┌─┴─┐                                          ┌─┴─┐        └─────┘
            lqr_2_1: ───┤ X ├───                           lqr_2_1: ───┤ X ├───────────────
                        └───┘                                          └───┘
            lcr_0: 0 ═══════════                           lcr_0: 0 ═══════════════════════
            lcr_1: 0 ═══════════                           lcr_1: 0 ═══════════════════════
    """
    if inplace:
        dest = self
    else:
        dest = self.copy()

    # Non-circuit operands are appended directly (no wire remapping needed).
    if not isinstance(other, QuantumCircuit):
        if front:
            dest.data.insert(0, (other, qubits, clbits))
        else:
            dest.append(other, qargs=qubits, cargs=clbits)

        if inplace:
            return None
        return dest

    instrs = other.data

    if other.num_qubits > self.num_qubits or \
            other.num_clbits > self.num_clbits:
        raise CircuitError("Trying to compose with another QuantumCircuit "
                           "which has more 'in' edges.")

    # number of qubits and clbits must match number in circuit or None
    identity_qubit_map = dict(zip(other.qubits, self.qubits))
    identity_clbit_map = dict(zip(other.clbits, self.clbits))

    if qubits is None:
        qubit_map = identity_qubit_map
    elif len(qubits) != len(other.qubits):
        raise CircuitError("Number of items in qubits parameter does not"
                           " match number of qubits in the circuit.")
    else:
        # Integer entries index into self.qubits; Qubit entries are used as-is.
        qubit_map = {other.qubits[i]: (self.qubits[q] if isinstance(q, int) else q)
                     for i, q in enumerate(qubits)}

    if clbits is None:
        clbit_map = identity_clbit_map
    elif len(clbits) != len(other.clbits):
        raise CircuitError("Number of items in clbits parameter does not"
                           " match number of clbits in the circuit.")
    else:
        clbit_map = {other.clbits[i]: (self.clbits[c] if isinstance(c, int) else c)
                     for i, c in enumerate(clbits)}

    edge_map = {**qubit_map, **clbit_map} or {**identity_qubit_map, **identity_clbit_map}

    mapped_instrs = []
    for instr, qargs, cargs in instrs:
        n_qargs = [edge_map[qarg] for qarg in qargs]
        n_cargs = [edge_map[carg] for carg in cargs]
        n_instr = instr.copy()

        if instr.condition is not None:
            # Classical conditions must be remapped onto dest's clbits too.
            from qiskit.dagcircuit import DAGCircuit  # pylint: disable=cyclic-import
            n_instr.condition = DAGCircuit._map_condition(edge_map, instr.condition)

        mapped_instrs.append((n_instr, n_qargs, n_cargs))

    if front:
        dest._data = mapped_instrs + dest._data
    else:
        dest._data += mapped_instrs

    # Keep the parameter table in sync with the instructions just inserted.
    for instr, _, _ in mapped_instrs:
        dest._update_parameter_table(instr)

    dest.global_phase += other.global_phase

    if inplace:
        return None

    return dest
@property
def qubits(self):
    """
    Returns a list of quantum bits in the order that the registers were added.
    """
    return self._qubits
@property
def clbits(self):
    """
    Returns a list of classical bits in the order that the registers were added.
    """
    return self._clbits
@property
def ancillas(self):
    """
    Returns a list of ancilla bits in the order that the registers were added.
    """
    return self._ancillas
def __add__(self, rhs):
    """Overload + to implement self.combine."""
    return self.combine(rhs)
def __iadd__(self, rhs):
    """Overload += to implement self.extend."""
    return self.extend(rhs)
def __len__(self):
    """Return number of operations in circuit."""
    return len(self._data)
def __getitem__(self, item):
    """Return indexed operation."""
    return self._data[item]
@staticmethod
def cast(value, _type):
"""Best effort to cast value to type. Otherwise, returns the value."""
try:
return _type(value)
except (ValueError, TypeError):
return value
@staticmethod
def _bit_argument_conversion(bit_representation, in_array):
    """Expand a bit/register/index/slice/list representation into a list of Bits.

    Args:
        bit_representation: a Bit, Register, int, slice, or a list/range of
            ints and/or Bits (ints are resolved against ``in_array``).
        in_array (list): the circuit's bit list used to resolve integer indices.

    Returns:
        list: the resolved Bit objects.

    Raises:
        CircuitError: on out-of-range indices, type errors, or input that
            cannot be expanded.
    """
    ret = None
    try:
        if isinstance(bit_representation, Bit):
            # circuit.h(qr[0]) -> circuit.h([qr[0]])
            ret = [bit_representation]
        elif isinstance(bit_representation, Register):
            # circuit.h(qr) -> circuit.h([qr[0], qr[1]])
            ret = bit_representation[:]
        elif isinstance(QuantumCircuit.cast(bit_representation, int), int):
            # circuit.h(0) -> circuit.h([qr[0]])
            ret = [in_array[bit_representation]]
        elif isinstance(bit_representation, slice):
            # circuit.h(slice(0,2)) -> circuit.h([qr[0], qr[1]])
            ret = in_array[bit_representation]
        elif isinstance(bit_representation, list) and \
                all(isinstance(bit, Bit) for bit in bit_representation):
            # circuit.h([qr[0], qr[1]]) -> circuit.h([qr[0], qr[1]])
            ret = bit_representation
        elif isinstance(QuantumCircuit.cast(bit_representation, list), (range, list)):
            # circuit.h([0, 1]) -> circuit.h([qr[0], qr[1]])
            # circuit.h(range(0,2)) -> circuit.h([qr[0], qr[1]])
            # circuit.h([qr[0],1]) -> circuit.h([qr[0], qr[1]])
            ret = [index if isinstance(index, Bit) else in_array[
                index] for index in bit_representation]
        else:
            raise CircuitError('Not able to expand a %s (%s)' % (bit_representation,
                                                                 type(bit_representation)))
    except IndexError:
        raise CircuitError('Index out of range.')
    except TypeError:
        raise CircuitError('Type error handling %s (%s)' % (bit_representation,
                                                            type(bit_representation)))
    return ret
def qbit_argument_conversion(self, qubit_representation):
    """
    Converts several qubit representations (such as indexes, range, etc.)
    into a list of qubits.

    Args:
        qubit_representation (Object): representation to expand

    Returns:
        List(tuple): Where each tuple is a qubit.
    """
    # Integer indices resolve against this circuit's qubit list.
    return QuantumCircuit._bit_argument_conversion(qubit_representation, self.qubits)
def cbit_argument_conversion(self, clbit_representation):
    """
    Converts several classical bit representations (such as indexes, range, etc.)
    into a list of classical bits.

    Args:
        clbit_representation (Object): representation to expand

    Returns:
        List(tuple): Where each tuple is a classical bit.
    """
    # Integer indices resolve against this circuit's clbit list.
    return QuantumCircuit._bit_argument_conversion(clbit_representation, self.clbits)
def append(self, instruction, qargs=None, cargs=None):
    """Append one or more instructions to the end of the circuit, modifying
    the circuit in place. Expands qargs and cargs.

    Args:
        instruction (qiskit.circuit.Instruction): Instruction instance to append
        qargs (list(argument)): qubits to attach instruction to
        cargs (list(argument)): clbits to attach instruction to

    Returns:
        qiskit.circuit.Instruction: a handle to the instruction that was just added

    Raises:
        CircuitError: if object passed is a subclass of Instruction
        CircuitError: if object passed is neither subclass nor an instance of Instruction
    """
    # Convert input to instruction
    if not isinstance(instruction, Instruction) and not hasattr(instruction, 'to_instruction'):
        if issubclass(instruction, Instruction):
            # Common mistake: passing the class (e.g. HGate) instead of HGate().
            raise CircuitError('Object is a subclass of Instruction, please add () to '
                               'pass an instance of this object.')

        raise CircuitError('Object to append must be an Instruction or '
                           'have a to_instruction() method.')
    if not isinstance(instruction, Instruction) and hasattr(instruction, "to_instruction"):
        instruction = instruction.to_instruction()

    # Expand index/slice/range shorthands into concrete bit lists.
    expanded_qargs = [self.qbit_argument_conversion(qarg) for qarg in qargs or []]
    expanded_cargs = [self.cbit_argument_conversion(carg) for carg in cargs or []]

    # broadcast_arguments may fan one call out to several appended instances.
    instructions = InstructionSet()
    for (qarg, carg) in instruction.broadcast_arguments(expanded_qargs, expanded_cargs):
        instructions.add(self._append(instruction, qarg, carg), qarg, carg)
    return instructions
def _append(self, instruction, qargs, cargs):
    """Append an instruction to the end of the circuit, modifying
    the circuit in place.

    Unlike :meth:`append`, this low-level variant does no broadcasting or
    argument expansion; qargs/cargs must already be concrete bit lists.

    Args:
        instruction (Instruction or Operator): Instruction instance to append
        qargs (list(tuple)): qubits to attach instruction to
        cargs (list(tuple)): clbits to attach instruction to

    Returns:
        Instruction: a handle to the instruction that was just added

    Raises:
        CircuitError: if the gate is of a different shape than the wires
            it is being attached to.
    """
    if not isinstance(instruction, Instruction):
        raise CircuitError('object is not an Instruction.')

    # do some compatibility checks
    self._check_dups(qargs)
    self._check_qargs(qargs)
    self._check_cargs(cargs)

    # add the instruction onto the given wires
    instruction_context = instruction, qargs, cargs
    self._data.append(instruction_context)

    # Track any ParameterExpressions so bind/assign can find them later.
    self._update_parameter_table(instruction)

    return instruction
def _update_parameter_table(self, instruction):
    """Register every Parameter appearing in ``instruction.params`` in the
    circuit's parameter table, mapping it to (instruction, param_index)
    entries. Returns the instruction unchanged for caller convenience."""
    for param_index, param in enumerate(instruction.params):
        if isinstance(param, ParameterExpression):
            current_parameters = self._parameter_table

            for parameter in param.parameters:
                if parameter in current_parameters:
                    # Avoid duplicate (instruction, index) entries.
                    if not self._check_dup_param_spec(self._parameter_table[parameter],
                                                      instruction, param_index):
                        self._parameter_table[parameter].append((instruction, param_index))
                else:
                    # Distinct Parameter objects may not share a name.
                    if parameter.name in self._parameter_table.get_names():
                        raise CircuitError(
                            'Name conflict on adding parameter: {}'.format(parameter.name))
                    self._parameter_table[parameter] = [(instruction, param_index)]

    return instruction
def _check_dup_param_spec(self, parameter_spec_list, instruction, param_index):
for spec in parameter_spec_list:
if spec[0] is instruction and spec[1] == param_index:
return True
return False
def add_register(self, *regs):
    """Add registers."""
    if not regs:
        return

    if any([isinstance(reg, int) for reg in regs]):
        # QuantumCircuit defined without registers
        if len(regs) == 1 and isinstance(regs[0], int):
            # QuantumCircuit with anonymous quantum wires e.g. QuantumCircuit(2)
            regs = (QuantumRegister(regs[0], 'q'),)
        elif len(regs) == 2 and all([isinstance(reg, int) for reg in regs]):
            # QuantumCircuit with anonymous wires e.g. QuantumCircuit(2, 3)
            regs = (QuantumRegister(regs[0], 'q'), ClassicalRegister(regs[1], 'c'))
        else:
            raise CircuitError("QuantumCircuit parameters can be Registers or Integers."
                               " If Integers, up to 2 arguments. QuantumCircuit was called"
                               " with %s." % (regs,))

    for register in regs:
        if register.name in [reg.name for reg in self.qregs + self.cregs]:
            raise CircuitError("register name \"%s\" already exists"
                               % register.name)

        if isinstance(register, AncillaRegister):
            self._ancillas.extend(register)

        # Deliberately a second `if`, not `elif`: an AncillaRegister is also
        # recorded as a QuantumRegister below.
        if isinstance(register, QuantumRegister):
            self.qregs.append(register)
            self._qubits.extend(register)
        elif isinstance(register, ClassicalRegister):
            self.cregs.append(register)
            self._clbits.extend(register)
        else:
            raise CircuitError("expected a register")
def _check_dups(self, qubits):
"""Raise exception if list of qubits contains duplicates."""
squbits = set(qubits)
if len(squbits) != len(qubits):
raise CircuitError("duplicate qubit arguments")
def _check_qargs(self, qargs):
    """Raise exception if a qarg is not in this circuit or bad format."""
    if not all(isinstance(i, Qubit) for i in qargs):
        raise CircuitError("qarg is not a Qubit")
    # Each qubit's owning register must be bound to this circuit.
    if not all(self.has_register(i.register) for i in qargs):
        raise CircuitError("register not in this circuit")
def _check_cargs(self, cargs):
    """Raise exception if clbit is not in this circuit or bad format."""
    if not all(isinstance(i, Clbit) for i in cargs):
        raise CircuitError("carg is not a Clbit")
    # Each clbit's owning register must be bound to this circuit.
    if not all(self.has_register(i.register) for i in cargs):
        raise CircuitError("register not in this circuit")
def to_instruction(self, parameter_map=None):
    """Create an Instruction out of this circuit.

    Args:
        parameter_map(dict): For parameterized circuits, a mapping from
            parameters in the circuit to parameters to be used in the
            instruction. If None, existing circuit parameters will also
            parameterize the instruction.

    Returns:
        qiskit.circuit.Instruction: a composite instruction encapsulating this circuit
        (can be decomposed back)
    """
    # Local import avoids a circular dependency with the converters package.
    from qiskit.converters.circuit_to_instruction import circuit_to_instruction
    return circuit_to_instruction(self, parameter_map)
def to_gate(self, parameter_map=None, label=None):
    """Create a Gate out of this circuit.

    Args:
        parameter_map(dict): For parameterized circuits, a mapping from
            parameters in the circuit to parameters to be used in the
            gate. If None, existing circuit parameters will also
            parameterize the gate.
        label (str): Optional gate label.

    Returns:
        Gate: a composite gate encapsulating this circuit
        (can be decomposed back)
    """
    # Local import avoids a circular dependency with the converters package.
    from qiskit.converters.circuit_to_gate import circuit_to_gate
    return circuit_to_gate(self, parameter_map, label=label)
def decompose(self):
    """Call a decomposition pass on this circuit,
    to decompose one level (shallow decompose).

    Returns:
        QuantumCircuit: a circuit one level decomposed
    """
    # Local imports avoid circular dependencies with transpiler/converters.
    from qiskit.transpiler.passes.basis.decompose import Decompose
    from qiskit.converters.circuit_to_dag import circuit_to_dag
    from qiskit.converters.dag_to_circuit import dag_to_circuit
    pass_ = Decompose()
    decomposed_dag = pass_.run(circuit_to_dag(self))
    return dag_to_circuit(decomposed_dag)
def _check_compatible_regs(self, rhs):
"""Raise exception if the circuits are defined on incompatible registers"""
list1 = self.qregs + self.cregs
list2 = rhs.qregs + rhs.cregs
for element1 in list1:
for element2 in list2:
if element2.name == element1.name:
if element1 != element2:
raise CircuitError("circuits are not compatible")
@staticmethod
def _get_composite_circuit_qasm_from_instruction(instruction):
    """Returns OpenQASM string composite circuit given an instruction.
    The given instruction should be the result of composite_circuit.to_instruction()."""
    # Formal gate parameters are named q0, q1, ... in declaration order.
    qubit_parameters = ",".join(["q%i" % num for num in range(instruction.num_qubits)])
    composite_circuit_gates = ""

    for data, qargs, _ in instruction.definition:
        gate_qargs = ",".join(["q%i" % index for index in [qubit.index for qubit in qargs]])
        composite_circuit_gates += "%s %s; " % (data.qasm(), gate_qargs)

    qasm_string = "gate %s %s {%s}" % (instruction.name, qubit_parameters,
                                       composite_circuit_gates)

    return qasm_string
def qasm(self, formatted=False, filename=None):
    """Return OpenQASM string.

    Parameters:
        formatted (bool): Return formatted Qasm string.
        filename (str): Save Qasm to file with name 'filename'.

    Returns:
        str: If formatted=False (when formatted=True the string is
        printed and None is returned).

    Raises:
        ImportError: If pygments is not installed and ``formatted`` is
            ``True``.
    """
    # Gate names already defined by the standard OpenQASM include file;
    # composite definitions must not collide with these.
    existing_gate_names = ['ch', 'cx', 'cy', 'cz', 'crx', 'cry', 'crz', 'ccx', 'cswap',
                           'cu1', 'cu3', 'dcx', 'h', 'i', 'id', 'iden', 'iswap', 'ms',
                           'r', 'rx', 'rxx', 'ry', 'ryy', 'rz', 'rzx', 'rzz', 's', 'sdg',
                           'swap', 'x', 'y', 'z', 't', 'tdg', 'u1', 'u2', 'u3']
    existing_composite_circuits = []
    # Start from the version header and include line, then declare registers.
    string_temp = self.header + "\n"
    string_temp += self.extension_lib + "\n"
    for register in self.qregs:
        string_temp += register.qasm() + "\n"
    for register in self.cregs:
        string_temp += register.qasm() + "\n"
    unitary_gates = []
    for instruction, qargs, cargs in self._data:
        if instruction.name == 'measure':
            qubit = qargs[0]
            clbit = cargs[0]
            string_temp += "%s %s[%d] -> %s[%d];\n" % (instruction.qasm(),
                                                       qubit.register.name, qubit.index,
                                                       clbit.register.name, clbit.index)
        # If instruction is a composite circuit
        elif not isinstance(instruction, Gate) and (instruction.name not in ['barrier',
                                                                             'reset']):
            if instruction not in existing_composite_circuits:
                if instruction.name in existing_gate_names:
                    # Rename to avoid clashing with a standard gate; the id()
                    # suffix makes the new name unique for this object.
                    old_name = instruction.name
                    instruction.name += "_" + str(id(instruction))
                    warnings.warn("A gate named {} already exists. "
                                  "We have renamed "
                                  "your gate to {}".format(old_name, instruction.name))
                # Get qasm of composite circuit
                qasm_string = self._get_composite_circuit_qasm_from_instruction(instruction)
                # Insert composite circuit qasm definition right after header and extension lib
                string_temp = string_temp.replace(self.extension_lib,
                                                  "%s\n%s" % (self.extension_lib,
                                                              qasm_string))
                existing_composite_circuits.append(instruction)
                existing_gate_names.append(instruction.name)
            # Insert qasm representation of the original instruction
            string_temp += "%s %s;\n" % (instruction.qasm(),
                                         ",".join(["%s[%d]" % (j.register.name, j.index)
                                                   for j in qargs + cargs]))
        else:
            string_temp += "%s %s;\n" % (instruction.qasm(),
                                         ",".join(["%s[%d]" % (j.register.name, j.index)
                                                   for j in qargs + cargs]))
            if instruction.name == 'unitary':
                unitary_gates.append(instruction)
    # this resets them, so if another call to qasm() is made the gate def is added again
    for gate in unitary_gates:
        gate._qasm_def_written = False
    if filename:
        with open(filename, 'w+') as file:
            file.write(string_temp)
        # NOTE(review): the `with` block has already closed the file; this
        # extra close is a harmless no-op kept for byte-compatibility.
        file.close()
    if formatted:
        if not HAS_PYGMENTS:
            raise ImportError("To use the formatted output pygments>2.4 "
                              "must be installed. To install pygments run "
                              '"pip install pygments".')
        code = pygments.highlight(string_temp,
                                  OpenQASMLexer(),
                                  Terminal256Formatter(style=QasmTerminalStyle))
        print(code)
        return None
    else:
        return string_temp
def draw(self, output=None, scale=None, filename=None, style=None,
         interactive=False, plot_barriers=True,
         reverse_bits=False, justify=None, vertical_compression='medium', idle_wires=True,
         with_layout=True, fold=None, ax=None, initial_state=False, cregbundle=True):
    """Draw the quantum circuit.

    Args:
        output (str): Drawing backend: ``text`` (ASCII-art
            :class:`TextDrawing`), ``latex`` (PIL image compiled via
            LaTeX), ``latex_source`` (raw uncompiled LaTeX string) or
            ``mpl`` (matplotlib figure). Defaults to ``text`` unless a
            user config file selects another backend; an explicit value
            always wins over the config file.
        scale (float): scale of image to draw (shrink if < 1).
        filename (str): file path to save image to.
        style (dict or str): style dictionary, or path of a JSON file
            containing one (``mpl`` output only). See the style-dict
            documentation of the ``mpl`` drawer for the recognized keys
            (colors, font sizes, ``displaytext``/``displaycolor`` maps,
            ``fold``, ``cregbundle``, margins, line styles, ...).
        interactive (bool): when True, show the drawing in a new window
            (``mpl`` only, and only if the matplotlib backend supports
            it; silently ignored for ``text`` and ``latex_source``).
        plot_barriers (bool): enable/disable drawing barriers.
            Defaults to True.
        reverse_bits (bool): when True, reverse the bit order inside
            registers in the output.
        justify (string): ``left``, ``right`` or ``none`` gate
            placement; anything else falls back to left-justified.
        vertical_compression (string): ``high``, ``medium`` or ``low``
            merging of output lines; ``text`` output only. Default is
            ``medium``.
        idle_wires (bool): include wires with no circuit elements.
            Default is True.
        with_layout (bool): include physical-layout labels. Default is
            True.
        fold (int): pagination, disabled with -1. For ``text``, the
            line length (guessed from the terminal if None; 80 in
            jupyter); for ``mpl``, the number of layers before folding
            (default 25).
        ax (matplotlib.axes.Axes): optional existing Axes to draw on
            (``mpl`` only); when given, no Figure is returned.
        initial_state (bool): prepend ``|0>`` on each wire (``text``,
            ``latex`` and ``latex_source``). Default: ``False``.
        cregbundle (bool): bundle classical registers (not used by the
            ``matplotlib`` output). Default: ``True``.

    Returns:
        :class:`PIL.Image` or :class:`matplotlib.figure` or :class:`str` or
        :class:`TextDrawing`: the drawing in the representation matching
        ``output``.

    Raises:
        VisualizationError: when an invalid output method is selected
        ImportError: when the output methods require non-installed
            libraries
    """
    # pylint: disable=cyclic-import
    from qiskit.visualization import circuit_drawer

    # Backwards compatibility: draw(0.7) once meant draw(scale=0.7).
    if isinstance(output, (int, float, np.number)):
        warnings.warn("Setting 'scale' as the first argument is deprecated. "
                      "Use scale=%s instead." % output,
                      DeprecationWarning)
        scale = output
        output = None
    return circuit_drawer(self, scale=scale, filename=filename, style=style,
                          output=output, interactive=interactive,
                          plot_barriers=plot_barriers, reverse_bits=reverse_bits,
                          justify=justify, vertical_compression=vertical_compression,
                          idle_wires=idle_wires, with_layout=with_layout, fold=fold,
                          ax=ax, initial_state=initial_state, cregbundle=cregbundle)
def size(self):
    """Returns total number of gate operations in circuit.

    Returns:
        int: Total number of gate operations.
    """
    # Compiler/simulator directives do not count as gate operations.
    return sum(1 for operation, _, _ in self._data
               if operation.name not in ['barrier', 'snapshot'])
def depth(self):
    """Return circuit depth (i.e., length of critical path).

    Compiler/simulator directives ('barrier', 'snapshot') do not add to
    the depth.

    Returns:
        int: Depth of circuit.

    Notes:
        The circuit depth and the DAG depth need not be the same.
    """
    # Map every register name to the offset of its first bit in a flat
    # indexing of all qubits followed by all clbits.
    bit_offset = {}
    total_bits = 0
    for register in self.qregs + self.cregs:
        bit_offset[register.name] = total_bits
        total_bits += register.size
    # A circuit without registers has depth zero.
    if not total_bits:
        return 0
    # Per-bit "stack heights": a gate sits on top of the tallest stack
    # among the bits it touches, and lifts all of them to that level.
    heights = [0] * total_bits
    for operation, qargs, cargs in self._data:
        # Directives occupy the bits without increasing their height.
        adds_layer = operation.name not in ['barrier', 'snapshot']
        indices = []
        new_levels = []
        for bit in qargs + cargs:
            idx = bit_offset[bit.register.name] + bit.index
            indices.append(idx)
            new_levels.append(heights[idx] + 1 if adds_layer else heights[idx])
        # A conditional gate acts on every bit of the register it is
        # conditioned on (assumed never a barrier/snapshot).
        if operation.condition:
            creg_start = bit_offset[operation.condition[0].name]
            for offset in range(operation.condition[0].size):
                idx = creg_start + offset
                if idx not in indices:
                    indices.append(idx)
                    new_levels.append(heights[idx] + 1)
        top = max(new_levels)
        for idx in indices:
            heights[idx] = top
    return max(heights)
def width(self):
    """Return number of qubits plus clbits in circuit.

    Returns:
        int: Width of circuit.
    """
    total = 0
    for register in self.qregs + self.cregs:
        total += register.size
    return total
@property
def num_qubits(self):
    """Return number of qubits."""
    return sum(register.size for register in self.qregs)
@property
def num_ancillas(self):
    """Return how many ancilla qubits the circuit holds."""
    ancilla_count = len(self.ancillas)
    return ancilla_count
@property
def n_qubits(self):
    """Deprecated, use ``num_qubits`` instead. Return number of qubits."""
    # Keep the historical alias working but steer users to num_qubits.
    message = ('The QuantumCircuit.n_qubits method is deprecated as of 0.13.0, and '
               'will be removed no earlier than 3 months after that release date. '
               'You should use the QuantumCircuit.num_qubits method instead.')
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return self.num_qubits
@property
def num_clbits(self):
    """Return number of classical bits."""
    total = 0
    for register in self.cregs:
        total += len(register)
    return total
def count_ops(self):
    """Count each operation kind in the circuit.

    Returns:
        OrderedDict: a breakdown of how many operations of each kind, sorted by amount.
    """
    tally = {}
    for operation, _, _ in self._data:
        tally[operation.name] = tally.get(operation.name, 0) + 1
    # Most frequent first; sorted() is stable so ties keep insertion order.
    ranked = sorted(tally.items(), key=lambda pair: pair[1], reverse=True)
    return OrderedDict(ranked)
def num_nonlocal_gates(self):
    """Return number of non-local gates (i.e. involving 2+ qubits).

    Conditional nonlocal gates are also included.
    """
    # Directives ('barrier', 'snapshot') span many qubits but are not gates.
    return sum(1 for operation, _, _ in self._data
               if operation.num_qubits > 1
               and operation.name not in ['barrier', 'snapshot'])
def num_connected_components(self, unitary_only=False):
    """How many non-entangled subcircuits can the circuit be factored to.

    Args:
        unitary_only (bool): Compute only unitary part of graph.

    Returns:
        int: Number of connected components in circuit.
    """
    # Convert registers to ints (as done in depth).
    reg_offset = 0
    reg_map = {}
    if unitary_only:
        regs = self.qregs
    else:
        regs = self.qregs + self.cregs
    for reg in regs:
        reg_map[reg.name] = reg_offset
        reg_offset += reg.size
    # Start with each qubit or cbit being its own subgraph.
    sub_graphs = [[bit] for bit in range(reg_offset)]
    num_sub_graphs = len(sub_graphs)
    # Traverse the gates; every multi-bit gate merges the subgraphs of
    # the bits it touches into a single connected component.
    for instr, qargs, cargs in self._data:
        if unitary_only:
            args = qargs
            num_qargs = len(args)
        else:
            args = qargs + cargs
            num_qargs = len(args) + (1 if instr.condition else 0)
        if num_qargs >= 2 and instr.name not in ['barrier', 'snapshot']:
            graphs_touched = []
            num_touched = 0
            # Controls necessarily join all the cbits in the
            # register that they use.
            if instr.condition and not unitary_only:
                creg = instr.condition[0]
                creg_int = reg_map[creg.name]
                for coff in range(creg.size):
                    temp_int = creg_int + coff
                    for k in range(num_sub_graphs):
                        if temp_int in sub_graphs[k]:
                            # BUGFIX: guard against counting the same
                            # subgraph once per condition bit; without it a
                            # register whose bits were already merged would
                            # corrupt num_touched and the final count.
                            if k not in graphs_touched:
                                graphs_touched.append(k)
                                num_touched += 1
                            break
            for item in args:
                reg_int = reg_map[item.register.name] + item.index
                for k in range(num_sub_graphs):
                    if reg_int in sub_graphs[k]:
                        if k not in graphs_touched:
                            graphs_touched.append(k)
                            num_touched += 1
                        break
            # If the gate touches more than one subgraph, join those
            # graphs together and reduce the number of subgraphs.
            if num_touched > 1:
                connections = []
                for idx in graphs_touched:
                    connections.extend(sub_graphs[idx])
                _sub_graphs = [sub_graphs[idx] for idx in range(num_sub_graphs)
                               if idx not in graphs_touched]
                _sub_graphs.append(connections)
                sub_graphs = _sub_graphs
                num_sub_graphs -= (num_touched - 1)
        # Cannot go lower than one so break
        if num_sub_graphs == 1:
            break
    return num_sub_graphs
def num_unitary_factors(self):
    """Computes the number of tensor factors in the unitary
    (quantum) part of the circuit only.
    """
    # Classical bits are ignored by restricting to the unitary graph.
    return self.num_connected_components(unitary_only=True)
def num_tensor_factors(self):
    """Computes the number of tensor factors in the unitary
    (quantum) part of the circuit only.

    Notes:
        This is here for backwards compatibility, and will be
        removed in a future release of Qiskit. You should call
        `num_unitary_factors` instead.
    """
    # Thin backwards-compatible alias.
    return self.num_unitary_factors()
def copy(self, name=None):
    """Copy the circuit.

    Args:
        name (str): name to be given to the copied circuit. If None, then the name stays the same

    Returns:
        QuantumCircuit: a deepcopy of the current circuit, with the specified name
    """
    duplicate = copy.copy(self)
    # copy.copy shares list attributes by reference; shallow-copy the
    # register/bit containers so mutating one circuit's lists does not
    # mutate the other's.
    duplicate.qregs = self.qregs.copy()
    duplicate.cregs = self.cregs.copy()
    duplicate._qubits = self._qubits.copy()
    duplicate._clbits = self._clbits.copy()
    # Copy every distinct instruction object exactly once, keyed by
    # identity, so instructions that appear several times in the data
    # keep their shared identity in the copy.
    originals = {id(instr): instr for instr, _, __ in self._data}
    clones = {key: original.copy() for key, original in originals.items()}
    duplicate._parameter_table = ParameterTable({
        param: [(clones[id(instr)], param_index)
                for instr, param_index in self._parameter_table[param]]
        for param in self._parameter_table
    })
    duplicate._data = [(clones[id(instr)], qargs.copy(), cargs.copy())
                       for instr, qargs, cargs in self._data]
    if name:
        duplicate.name = name
    return duplicate
def _create_creg(self, length, name):
    """ Creates a creg, checking if ClassicalRegister with same name exists
    """
    if any(existing.name == name for existing in self.cregs):
        # Temporarily hijack the class-level prefix so an auto-numbered
        # name derived from ``name`` is generated instead of a clash.
        save_prefix = ClassicalRegister.prefix
        ClassicalRegister.prefix = name
        new_creg = ClassicalRegister(length)
        ClassicalRegister.prefix = save_prefix
    else:
        new_creg = ClassicalRegister(length, name)
    return new_creg
def _create_qreg(self, length, name):
    """ Creates a qreg, checking if QuantumRegister with same name exists
    """
    if any(existing.name == name for existing in self.qregs):
        # Temporarily hijack the class-level prefix so an auto-numbered
        # name derived from ``name`` is generated instead of a clash.
        save_prefix = QuantumRegister.prefix
        QuantumRegister.prefix = name
        new_qreg = QuantumRegister(length)
        QuantumRegister.prefix = save_prefix
    else:
        new_qreg = QuantumRegister(length, name)
    return new_qreg
def measure_active(self, inplace=True):
    """Adds measurement to all non-idle qubits. Creates a new ClassicalRegister with
    a size equal to the number of non-idle qubits being measured.

    Returns a new circuit with measurements if `inplace=False`.

    Parameters:
        inplace (bool): All measurements inplace or return new circuit.

    Returns:
        QuantumCircuit: Returns circuit with measurements when `inplace = False`.
    """
    from qiskit.converters.circuit_to_dag import circuit_to_dag
    target = self if inplace else self.copy()
    # The DAG knows which wires carry no operations; skip those qubits.
    dag = circuit_to_dag(target)
    idle = list(dag.idle_wires())
    active_qubits = [qubit for qubit in target.qubits if qubit not in idle]
    new_creg = target._create_creg(len(active_qubits), 'measure')
    target.add_register(new_creg)
    target.barrier()
    target.measure(active_qubits, new_creg)
    return None if inplace else target
def measure_all(self, inplace=True):
    """Adds measurement to all qubits. Creates a new ClassicalRegister with a
    size equal to the number of qubits being measured.

    Returns a new circuit with measurements if `inplace=False`.

    Parameters:
        inplace (bool): All measurements inplace or return new circuit.

    Returns:
        QuantumCircuit: Returns circuit with measurements when `inplace = False`.
    """
    target = self if inplace else self.copy()
    # One fresh classical bit per qubit, preceded by a barrier so the
    # measurements are appended after all existing operations.
    new_creg = target._create_creg(len(target.qubits), 'meas')
    target.add_register(new_creg)
    target.barrier()
    target.measure(target.qubits, new_creg)
    return None if inplace else target
def remove_final_measurements(self, inplace=True):
    """Removes final measurement on all qubits if they are present.

    Deletes the ClassicalRegister that was used to store the values from these measurements
    if it is idle.

    Returns a new circuit without measurements if `inplace=False`.

    Parameters:
        inplace (bool): All measurements removed inplace or return new circuit.

    Returns:
        QuantumCircuit: Returns circuit with measurements removed when `inplace = False`.
    """
    # pylint: disable=cyclic-import
    from qiskit.transpiler.passes import RemoveFinalMeasurements
    from qiskit.converters import circuit_to_dag
    if inplace:
        circ = self
    else:
        circ = self.copy()

    # Run the RemoveFinalMeasurements transpiler pass on the DAG form,
    # then rebuild this circuit's data from the resulting DAG.
    dag = circuit_to_dag(circ)
    remove_final_meas = RemoveFinalMeasurements()
    new_dag = remove_final_meas.run(dag)

    # Set circ cregs and instructions to match the new DAGCircuit's
    circ.data.clear()
    circ.cregs = list(new_dag.cregs.values())

    for node in new_dag.topological_op_nodes():
        # Resolve bits against the new DAG's registers (the pass may have
        # dropped idle classical registers).
        qubits = []
        for qubit in node.qargs:
            qubits.append(new_dag.qregs[qubit.register.name][qubit.index])

        clbits = []
        for clbit in node.cargs:
            clbits.append(new_dag.cregs[clbit.register.name][clbit.index])

        # Get arguments for classical condition (if any)
        inst = node.op.copy()
        inst.condition = node.condition
        circ.append(inst, qubits, clbits)

    if not inplace:
        return circ
    else:
        return None
@staticmethod
def from_qasm_file(path):
    """Take in a QASM file and generate a QuantumCircuit object.

    Args:
        path (str): Path to the file for a QASM program

    Return:
        QuantumCircuit: The QuantumCircuit object for the input QASM
    """
    # Parse the file, then convert the AST into a circuit.
    return _circuit_from_qasm(Qasm(filename=path))
@staticmethod
def from_qasm_str(qasm_str):
    """Take in a QASM string and generate a QuantumCircuit object.

    Args:
        qasm_str (str): A QASM program string

    Return:
        QuantumCircuit: The QuantumCircuit object for the input QASM
    """
    # Parse the string, then convert the AST into a circuit.
    return _circuit_from_qasm(Qasm(data=qasm_str))
@property
def global_phase(self):
    """The global phase of the circuit, in radians."""
    return self._global_phase
@global_phase.setter
def global_phase(self, angle):
    """Set the phase of the circuit.

    Args:
        angle (float, ParameterExpression): radians
    """
    # Symbolic phases are stored verbatim; numeric phases are normalized.
    if isinstance(angle, ParameterExpression):
        self._global_phase = angle
        return
    # Reduce into the [-2 * pi, 2 * pi] interval, preserving the sign
    # of the incoming angle.
    angle = float(angle)
    if not angle:
        self._global_phase = 0
    elif angle < 0:
        self._global_phase = angle % (-2 * np.pi)
    else:
        self._global_phase = angle % (2 * np.pi)
@property
def parameters(self):
    """The set of unbound parameters tracked in the parameter table."""
    return self._parameter_table.get_keys()
@property
def num_parameters(self):
    """The number of unbound parameter objects in the circuit."""
    return len(self.parameters)
def assign_parameters(self, param_dict, inplace=False):
    """Assign parameters to new parameters or values.

    The keys of the parameter dictionary must be Parameter instances in the current circuit. The
    values of the dictionary can either be numeric values or new parameter objects.
    The values can be assigned to the current circuit object or to a copy of it.

    Args:
        param_dict (dict): A dictionary specifying the mapping from ``current_parameter``
            to ``new_parameter``, where ``new_parameter`` can be a new parameter object
            or a numeric value.
        inplace (bool): If False, a copy of the circuit with the bound parameters is
            returned. If True the circuit instance itself is modified.

    Raises:
        CircuitError: If param_dict contains parameters not present in the circuit

    Returns:
        optional(QuantumCircuit): A copy of the circuit with bound parameters, if
        ``inplace`` is False, otherwise None.

    Examples:
        >>> from qiskit.circuit import QuantumCircuit, Parameter
        >>> circuit = QuantumCircuit(2)
        >>> params = [Parameter('A'), Parameter('B'), Parameter('C')]
        >>> circuit.ry(params[0], 0)
        >>> circuit.crx(params[1], 0, 1)
        >>> circuit.draw()
                ┌───────┐
        q_0: |0>┤ Ry(A) ├────■────
                └───────┘┌───┴───┐
        q_1: |0>─────────┤ Rx(B) ├
                         └───────┘
        >>> circuit.assign_parameters({params[0]: params[2]}, inplace=True)
        >>> circuit.draw()
                ┌───────┐
        q_0: |0>┤ Ry(C) ├────■────
                └───────┘┌───┴───┐
        q_1: |0>─────────┤ Rx(B) ├
                         └───────┘
        >>> bound_circuit = circuit.assign_parameters({params[1]: 1, params[2]: 2})
        >>> bound_circuit.draw()
                ┌───────┐
        q_0: |0>┤ Ry(2) ├────■────
                └───────┘┌───┴───┐
        q_1: |0>─────────┤ Rx(1) ├
                         └───────┘
        >>> bound_circuit.parameters # this one has no free parameters anymore
        set()
        >>> circuit.parameters # the original one is still parameterized
        {Parameter(A), Parameter(C)}
    """
    # replace in self or in a copy depending on the value of in_place
    bound_circuit = self if inplace else self.copy()

    # unroll the parameter dictionary (needed if e.g. it contains a ParameterVector)
    unrolled_param_dict = self._unroll_param_dict(param_dict)

    # check that only existing parameters are in the parameter dictionary
    # NOTE(review): a strict-superset key comparison only fires when more
    # parameters are supplied than exist; an unknown parameter alongside
    # fewer bindings surfaces later as a KeyError instead — confirm intent.
    if unrolled_param_dict.keys() > self._parameter_table.keys():
        raise CircuitError('Cannot bind parameters ({}) not present in the circuit.'.format(
            [str(p) for p in param_dict.keys() - self._parameter_table]))

    # replace the parameters with a new Parameter ("substitute") or numeric value ("bind")
    for parameter, value in unrolled_param_dict.items():
        if isinstance(value, ParameterExpression):
            bound_circuit._substitute_parameter(parameter, value)
        else:
            bound_circuit._bind_parameter(parameter, value)
            del bound_circuit._parameter_table[parameter]  # clear evaluated expressions

    return None if inplace else bound_circuit
def bind_parameters(self, value_dict):
    """Assign numeric parameters to values yielding a new circuit.

    To assign new Parameter objects or bind the values in-place, without yielding a new
    circuit, use the assign_parameters method.

    Args:
        value_dict (dict): {parameter: value, ...}

    Raises:
        CircuitError: If value_dict contains parameters not present in the circuit
        TypeError: If value_dict contains a ParameterExpression in the values.

    Returns:
        QuantumCircuit: copy of self with assignment substitution.
    """
    bound_circuit = self.copy()

    # unroll the parameter dictionary (needed if e.g. it contains a ParameterVector)
    unrolled_value_dict = self._unroll_param_dict(value_dict)

    # Check that only existing parameters are being bound. BUGFIX: the
    # previous length comparison (len(dict) > len(table)) missed unknown
    # parameters whenever the binding dict was not larger than the table,
    # deferring the failure to an opaque KeyError; a subset test on the
    # key views enforces the documented CircuitError contract.
    if not unrolled_value_dict.keys() <= self._parameter_table.keys():
        raise CircuitError('Cannot bind parameters ({}) not present in the circuit.'.format(
            [str(p) for p in value_dict.keys() - self._parameter_table.keys()]))

    # Bind each parameter to its numeric value and drop it from the table.
    for parameter, value in unrolled_value_dict.items():
        bound_circuit._bind_parameter(parameter, value)
        del bound_circuit._parameter_table[parameter]  # clear evaluated expressions
    return bound_circuit
def _unroll_param_dict(self, value_dict):
    """Expand any ParameterVector keys of ``value_dict`` into one entry
    per vector element; plain ParameterExpression keys pass through."""
    unrolled = {}
    for param, value in value_dict.items():
        if isinstance(param, ParameterExpression):
            unrolled[param] = value
        if isinstance(param, ParameterVector):
            # The vector and its value list must pair up element-wise.
            if len(param) != len(value):
                raise CircuitError('ParameterVector {} has length {}, which '
                                   'differs from value list {} of '
                                   'len {}'.format(param, len(param), value, len(value)))
            unrolled.update(zip(param, value))
    return unrolled
def _bind_parameter(self, parameter, value):
    """Assigns a parameter value to matching instructions in-place."""
    for instruction, index in self._parameter_table[parameter]:
        instruction.params[index] = instruction.params[index].bind({parameter: value})
        # Composite instructions may already carry a definition that
        # references the parameter; bind inside it as well.
        self._rebind_definition(instruction, parameter, value)
    # The circuit's global phase may also depend on this parameter.
    if (isinstance(self.global_phase, ParameterExpression) and
            parameter in self.global_phase.parameters):
        self.global_phase = self.global_phase.bind({parameter: value})
def _substitute_parameter(self, old_parameter, new_parameter_expr):
    """Substitute an existing parameter in all circuit instructions and the parameter table."""
    for instruction, index in self._parameter_table[old_parameter]:
        substituted = instruction.params[index].subs({old_parameter: new_parameter_expr})
        instruction.params[index] = substituted
        self._rebind_definition(instruction, old_parameter, new_parameter_expr)
    # Re-key the table: every parameter of the new expression points at
    # the (shared) entry that used to track the old parameter.
    entry = self._parameter_table.pop(old_parameter)
    for new_parameter in new_parameter_expr.parameters:
        self._parameter_table[new_parameter] = entry
    if (isinstance(self.global_phase, ParameterExpression)
            and old_parameter in self.global_phase.parameters):
        self.global_phase = self.global_phase.subs({old_parameter: new_parameter_expr})
def _rebind_definition(self, instruction, parameter, value):
    """Recursively bind or substitute ``parameter`` inside an
    instruction's (possibly nested) definition."""
    if not instruction._definition:
        return
    for op, _, _ in instruction._definition:
        for idx, param in enumerate(op.params):
            if isinstance(param, ParameterExpression) and parameter in param.parameters:
                # Symbolic replacement uses subs(); numeric uses bind().
                if isinstance(value, ParameterExpression):
                    op.params[idx] = param.subs({parameter: value})
                else:
                    op.params[idx] = param.bind({parameter: value})
                self._rebind_definition(op, parameter, value)
def barrier(self, *qargs):
    """Apply :class:`~qiskit.circuit.Barrier`. If qargs is None, applies to all."""
    from .barrier import Barrier
    qubits = []
    if not qargs:
        # No arguments: barrier across every qubit in the circuit.
        for qreg in self.qregs:
            qubits.extend(qreg[j] for j in range(qreg.size))
    for qarg in qargs:
        # Accept registers, bit lists, ranges, slices or single bits.
        if isinstance(qarg, QuantumRegister):
            qubits.extend(qarg[j] for j in range(qarg.size))
        elif isinstance(qarg, list):
            qubits.extend(qarg)
        elif isinstance(qarg, range):
            qubits.extend(qarg)
        elif isinstance(qarg, slice):
            qubits.extend(self.qubits[qarg])
        else:
            qubits.append(qarg)
    return self.append(Barrier(len(qubits)), qubits, [])
def h(self, qubit):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.HGate` to ``qubit``."""
    from .library.standard_gates.h import HGate
    gate = HGate()
    return self.append(gate, [qubit], [])
def ch(self, control_qubit, target_qubit,  # pylint: disable=invalid-name
       label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CHGate` on control/target."""
    from .library.standard_gates.h import CHGate
    gate = CHGate(label=label, ctrl_state=ctrl_state)
    return self.append(gate, [control_qubit, target_qubit], [])
def i(self, qubit):
    """Apply :class:`~qiskit.circuit.library.IGate` (identity) to ``qubit``."""
    from .library.standard_gates.i import IGate
    gate = IGate()
    return self.append(gate, [qubit], [])
def id(self, qubit):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.IGate` (alias of :meth:`i`)."""
    # Delegate to the canonical spelling.
    return self.i(qubit)
def ms(self, theta, qubits):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.MSGate` across ``qubits``."""
    from .library.standard_gates.ms import MSGate
    gate = MSGate(len(qubits), theta)
    return self.append(gate, qubits)
def p(self, theta, qubit):
    """Apply :class:`~qiskit.circuit.library.PhaseGate` of angle ``theta``."""
    from .library.standard_gates.p import PhaseGate
    gate = PhaseGate(theta)
    return self.append(gate, [qubit], [])
def cp(self, theta, control_qubit, target_qubit, label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CPhaseGate` on control/target."""
    from .library.standard_gates.p import CPhaseGate
    gate = CPhaseGate(theta, label=label, ctrl_state=ctrl_state)
    return self.append(gate, [control_qubit, target_qubit], [])
def r(self, theta, phi, qubit):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.RGate` with angles ``theta``, ``phi``."""
    from .library.standard_gates.r import RGate
    gate = RGate(theta, phi)
    return self.append(gate, [qubit], [])
def rccx(self, control_qubit1, control_qubit2, target_qubit):
    """Apply :class:`~qiskit.circuit.library.RCCXGate` (simplified Toffoli)."""
    from .library.standard_gates.x import RCCXGate
    gate = RCCXGate()
    return self.append(gate, [control_qubit1, control_qubit2, target_qubit], [])
def rcccx(self, control_qubit1, control_qubit2, control_qubit3, target_qubit):
    """Apply :class:`~qiskit.circuit.library.RC3XGate` (simplified 3-controlled X)."""
    from .library.standard_gates.x import RC3XGate
    operands = [control_qubit1, control_qubit2, control_qubit3, target_qubit]
    return self.append(RC3XGate(), operands, [])
def rx(self, theta, qubit, label=None):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.RXGate` of angle ``theta``."""
    from .library.standard_gates.rx import RXGate
    gate = RXGate(theta, label=label)
    return self.append(gate, [qubit], [])
def crx(self, theta, control_qubit, target_qubit, label=None, ctrl_state=None):
"""Apply :class:`~qiskit.circuit.library.CRXGate`."""
from .library.standard_gates.rx import CRXGate
return self.append(CRXGate(theta, label=label, ctrl_state=ctrl_state),
[control_qubit, target_qubit], [])
def rxx(self, theta, qubit1, qubit2):
"""Apply :class:`~qiskit.circuit.library.RXXGate`."""
from .library.standard_gates.rxx import RXXGate
return self.append(RXXGate(theta), [qubit1, qubit2], [])
def ry(self, theta, qubit, label=None): # pylint: disable=invalid-name
"""Apply :class:`~qiskit.circuit.library.RYGate`."""
from .library.standard_gates.ry import RYGate
return self.append(RYGate(theta, label=label), [qubit], [])
def cry(self, theta, control_qubit, target_qubit, label=None, ctrl_state=None):
"""Apply :class:`~qiskit.circuit.library.CRYGate`."""
from .library.standard_gates.ry import CRYGate
return self.append(CRYGate(theta, label=label, ctrl_state=ctrl_state),
[control_qubit, target_qubit], [])
def ryy(self, theta, qubit1, qubit2):
"""Apply :class:`~qiskit.circuit.library.RYYGate`."""
from .library.standard_gates.ryy import RYYGate
return self.append(RYYGate(theta), [qubit1, qubit2], [])
def rz(self, phi, qubit): # pylint: disable=invalid-name
"""Apply :class:`~qiskit.circuit.library.RZGate`."""
from .library.standard_gates.rz import RZGate
return self.append(RZGate(phi), [qubit], [])
def crz(self, theta, control_qubit, target_qubit, label=None, ctrl_state=None):
"""Apply :class:`~qiskit.circuit.library.CRZGate`."""
from .library.standard_gates.rz import CRZGate
return self.append(CRZGate(theta, label=label, ctrl_state=ctrl_state),
[control_qubit, target_qubit], [])
def rzx(self, theta, qubit1, qubit2):
"""Apply :class:`~qiskit.circuit.library.RZXGate`."""
from .library.standard_gates.rzx import RZXGate
return self.append(RZXGate(theta), [qubit1, qubit2], [])
def rzz(self, theta, qubit1, qubit2):
"""Apply :class:`~qiskit.circuit.library.RZZGate`."""
from .library.standard_gates.rzz import RZZGate
return self.append(RZZGate(theta), [qubit1, qubit2], [])
# ------------------------------------------------------------------
# Standard-gate convenience wrappers (Clifford-type and u-family).
# Same pattern as above: lazy gate import, then ``self.append``.
# ------------------------------------------------------------------

def s(self, qubit):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.SGate` (sqrt(Z))."""
    from .library.standard_gates.s import SGate
    return self.append(SGate(), [qubit], [])

def sdg(self, qubit):
    """Apply :class:`~qiskit.circuit.library.SdgGate` (inverse of S)."""
    from .library.standard_gates.s import SdgGate
    return self.append(SdgGate(), [qubit], [])

def swap(self, qubit1, qubit2):
    """Apply :class:`~qiskit.circuit.library.SwapGate`."""
    from .library.standard_gates.swap import SwapGate
    return self.append(SwapGate(), [qubit1, qubit2], [])

def iswap(self, qubit1, qubit2):
    """Apply :class:`~qiskit.circuit.library.iSwapGate`."""
    from .library.standard_gates.iswap import iSwapGate
    return self.append(iSwapGate(), [qubit1, qubit2], [])

def cswap(self, control_qubit, target_qubit1, target_qubit2, label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CSwapGate`."""
    from .library.standard_gates.swap import CSwapGate
    return self.append(CSwapGate(label=label, ctrl_state=ctrl_state),
                       [control_qubit, target_qubit1, target_qubit2], [])

def fredkin(self, control_qubit, target_qubit1, target_qubit2):
    """Apply :class:`~qiskit.circuit.library.CSwapGate` (alias of :meth:`cswap`)."""
    return self.cswap(control_qubit, target_qubit1, target_qubit2)

def sx(self, qubit):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.SXGate` (sqrt(X))."""
    from .library.standard_gates.sx import SXGate
    return self.append(SXGate(), [qubit], [])

def sxdg(self, qubit):
    """Apply :class:`~qiskit.circuit.library.SXdgGate` (inverse of sqrt(X))."""
    from .library.standard_gates.sx import SXdgGate
    return self.append(SXdgGate(), [qubit], [])

def csx(self, control_qubit, target_qubit, label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CSXGate`."""
    from .library.standard_gates.sx import CSXGate
    return self.append(CSXGate(label=label, ctrl_state=ctrl_state),
                       [control_qubit, target_qubit], [])

def t(self, qubit):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.TGate`."""
    from .library.standard_gates.t import TGate
    return self.append(TGate(), [qubit], [])

def tdg(self, qubit):
    """Apply :class:`~qiskit.circuit.library.TdgGate` (inverse of T)."""
    from .library.standard_gates.t import TdgGate
    return self.append(TdgGate(), [qubit], [])

def u(self, theta, phi, lam, qubit):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.UGate` (generic single-qubit rotation)."""
    from .library.standard_gates.u import UGate
    return self.append(UGate(theta, phi, lam), [qubit], [])

def cu(self, theta, phi, lam, gamma,  # pylint: disable=invalid-name
       control_qubit, target_qubit, label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CUGate`."""
    from .library.standard_gates.u import CUGate
    return self.append(CUGate(theta, phi, lam, gamma, label=label, ctrl_state=ctrl_state),
                       [control_qubit, target_qubit], [])

def u1(self, theta, qubit):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.U1Gate`."""
    from .library.standard_gates.u1 import U1Gate
    return self.append(U1Gate(theta), [qubit], [])

def cu1(self, theta, control_qubit, target_qubit, label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CU1Gate`."""
    from .library.standard_gates.u1 import CU1Gate
    return self.append(CU1Gate(theta, label=label, ctrl_state=ctrl_state),
                       [control_qubit, target_qubit], [])

def mcu1(self, lam, control_qubits, target_qubit):
    """Apply :class:`~qiskit.circuit.library.MCU1Gate` (multi-controlled U1)."""
    from .library.standard_gates.u1 import MCU1Gate
    num_ctrl_qubits = len(control_qubits)
    # ``control_qubits[:]`` copies so the caller's list is not mutated.
    return self.append(MCU1Gate(lam, num_ctrl_qubits), control_qubits[:] + [target_qubit], [])

def u2(self, phi, lam, qubit):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.U2Gate`."""
    from .library.standard_gates.u2 import U2Gate
    return self.append(U2Gate(phi, lam), [qubit], [])

def u3(self, theta, phi, lam, qubit):  # pylint: disable=invalid-name
    """Apply :class:`~qiskit.circuit.library.U3Gate`."""
    from .library.standard_gates.u3 import U3Gate
    return self.append(U3Gate(theta, phi, lam), [qubit], [])

def cu3(self, theta, phi, lam, control_qubit, target_qubit, label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CU3Gate`."""
    from .library.standard_gates.u3 import CU3Gate
    return self.append(CU3Gate(theta, phi, lam, label=label, ctrl_state=ctrl_state),
                       [control_qubit, target_qubit], [])
# ------------------------------------------------------------------
# X-family gate wrappers.  Same lazy-import / ``self.append`` pattern
# as the other standard-gate wrappers above.
# ------------------------------------------------------------------

def x(self, qubit, label=None):
    """Apply :class:`~qiskit.circuit.library.XGate`."""
    from .library.standard_gates.x import XGate
    return self.append(XGate(label=label), [qubit], [])

def cx(self, control_qubit, target_qubit,  # pylint:disable=invalid-name
       label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CXGate`."""
    from .library.standard_gates.x import CXGate
    return self.append(CXGate(label=label, ctrl_state=ctrl_state),
                       [control_qubit, target_qubit], [])

def cnot(self, control_qubit, target_qubit, label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CXGate` (alias of :meth:`cx`)."""
    # Fix: propagate the return value like the other alias wrappers
    # (``id``, ``fredkin``, ``mct``); previously it was silently dropped.
    return self.cx(control_qubit, target_qubit, label, ctrl_state)

def dcx(self, qubit1, qubit2):
    """Apply :class:`~qiskit.circuit.library.DCXGate` (double-CNOT)."""
    from .library.standard_gates.dcx import DCXGate
    return self.append(DCXGate(), [qubit1, qubit2], [])

def ccx(self, control_qubit1, control_qubit2, target_qubit):
    """Apply :class:`~qiskit.circuit.library.CCXGate` (Toffoli)."""
    from .library.standard_gates.x import CCXGate
    return self.append(CCXGate(),
                       [control_qubit1, control_qubit2, target_qubit], [])

def toffoli(self, control_qubit1, control_qubit2, target_qubit):
    """Apply :class:`~qiskit.circuit.library.CCXGate` (alias of :meth:`ccx`)."""
    # Fix: propagate the return value (consistency with the other aliases).
    return self.ccx(control_qubit1, control_qubit2, target_qubit)
def mcx(self, control_qubits, target_qubit, ancilla_qubits=None, mode='noancilla'):
"""Apply :class:`~qiskit.circuit.library.MCXGate`.
The multi-cX gate can be implemented using different techniques, which use different numbers
of ancilla qubits and have varying circuit depth. These modes are:
- 'no-ancilla': Requires 0 ancilla qubits.
- 'recursion': Requires 1 ancilla qubit if more than 4 controls are used, otherwise 0.
- 'v-chain': Requires 2 less ancillas than the number of control qubits.
- 'v-chain-dirty': Same as for the clean ancillas (but the circuit will be longer).
"""
from .library.standard_gates.x import MCXGrayCode, MCXRecursive, MCXVChain
num_ctrl_qubits = len(control_qubits)
available_implementations = {
'noancilla': MCXGrayCode(num_ctrl_qubits),
'recursion': MCXRecursive(num_ctrl_qubits),
'v-chain': MCXVChain(num_ctrl_qubits, False),
'v-chain-dirty': MCXVChain(num_ctrl_qubits, dirty_ancillas=True),
# outdated, previous names
'advanced': MCXRecursive(num_ctrl_qubits),
'basic': MCXVChain(num_ctrl_qubits, dirty_ancillas=False),
'basic-dirty-ancilla': MCXVChain(num_ctrl_qubits, dirty_ancillas=True)
}
# check ancilla input
if ancilla_qubits:
_ = self.qbit_argument_conversion(ancilla_qubits)
try:
gate = available_implementations[mode]
except KeyError:
all_modes = list(available_implementations.keys())
raise ValueError('Unsupported mode ({}) selected, choose one of {}'.format(mode,
all_modes))
if hasattr(gate, 'num_ancilla_qubits') and gate.num_ancilla_qubits > 0:
required = gate.num_ancilla_qubits
if ancilla_qubits is None:
raise AttributeError('No ancillas provided, but {} are needed!'.format(required))
# convert ancilla qubits to a list if they were passed as int or qubit
if not hasattr(ancilla_qubits, '__len__'):
ancilla_qubits = [ancilla_qubits]
if len(ancilla_qubits) < required:
actually = len(ancilla_qubits)
raise ValueError('At least {} ancillas required, but {} given.'.format(required,
actually))
# size down if too many ancillas were provided
ancilla_qubits = ancilla_qubits[:required]
else:
ancilla_qubits = []
return self.append(gate, control_qubits[:] + [target_qubit] + ancilla_qubits[:], [])
def mct(self, control_qubits, target_qubit, ancilla_qubits=None, mode='noancilla'):
    """Apply :class:`~qiskit.circuit.library.MCXGate` (alias of :meth:`mcx`)."""
    return self.mcx(control_qubits, target_qubit, ancilla_qubits, mode)

def y(self, qubit):
    """Apply :class:`~qiskit.circuit.library.YGate`."""
    from .library.standard_gates.y import YGate
    return self.append(YGate(), [qubit], [])

def cy(self, control_qubit, target_qubit,  # pylint: disable=invalid-name
       label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CYGate`."""
    from .library.standard_gates.y import CYGate
    return self.append(CYGate(label=label, ctrl_state=ctrl_state),
                       [control_qubit, target_qubit], [])

def z(self, qubit):
    """Apply :class:`~qiskit.circuit.library.ZGate`."""
    from .library.standard_gates.z import ZGate
    return self.append(ZGate(), [qubit], [])

def cz(self, control_qubit, target_qubit,  # pylint: disable=invalid-name
       label=None, ctrl_state=None):
    """Apply :class:`~qiskit.circuit.library.CZGate`."""
    from .library.standard_gates.z import CZGate
    return self.append(CZGate(label=label, ctrl_state=ctrl_state),
                       [control_qubit, target_qubit], [])

def add_calibration(self, gate, qubits, schedule, params=None):
    """Register a low-level, custom pulse definition for the given gate.

    Args:
        gate (Union[Gate, str]): Gate instance, or gate name, the
            calibration is registered for.
        qubits (Union[int, Tuple[int]]): Qubits the calibration applies to.
        schedule (Schedule): Pulse schedule implementing the gate.
        params (Optional[List[Union[float, Parameter]]]): Gate parameter
            values; only used when ``gate`` is given by name.  ``None`` is
            treated as an empty parameter list.
    """
    # Calibrations are keyed first by gate name, then by the
    # (qubits, parameter values) pair.
    if isinstance(gate, Gate):
        self._calibrations[gate.name][(tuple(qubits), tuple(gate.params))] = schedule
    else:
        self._calibrations[gate][(tuple(qubits), tuple(params or []))] = schedule
def _circuit_from_qasm(qasm):
    """Convert a parsed OpenQASM source object into a circuit.

    The QASM text is parsed into an AST, lowered to a DAG, and the DAG is
    converted back into a circuit object.
    """
    # pylint: disable=cyclic-import
    from qiskit.converters import ast_to_dag, dag_to_circuit
    syntax_tree = qasm.parse()
    return dag_to_circuit(ast_to_dag(syntax_tree))
| 41.976024 | 100 | 0.576137 |
73ee4de435b31e64815b354dadc3125e210e48fe | 9,793 | py | Python | astronomaly/feature_extraction/autoencoder.py | nmcardoso/astronomaly | 28c6a7ced2e1553de463a74fc3a19635e45ae548 | [
"BSD-3-Clause"
] | 48 | 2019-11-22T14:41:59.000Z | 2022-03-23T01:48:59.000Z | astronomaly/feature_extraction/autoencoder.py | nmcardoso/astronomaly | 28c6a7ced2e1553de463a74fc3a19635e45ae548 | [
"BSD-3-Clause"
] | 12 | 2021-02-23T15:35:29.000Z | 2022-01-26T09:48:35.000Z | astronomaly/feature_extraction/autoencoder.py | nmcardoso/astronomaly | 28c6a7ced2e1553de463a74fc3a19635e45ae548 | [
"BSD-3-Clause"
] | 6 | 2019-11-27T10:02:43.000Z | 2021-10-11T02:18:06.000Z | import numpy as np
import os
from astronomaly.base.base_pipeline import PipelineStage
try:
from keras.models import load_model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
except ImportError:
print("Failed to import Keras. Deep learning will be unavailable.")
class Autoencoder:
    """Convenience wrapper around a Keras convolutional autoencoder.

    Holds the full autoencoder model plus an ``encoder`` sub-model that
    exposes the bottleneck layer (named ``'encoder'``) used for feature
    extraction.
    """

    def __init__(self, model_file=''):
        """
        Class containing autoencoder training methods.

        Parameters
        ----------
        model_file : string, optional
            Allows for loading of previously trained Keras model in HDF5
            format. Note these models are very sensitive, the exact same
            preprocessing steps must be used to reproduce results.
        """
        if len(model_file) != 0:
            try:
                self.autoencoder = load_model(model_file)
                # Rebuild the encoder sub-model from the named bottleneck
                # layer of the loaded autoencoder.
                inputs = self.autoencoder.input
                outputs = self.autoencoder.get_layer('encoder').output
                self.encoder = Model(inputs=inputs, outputs=outputs)
            except OSError:
                print('Model file ', model_file,
                      'is invalid. Weights not loaded. New model created.')
                self.autoencoder = None
        else:
            # No model yet; compile_autoencoder_model must be called first.
            self.autoencoder = None

    def shape_check(self, images):
        """
        Convenience function to reshape images appropriate for deep learning.

        Ensures the output is 4-dimensional, i.e. "channels last":
        (nobjects, height, width, nchannels).

        Parameters
        ----------
        images : np.ndarray, list
            Array of list of images

        Returns
        -------
        np.ndarray
            Converted array compliant with CNN
        """
        images = np.array(images)
        if len(images.shape) == 2:
            # Single grayscale image -> (1, h, w, 1).
            images = images.reshape([-1, images.shape[0], images.shape[1], 1])
        if len(images.shape) == 3:
            # NOTE(review): a 3-d input is treated as a single multi-channel
            # image -> (1, d0, d1, d2).  A batch of 2-d grayscale images
            # (n, h, w) would also hit this branch and be reshaped the same
            # way -- confirm callers never pass that shape.
            images = images.reshape([-1,
                                     images.shape[0], images.shape[1],
                                     images.shape[2]])
        return images

    def compile_autoencoder_model(self, input_image_shape):
        """
        Compiles the default autoencoder model. Note this model is designed to
        operate on 128x128 images. While it can run on different size images
        this can dramatically change the size of the final feature space.

        Parameters
        ----------
        input_image_shape : tuple
            The expected shape of the input images. Can either be length 2 or 3
            (to include number of channels).
        """
        if len(input_image_shape) == 2:
            # Default to a single channel when none is given.
            input_image_shape = (input_image_shape[0], input_image_shape[1], 1)

        # Assumes "channels last" format
        input_img = Input(shape=input_image_shape)

        # Encoder: four conv + 2x2 max-pool stages, so the spatial size is
        # reduced by a factor of 16 in each dimension.
        x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
        encoder = MaxPooling2D((2, 2), padding='same', name='encoder')(x)

        # For a 128x128 input the bottleneck here is (8, 8, 16), i.e. 1024
        # values per object.
        # Decoder: mirror of the encoder using up-sampling.
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder)
        x = UpSampling2D((2, 2))(x)
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
        x = UpSampling2D((2, 2))(x)
        # Sigmoid output: assumes inputs scaled to [0, 1] -- TODO confirm
        # against the preprocessing pipeline.
        decoder = Conv2D(input_image_shape[-1], (3, 3), activation='sigmoid',
                         padding='same')(x)

        autoencoder = Model(input_img, decoder)
        autoencoder.compile(loss='mse', optimizer='adam')
        self.autoencoder = autoencoder
        self.encoder = Model(inputs=autoencoder.input,
                             outputs=autoencoder.get_layer('encoder').output)

    def fit(self, training_data, batch_size=32, epochs=10):
        """
        Actually train the autoencoder.

        Parameters
        ----------
        training_data : np.ndarray, list
            Either array or list of images. It's recommended that this data be
            augmented with translation or rotation (or both).
        batch_size : int, optional
            Number of samples used to update weights in each iteration. A
            larger batch size can be more accurate but requires more memory and
            is slower to train.
        epochs : int, optional
            Number of full passes through the entire training set.
        """
        X = self.shape_check(training_data)
        # Input and target are the same array: the model learns to
        # reconstruct its own input.
        self.autoencoder.fit(X, X,
                             batch_size=batch_size,
                             epochs=epochs,
                             verbose=1,
                             shuffle=True)

    def encode(self, images):
        """
        Returns the deep encoded features for an array of images.

        Parameters
        ----------
        images : np.ndarray
            Input images (nobjects x image_shape). For a single image,
            provide [image] as an array is expected.

        Returns
        -------
        np.ndarray
            Deep features (nobjects x nfeatures)
        """
        return self.encoder.predict(self.shape_check(images))

    def save(self, filename):
        """
        Saves Keras model in HDF5 format

        Parameters
        ----------
        filename : string
            Location for saved model
        """
        self.autoencoder.save(filename)
class AutoencoderFeatures(PipelineStage):
    """Pipeline stage that extracts autoencoder bottleneck features."""

    def __init__(self, training_dataset=None, retrain=False, **kwargs):
        """
        Runs a very simple autoencoder to produce lower dimensional features.
        This function is currently not very flexible in terms of changing
        parameters, network architecture etc.

        Parameters
        ----------
        training_dataset : Dataset, optional
            A Dataset-type object containing data to train the autoencoder on.
            Note that since Astronomaly runs in an unsupervised setting, this
            can be the same data that the final anomaly detection algorithm is
            run on. However you may wish to augment the training data, for
            example by applying translation to the cutouts.
        retrain : bool, optional
            Whether or not to train the algorithm again or load from a model
            file. This is useful because the automated checks in whether or not
            to rerun a function only operate when "run_on_dataset" is called
            whereas the training is performed in __init__.

        Raises
        ------
        ValueError
            If training data is not provided.
        """
        super().__init__(training_dataset=training_dataset, **kwargs)

        if training_dataset is None:
            raise ValueError('A training dataset object must be provided.')

        # The trained model is cached alongside the stage's other output.
        model_file = os.path.join(self.output_dir, 'autoencoder.h5')

        if retrain or ('force_rerun' in kwargs and kwargs['force_rerun']):
            # Start from scratch; do not attempt to load a saved model.
            self.autoenc = Autoencoder()
        else:
            self.autoenc = Autoencoder(model_file=model_file)

        # ``autoencoder`` is None when no saved model was loaded, so the
        # network must be built and trained now.
        if self.autoenc.autoencoder is None:
            cutouts = []
            # Here I'm explicitly assuming the entire training set can be read
            # into memory
            print("Loading training data...")
            for i in training_dataset.index:
                cutouts.append(training_dataset.get_sample(i))
            print("%d objects loaded." % len(cutouts))
            # All cutouts are assumed to share the shape of the first one --
            # TODO confirm upstream preprocessing guarantees this.
            img_shape = cutouts[0].shape

            print('Compiling autoencoder model...')
            self.autoenc.compile_autoencoder_model(img_shape)
            print('Done!')
            print('Training autoencoder...')
            self.autoenc.fit(cutouts, epochs=10)
            print('Done!')
            if self.save_output:
                print('Autoencoder model saved to', model_file)
                self.autoenc.save(model_file)
        else:
            print('Trained autoencoder read from file', model_file)

    def _execute_function(self, image):
        """
        Runs the trained autoencoder to get the encoded features of the input
        image.

        Parameters
        ----------
        image : np.ndarray
            Cutout to run autoencoder on

        Returns
        -------
        np.ndarray
            Encoded features (flattened to one dimension)
        """
        feats = self.autoenc.encode(image)
        # Flatten the (1, h, w, c) bottleneck output into a 1-d feature
        # vector for the anomaly detector.
        feats = np.reshape(feats, [np.prod(feats.shape[1:])])
        # Label the feature columns once, on first use.
        if len(self.labels) == 0:
            self.labels = ['enc_%d' % i for i in range(len(feats))]
        return feats
| 36.541045 | 79 | 0.572245 |
73ee601cee3e0a29da1ea2ce0685781d1a8cbdb9 | 19,078 | py | Python | lib/python3.8/site-packages/django_elasticsearch_dsl_drf/tests/test_filtering_common.py | ervinpepic/Kodecta_media_catalog | c1e0692d42ee4935a7e1ae7fec1913ddab3054f2 | [
"Apache-2.0"
] | null | null | null | lib/python3.8/site-packages/django_elasticsearch_dsl_drf/tests/test_filtering_common.py | ervinpepic/Kodecta_media_catalog | c1e0692d42ee4935a7e1ae7fec1913ddab3054f2 | [
"Apache-2.0"
] | 7 | 2020-06-06T01:06:19.000Z | 2022-02-10T11:15:14.000Z | lib/python3.8/site-packages/django_elasticsearch_dsl_drf/tests/test_filtering_common.py | ervinpepic/Kodecta_media_catalog | c1e0692d42ee4935a7e1ae7fec1913ddab3054f2 | [
"Apache-2.0"
] | 1 | 2020-11-04T03:21:24.000Z | 2020-11-04T03:21:24.000Z | """
Test filtering backend.
"""
from __future__ import absolute_import
import unittest
from django.core.management import call_command
from nine.versions import DJANGO_GTE_1_10
import pytest
from rest_framework import status
from books import constants
import factories
from search_indexes.viewsets import BookDocumentViewSet
from ..constants import (
SEPARATOR_LOOKUP_COMPLEX_MULTIPLE_VALUE,
SEPARATOR_LOOKUP_COMPLEX_VALUE,
SEPARATOR_LOOKUP_FILTER,
SEPARATOR_LOOKUP_NAME,
)
from ..filter_backends import FilteringFilterBackend
from .base import (
BaseRestFrameworkTestCase,
CORE_API_AND_CORE_SCHEMA_ARE_INSTALLED,
CORE_API_AND_CORE_SCHEMA_MISSING_MSG,
)
from .data_mixins import AddressesMixin, BooksMixin
if DJANGO_GTE_1_10:
from django.urls import reverse
else:
from django.core.urlresolvers import reverse
__title__ = 'django_elasticsearch_dsl_drf.tests.test_filtering_common'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2017-2019 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = (
'TestFilteringCommon',
)
@pytest.mark.django_db
class TestFilteringCommon(BaseRestFrameworkTestCase,
AddressesMixin,
BooksMixin):
"""Test filtering common."""
pytestmark = pytest.mark.django_db
@classmethod
def setUpClass(cls):
    """Create fixtures and rebuild the search index once for all tests."""
    # Testing simple documents: Publisher index.
    cls.create_books()

    # Testing nested objects: Addresses, cities and countries
    cls.created_addresses()

    # Update the Elasticsearch index
    call_command('search_index', '--rebuild', '-f')

    # Testing coreapi and coreschema
    cls.backend = FilteringFilterBackend()
    cls.view = BookDocumentViewSet()

    cls.books_default_filter_lookup_url = reverse(
        'bookdocument_default_filter_lookup-list',
        kwargs={}
    )
# ***********************************************************************
# ************************ Simple fields ********************************
# ***********************************************************************
def _field_filter_value(self, field_name, value, count):
    """Field filter value.

    Assert that filtering ``field_name`` by ``value`` returns exactly
    ``count`` results.

    Usage example:

        >>> self._field_filter_value(
        >>>     'title__wildcard',
        >>>     self.prefix[3:-3],
        >>>     self.prefix_count
        >>> )
    """
    url = self.base_url[:]
    data = {}
    response = self.client.get(
        url + '?{}={}'.format(field_name, value),
        data
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        len(response.data['results']),
        count
    )

def _field_filter_multiple_values(self, url, field_name, values, count):
    """Field filter multiple values.

    Same as :meth:`_field_filter_value` but repeats the query parameter
    once per value (``?f=a&f=b``).

    Usage example:

        >>> self._field_filter_multiple_values(
        >>>     self.books_default_filter_lookup_url,
        >>>     'authors',
        >>>     ['Author 1', 'Author 2'],
        >>>     3
        >>> )
    """
    data = {}
    params = '&'.join(['{}={}'.format(field_name, __v) for __v in values])
    response = self.client.get(
        url + '?{}'.format(params),
        data
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        len(response.data['results']),
        count
    )

def _field_filter_term(self, field_name, filter_value):
    """Field filter term.

    Example:

        http://localhost:8000/api/articles/?tags=children
    """
    self.authenticate()

    url = self.base_url[:]
    data = {}

    # Unfiltered request should return every book.
    response = self.client.get(url, data)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(len(response.data['results']), self.all_count)

    # Filtered request should return only the published books.
    filtered_response = self.client.get(
        url + '?{}={}'.format(field_name, filter_value),
        data
    )
    self.assertEqual(filtered_response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        len(filtered_response.data['results']),
        self.published_count
    )

def test_field_filter_term(self):
    """Field filter term (implicit ``term`` lookup)."""
    return self._field_filter_term(
        'state',
        constants.BOOK_PUBLISHING_STATUS_PUBLISHED
    )

def test_field_filter_term_explicit(self):
    """Field filter term (explicit ``__term`` lookup)."""
    return self._field_filter_term(
        'state__term',
        constants.BOOK_PUBLISHING_STATUS_PUBLISHED
    )
def test_field_filter_range(self):
    """Field filter range.

    Example:

        http://localhost:8000/api/users/?age__range=16__67
    """
    # Range covering exactly the published books' ids.
    lower_id = self.published[0].id
    upper_id = self.published[-1].id
    value = '{lower_id}{separator}{upper_id}'.format(
        lower_id=lower_id,
        separator=SEPARATOR_LOOKUP_COMPLEX_VALUE,
        upper_id=upper_id
    )
    return self._field_filter_value(
        'id__range',
        value,
        self.published_count
    )

def test_field_filter_range_with_boost(self):
    """Field filter range with a trailing boost component.

    Example:

        http://localhost:8000/api/users/?age__range=16__67__2.0
    """
    lower_id = self.published[0].id
    upper_id = self.published[-1].id
    value = '{lower_id}{separator}{upper_id}{separator}{boost}'.format(
        lower_id=lower_id,
        upper_id=upper_id,
        boost='2.0',
        separator=SEPARATOR_LOOKUP_COMPLEX_VALUE
    )
    return self._field_filter_value(
        'id__range',
        value,
        self.published_count
    )

def test_field_filter_prefix(self):
    """Test filter prefix.

    Example:

        http://localhost:8000/api/articles/?tags__prefix=bio
    """
    return self._field_filter_value(
        'title__prefix',
        self.prefix,
        self.prefix_count
    )

def test_field_filter_in(self):
    """Test filter in.

    Example:

        http://localhost:8000/api/articles/?id__in=1__2__3
    """
    return self._field_filter_value(
        'id__in',
        SEPARATOR_LOOKUP_COMPLEX_VALUE.join(
            [str(__b.id) for __b in self.prefixed]
        ),
        self.prefix_count
    )

def _field_filter_terms_list(self, field_name, in_values, count):
    """Field filter terms, passed as repeated query parameters.

    Example:

        http://localhost:8000/api/articles/?id=1&id=2&id=3
    """
    url = self.base_url[:]
    data = {}
    url_parts = ['{}={}'.format(field_name, val) for val in in_values]
    response = self.client.get(
        url + '?{}'.format('&'.join(url_parts)),
        data
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        len(response.data['results']),
        count
    )

def test_field_filter_terms_list(self):
    """Test filter terms (repeated query parameters)."""
    return self._field_filter_terms_list(
        'id',
        [str(__b.id) for __b in self.prefixed],
        self.prefix_count
    )

def test_field_filter_terms_string(self):
    """Test filter terms (separator-joined string).

    Example:

        http://localhost:8000/api/articles/?id__terms=1__2__3
    """
    return self._field_filter_value(
        'id__terms',
        SEPARATOR_LOOKUP_COMPLEX_VALUE.join(
            [str(__b.id) for __b in self.prefixed]
        ),
        self.prefix_count
    )
def test_field_filter_exists_true(self):
    """Test filter exists true.

    Example:

        http://localhost:8000/api/articles/?tags__exists=true
    """
    # Every fixture has tags, so all objects match.
    return self._field_filter_value(
        'tags__exists',
        'true',
        self.all_count
    )

def test_field_filter_exists_false(self):
    """Test filter exists false.

    Example:

        http://localhost:8000/api/articles/?non_existent__exists=false
    """
    # The field exists on no document, so all objects match.
    return self._field_filter_value(
        'non_existent_field__exists',
        'false',
        self.all_count
    )

def test_field_filter_wildcard(self):
    """Test filter wildcard.

    Example:

        http://localhost:8000/api/articles/?title__wildcard=*elusional*
    """
    # Match on an inner substring of the shared title prefix.
    return self._field_filter_value(
        'title__wildcard',
        '*{}*'.format(self.prefix[1:6]),
        self.prefix_count
    )

def test_field_filter_exclude(self):
    """Test filter exclude.

    Example:

        http://localhost:8000/api/articles/?tags__exclude=children
    """
    # Excluding the published state leaves all the non-published books.
    return self._field_filter_value(
        'state__exclude',
        constants.BOOK_PUBLISHING_STATUS_PUBLISHED,
        self.all_count - self.published_count
    )

def test_field_filter_isnull_true(self):
    """Test filter isnull true.

    Example:

        http://localhost:8000/api/articles/?null_field__isnull=true
    """
    self._field_filter_value(
        'null_field__isnull',
        'true',
        self.all_count
    )
    self._field_filter_value(
        'tags__isnull',
        'true',
        self.no_tags_count
    )

def test_field_filter_isnull_false(self):
    """Test filter isnull false.

    Example:

        http://localhost:8000/api/articles/?tags__isnull=false
    """
    self._field_filter_value(
        'null_field__isnull',
        'false',
        0
    )
    self._field_filter_value(
        'tags__isnull',
        'false',
        self.all_count - self.no_tags_count
    )

def test_field_filter_endswith(self):
    """Test filter endswith.

    Example:

        http://localhost:8000/api/articles/?state__endswith=lished
    """
    return self._field_filter_value(
        'state__endswith',
        constants.BOOK_PUBLISHING_STATUS_PUBLISHED[4:],
        self.published_count
    )

def test_field_filter_contains(self):
    """Test filter contains.

    Example:

        http://localhost:8000/api/articles/?state__contains=lishe
    """
    return self._field_filter_value(
        'state__contains',
        constants.BOOK_PUBLISHING_STATUS_PUBLISHED[4:-2],
        self.published_count
    )
def _field_filter_gte_lte(self, field_name, value, lookup, boost=None):
    """Field filter gt/gte/lt/lte.

    Example:

        http://localhost:8000/api/users/?id__gt=10
        http://localhost:8000/api/users/?id__gte=10
        http://localhost:8000/api/users/?id__lt=10
        http://localhost:8000/api/users/?id__lte=10
    """
    url = self.base_url[:]
    data = {}

    if boost is not None:
        # With boost the value is ``<value>__<boost>``.
        url += '?{field_name}__{lookup}={value}{separator}{boost}'.format(
            field_name=field_name,
            lookup=lookup,
            value=value,
            boost=boost,
            separator=SEPARATOR_LOOKUP_COMPLEX_VALUE
        )
    else:
        url += '?{field_name}__{lookup}={value}'.format(
            field_name=field_name,
            lookup=lookup,
            value=value
        )

    response = self.client.get(
        url,
        data
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)

    # Map the lookup name to the matching comparison assertion and check
    # that every returned ``id`` satisfies it.
    __mapping = {
        'gt': self.assertGreater,
        'gte': self.assertGreaterEqual,
        'lt': self.assertLess,
        'lte': self.assertLessEqual,
    }

    __func = __mapping.get(lookup)
    if callable(__func):
        for obj in response.data['results']:
            __func(
                obj['id'],
                value
            )

def test_field_filter_gt(self):
    """Field filter gt.

    Example:

        http://localhost:8000/api/users/?id__gt=10
    """
    return self._field_filter_gte_lte('id', self.in_progress[0].id, 'gt')

def test_field_filter_gt_with_boost(self):
    """Field filter gt with boost.

    Example:

        http://localhost:8000/api/users/?id__gt=10__2.0
    """
    # TODO: check boost value
    return self._field_filter_gte_lte(
        'id',
        self.in_progress[0].id,
        'gt',
        '2.0'
    )

def test_field_filter_gte(self):
    """Field filter gte.

    Example:

        http://localhost:8000/api/users/?id__gte=10
    """
    return self._field_filter_gte_lte('id', self.in_progress[0].id, 'gte')

def test_field_filter_lt(self):
    """Field filter lt.

    Example:

        http://localhost:8000/api/users/?id__lt=10
    """
    return self._field_filter_gte_lte('id', self.in_progress[0].id, 'lt')

def test_field_filter_lt_with_boost(self):
    """Field filter lt with boost.

    Example:

        http://localhost:8000/api/users/?id__lt=10__2.0
    """
    # TODO: check boost value
    return self._field_filter_gte_lte(
        'id',
        self.in_progress[0].id,
        'lt',
        '2.0'
    )

def test_field_filter_lte(self):
    """Field filter lte.

    Example:

        http://localhost:8000/api/users/?id__lte=10
    """
    return self._field_filter_gte_lte('id', self.in_progress[0].id, 'lte')
def test_ids_filter(self):
"""Test ids filter.
Example:
http://localhost:8000/api/articles/?ids=68__64__58
http://localhost:8000/api/articles/?ids=68&ids=64&ids=58
"""
__ids = [str(__obj.id) for __obj in self.published]
return self._field_filter_value(
'ids',
SEPARATOR_LOOKUP_COMPLEX_VALUE.join(__ids),
self.published_count
)
def test_ids_empty_filter(self):
"""Test ids filter with empty value. This should not fail.
Example:
http://localhost:8000/api/articles/?ids=
"""
__ids = []
return self._field_filter_value(
'ids',
SEPARATOR_LOOKUP_COMPLEX_VALUE.join(__ids),
0
)
def test_default_filter_lookup(self):
    """Test the default filter lookup on a multi-valued field.

    Example:
        http://localhost:8000/search/books-default-filter-lookup/
        ?authors=Robin&authors=Luc
    """
    # Create two authors
    author_1 = factories.AuthorFactory(name='Author1')
    author_2 = factories.AuthorFactory(name='Author2')
    authors = [author_1, author_2]
    # Add them to 3 books
    self.published[0].authors.add(*authors)
    self.published[1].authors.add(*authors)
    self.published[2].authors.add(*authors)
    # Update the Elasticsearch index
    call_command('search_index', '--rebuild', '-f')
    # Test: filtering by both authors should return the 3 books above.
    self._field_filter_multiple_values(
        self.books_default_filter_lookup_url,
        'authors',
        authors,
        3
    )
# ***********************************************************************
# ************************ Nested fields ********************************
# ***********************************************************************
def _nested_field_filter_term(self, field_name, filter_value, count):
    """Nested field filter term helper.

    Fetches the addresses endpoint unfiltered, then filtered on
    ``field_name=filter_value``, and checks the result counts.

    Example:
        http://localhost:8000/api/addresses/?city=Yerevan
    """
    self.authenticate()
    url = self.addresses_url[:]
    data = {}
    # Unfiltered request: should contain all addresses.
    response = self.client.get(url, data)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        len(response.data['results']),
        self.all_addresses_count
    )
    # Filtered request: should contain exactly `count` results.
    filtered_response = self.client.get(
        url + '?{}={}'.format(field_name, filter_value),
        data
    )
    self.assertEqual(filtered_response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        len(filtered_response.data['results']),
        count
    )
def test_nested_field_filter_term(self):
    """Nested field filter term on city and country fields."""
    self._nested_field_filter_term(
        'city',
        'Yerevan',
        self.addresses_in_yerevan_count
    )
    # NOTE(review): reuses the Yerevan count for the Armenia filter —
    # presumably all Armenian addresses are in Yerevan in the fixtures;
    # confirm against the test data setup.
    self._nested_field_filter_term(
        'country',
        'Armenia',
        self.addresses_in_yerevan_count
    )
    self._nested_field_filter_term(
        'city',
        'Dublin',
        self.addresses_in_dublin_count
    )
# ***********************************************************************
# ************************* Other fields ********************************
# ***********************************************************************
def test_various_complex_fields(self):
    """Smoke-test the cities list and city detail endpoints (200 OK)."""
    data = {}
    response = self.client.get(self.cities_url, data)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response = self.client.get(self.city_detail_url, data)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
# ***********************************************************************
# ******************** Core api and core schema *************************
# ***********************************************************************
@unittest.skipIf(not CORE_API_AND_CORE_SCHEMA_ARE_INSTALLED,
                 CORE_API_AND_CORE_SCHEMA_MISSING_MSG)
def test_schema_fields_with_filter_fields_list(self):
    """Schema field names must match the view's declared filter_fields keys."""
    fields = self.backend.get_schema_fields(self.view)
    fields = [f.name for f in fields]
    self.assertEqual(fields, list(self.view.filter_fields.keys()))
@unittest.skipIf(not CORE_API_AND_CORE_SCHEMA_ARE_INSTALLED,
                 CORE_API_AND_CORE_SCHEMA_MISSING_MSG)
def test_schema_field_not_required(self):
    """Every generated schema field must be marked as not required."""
    fields = self.backend.get_schema_fields(self.view)
    fields = [f.required for f in fields]
    for field in fields:
        self.assertFalse(field)
if __name__ == '__main__':
unittest.main()
| 28.347697 | 78 | 0.549271 |
73ee62a37dd5f30f248479ad30c732359e5174a7 | 6,125 | py | Python | ibis/backends/clickhouse/tests/test_client.py | jreback/ibis | fdcca59b085416b1311eb268be3886abad1db230 | [
"Apache-2.0"
] | 1 | 2020-08-19T03:36:26.000Z | 2020-08-19T03:36:26.000Z | ibis/backends/clickhouse/tests/test_client.py | jreback/ibis | fdcca59b085416b1311eb268be3886abad1db230 | [
"Apache-2.0"
] | null | null | null | ibis/backends/clickhouse/tests/test_client.py | jreback/ibis | fdcca59b085416b1311eb268be3886abad1db230 | [
"Apache-2.0"
] | 2 | 2020-11-27T22:21:50.000Z | 2021-04-03T09:36:25.000Z | from io import StringIO
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.config as config
import ibis.expr.types as ir
from ibis import literal as L
pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse
def test_get_table_ref(db):
table = db.functional_alltypes
assert isinstance(table, ir.TableExpr)
table = db['functional_alltypes']
assert isinstance(table, ir.TableExpr)
def test_run_sql(con, db):
query = 'SELECT * FROM {0}.`functional_alltypes`'.format(db.name)
table = con.sql(query)
fa = con.table('functional_alltypes')
assert isinstance(table, ir.TableExpr)
assert table.schema() == fa.schema()
expr = table.limit(10)
result = expr.execute()
assert len(result) == 10
def test_get_schema(con, db):
t = con.table('functional_alltypes')
schema = con.get_schema('functional_alltypes', database=db.name)
assert t.schema() == schema
def test_result_as_dataframe(con, alltypes):
expr = alltypes.limit(10)
ex_names = expr.schema().names
result = con.execute(expr)
assert isinstance(result, pd.DataFrame)
assert result.columns.tolist() == ex_names
assert len(result) == 10
def test_array_default_limit(con, alltypes):
result = con.execute(alltypes.float_col, limit=100)
assert len(result) == 100
def test_limit_overrides_expr(con, alltypes):
result = con.execute(alltypes.limit(10), limit=5)
assert len(result) == 5
def test_limit_equals_none_no_limit(alltypes):
with config.option_context('sql.default_limit', 10):
result = alltypes.execute(limit=None)
assert len(result) > 10
def test_verbose_log_queries(con, db):
queries = []
def logger(x):
queries.append(x)
with config.option_context('verbose', True):
with config.option_context('verbose_log', logger):
con.table('functional_alltypes', database=db.name)
expected = 'DESC {0}.`functional_alltypes`'.format(db.name)
assert len(queries) == 1
assert queries[0] == expected
def test_sql_query_limits(alltypes):
table = alltypes
with config.option_context('sql.default_limit', 100000):
# table has 25 rows
assert len(table.execute()) == 7300
# comply with limit arg for TableExpr
assert len(table.execute(limit=10)) == 10
# state hasn't changed
assert len(table.execute()) == 7300
# non-TableExpr ignores default_limit
assert table.count().execute() == 7300
# non-TableExpr doesn't observe limit arg
assert table.count().execute(limit=10) == 7300
with config.option_context('sql.default_limit', 20):
# TableExpr observes default limit setting
assert len(table.execute()) == 20
# explicit limit= overrides default
assert len(table.execute(limit=15)) == 15
assert len(table.execute(limit=23)) == 23
# non-TableExpr ignores default_limit
assert table.count().execute() == 7300
# non-TableExpr doesn't observe limit arg
assert table.count().execute(limit=10) == 7300
# eliminating default_limit doesn't break anything
with config.option_context('sql.default_limit', None):
assert len(table.execute()) == 7300
assert len(table.execute(limit=15)) == 15
assert len(table.execute(limit=10000)) == 7300
assert table.count().execute() == 7300
assert table.count().execute(limit=10) == 7300
def test_expr_compile_verify(alltypes):
    """A supported expression compiles to a SQL string and passes verify()."""
    expr = alltypes.double_col.sum()
    assert isinstance(expr.compile(), str)
    assert expr.verify()
def test_api_compile_verify(alltypes):
    """Module-level verify() distinguishes supported from unsupported ops."""
    t = alltypes.timestamp_col
    supported = t.year()
    # rank() is expected to have no ClickHouse translation here.
    unsupported = t.rank()
    assert ibis.clickhouse.verify(supported)
    assert not ibis.clickhouse.verify(unsupported)
def test_database_repr(db):
    """repr() of a database object mentions the database name."""
    assert db.name in repr(db)
def test_database_default_current_database(con):
    """con.database() with no argument wraps the current database."""
    db = con.database()
    assert db.name == con.current_database
def test_embedded_identifier_quoting(alltypes):
    """A column alias containing parentheses must be quoted correctly."""
    t = alltypes
    expr = t[[(t.double_col * 2).name('double(fun)')]]['double(fun)'].sum()
    # Execution is the assertion: an unquoted alias would raise a DB error.
    expr.execute()
def test_table_info(alltypes):
    """Table.info() writes a non-empty summary to the supplied buffer.

    The previous assertion (``buf.getvalue() is not None``) could never
    fail, because StringIO.getvalue() always returns a str; assert that
    something was actually written instead.
    """
    buf = StringIO()
    alltypes.info(buf=buf)
    assert buf.getvalue()
def test_execute_exprs_no_table_ref(con):
    """Scalar expressions with no table reference execute on their own."""
    cases = [(L(1) + L(2), 3)]
    for expr, expected in cases:
        result = con.execute(expr)
        assert result == expected
    # ExprList: executing a list of named expressions should also work.
    exlist = ibis.api.expr_list(
        [L(1).name('a'), ibis.now().name('b'), L(2).log().name('c')]
    )
    con.execute(exlist)
@pytest.mark.skip(reason="FIXME: it is raising KeyError: 'Unnamed: 0'")
def test_insert(con, alltypes, df):
    """Inserting a DataFrame slice round-trips through the table."""
    drop = 'DROP TABLE IF EXISTS temporary_alltypes'
    create = (
        'CREATE TABLE IF NOT EXISTS '
        'temporary_alltypes AS functional_alltypes'
    )
    con.raw_sql(drop)
    con.raw_sql(create)
    temporary = con.table('temporary_alltypes')
    records = df[:10]
    # Freshly (re)created table starts empty.
    assert len(temporary.execute()) == 0
    temporary.insert(records)
    tm.assert_frame_equal(temporary.execute(), records)
def test_insert_with_less_columns(con, alltypes, df):
    """Inserting a frame missing table columns raises AssertionError."""
    drop = 'DROP TABLE IF EXISTS temporary_alltypes'
    create = (
        'CREATE TABLE IF NOT EXISTS '
        'temporary_alltypes AS functional_alltypes'
    )
    con.raw_sql(drop)
    con.raw_sql(create)
    temporary = con.table('temporary_alltypes')
    # Only one real column plus a null date_col: schema mismatch.
    records = df.loc[:10, ['string_col']].copy()
    records['date_col'] = None
    with pytest.raises(AssertionError):
        temporary.insert(records)
def test_insert_with_more_columns(con, alltypes, df):
    """Inserting a frame with extra columns raises AssertionError."""
    drop = 'DROP TABLE IF EXISTS temporary_alltypes'
    create = (
        'CREATE TABLE IF NOT EXISTS '
        'temporary_alltypes AS functional_alltypes'
    )
    con.raw_sql(drop)
    con.raw_sql(create)
    temporary = con.table('temporary_alltypes')
    records = df[:10].copy()
    # Column that does not exist in the target table: schema mismatch.
    records['non_existing_column'] = 'raise on me'
    with pytest.raises(AssertionError):
        temporary.insert(records)
| 27.10177 | 75 | 0.673959 |
73ee67a911741d61a820c72ec5b875112bdbb96d | 589 | py | Python | var/spack/repos/builtin/packages/r-leaps/package.py | kehw/spack | 4f49b1a9301447a8cf880c99820cad65e5c2d7e3 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2020-09-10T22:50:08.000Z | 2021-01-12T22:18:54.000Z | var/spack/repos/builtin/packages/r-leaps/package.py | kehw/spack | 4f49b1a9301447a8cf880c99820cad65e5c2d7e3 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2021-01-08T22:23:53.000Z | 2022-03-30T11:08:17.000Z | var/spack/repos/builtin/packages/r-leaps/package.py | kehw/spack | 4f49b1a9301447a8cf880c99820cad65e5c2d7e3 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RLeaps(RPackage):
    """leaps: Regression Subset Selection"""
    # CRAN landing page and source tarball locations.
    homepage = "https://cloud.r-project.org/package=leaps"
    url = "https://cloud.r-project.org/src/contrib/leaps_3.0.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/leaps"
    version('3.0', sha256='55a879cdad5a4c9bc3b5697dd4d364b3a094a49d8facb6692f5ce6af82adf285')
| 34.647059 | 93 | 0.740238 |
73eea61795e3fa2a626d184f24b5dafb0c014615 | 1,452 | py | Python | test/test_user_permission.py | pollination/python-sdk | 599e8dbfc6e547c5e18aa903b27c70d7ffef84e5 | [
"RSA-MD"
] | 2 | 2020-01-30T23:28:59.000Z | 2020-05-06T16:43:47.000Z | test/test_user_permission.py | pollination/python-sdk | 599e8dbfc6e547c5e18aa903b27c70d7ffef84e5 | [
"RSA-MD"
] | 1 | 2020-10-02T18:00:25.000Z | 2020-10-02T18:00:25.000Z | test/test_user_permission.py | pollination/python-sdk | 599e8dbfc6e547c5e18aa903b27c70d7ffef84e5 | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
pollination-server
Pollination Server OpenAPI Definition # noqa: E501
The version of the OpenAPI document: 0.16.0
Contact: info@pollination.cloud
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import pollination_sdk
from pollination_sdk.models.user_permission import UserPermission # noqa: E501
from pollination_sdk.rest import ApiException
class TestUserPermission(unittest.TestCase):
    """UserPermission unit test stubs"""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Build a UserPermission test instance.

        include_optional is a boolean: when False only required
        params are included, when True both required and
        optional params are included.
        """
        # model = pollination_sdk.models.user_permission.UserPermission()  # noqa: E501
        if include_optional:
            return UserPermission(
                admin=False,
                read=True,
                write=False
            )
        else:
            return UserPermission(
            )
    def testUserPermission(self):
        """Test UserPermission construction with and without optionals."""
        # Construction itself is the assertion: invalid models would raise.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 25.928571 | 87 | 0.660468 |
73ef0362a27033e1024f5bb0212f1eaa45758e13 | 18,659 | py | Python | back/api/serializers.py | endorria/geoshop2 | 2a558710fedfaeb2319ba36bc84f4fc956e5f370 | [
"BSD-3-Clause"
] | null | null | null | back/api/serializers.py | endorria/geoshop2 | 2a558710fedfaeb2319ba36bc84f4fc956e5f370 | [
"BSD-3-Clause"
] | null | null | null | back/api/serializers.py | endorria/geoshop2 | 2a558710fedfaeb2319ba36bc84f4fc956e5f370 | [
"BSD-3-Clause"
] | null | null | null | import json
import copy
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
from django.contrib.auth.tokens import default_token_generator
from django.contrib.gis.gdal import GDALException
from django.contrib.gis.geos import Polygon, GEOSException, GEOSGeometry
from django.utils.translation import gettext_lazy as _
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
from djmoney.contrib.django_rest_framework import MoneyField
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from allauth.account.adapter import get_adapter
from .helpers import zip_all_orderitems
from .models import (
Copyright, Contact, Document, DataFormat, Identity,
Metadata, MetadataContact, Order, OrderItem, OrderType,
Pricing, Product, ProductFormat, UserChange)
# Get the UserModel
UserModel = get_user_model()
class WKTPolygonField(serializers.Field):
"""
Polygons are serialized to POLYGON((Long, Lat)) notation
"""
def to_representation(self, value):
if isinstance(value, dict) or value is None:
return value
new_value = copy.copy(value)
new_value.transform(4326)
new_geom = []
return new_value.wkt or 'POLYGON EMPTY'
def to_internal_value(self, value):
if value == '' or value is None:
return value
if isinstance(value, GEOSGeometry):
# value already has the correct representation
return value
if isinstance(value, dict):
value = json.dumps(value)
try:
return GEOSGeometry(value)
except (GEOSException):
raise ValidationError(
_(
'Invalid format: string or unicode input unrecognized as GeoJSON, WKT EWKT or HEXEWKB.'
)
)
except (ValueError, TypeError, GDALException) as error:
raise ValidationError(
_('Unable to convert to python object: {}'.format(str(error)))
)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = UserModel
exclude = [
'password', 'first_name', 'last_name', 'email',
'is_staff', 'is_superuser', 'is_active', 'groups',
'user_permissions']
class IdentitySerializer(serializers.ModelSerializer):
class Meta:
model = Identity
exclude = ['sap_id', 'contract_accepted', 'is_public', 'user']
class CopyrightSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Copyright
fields = '__all__'
class ContactSerializer(serializers.HyperlinkedModelSerializer):
belongs_to = serializers.HiddenField(
default=serializers.CurrentUserDefault(),
)
class Meta:
model = Contact
fields = '__all__'
class DocumentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Document
fields = '__all__'
class DataFormatSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = DataFormat
fields = '__all__'
class OrderTypeSerializer(serializers.ModelSerializer):
class Meta:
model = OrderType
fields = '__all__'
class UserIdentitySerializer(UserSerializer):
"""
Flattens User and Identity.
"""
identity = IdentitySerializer(many=False)
def to_representation(self, instance):
"""Move fields from user to identity representation."""
representation = super().to_representation(instance)
identity_representation = representation.pop('identity')
for identity_key in identity_representation:
new_key = identity_key
if new_key in representation:
new_key = 'identity_' + identity_key
representation[new_key] = identity_representation[identity_key]
return representation
class MetadataIdentitySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Identity
fields = [
'url',
'first_name', 'last_name', 'email',
'phone', 'street', 'street2',
'company_name',
'postcode', 'city', 'country']
class MetadataContactSerializer(serializers.HyperlinkedModelSerializer):
contact_person = MetadataIdentitySerializer(read_only=True)
class Meta:
model = MetadataContact
fields = [
'contact_person',
'metadata_role']
# TODO: Test this, check for passing contexts ! Check public identities
class MetadataSerializer(serializers.HyperlinkedModelSerializer):
contact_persons = serializers.SerializerMethodField()
modified_user = serializers.StringRelatedField(read_only=True)
documents = DocumentSerializer(many=True)
copyright = CopyrightSerializer(many=False)
legend_tag = serializers.StringRelatedField()
image_tag = serializers.StringRelatedField()
legend_link = serializers.SerializerMethodField()
class Meta:
model = Metadata
fields = '__all__'
lookup_field = 'id_name'
extra_kwargs = {
'url': {'lookup_field': 'id_name'}
}
def get_contact_persons(self, obj):
"""obj is a Metadata instance. Returns list of dicts"""
qset = MetadataContact.objects.filter(metadata=obj)
return [
MetadataContactSerializer(m, context={
'request': self.context['request']
}).data for m in qset]
def get_legend_link(self, obj):
return obj.get_legend_link()
class OrderDigestSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializer showing a summary of an Order.
Always exclude geom here as it is used in lists of
orders and performance can be impacted.
"""
order_type = serializers.StringRelatedField()
class Meta:
model = Order
exclude = [
'geom', 'date_downloaded', 'client',
'processing_fee_currency', 'processing_fee',
'part_vat_currency', 'part_vat', 'extract_result',
'invoice_contact']
class OrderItemSerializer(serializers.ModelSerializer):
"""
A Basic serializer for order items
"""
price = MoneyField(max_digits=14, decimal_places=2,
required=False, allow_null=True, read_only=True)
data_format = serializers.SlugRelatedField(
required=False,
queryset=DataFormat.objects.all(),
slug_field='name'
)
product = serializers.SlugRelatedField(
queryset=Product.objects.all(),
slug_field='label')
product_id = serializers.PrimaryKeyRelatedField(read_only=True)
available_formats = serializers.ListField(read_only=True)
class Meta:
model = OrderItem
exclude = ['_price_currency', '_price', '_base_fee_currency',
'_base_fee', 'last_download', 'extract_result']
read_only_fields = ['price_status', 'order']
class OrderItemTextualSerializer(OrderItemSerializer):
"""
Same as OrderItem, without Order
"""
class Meta(OrderItemSerializer.Meta):
exclude = OrderItemSerializer.Meta.exclude + ['order']
class OrderSerializer(serializers.ModelSerializer):
"""
A complete Order serializer.
"""
order_type = serializers.SlugRelatedField(
queryset=OrderType.objects.all(),
slug_field='name',
help_text='Input the translated string value, for example "Privé"')
items = OrderItemTextualSerializer(many=True)
client = serializers.HiddenField(
default=serializers.CurrentUserDefault(),
)
class Meta:
model = Order
exclude = ['date_downloaded', 'extract_result']
read_only_fields = [
'date_ordered', 'date_processed',
'processing_fee_currency', 'processing_fee',
'total_cost_currency', 'total_cost',
'part_vat_currency', 'part_vat',
'status']
def create(self, validated_data):
items_data = validated_data.pop('items', None)
geom = validated_data.pop('geom', None)
order = Order(**validated_data)
order.geom = Polygon(geom.coords[0], srid=settings.DEFAULT_SRID)
order.save()
for item_data in items_data:
item = OrderItem.objects.create(order=order, **item_data)
item.set_price()
item.save()
if order.order_type and items_data:
order.set_price()
order.save()
return order
def update(self, instance, validated_data):
if instance.status != Order.OrderStatus.DRAFT:
raise serializers.ValidationError()
items_data = validated_data.pop('items', None)
geom = validated_data.pop('geom', None)
if geom is not None:
instance.geom = Polygon(geom.coords[0], srid=settings.DEFAULT_SRID)
instance.title = validated_data.get('title', instance.title)
instance.description = validated_data.get(
'description', instance.description)
instance.invoice_contact = validated_data.get(
'invoice_contact', instance.invoice_contact)
instance.save()
update_products = []
if items_data is not None:
for item in items_data:
update_products.append(item.get('product').label)
# create / update / delete order_items on PUT (self.partial=False)
# update order_items on PATCH (self.partial=True)
order_items = list((instance.items).all())
if not self.partial:
for existing_item in order_items:
if existing_item.product.label not in update_products:
existing_item.delete()
if items_data:
for item_data in items_data:
oi_instance, created = OrderItem.objects.get_or_create(
order=instance,
product=item_data.get('product')
)
oi_instance.data_format = item_data.get('data_format', oi_instance.data_format)
oi_instance.product = item_data.get('product', oi_instance.product)
oi_instance.set_price()
oi_instance.save()
instance.set_price()
instance.save()
if instance.order_type:
if items_data or geom or 'order_type' in validated_data:
instance.set_price()
instance.save()
return instance
class ProductSerializer(serializers.ModelSerializer):
    """
    Product serializer.
    """
    pricing = serializers.StringRelatedField(
        read_only=True)
    class Meta:
        model = Product
        read_only_fields = ['pricing', 'label', 'provider', 'group']
        exclude = ['order', 'thumbnail_link', 'ts', 'metadata']
class ExtractOrderItemSerializer(OrderItemSerializer):
    """
    Orderitem serializer for extract. Allows to upload file of orderitem.
    """
    extract_result = serializers.FileField(required=False)
    product = ProductSerializer(read_only=True)
    data_format = serializers.StringRelatedField(read_only=True)
    is_rejected = serializers.BooleanField(required=False)
    class Meta(OrderItemSerializer.Meta):
        exclude = ['_price_currency', '_base_fee_currency',
                   '_price', '_base_fee', 'order', 'status']
        read_only_fields = [
            'id', 'price', 'data_format', 'product', 'srid', 'last_download', 'price_status']
    def update(self, instance, validated_data):
        """Attach (or reject) an extract result and advance the order status."""
        if instance.extract_result:
            # deletes previous file in filesystem
            instance.extract_result.delete()
        instance.comment = validated_data.pop('comment', None)
        # NOTE(review): pop() without a default will raise KeyError if the
        # payload omits is_rejected, even though the field is declared
        # required=False — confirm callers always send it.
        is_rejected = validated_data.pop('is_rejected')
        instance.extract_result = validated_data.pop('extract_result', '')
        if is_rejected:
            instance.status = OrderItem.OrderItemStatus.REJECTED
        if instance.extract_result.name != '':
            instance.status = OrderItem.OrderItemStatus.PROCESSED
        instance.save()
        status = instance.order.next_status_on_extract_input()
        if status == Order.OrderStatus.PROCESSED:
            # All items are in: bundle everything for download.
            zip_all_orderitems(instance.order)
        instance.order.save()
        return instance
class ExtractOrderSerializer(serializers.ModelSerializer):
    """
    Order serializer for extract.
    """
    order_type = serializers.SlugRelatedField(
        queryset=OrderType.objects.all(),
        slug_field='name',
        help_text='Input the translated string value, for example "Privé"')
    items = ExtractOrderItemSerializer(many=True)
    client = UserIdentitySerializer()
    invoice_contact = IdentitySerializer()
    # Geometry is exposed as WKT in the order's native SRID/area.
    geom = WKTPolygonField()
    geom_srid = serializers.IntegerField()
    geom_area = serializers.FloatField()
    class Meta:
        model = Order
        exclude = [
            'date_downloaded', 'processing_fee_currency',
            'total_without_vat_currency', 'part_vat_currency', 'total_with_vat_currency']
        read_only_fields = [
            'date_ordered', 'date_processed',
            'processing_fee_currency', 'processing_fee',
            'total_cost_currency', 'total_cost',
            'part_vat_currency', 'part_vat',
            'status', 'geom_area']
class PasswordResetSerializer(serializers.Serializer):
    """
    Serializer for requesting a password reset e-mail.
    """
    email = serializers.EmailField()
    password_reset_form_class = PasswordResetForm
    def validate_email(self, value):
        """Validate the e-mail via Django's PasswordResetForm."""
        # Create PasswordResetForm with the serializer
        self.reset_form = self.password_reset_form_class(
            data=self.initial_data)
        if not self.reset_form.is_valid():
            raise serializers.ValidationError(self.reset_form.errors)
        return value
    def save(self):
        """Send the reset e-mail through the validated form."""
        request = self.context.get('request')
        # Set some values to trigger the send_email method.
        opts = {
            'domain_override': getattr(settings, 'FRONT_URL') + getattr(settings, 'FRONT_HREF'),
            'use_https': request.is_secure(),
            'from_email': getattr(settings, 'DEFAULT_FROM_EMAIL'),
            'request': request,
            'email_template_name': 'email_password_reset.html',
            'html_email_template_name': 'email_password_reset.html'
        }
        self.reset_form.save(**opts)
class PasswordResetConfirmSerializer(serializers.Serializer):
    """
    Serializer for setting a new user password.
    """
    new_password1 = serializers.CharField(max_length=128)
    new_password2 = serializers.CharField(max_length=128)
    uid = serializers.CharField()
    token = serializers.CharField()
    set_password_form_class = SetPasswordForm
    def validate(self, attrs):
        """Resolve the user from uid, then validate passwords and token."""
        self._errors = {}
        # Decode the uidb64 to uid to get User object
        try:
            uid = force_text(urlsafe_base64_decode(attrs['uid']))
            self.user = UserModel._default_manager.get(pk=uid)
        except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
            raise ValidationError({'uid': ['Invalid value']})
        # Construct SetPasswordForm instance
        self.set_password_form = self.set_password_form_class(
            user=self.user, data=attrs
        )
        if not self.set_password_form.is_valid():
            raise serializers.ValidationError(self.set_password_form.errors)
        if not default_token_generator.check_token(self.user, attrs['token']):
            raise ValidationError({'token': ['Invalid value']})
        return attrs
    def save(self):
        """Persist the new password via the validated SetPasswordForm."""
        return self.set_password_form.save()
class PricingSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing all Pricing fields."""
    class Meta:
        model = Pricing
        fields = '__all__'
class ProductFormatSerializer(serializers.ModelSerializer):
    """Links a product to a data format, both referenced by slug."""
    product = serializers.SlugRelatedField(
        queryset=Product.objects.all(),
        slug_field='label')
    data_format = serializers.SlugRelatedField(
        required=False,
        queryset=DataFormat.objects.all(),
        slug_field='name',
        label='format')
    class Meta:
        model = ProductFormat
        fields = '__all__'
class DataFormatListSerializer(ProductFormatSerializer):
    """ProductFormat variant that drops the product field entirely."""
    product = None
    class Meta:
        model = ProductFormat
        exclude = ['product']
class ProductDigestSerializer(serializers.ModelSerializer):
    """Compact product serializer linking to the metadata detail view."""
    metadata = serializers.HyperlinkedRelatedField(
        many=False,
        read_only=True,
        view_name='metadata-detail',
        lookup_field='id_name'
    )
    class Meta:
        model = Product
        exclude = ['ts']
class RegisterSerializer(serializers.ModelSerializer):
    """
    Serializer for user registration.
    """
    password1 = serializers.CharField(write_only=True)
    password2 = serializers.CharField(write_only=True)
    def validate_username(self, username):
        # Delegates username cleaning to the allauth adapter.
        username = get_adapter().clean_username(username)
        return username
    def validate_email(self, email):
        email = get_adapter().clean_email(email)
        return email
    def validate_password1(self, password):
        return get_adapter().clean_password(password)
    def validate(self, data):
        """Cross-field validation: the two passwords must match."""
        if data['password1'] != data['password2']:
            raise serializers.ValidationError(
                _("The two password fields didn't match."))
        return data
    def create(self, validated_data):
        """Create the user and populate its associated identity."""
        password = validated_data.pop('password1')
        validated_data.pop('password2')
        user = UserModel(username=validated_data.pop('username'))
        user.set_password(password)
        # Everything that is not a credential feeds the Identity record.
        identity_data = self.initial_data.copy()
        for key in ['password1', 'password2', 'username']:
            identity_data.pop(key)
        identity_serializer = IdentitySerializer(data=identity_data)
        identity_serializer.is_valid(raise_exception=True)
        user.save()
        # Bind the identity serializer to the identity created for the user.
        identity_serializer.instance = user.identity
        identity_serializer.save()
        return user
    class Meta:
        model = UserModel
        exclude = [
            'password', 'last_login', 'date_joined',
            'groups', 'user_permissions', 'is_staff',
            'is_active', 'is_superuser']
class UserChangeSerializer(serializers.ModelSerializer):
    """Serializer exposing all UserChange fields."""
    class Meta:
        model = UserChange
        fields = '__all__'
class VerifyEmailSerializer(serializers.Serializer):
    """Carries the e-mail confirmation key sent to the user."""
    key = serializers.CharField()
| 33.201068 | 107 | 0.657163 |
73ef60f1c12f46a115767d410043537bffc8cbbe | 20,020 | py | Python | tf_agents/utils/eager_utils_test.py | wookayin/tensorflow-agents | ae3751dfeed52422a350227047648dd82297960b | [
"Apache-2.0"
] | null | null | null | tf_agents/utils/eager_utils_test.py | wookayin/tensorflow-agents | ae3751dfeed52422a350227047648dd82297960b | [
"Apache-2.0"
] | null | null | null | tf_agents/utils/eager_utils_test.py | wookayin/tensorflow-agents | ae3751dfeed52422a350227047648dd82297960b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for tf_agents.utils.eager_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tf_agents.utils import eager_utils
from tensorflow.python.eager import context # TF internal
from tensorflow.python.framework import test_util # TF internal
from tensorflow.python.keras.engine import network as keras_network # TF internal
def input_fn():
  """Returns a fixed (inputs, labels) pair of constant tensors."""
  tf.set_random_seed(1)
  inputs = tf.constant([[1, 2], [2, 3], [3, 4]], dtype=tf.float32)
  labels = tf.constant([[0], [1], [2]])
  return inputs, labels
class Network(keras_network.Network):
  """Minimal Keras network: a single Dense layer (3 units, all-ones init)."""

  def __init__(self, name=None):
    super(Network, self).__init__(name=name)
    self._layer = tf.keras.layers.Dense(
        3,
        kernel_initializer=tf.ones_initializer(),
        name='logits')

  def call(self, inputs):
    # Forward pass: just the single dense layer.
    return self._layer(inputs)
class Model(object):
  """Thin wrapper pairing a name with a network and exposing a loss helper."""

  def __init__(self, name, network):
    self._name = name
    self._network = network

  def __call__(self, inputs):
    return self._network(inputs)

  @property
  def variables(self):
    return self._network.variables

  @property
  def trainable_variables(self):
    return self._network.trainable_variables

  @eager_utils.future_in_eager_mode
  def loss_fn(self, inputs, labels):
    # Cross-entropy between network logits and integer labels; the
    # decorator defers evaluation when running eagerly.
    logits = self._network(inputs)
    return tf.losses.sparse_softmax_cross_entropy(labels, logits)
@eager_utils.future_in_eager_mode
def minimize_loss(loss, optimizer):
  """Returns the op minimizing `loss` (wrapped as a Future in eager mode)."""
  return optimizer.minimize(loss)
class Aux(object):
  """Helper whose bound method parameterizes the Future tests."""

  def __init__(self):
    pass

  def method(self, inputs, labels, param=0):
    # Sanity-check the method is invoked bound to an Aux instance.
    assert isinstance(self, Aux), self
    return inputs, labels, tf.convert_to_tensor(param)
def aux_function(inputs, labels, param=0):
  """Free-function twin of Aux.method used to parameterize the tests."""
  return inputs, labels, tf.convert_to_tensor(param)
@parameterized.named_parameters(
('.func_eager', aux_function, context.eager_mode),
('.func_graph', aux_function, context.graph_mode),
('.method_eager', Aux().method, context.eager_mode),
('.method_graph', Aux().method, context.graph_mode),
)
class FutureTest(tf.test.TestCase, parameterized.TestCase):
def testCreate(self, func_or_method, run_mode):
with run_mode():
future = eager_utils.Future(input_fn)
self.assertTrue(callable(future))
self.assertIsInstance(future, eager_utils.Future)
inputs, labels = future()
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
def testArgsAtInit(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, inputs, labels)
inputs, labels, param = future()
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method)
inputs, labels, param = future(inputs, labels)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testArgsAtCallOverwriteKwargsInit(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, param=1)
inputs, labels, param = future(inputs, labels, 0)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testKWArgsAtInit(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(
func_or_method, inputs=inputs, labels=labels, param=1)
inputs, labels, param = future()
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 1)
def testKWArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method)
inputs, labels, param = future(inputs=inputs, labels=labels, param=1)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 1)
def testArgsAtInitKWArgsAtInit(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, inputs, labels=labels)
inputs, labels, param = future()
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testArgsAtInitKWArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, inputs, param=1)
inputs, labels, param = future(labels=labels)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 1)
def testOverwriteKWArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, param=-1)
inputs, labels, param = future(inputs, labels, param=1)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 1)
def testArgsatInitOverwritedKWArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, inputs, param=-1)
inputs, labels, param = future(labels=labels, param=1)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 1)
def testPartialArgsAtCallRaisesError(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, inputs)
with self.assertRaisesRegexp(TypeError, 'argument'):
future(labels)
def testArgsAtInitArgsReplacedAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, labels, inputs)
inputs, labels, param = future(inputs, labels)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testArgsAtCallKWArgsAtInit(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, labels=labels)
inputs, labels, param = future(inputs)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testArgsAtCallKWArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method)
inputs, labels, param = future(inputs, labels=labels)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
class FutureInEagerModeTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testCreate(self):
decorator = eager_utils.future_in_eager_mode(input_fn)
self.assertTrue(callable(decorator))
if context.executing_eagerly():
self.assertTrue(isinstance(decorator(), eager_utils.Future))
inputs, labels = decorator()()
else:
inputs, labels = decorator()
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
def testDecorator(self):
@eager_utils.future_in_eager_mode
def aux_fn(inputs, labels):
return inputs, labels
self.assertTrue(callable(aux_fn))
inputs, labels = input_fn()
outputs = aux_fn(inputs, labels)
if context.executing_eagerly():
self.assertTrue(isinstance(outputs, eager_utils.Future))
inputs, labels = outputs.__call__()
else:
inputs, labels = outputs
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
def testDelayedArgs(self):
@eager_utils.future_in_eager_mode
def aux_fn(inputs, labels):
return inputs, labels
self.assertTrue(callable(aux_fn))
inputs, labels = input_fn()
outputs = aux_fn(inputs, labels)
if context.executing_eagerly():
self.assertTrue(isinstance(outputs, eager_utils.Future))
inputs, labels = outputs.__call__()
else:
inputs, labels = outputs
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
class EagerUtilsTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testModel(self):
inputs, labels = input_fn()
model = Model('model', Network())
loss = model.loss_fn(inputs, labels)
expected_loss = 1.098612
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(self.evaluate(loss), expected_loss)
@test_util.run_in_graph_and_eager_modes()
def testLossDecreasesAfterTrainStep(self):
inputs, labels = input_fn()
model = Model('model', Network())
loss = model.loss_fn(inputs, labels)
optimizer = tf.train.GradientDescentOptimizer(0.1)
train_step = minimize_loss(loss, optimizer)
initial_loss = 1.098612
final_loss = 1.064379
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(self.evaluate(loss), initial_loss)
self.evaluate(train_step)
self.assertAllClose(self.evaluate(loss), final_loss)
class CreateTrainOpTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testLossDecreasesAfterTrainOp(self):
inputs, labels = input_fn()
model = Model('model', Network())
loss = model.loss_fn(inputs, labels)
optimizer = tf.train.GradientDescentOptimizer(0.1)
train_step = eager_utils.create_train_step(loss, optimizer)
initial_loss = 1.098612
final_loss = 1.064379
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(self.evaluate(train_step), initial_loss)
self.assertAllClose(self.evaluate(train_step), final_loss)
@test_util.run_in_graph_and_eager_modes()
def testCreateTrainOpWithTotalLossFn(self):
inputs, labels = input_fn()
model = Model('model', Network())
loss = model.loss_fn(inputs, labels)
model_2 = Model('model_2', Network())
loss_2 = model_2.loss_fn(inputs, labels)
@eager_utils.future_in_eager_mode
def tuple_loss(loss, loss_2):
return (loss() if callable(loss) else loss,
loss_2() if callable(loss_2) else loss_2)
tuple_loss_value = tuple_loss(loss, loss_2)
def first_element(tuple_value):
return tuple_value[0]
optimizer = tf.train.GradientDescentOptimizer(0.1)
loss = eager_utils.create_train_step(
tuple_loss_value, optimizer, total_loss_fn=first_element)
initial_loss = 1.098612
final_loss = 1.064379
self.evaluate(tf.global_variables_initializer())
train_step_model_0, train_step_model_1 = self.evaluate(loss)
self.assertAllClose(train_step_model_0, initial_loss)
self.assertAllClose(train_step_model_1, initial_loss)
train_step_model_0, train_step_model_1 = self.evaluate(loss)
self.assertAllClose(train_step_model_0, final_loss)
# model_1 was not updated since its loss is not being optimized: only
# the first element output was optimized.
self.assertAllClose(train_step_model_1, initial_loss)
@test_util.run_in_graph_and_eager_modes()
def testMultipleCallsTrainStep(self):
inputs, labels = input_fn()
model = Model('model', Network())
loss = model.loss_fn(inputs, labels)
optimizer = tf.train.GradientDescentOptimizer(0.1)
train_step = eager_utils.create_train_step(loss, optimizer)
initial_loss = 1.098612
final_loss = 1.033917
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(self.evaluate(train_step), initial_loss)
if context.executing_eagerly():
for _ in range(5):
self.evaluate(train_step(inputs, labels))
self.assertAllClose(self.evaluate(train_step(inputs, labels)), final_loss)
else:
for _ in range(5):
self.evaluate(train_step)
self.assertAllClose(self.evaluate(train_step), final_loss)
@test_util.run_in_graph_and_eager_modes()
def testVariablesToTrain(self):
inputs, labels = input_fn()
model = Model('model', Network())
if context.executing_eagerly():
variables_to_train = lambda: model.trainable_variables
else:
model(inputs)
variables_to_train = model.trainable_variables
self.assertEqual(len(variables_to_train), 2)
loss = model.loss_fn(inputs, labels)
optimizer = tf.train.GradientDescentOptimizer(0.1)
train_step = eager_utils.create_train_step(
loss, optimizer, variables_to_train=variables_to_train)
initial_loss = 1.098612
final_loss = 1.064379
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(self.evaluate(train_step), initial_loss)
self.assertAllClose(self.evaluate(train_step), final_loss)
self.assertEqual(len(model.trainable_variables), 2)
class HasSelfClsArgTest(tf.test.TestCase):
def testDirect(self):
def func():
pass
func2 = lambda: 0
class A(object):
def method(self):
pass
@classmethod
def class_method(cls):
pass
@staticmethod
def static_method():
pass
self.assertFalse(eager_utils.has_self_cls_arg(func))
self.assertFalse(eager_utils.has_self_cls_arg(func2))
self.assertFalse(eager_utils.has_self_cls_arg(A.static_method))
self.assertTrue(eager_utils.has_self_cls_arg(A.method))
self.assertTrue(eager_utils.has_self_cls_arg(A().method))
self.assertTrue(eager_utils.has_self_cls_arg(A.class_method))
self.assertTrue(eager_utils.has_self_cls_arg(A().class_method))
self.assertTrue(eager_utils.has_self_cls_arg(A.__dict__['method']))
self.assertTrue(eager_utils.has_self_cls_arg(A.__dict__['class_method']))
self.assertFalse(eager_utils.has_self_cls_arg(A.__dict__['static_method']))
def testDecorator(self):
def decorator(method):
@functools.wraps(method)
def _decorator(*args, **kwargs):
method(*args, **kwargs)
return _decorator
class A(object):
@decorator
def method(self):
pass
@staticmethod
@decorator
def static_method():
pass
@classmethod
@decorator
def class_method(cls):
pass
self.assertTrue(eager_utils.has_self_cls_arg(A.method))
self.assertTrue(eager_utils.has_self_cls_arg(A.class_method))
self.assertFalse(eager_utils.has_self_cls_arg(A.static_method))
@eager_utils.np_function
def meshgrid(low, high, nx=2, ny=3):
x = np.linspace(low, high, nx)
y = np.linspace(low, high, ny)
return np.meshgrid(x, y)
@eager_utils.np_function(get_output_dtypes=lambda _: np.float32)
def mean(x):
return np.mean(x)
class NpFunctionTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testMeshGrid(self):
xv, yv = meshgrid(tf.constant(0), tf.constant(1))
self.assertAllEqual(self.evaluate(xv), [[0., 1.], [0., 1.], [0., 1.]])
self.assertAllEqual(self.evaluate(yv), [[0., 0.], [.5, .5], [1., 1.]])
xv, yv = meshgrid(tf.constant(0.), tf.constant(1.))
self.assertAllEqual(self.evaluate(xv), [[0., 1.], [0., 1.], [0., 1.]])
self.assertAllEqual(self.evaluate(yv), [[0., 0.], [.5, .5], [1., 1.]])
@test_util.run_in_graph_and_eager_modes()
def testMeshGridKwargs(self):
xv, yv = meshgrid(tf.constant(0), tf.constant(1), nx=2, ny=2)
self.assertAllEqual(self.evaluate(xv), [[0., 1.], [0., 1.]])
self.assertAllEqual(self.evaluate(yv), [[0., 0.], [1., 1.]])
@test_util.run_in_graph_and_eager_modes()
def testVariables(self):
a, b = tf.Variable(0), tf.Variable(1)
xv, yv = meshgrid(a, b, nx=2, ny=2)
self.evaluate(tf.initializers.global_variables())
self.assertAllEqual(self.evaluate(xv), [[0., 1.], [0., 1.]])
self.assertAllEqual(self.evaluate(yv), [[0., 0.], [1., 1.]])
def testPlaceHolder(self):
a = tf.placeholder(tf.float32, shape=())
b = tf.placeholder(tf.float32, shape=())
xv, yv = meshgrid(a, b, nx=2, ny=2)
self.evaluate(tf.initializers.global_variables())
with self.session() as sess:
xv, yv = sess.run([xv, yv], {a: 0, b: 1})
self.assertAllEqual(xv, [[0., 1.], [0., 1.]])
self.assertAllEqual(yv, [[0., 0.], [1., 1.]])
def testPlaceHolderWithDefault(self):
a = tf.placeholder_with_default(0, ())
b = tf.placeholder_with_default(1, ())
xv, yv = meshgrid(a, b, nx=2, ny=2)
self.evaluate(tf.initializers.global_variables())
with self.session() as sess:
xv_np, yv_np = sess.run([xv, yv])
self.assertAllEqual(xv_np, [[0., 1.], [0., 1.]])
self.assertAllEqual(yv_np, [[0., 0.], [1., 1.]])
with self.session() as sess:
xv_np, yv_np = sess.run([xv, yv], {a: 0., b: 2.})
self.assertAllEqual(xv_np, [[0., 2.], [0., 2.]])
self.assertAllEqual(yv_np, [[0., 0.], [2., 2.]])
@test_util.run_in_graph_and_eager_modes()
def testGetOutputDtypesInts2Floats(self):
x = tf.constant([1, 2, 3])
mean_x = mean(x)
self.assertEqual(self.evaluate(mean_x), 2.)
def testGetOutputDtypesFloats2Floats(self):
x = tf.constant([1., 2., 3.])
mean_x = mean(x)
self.assertEqual(self.evaluate(mean_x), 2.)
@eager_utils.np_function(get_output_dtypes=lambda *args: np.float32)
def np_descent(x, d, mu, n_epochs):
n = len(x)
f = 2 / n
y = np.zeros(n)
err = np.zeros(n)
w = np.zeros(2)
grad = np.zeros(2)
for _ in itertools.repeat(None, n_epochs):
np.subtract(d, y, out=err)
grad[:] = [f * np.sum(err), f * np.dot(err, x)]
w = w + mu * grad
y = w[0] + w[1] * x
return w
class NpDescentTest(tf.test.TestCase):
def setUp(self):
np.random.seed(444)
n = 10000
sigma = 0.1
noise = sigma * np.random.randn(n)
self._x = np.linspace(0, 2, n)
self._d = 3 + 2 * self._x + noise
@test_util.run_in_graph_and_eager_modes()
def testSolve(self):
x, d = tf.constant(self._x), tf.constant(self._d)
w = np_descent(x, d, mu=0.001, n_epochs=10000)
self.assertAllClose([2.96, 2.03], self.evaluate(w), atol=0.01, rtol=0.01)
if __name__ == '__main__':
tf.test.main()
| 35.061296 | 82 | 0.685365 |
73efdfd63c8f43ce97aa52580cb741308de9a835 | 3,263 | py | Python | logya/server.py | yaph/logya | 9647f58a0b8653b56ad64332e235a76cab3acda9 | [
"MIT"
] | 12 | 2015-03-04T03:23:56.000Z | 2020-11-17T08:09:17.000Z | logya/server.py | elaOnMars/logya | a9f256ac8840e21b348ac842b35683224e25b613 | [
"MIT"
] | 78 | 2015-01-05T11:40:41.000Z | 2022-01-23T21:05:39.000Z | logya/server.py | elaOnMars/logya | a9f256ac8840e21b348ac842b35683224e25b613 | [
"MIT"
] | 6 | 2015-04-20T06:58:42.000Z | 2022-01-31T00:36:29.000Z | # -*- coding: utf-8 -*-
import http.server
import socketserver
from shutil import copyfile
from urllib.parse import unquote, urlparse
from logya.core import Logya
from logya.content import read, write_collection, write_page
from logya.template import env
class HTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
"""SimpleHTTPRequestHandler based class to return resources."""
L: Logya
def __init__(self, *args):
super(HTTPRequestHandler, self).__init__(*args, directory=self.L.paths.public.as_posix())
def do_GET(self):
update_resource(self.path, self.L)
super(HTTPRequestHandler, self).do_GET()
def update_page(url: str, L: Logya):
"""Update content or collection page."""
if content := L.doc_index.get(url):
path_rel = content['path'].relative_to(L.paths.content)
content['doc'] = read(content['path'], path_rel, L.markdown_extensions)
if L.collections:
L.update_collections(content['doc'])
# Always write doc because of possible template changes.
write_page(L.paths.public, content['doc'])
L.info(f'Refreshed doc: {url}')
return True
if content := L.collection_index.get(url):
write_collection(L.paths.public, content)
L.info(f'Refreshed collection: {url}')
return True
def update_resource(path: str, L: Logya) -> None:
"""Update resource corresponding to given url.
Resources that exist in the `static` directory are updated if they are newer than the destination file.
For other HTML resources the whole `L.doc_index` is updated and the destination is newly written."""
# Use only the actual path and ignore possible query params (see issue #3).
url = unquote(urlparse(path).path)
url_rel = url.lstrip('/')
# If a static file is requested update it and return.
src_static = L.paths.static.joinpath(url_rel)
if src_static.is_file():
dst_static = L.paths.public.joinpath(url_rel)
dst_static.parent.mkdir(exist_ok=True)
if not dst_static.exists() or src_static.stat().st_mtime > dst_static.stat().st_mtime:
L.info(f'Update static resource: {dst_static}')
copyfile(src_static, dst_static)
return
# Update content or collection existing in respective index.
if update_page(url, L):
return
# Rebuild indexes for other HTML file requests and try again to update page in case of new content.
if url.endswith(('/', '.html', '.htm')):
L.info(f'Rebuild site for request URL: {url}')
L.build()
if not update_page(url, L):
L.info(f'URL not found: {url}')
def serve(dir_site: str, verbose: bool, host: str, port: int, **kwargs) -> None:
L = Logya(dir_site=dir_site, verbose=verbose)
L.build()
# Make Logya object accessible to server.
HTTPRequestHandler.L = L
# Make sure absolute links work.
base_url = f'http://{host}:{port}'
env.globals['base_url'] = base_url
# Avoid "OSError: [Errno 98] Address already in use"
socketserver.TCPServer.allow_reuse_address = True
with socketserver.TCPServer((host, port), HTTPRequestHandler) as httpd:
print(f'Serving on {base_url}')
httpd.serve_forever() | 35.857143 | 107 | 0.676984 |
73eff2d6f52986eb40a45e562536088394182a2f | 786 | py | Python | crypto.py | dev-easyshares/mighty | a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b | [
"MIT"
] | null | null | null | crypto.py | dev-easyshares/mighty | a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b | [
"MIT"
] | 1 | 2022-03-12T00:57:37.000Z | 2022-03-12T00:57:37.000Z | crypto.py | dev-easyshares/mighty | a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b | [
"MIT"
] | null | null | null | from Crypto.Cipher import AES
from Crypto import Random
import base64
class MightyCrypto:
block_size = AES.block_size
cipher_method = AES
__pad = lambda self,s: s + (self.block_size - len(s) % self.block_size) * chr(self.block_size - len(s) % self.block_size)
__unpad = lambda self,s: s[:-ord(s[len(s) - 1:])]
def cipher(self, key, *args, **kwargs):
return self.ciper_method.new(kwargs.get("key")[:32], AES.MODE_CBC, kwargs.get("iv"))
def encrypt_data(self, *args, **kwargs):
raw = self.__pad(kwargs.get("data"))
return base64.b64encode(self.cipher(**kwargs).encrypt(raw))
def decrypt_data(self, *args, **kwargs):
raw = base64.b64decode(kwargs.get("data"))
return self.__unpad(self.cipher(**kwargs).decrypt(raw))
| 35.727273 | 125 | 0.659033 |
73eff44e441e6872513ad393f4b1202ea079c5eb | 2,255 | py | Python | test/integration_test.py | JNRowe-retired/restfulie-py | 8ac2bc401068c7bae6da3d107b004835618165d7 | [
"Apache-2.0"
] | 3 | 2015-03-09T09:17:46.000Z | 2016-05-03T02:51:25.000Z | test/integration_test.py | JNRowe-retired/restfulie-py | 8ac2bc401068c7bae6da3d107b004835618165d7 | [
"Apache-2.0"
] | null | null | null | test/integration_test.py | JNRowe-retired/restfulie-py | 8ac2bc401068c7bae6da3d107b004835618165d7 | [
"Apache-2.0"
] | 1 | 2020-12-14T23:23:26.000Z | 2020-12-14T23:23:26.000Z | from restfulie.restfulie import Restfulie
from threading import Semaphore
class integration_test:
def should_perform_ordinary_requests(self):
body = Restfulie.at("http://localhost:20144/hello").get().body
assert "Response for" in body
assert "/hello" in body
def should_perform_requests_with_parameters_as_kwargs(self):
response = Restfulie.at("http://localhost:20144").post(action="test")
print response.body
body = response.body
assert "This is a test" in body
def should_perform_requests_with_parameters_as_dict(self):
d = {"action": "test"}
response = Restfulie.at("http://localhost:20144").post(**d)
print response.body
body = response.body
assert "This is a test" in body
def should_perform_ordinary_requests_with_simple_auth(self):
r = Restfulie.at("http://localhost:20144/auth").auth('test', 'test')
response = r.get()
body = response.body
assert "worked" in body
def should_perform_async_requests(self):
barrier = Semaphore(0)
def callback(response):
body = response.body
assert "Response for" in body
assert "/hello" in body
barrier.release()
r = Restfulie.at("http://localhost:20144/hello").async(callback).get()
barrier.acquire()
assert "Response for" in r.body
assert "/hello" in r.body
def should_perform_async_requests_with_arguments_to_the_callback(self):
barrier = Semaphore(0)
def callback(response, extra1, extra2):
body = response.body
assert "Response for" in body
assert "/hello" in body
assert extra1 == "first"
assert extra2 == "second"
barrier.release()
r = Restfulie.at("http://localhost:20144/hello")
r = r.async(callback, args=("first", "second")).get()
barrier.acquire()
assert "Response for" in r.body
assert "/hello" in r.body
def should_perform_async_requests_without_callback(self):
r = Restfulie.at("http://localhost:20144/hello").async().get()
assert "Response for" in r.body
assert "/hello" in r.body
| 33.656716 | 78 | 0.627494 |
73f0114394700e2ee038acddf776bd307c2784f0 | 106 | py | Python | hfpy_code/chapter8/page264.py | leobarros/use_cabeca_python | 4e0897a68fb7ef669ec05eab7cba9412baa0e85e | [
"Apache-2.0"
] | 1 | 2016-04-01T04:31:52.000Z | 2016-04-01T04:31:52.000Z | hfpython_code/hfpy_code/chapter8/page264.py | tdean1995/HFPythonSandbox | dc72257e4353c5bca7a2c401d18587c6d799f9a1 | [
"Apache-2.0"
] | null | null | null | hfpython_code/hfpy_code/chapter8/page264.py | tdean1995/HFPythonSandbox | dc72257e4353c5bca7a2c401d18587c6d799f9a1 | [
"Apache-2.0"
] | 1 | 2020-06-02T17:47:22.000Z | 2020-06-02T17:47:22.000Z | import android
app = android.Android()
msg = "Hello from Head First Python on Android"
app.makeToast(msg)
| 21.2 | 47 | 0.764151 |
73f01c9b789f762f2c6d2f9a2f7b79f9c6dc8b23 | 555 | py | Python | DailyCodingProblem/85_Facebook_Conditional_Ops_Using_Math_on_32bit_Ints.py | RafayAK/CodingPrep | 718eccb439db0f6e727806964766a40e8234c8a9 | [
"MIT"
] | 5 | 2019-09-07T17:31:17.000Z | 2022-03-05T09:59:46.000Z | DailyCodingProblem/85_Facebook_Conditional_Ops_Using_Math_on_32bit_Ints.py | RafayAK/CodingPrep | 718eccb439db0f6e727806964766a40e8234c8a9 | [
"MIT"
] | null | null | null | DailyCodingProblem/85_Facebook_Conditional_Ops_Using_Math_on_32bit_Ints.py | RafayAK/CodingPrep | 718eccb439db0f6e727806964766a40e8234c8a9 | [
"MIT"
] | 2 | 2019-09-07T17:31:24.000Z | 2019-10-28T16:10:52.000Z | """
This problem was asked by Facebook.
Given three 32-bit integers x, y, and b, return x if b is 1 and y if b is 0,
using only mathematical or bit operations. You can assume b can only be 1 or 0.
"""
import numpy as np
def conditional_op(x, y, b):
condition_1 = (b ^ np.uint32(0))
condition_2 = (b ^ np.uint32(1))
return (x*condition_1) | (y*condition_2)
if __name__ == '__main__':
x = np.uint32(44)
y = np.uint32(5)
print(conditional_op(x,y, b = np.uint32(1))) # 44
print(conditional_op(x, y, b=np.uint32(0))) # 5
| 22.2 | 79 | 0.637838 |
73f02c01c038aac22cc29387c7f5836f6d3ddbfd | 5,901 | py | Python | test/conftest.py | javister/krista-backup | f8852c20afdf483e842ff22497bdd80eedc30c78 | [
"Apache-2.0"
] | 7 | 2020-07-28T06:53:02.000Z | 2022-03-18T05:23:03.000Z | test/conftest.py | javister/krista-backup | f8852c20afdf483e842ff22497bdd80eedc30c78 | [
"Apache-2.0"
] | 1 | 2020-11-25T16:13:26.000Z | 2020-11-25T16:13:26.000Z | test/conftest.py | javister/krista-backup | f8852c20afdf483e842ff22497bdd80eedc30c78 | [
"Apache-2.0"
] | 1 | 2020-07-28T13:47:09.000Z | 2020-07-28T13:47:09.000Z | # -*- encoding: utf-8 -*-
import os
import secrets
import tarfile
import docker
import pytest
from .docker_utils import INFINITY_PROCESS, CONTAINERS_OPTS, ENV
def pytest_addoption(parser):
"""Реализация дополнительных опций.
Добавляет опции для выбора тестируемых версий python3,
подверсия передаётся цифрой:
-2 (3.2), -5 (3.5), -25 (3.2 и 3.5)
Добавляет опцию для проверки сжатой версии:
--wrapped (по-умолчанию false)
"""
for os_name in CONTAINERS_OPTS.keys():
parser.addoption(
'-{0}'.format(os_name),
action='store_true',
help='run tests in {0}.'.format(os_name),
)
parser.addoption(
'--all',
action='store_true',
help='run test in all versions.',
)
parser.addoption(
'--wrapped',
action='store_true',
help='use wrapped form.',
)
def pytest_generate_tests(metafunc):
"""Изменяет порядок генерации тестов.
Генерирует тесты для необходимых ОС и подставляет соответствующую через
фикстуру container.
Также подставляет конфигурацию для сжатой версии, если был тест
выполняется с аргументом --wrapped.
"""
if 'container' in metafunc.fixturenames:
testing_os = set()
if metafunc.config.getoption('all'):
chosen_versions = list(CONTAINERS_OPTS.keys())
else:
for os_name in CONTAINERS_OPTS.keys():
if metafunc.config.getoption(os_name):
testing_os.add(os_name)
if metafunc.config.getoption('5') or not testing_os:
# добавлять python3.5, если версия не выбрана
testing_os.add('5')
chosen_versions = list(testing_os & set(CONTAINERS_OPTS.keys()))
metafunc.parametrize(
'container',
chosen_versions,
indirect=['container'],
ids=['python3.{v}'.format(v=ver) for ver in chosen_versions],
)
if 'kristabackup_tar' in metafunc.fixturenames:
if metafunc.config.getoption('wrapped'):
params = [('../out', 'KristaBackup')]
ids = ['wrapped']
else:
params = [('../KristaBackup', 'KristaBackup.py')]
ids = ['unwrapped']
metafunc.parametrize(
'kristabackup_tar',
params,
indirect=['kristabackup_tar'],
ids=ids,
)
def pytest_sessionfinish(session, exitstatus): # noqa: ignore=W0613
"""Чистит мусор после тестов.
Запускается в конце сессии.
"""
docker_client = docker.from_env()
for os_entry in CONTAINERS_OPTS.values():
if os_entry.get('prepared'):
docker_client.images.remove(os_entry.get('prepared').short_id)
@pytest.fixture(scope='session')
def kristabackup_tar(request):
"""Фикстура для создания архива с приложением.
Уже содержит в себе config.yaml. Архив удаляется после завершения всех
тестов.
Args:
request: содержит параметр param (tuple), включающий путь
к исходникам и имя исполняемого файла
Yields:
(tar_archive_stream, executable): архивированные данные приложения и
имя исполняемого файла
"""
path, executable = request.param
arch_src = 'kristabackup_{0}.tar'.format(secrets.token_hex(4))
with tarfile.open(arch_src, mode='w') as tar:
tar.add(path, arcname='KristaBackup')
tar.add(
'test_config.yaml',
arcname='KristaBackup/config.yaml',
)
with open(arch_src, 'rb') as tar_archive:
yield (tar_archive.read(), executable)
os.remove(arch_src)
@pytest.fixture(scope='class')
def container(request, kristabackup_tar): # noqa: ignore=W0621, WPS442
"""Фикстура для создания докер контейнера.
Название требуемой версии берётся из request.params, а соответствующая
конфигурация хранится в docker_utils.CONTAINERS_OPTS.
После выполнения теста контейнер уничтожается.
Args:
request: содержит параметр param имеющий значение требуемого окружения
kristabackup_tar: результат фикстуры kristabackup_tar
Yields:
container: докер контейнер
"""
py3version = request.param
created_containers = []
yield lambda: _container(
py3version,
kristabackup_tar,
created_containers,
)
for xcontainer in created_containers:
xcontainer.stop()
xcontainer.remove()
def _container(
py3version,
kristabackup_tar, # noqa: W0621, WPS442
created_containers=None,
):
config = CONTAINERS_OPTS[py3version]
docker_client = docker.from_env()
if config.get('prepared', None):
# если образ контейнера был использован ранее и сохранён,
# то он переиспользуется
xcontainer = docker_client.containers.run(
config.get('prepared'),
INFINITY_PROCESS,
detach=True,
environment=ENV,
)
else:
image = docker_client.images.get(config['docker_image'])
if image is None:
image = docker_client.images.pull(config['docker_image'])
xcontainer = docker_client.containers.run(
image,
INFINITY_PROCESS,
detach=True,
environment=ENV,
)
xcontainer.put_archive('/opt/', kristabackup_tar[0])
xcontainer.exec_run(
'ln -s /opt/KristaBackup/{0} {1}'.format(
kristabackup_tar[1],
config['link'],
),
)
xcontainer.exec_run(
'ln -fs /usr/bin/python3.{0} /usr/bin/python3'.format(
py3version,
),
)
config['prepared'] = xcontainer.commit()
if created_containers is not None:
created_containers.append(xcontainer)
return xcontainer
def pytest_configure():
pytest.shared = {}
| 28.785366 | 78 | 0.618709 |
73f049df8cec66fe88bc870c0ecc4353f4babe36 | 16,300 | py | Python | rotkehlchen/kraken.py | michaelsproul/rotkehlchen | 410987682c02919dfd4f4b025836c5c191782b35 | [
"BSD-3-Clause"
] | 1 | 2021-04-23T21:50:19.000Z | 2021-04-23T21:50:19.000Z | rotkehlchen/kraken.py | iamonuwa/rotkehlchen | 6f237830bd5ad14d0b8f95a4e77cdeebfe671759 | [
"BSD-3-Clause"
] | null | null | null | rotkehlchen/kraken.py | iamonuwa/rotkehlchen | 6f237830bd5ad14d0b8f95a4e77cdeebfe671759 | [
"BSD-3-Clause"
] | 1 | 2021-12-19T20:01:30.000Z | 2021-12-19T20:01:30.000Z | #!/usr/bin/env python
#
# Good kraken and python resource:
# https://github.com/zertrin/clikraken/tree/master/clikraken
import hmac
import hashlib
import base64
import time
from urllib.parse import urlencode
from requests import Response
from rotkehlchen.utils import (
query_fiat_pair,
retry_calls,
rlk_jsonloads,
convert_to_int,
cache_response_timewise,
)
from rotkehlchen.order_formatting import AssetMovement
from rotkehlchen.exchange import Exchange
from rotkehlchen.errors import RecoverableRequestError, RemoteError
from rotkehlchen.fval import FVal
from rotkehlchen import typing
from typing import Optional, Tuple, Dict, List, Union, cast
import logging
logger = logging.getLogger(__name__)
# Mapping from kraken's internal asset codes (often 'X'/'Z'-prefixed) to the
# symbols used everywhere else in this codebase.
KRAKEN_TO_WORLD = {
    'XDAO': 'DAO',
    'XETC': 'ETC',
    'XETH': 'ETH',
    'XLTC': 'LTC',
    'XREP': 'REP',
    'XXBT': 'BTC',
    'XXMR': 'XMR',
    'XXRP': 'XRP',
    'XZEC': 'ZEC',
    'ZEUR': 'EUR',
    'ZUSD': 'USD',
    'ZGBP': 'GBP',
    'ZCAD': 'CAD',
    'ZJPY': 'JPY',
    'XMLN': 'MLN',
    'XICN': 'ICN',
    'GNO': 'GNO',
    'BCH': 'BCH',
    'XXLM': 'XLM',
    'DASH': 'DASH',
    'EOS': 'EOS',
    'USDT': 'USDT',
    'KFEE': 'KFEE',
}

# The reverse mapping is derived instead of hand-maintained: the previous
# literal dict was an exact inverse of KRAKEN_TO_WORLD and keeping two copies
# in sync by hand risks them drifting apart when new assets are added.
# KRAKEN_TO_WORLD's values are unique, so the inversion is lossless.
WORLD_TO_KRAKEN = {world: kraken for kraken, world in KRAKEN_TO_WORLD.items()}
def kraken_to_world_pair(pair):
    """Turn a concatenated kraken pair string into a 'BASE_QUOTE' pair.

    6-character pairs consist of two plain 3-letter symbols and are split
    as-is; longer pairs are made of kraken's 4-character prefixed asset
    codes, which are translated through KRAKEN_TO_WORLD before joining.
    """
    if len(pair) == 6:
        # Two unprefixed 3-letter symbols, e.g. 'BCHEUR' -> 'BCH_EUR'
        return '{}_{}'.format(pair[:3], pair[3:])
    # Kraken's prefixed codes, e.g. 'XETHXXBT' -> 'ETH_BTC'
    kraken_base, kraken_quote = pair[:4], pair[4:]
    return '{}_{}'.format(KRAKEN_TO_WORLD[kraken_base], KRAKEN_TO_WORLD[kraken_quote])
class Kraken(Exchange):
    """Exchange adapter for the Kraken REST API (api version 0).

    Handles signed private queries, public queries with retry, balance and
    trade-history retrieval, and per-asset EUR/USD price caching.
    """

    def __init__(
            self,
            api_key: typing.ApiKey,
            secret: typing.ApiSecret,
            data_dir: typing.FilePath,
    ):
        super(Kraken, self).__init__('kraken', api_key, secret, data_dir)
        self.apiversion = '0'
        self.uri = 'https://api.kraken.com/{}/'.format(self.apiversion)
        # typing TODO: Without a union of str and Asset we get lots of warning
        # How can this be avoided without too much pain?
        self.usdprice: Dict[Union[typing.Asset, str], FVal] = {}
        self.eurprice: Dict[Union[typing.Asset, str], FVal] = {}
        # The API key travels as a header on every request of this session.
        self.session.headers.update({
            'API-Key': self.api_key,
        })

    def first_connection(self):
        """One-time setup: fetch fee schedule and tradeable pairs.

        Safe to call repeatedly; a no-op after the first successful run.
        """
        if self.first_connection_made:
            return
        resp = self.query_private(
            'TradeVolume',
            req={'pair': 'XETHXXBT', 'fee-info': True}
        )
        with self.lock:
            # Assuming all fees are the same for all pairs that we trade here,
            # as long as they are normal orders on normal pairs.
            self.taker_fee = FVal(resp['fees']['XETHXXBT']['fee'])
            # Note from kraken api: If an asset pair is on a maker/taker fee
            # schedule, the taker side is given in "fees" and maker side in
            # "fees_maker". For pairs not on maker/taker, they will only be
            # given in "fees".
            if 'fees_maker' in resp:
                self.maker_fee = FVal(resp['fees_maker']['XETHXXBT']['fee'])
            else:
                self.maker_fee = self.taker_fee
            self.tradeable_pairs = self.query_public('AssetPairs')
            self.first_connection_made = True
        # Also need to do at least a single pass of the main logic for the ticker
        self.main_logic()

    def validate_api_key(self) -> Tuple[bool, str]:
        """Check the stored credentials by issuing a cheap private query.

        Returns (True, '') on success, (False, reason) for known credential
        errors; any other error is re-raised.
        """
        try:
            self.query_private('Balance', req={})
        except (RemoteError, ValueError) as e:
            error = str(e)
            # 'Incorrect padding' comes from base64-decoding a malformed secret.
            if 'Error: Incorrect padding' in error:
                return False, 'Provided API Key or secret is in invalid Format'
            elif 'EAPI:Invalid key' in error:
                return False, 'Provided API Key is invalid'
            else:
                raise
        return True, ''

    def check_and_get_response(self, response: Response, method: str) -> dict:
        """Validate an HTTP response and unwrap Kraken's result envelope.

        Raises RecoverableRequestError for transient conditions (5xx from
        Cloudflare, rate limiting) so callers may retry, RemoteError otherwise.
        """
        if response.status_code in (520, 525, 504):
            raise RecoverableRequestError('kraken', 'Usual kraken 5xx shenanigans')
        elif response.status_code != 200:
            raise RemoteError(
                'Kraken API request {} for {} failed with HTTP status '
                'code: {}'.format(
                    response.url,
                    method,
                    response.status_code,
                ))
        result = rlk_jsonloads(response.text)
        if result['error']:
            # Kraken returns errors as either a list or a single string.
            if isinstance(result['error'], list):
                error = result['error'][0]
            else:
                error = result['error']
            if 'Rate limit exceeded' in error:
                raise RecoverableRequestError('kraken', 'Rate limited exceeded')
            else:
                raise RemoteError(error)
        return result['result']

    def _query_public(self, method: str, req: Optional[dict] = None) -> dict:
        """API queries that do not require a valid key/secret pair.

        Arguments:
        method -- API method name (string, no default)
        req -- additional API request parameters (default: {})
        """
        if req is None:
            req = {}
        urlpath = self.uri + 'public/' + method
        response = self.session.post(urlpath, data=req)
        return self.check_and_get_response(response, method)

    def query_public(self, method: str, req: Optional[dict] = None) -> dict:
        # Public query wrapped in up to 5 retries for recoverable errors.
        return retry_calls(5, 'kraken', method, self._query_public, method, req)

    def query_private(self, method: str, req: Optional[dict] = None) -> dict:
        # Private (signed) query wrapped in up to 5 retries.
        return retry_calls(5, 'kraken', method, self._query_private, method, req)

    def _query_private(self, method: str, req: Optional[dict] = None) -> dict:
        """API queries that require a valid key/secret pair.

        Arguments:
        method -- API method name (string, no default)
        req -- additional API request parameters (default: {})
        """
        if req is None:
            req = {}
        urlpath = '/' + self.apiversion + '/private/' + method
        with self.lock:
            # Protect this section, or else
            # nonce generation, signing, header mutation and the request must
            # happen atomically: nonces have to reach the server in order.
            req['nonce'] = int(1000 * time.time())
            post_data = urlencode(req)
            # any unicode strings must be turned to bytes
            hashable = (str(req['nonce']) + post_data).encode()
            message = urlpath.encode() + hashlib.sha256(hashable).digest()
            signature = hmac.new(
                base64.b64decode(self.secret),
                message,
                hashlib.sha512
            )
            self.session.headers.update({
                'API-Sign': base64.b64encode(signature.digest())
            })
            response = self.session.post(
                'https://api.kraken.com' + urlpath,
                data=post_data.encode()
            )
        return self.check_and_get_response(response, method)

    def world_to_kraken_pair(self, pair: str) -> str:
        """Translate 'BASE_QUOTE' into a pair Kraken actually trades.

        Tries both orderings since Kraken only lists one direction.
        Raises ValueError when neither ordering is tradeable.
        """
        p1, p2 = pair.split('_')
        kraken_p1 = WORLD_TO_KRAKEN[p1]
        kraken_p2 = WORLD_TO_KRAKEN[p2]
        if kraken_p1 + kraken_p2 in self.tradeable_pairs:
            pair = kraken_p1 + kraken_p2
        elif kraken_p2 + kraken_p1 in self.tradeable_pairs:
            pair = kraken_p2 + kraken_p1
        else:
            raise ValueError('Unknown pair "{}" provided'.format(pair))
        return pair

    # ---- General exchanges interface ----
    def main_logic(self):
        """Refresh the full ticker and the cached EUR/USD prices of majors."""
        if not self.first_connection_made:
            return
        self.ticker = self.query_public(
            'Ticker',
            req={'pair': ','.join(self.tradeable_pairs.keys())}
        )
        # 'c' is the last-trade-closed entry; [0] is its price.
        self.eurprice['BTC'] = FVal(self.ticker['XXBTZEUR']['c'][0])
        self.usdprice['BTC'] = FVal(self.ticker['XXBTZUSD']['c'][0])
        self.eurprice['ETH'] = FVal(self.ticker['XETHZEUR']['c'][0])
        self.usdprice['ETH'] = FVal(self.ticker['XETHZUSD']['c'][0])
        self.eurprice['REP'] = FVal(self.ticker['XREPZEUR']['c'][0])
        self.eurprice['XMR'] = FVal(self.ticker['XXMRZEUR']['c'][0])
        self.usdprice['XMR'] = FVal(self.ticker['XXMRZUSD']['c'][0])
        self.eurprice['ETC'] = FVal(self.ticker['XETCZEUR']['c'][0])
        self.usdprice['ETC'] = FVal(self.ticker['XETCZUSD']['c'][0])

    def find_fiat_price(self, asset: typing.Asset) -> FVal:
        """Find USD/EUR price of asset. The asset should be in the kraken style.
        e.g.: XICN. Save both prices in the kraken object and then return the
        USD price.
        """
        if asset == 'KFEE':
            # Kraken fees have no value
            return FVal(0)
        if asset == 'XXBT':
            return self.usdprice['BTC']
        # TODO: This is pretty ugly. Find a better way to check out kraken pairs
        # without this ugliness.
        pair = asset + 'XXBT'
        pair2 = asset + 'XBT'
        if pair2 in self.tradeable_pairs:
            pair = pair2
        if pair not in self.tradeable_pairs:
            raise ValueError(
                'Could not find a BTC tradeable pair in kraken for "{}"'.format(asset)
            )
        # Price via BTC: asset/BTC last price times cached BTC fiat prices.
        btc_price = FVal(self.ticker[pair]['c'][0])
        common_name = KRAKEN_TO_WORLD[asset]
        with self.lock:
            self.usdprice[common_name] = btc_price * self.usdprice['BTC']
            self.eurprice[common_name] = btc_price * self.eurprice['BTC']
        return self.usdprice[common_name]

    @cache_response_timewise()
    def query_balances(self) -> Tuple[Optional[dict], str]:
        """Return {symbol: {'amount', 'usd_value'}} for all non-zero balances.

        On remote failure returns (None, error_message) instead of raising.
        """
        try:
            self.first_connection()
            old_balances = self.query_private('Balance', req={})
            # find USD price of EUR
            with self.lock:
                self.usdprice['EUR'] = query_fiat_pair('EUR', 'USD')
        except RemoteError as e:
            msg = (
                'Kraken API request failed. Could not reach kraken due '
                'to {}'.format(e)
            )
            logger.error(msg)
            return None, msg
        balances = dict()
        for k, v in old_balances.items():
            v = FVal(v)
            if v == FVal(0):
                continue
            common_name = KRAKEN_TO_WORLD[k]
            entry = {}
            entry['amount'] = v
            if common_name in self.usdprice:
                entry['usd_value'] = v * self.usdprice[common_name]
            else:
                # Not a pre-cached major: derive the price via a BTC pair.
                entry['usd_value'] = v * self.find_fiat_price(k)
            balances[common_name] = entry
        return balances, ''

    def query_until_finished(
            self,
            endpoint: str,
            keyname: str,
            start_ts: typing.Timestamp,
            end_ts: typing.Timestamp,
            extra_dict: Optional[dict] = None,
    ) -> List:
        """ Abstracting away the functionality of querying a kraken endpoint where
        you need to check the 'count' of the returned results and provide sufficient
        calls with enough offset to gather all the data of your query.
        """
        result: List = list()
        logger.debug(
            f'Querying Kraken {endpoint} from {start_ts} to '
            f'{end_ts} with extra_dict {extra_dict}',
        )
        response = self._query_endpoint_for_period(
            endpoint=endpoint,
            start_ts=start_ts,
            end_ts=end_ts,
            extra_dict=extra_dict
        )
        count = response['count']
        offset = len(response[keyname])
        result.extend(response[keyname].values())
        logger.debug(f'Kraken {endpoint} Query Response with count:{count}')
        # Keep paging with 'ofs' until we have gathered 'count' entries.
        while offset < count:
            logger.debug(
                f'Querying Kraken {endpoint} from {start_ts} to {end_ts} '
                f'with offset {offset} and extra_dict {extra_dict}',
            )
            response = self._query_endpoint_for_period(
                endpoint=endpoint,
                start_ts=start_ts,
                end_ts=end_ts,
                offset=offset,
                extra_dict=extra_dict
            )
            assert count == response['count']
            response_length = len(response[keyname])
            offset += response_length
            if response_length == 0 and offset != count:
                # If we have provided specific filtering then this is a known
                # issue documented below, so skip the warning logging
                # https://github.com/rotkehlchenio/rotkehlchen/issues/116
                if extra_dict:
                    break
                # it is possible that kraken misbehaves and either does not
                # send us enough results or thinks it has more than it really does
                logger.warning(
                    'Missing {} results when querying kraken endpoint {}'.format(
                        count - offset, endpoint)
                )
                break
            result.extend(response[keyname].values())
        return result

    def query_trade_history(
            self,
            start_ts: typing.Timestamp,
            end_ts: typing.Timestamp,
            end_at_least_ts: typing.Timestamp,
    ) -> List:
        """Return trades in [start_ts, end_ts], preferring the on-disk cache."""
        with self.lock:
            cache = self.check_trades_cache(start_ts, end_at_least_ts)
            cache = cast(List, cache)
        if cache is not None:
            return cache
        result = self.query_until_finished('TradesHistory', 'trades', start_ts, end_ts)
        with self.lock:
            # before returning save it in the disk for future reference
            self.update_trades_cache(result, start_ts, end_ts)
        return result

    def _query_endpoint_for_period(
            self,
            endpoint: str,
            start_ts: typing.Timestamp,
            end_ts: typing.Timestamp,
            offset: Optional[int] = None,
            extra_dict: Optional[dict] = None,
    ) -> dict:
        """Issue one private query for a time window, with optional paging offset."""
        request: Dict[str, Union[typing.Timestamp, int]] = dict()
        request['start'] = start_ts
        request['end'] = end_ts
        if offset is not None:
            request['ofs'] = offset
        if extra_dict is not None:
            request.update(extra_dict)
        result = self.query_private(endpoint, request)
        return result

    def query_deposits_withdrawals(
            self,
            start_ts: typing.Timestamp,
            end_ts: typing.Timestamp,
            end_at_least_ts: typing.Timestamp,
    ) -> List:
        """Return AssetMovement objects for deposits/withdrawals in the window.

        Ledger entries are fetched (or read from cache) and converted.
        """
        with self.lock:
            cache = self.check_trades_cache(
                start_ts,
                end_at_least_ts,
                special_name='deposits_withdrawals'
            )
        if cache is not None:
            result = cache
        else:
            # Two Ledgers passes: one for deposits, one for withdrawals.
            result = self.query_until_finished(
                endpoint='Ledgers',
                keyname='ledger',
                start_ts=start_ts,
                end_ts=end_ts,
                extra_dict=dict(type='deposit'),
            )
            result.extend(self.query_until_finished(
                endpoint='Ledgers',
                keyname='ledger',
                start_ts=start_ts,
                end_ts=end_ts,
                extra_dict=dict(type='withdrawal'),
            ))
            with self.lock:
                self.update_trades_cache(
                    result,
                    start_ts,
                    end_ts,
                    special_name='deposits_withdrawals'
                )
        movements = list()
        for movement in result:
            movements.append(AssetMovement(
                exchange='kraken',
                category=movement['type'],
                # Kraken timestamps have floating point
                timestamp=convert_to_int(movement['time'], accept_only_exact=False),
                asset=KRAKEN_TO_WORLD[movement['asset']],
                amount=FVal(movement['amount']),
                fee=FVal(movement['fee'])
            ))
        return movements
| 34.029228 | 87 | 0.550123 |
73f05be05a0ca9a51041c2b53ba4ced655fcdaba | 5,758 | py | Python | adv_exp/mi_fgsm_attack.py | alessandrodepalma/oval-bab | 014b6ee5071508430c8e515bbae725306db68fe1 | [
"MIT"
] | 6 | 2021-06-28T18:06:53.000Z | 2022-03-09T09:00:50.000Z | adv_exp/mi_fgsm_attack.py | alessandrodepalma/oval-bab | 014b6ee5071508430c8e515bbae725306db68fe1 | [
"MIT"
] | null | null | null | adv_exp/mi_fgsm_attack.py | alessandrodepalma/oval-bab | 014b6ee5071508430c8e515bbae725306db68fe1 | [
"MIT"
] | 2 | 2021-06-28T18:06:13.000Z | 2021-09-03T11:24:26.000Z | import torch
import random
import math
import torch.nn as nn
import torch.distributions as dist
from adv_exp.attack_class import Attack_Class
# Default hyper-parameters for the MI-FGSM attack; user-supplied params
# override these entries (see MI_FGSM_Attack.__init__).
default_params = {
    'iters': 40,             # number of attack iterations
    'optimizer': None,       # only 'default' (plain step size) is implemented
    'num_adv_ex': 5,         # number of adversarial examples per input
    'lr': 1e-4,              # step size when original_alpha is False
    'check_adv': 100,        # check success every this many iterations
    'mu': 0.1,               # momentum decay factor for the gradient buffer
    'decay_alpha': False,    # shrink step size over iterations if True
    'original_alpha': True,  # derive alpha as eps/iters (paper's schedule)
}
class MI_FGSM_Attack(Attack_Class):
    """Momentum Iterative FGSM adversarial attack (Dong et al. style).

    Maintains a momentum-accumulated gradient and takes signed steps,
    projecting back into the [x_lbs, x_ubs] box after every step.
    """

    def __init__(self, params=None, cpu=False, store_loss_progress=False, model=None, data=None):
        self.__name__ = 'MI_FGSM__attack'
        # Merge user params over the module defaults.
        self.params = dict(default_params, **params) if params is not None else default_params
        self.cpu = cpu
        self.store_loss_progress = store_loss_progress
        self.model = model
        self.data = data

    def create_adv_examples(self, data=None, model=None, return_criterion="all", init_tensor=None,
                            target=None, gpu=False, return_iters=False, multi_targets=False):
        """Run the attack and return (images, success_tensor, loss_or_iters).

        data is (x, y, x_lbs, x_ubs); falls back to the instance's model/data
        when not given. return_criterion controls early stopping once none,
        one, half or all of the examples are adversarial.
        """
        with torch.enable_grad():
            assert return_criterion in ["one", "half", "all", "not_early"]
            # self.targeted_attack = type(target) != type(None)
            self.targeted_attack = not isinstance(target, type(None))
            if model is None:
                model = self.model
            if data is None:
                data = self.data
            x, y, x_lbs, x_ubs = data
            if gpu and torch.cuda.is_available():
                x = x.cuda()
                x_lbs = x_lbs.cuda()
                x_ubs = x_ubs.cuda()
                model.cuda()
            device = x.device
            iters = self.params['iters']
            num_adv = self.params['num_adv_ex']
            # One label per adversarial example; placed on the same device.
            if device.type == 'cpu':
                labels = torch.LongTensor([y]*num_adv, device=device)
            else:
                labels = torch.cuda.LongTensor([y]*num_adv, device=device)
            if multi_targets:
                # Spread targets evenly over all classes except the true one.
                num_classes = model[-1].out_features
                num_per_class = math.ceil(self.params['num_adv_ex'] / (num_classes - 1))
                target_list = [[k]*num_per_class for k in range(num_classes) if k != y]
                target_list = [item for sublist in target_list for item in sublist]
                random.shuffle(target_list)
                target_list = target_list[:self.params['num_adv_ex']]
                target = torch.LongTensor(target_list).unsqueeze(1)
                if gpu:
                    target = target.cuda()
            # Calculate the mean of the normal distribution in logit space
            prior = dist.Uniform(low=x_lbs, high=x_ubs)
            images = prior.sample(torch.Size([num_adv]))  # Alg1 line 2
            if not isinstance(init_tensor, type(None)):
                # Seed either a single example or a leading batch slice
                # with the provided initialization.
                if images[0].size() == init_tensor.size():
                    images[0] = init_tensor
                elif images[0].size() == init_tensor[0].size():
                    images[:init_tensor.size()[0]] = init_tensor
                else:
                    print("image size", images.size(), images[0].size())
                    print("init tensor size", init_tensor.size(), init_tensor[0].size())
                    input("images and init tensor not compatible")
            if self.params['optimizer']:
                if self.params['optimizer'] == 'default':
                    alpha = self.params['lr']
                    images.requires_grad = True
                else:
                    print("optimizer", self.params['optimizer'])
                    raise NotImplementedError
            if not isinstance(target, type(None)):
                self.loss_type = 'targeted_loss'
            else:
                self.loss_type = 'CE_loss'
                self.CE_loss = nn.CrossEntropyLoss()
            # NOTE(review): this local `loss` is overwritten after the loop
            # and appears unused here — presumably legacy; confirm.
            loss = nn.CrossEntropyLoss()
            self.loss_progress = []
            g_vec = torch.zeros_like(images)  # momentum accumulator
            mu = self.params['mu']
            if self.params['original_alpha']:
                # alpha = eps / iters as in the MI-FGSM paper; eps is half the
                # box width (assumes a uniform box width — TODO confirm).
                alpha = ((x_ubs[-1] - x_lbs[-1])/2) / iters
                eps = float(((x_ubs[-1] - x_lbs[-1]).view(-1)[0])/2)
                alpha = eps/iters
            else:
                alpha = self.params['lr']
            for i in range(iters):
                images.requires_grad = True
                outputs = model(images)
                model.zero_grad()
                cost = self._loss(outputs, labels, target).to(device)
                cost.backward()
                # Momentum update with L1-normalized gradient, then a signed
                # step and projection back into the input box.
                g_vec = mu * g_vec + images.grad/torch.norm(images.grad, p=1)
                adv_images = images + alpha*g_vec.sign()
                images = torch.max(torch.min(adv_images, x_ubs), x_lbs).detach_()
                if self.params['decay_alpha']:
                    alpha = alpha * (float(i+1)/float(i+2))
                if self.store_loss_progress:
                    self.loss_progress.append(cost.detach())
                # Periodically test whether enough examples already fool the model.
                if i % self.params['check_adv'] == 0:
                    outputs = model(images)
                    succ, sum_, mean_ = self.success_tensor(outputs, y, target)
                    if return_criterion == "all" and mean_ == 1:
                        break
                    elif return_criterion == "one" and mean_ > 0:
                        print("return early, iter ", i)
                        break
                    elif return_criterion == "half" and mean_ >= 0.5:
                        break
            succ, sum_, mean_ = self.success_tensor(outputs, y, target)
            output = model(images)
            _, loss = self._loss(outputs, labels, target, return_vector=True)
            loss = -loss.to(device)
            if return_iters:
                return images, succ, i
            else:
                return images, succ, loss
| 38.386667 | 98 | 0.521883 |
73f0e5998a1e76cd9e18d354a5608364237f2c33 | 5,251 | py | Python | controllers/sync.py | gurlinthewurld/eden | 726aea55c95ee33f48dace63f76496e22e529157 | [
"MIT"
] | 1 | 2018-01-06T12:58:32.000Z | 2018-01-06T12:58:32.000Z | controllers/sync.py | gurlinthewurld/eden | 726aea55c95ee33f48dace63f76496e22e529157 | [
"MIT"
] | null | null | null | controllers/sync.py | gurlinthewurld/eden | 726aea55c95ee33f48dace63f76496e22e529157 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" Synchronization Controllers """
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page """
    title = T("Synchronization")
    response.title = title
    return {"module_name": title}
# -----------------------------------------------------------------------------
def config():
    """ Synchronization Settings Controller """
    # There is a single sync_config record; create it on first access.
    table = s3db.sync_config
    row = db().select(table.id, limitby=(0, 1)).first()
    record_id = row.id if row else table.insert()
    # Can't do anything else than update here
    r = s3_request(args=[str(record_id), "update"], extension="html")
    return r(list_btn=None)
# -----------------------------------------------------------------------------
def repository():
    """ Repository Management Controller """
    # Tab layout for the repository rheader.
    tabs = [(T("Configuration"), None),
            (T("Resources"), "task"),
            (T("Schedule"), "job"),
            (T("Log"), "log")
            ]
    # Expose a "register" method handled by the sync module itself.
    s3db.set_method("sync", "repository",
                    method="register", action=current.sync)
    crud_form = s3base.S3SQLCustomForm("resource_name",
                                       "last_pull",
                                       "last_push",
                                       "mode",
                                       "strategy",
                                       "update_method",
                                       "update_policy",
                                       "conflict_policy",
                                       s3base.S3SQLInlineComponent(
                                           "resource_filter",
                                           label = T("Filters"),
                                           fields = ["tablename",
                                                     "filter_string",
                                                     ]
                                       ),
                                       )
    s3db.configure("sync_task", crud_form=crud_form)
    def prep(r):
        # Pre-processor: configure the scheduler form for the "job" tab.
        if r.interactive:
            if r.component and r.id:
                if r.component.alias == "job":
                    s3task.configure_tasktable_crud(
                        function="sync_synchronize",
                        args = [r.id],
                        vars = dict(user_id = auth.user is not None and auth.user.id or 0),
                        period = 600, # seconds, so 10 mins
                    )
                    s3.cancel = URL(c="sync", f="repository",
                                    args=[str(r.id), r.component.alias])
        return True
    s3.prep = prep
    def postp(r, output):
        # Post-processor: add a "Reset" row action on the "job" tab.
        if r.interactive and r.id:
            if r.component and r.component.alias == "job":
                s3.actions = [
                    dict(label=str(T("Reset")),
                         _class="action-btn",
                         url=URL(c="sync", f="repository",
                                 args=[str(r.id), "job", "[id]", "reset"]))
                ]
                s3_action_buttons(r)
        return output
    s3.postp = postp
    rheader = lambda r: s3db.sync_rheader(r, tabs=tabs)
    return s3_rest_controller("sync", "repository", rheader=rheader)
# -----------------------------------------------------------------------------
def sync():
    """ Synchronization

        Entry point for peers: dispatches ?resource=<tablename> requests
        to that resource's REST "sync" method.
    """
    if "resource" in request.get_vars:
        tablename = request.get_vars["resource"]
        if "_" in tablename:
            # URL variables from peer:
            # repository ID, msince and sync filters
            get_vars = Storage(include_deleted=True)
            _vars = request.get_vars
            for k, v in _vars.items():
                # Pass through repository/msince plus bracketed filter keys.
                if k in ("repository", "msince") or \
                   k[0] == "[" and "]" in k:
                    get_vars[k] = v
            # Request
            prefix, name = tablename.split("_", 1)
            r = s3_request(prefix=prefix,
                           name=name,
                           args=["sync"],
                           get_vars=get_vars)
            # Response
            output = r()
            return output
    # No (valid) resource specified => bad request.
    raise HTTP(400, body=s3mgr.ERROR.BAD_REQUEST)
# -----------------------------------------------------------------------------
def log():
    """ Log Reader """
    get_vars = request.get_vars
    # "return" carries a "controller.function" pair to link back to.
    if "return" in get_vars:
        c, f = get_vars["return"].split(".", 1)
        btn_url = URL(c=c, f=f, args="sync_log")
    else:
        btn_url = URL(c="sync", f="log", vars=get_vars)
    list_btn = A(T("List all Entries"), _href=btn_url, _class="action-btn")
    return s3_rest_controller("sync", "log",
                              subtitle=None,
                              rheader=s3base.S3SyncLog.rheader,
                              list_btn=list_btn)
# END =========================================================================
| 36.72028 | 92 | 0.394782 |
73f0fb16e6a93eff1e85e69b197fbe2c876265a0 | 1,433 | py | Python | channels_graphql_ws/__init__.py | Abnormally/DjangoChannelsGraphqlWs | f741c20706901fc37f37fe08f6894c5614d2ab7d | [
"MIT"
] | 259 | 2018-06-27T12:33:54.000Z | 2022-03-26T23:09:04.000Z | channels_graphql_ws/__init__.py | Abnormally/DjangoChannelsGraphqlWs | f741c20706901fc37f37fe08f6894c5614d2ab7d | [
"MIT"
] | 83 | 2018-06-27T15:04:25.000Z | 2022-03-12T19:05:40.000Z | channels_graphql_ws/__init__.py | Abnormally/DjangoChannelsGraphqlWs | f741c20706901fc37f37fe08f6894c5614d2ab7d | [
"MIT"
] | 59 | 2018-07-02T20:29:07.000Z | 2022-03-12T16:10:53.000Z | # Copyright (C) DATADVANCE, 2010-2021
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Websocket GraphQL server with subscriptions.
Django Channels based WebSocket GraphQL server with Graphene-like
subscriptions.
"""
from .client import GraphqlWsClient, GraphqlWsResponseError
from .graphql_ws_consumer import GraphqlWsConsumer
from .subscription import Subscription
from .transport import GraphqlWsTransportAiohttp
| 43.424242 | 72 | 0.795534 |
73f16f43edffb9b2e1d443a60cf1f6ccf30912ae | 2,824 | py | Python | server/danesfield_server/workflow_steps/run_metrics.py | Kitware/Danesfield-App | 70d9f5c7c5af2ca92d274c99175783ea43087592 | [
"Apache-2.0"
] | 25 | 2018-12-19T08:45:13.000Z | 2022-03-27T14:20:42.000Z | server/danesfield_server/workflow_steps/run_metrics.py | Kitware/Danesfield-App | 70d9f5c7c5af2ca92d274c99175783ea43087592 | [
"Apache-2.0"
] | 10 | 2018-11-30T22:21:45.000Z | 2022-01-21T19:43:47.000Z | server/danesfield_server/workflow_steps/run_metrics.py | Kitware/Danesfield-App | 70d9f5c7c5af2ca92d274c99175783ea43087592 | [
"Apache-2.0"
] | 7 | 2018-12-03T21:42:53.000Z | 2021-09-07T17:09:10.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
from ..algorithms import runMetrics
from ..constants import DanesfieldStep
from ..settings import PluginSettings
from ..workflow_step import DanesfieldWorkflowStep
from ..workflow_utilities import (
getOptions,
getWorkingSet,
isClsImage,
isDsmImage,
isMtlImage,
)
class RunMetricsStep(DanesfieldWorkflowStep):
    """
    Step that runs the pubgeo core3d metrics.

    Depends on the material classification, buildings-to-DSM and DTM
    fitting steps, whose outputs it evaluates against reference data.
    """

    def __init__(self):
        super(RunMetricsStep, self).__init__(DanesfieldStep.RUN_METRICS)
        self.addDependency(DanesfieldStep.CLASSIFY_MATERIALS)
        self.addDependency(DanesfieldStep.BUILDINGS_TO_DSM)
        self.addDependency(DanesfieldStep.FIT_DTM)

    def run(self, jobInfo, outputFolder):
        """Collect inputs from prior steps and launch the metrics job."""
        # Get working sets
        initWorkingSet = getWorkingSet(DanesfieldStep.INIT, jobInfo)
        classifyMaterialsWorkingSet = getWorkingSet(
            DanesfieldStep.CLASSIFY_MATERIALS, jobInfo
        )
        buildingsToDsmWorkingSet = getWorkingSet(
            DanesfieldStep.BUILDINGS_TO_DSM, jobInfo
        )
        dtmWorkingSet = getWorkingSet(DanesfieldStep.FIT_DTM, jobInfo)
        runMetricsOptions = getOptions(self.name, jobInfo)
        classifyMaterialsOptions = getOptions(
            DanesfieldStep.CLASSIFY_MATERIALS, jobInfo
        )
        # Using the "model" passed through the classify materials
        # options as the reference file prefix for now
        referencePrefix = classifyMaterialsOptions.get("model")
        referenceFolder = self.getFolderFromSetting(
            PluginSettings.REFERENCE_DATA_FOLDER_ID
        )
        # Get DTM
        dtmFile = self.getSingleFile(dtmWorkingSet)
        # Get CLS
        clsFile = self.getSingleFile(buildingsToDsmWorkingSet, isClsImage)
        # Get DSM
        dsmFile = self.getSingleFile(buildingsToDsmWorkingSet, isDsmImage)
        # Get MTL
        mtlFile = self.getSingleFile(classifyMaterialsWorkingSet, isMtlImage)
        # Run algorithm
        runMetrics(
            initWorkingSetName=initWorkingSet["name"],
            stepName=self.name,
            requestInfo=jobInfo.requestInfo,
            jobId=jobInfo.jobId,
            outputFolder=outputFolder,
            referenceFolder=referenceFolder,
            referencePrefix=referencePrefix,
            dtmFile=dtmFile,
            dsmFile=dsmFile,
            clsFile=clsFile,
            mtlFile=mtlFile,
            **runMetricsOptions
        )
| 33.223529 | 79 | 0.641289 |
73f180acd83494861d9d4e2a6197ebf2703c1eba | 785 | py | Python | tests/test_preprocessing.py | chiragjn/laserembeddings | 37f2aaf723966f24fe0a8d473241725fba46f691 | [
"BSD-3-Clause"
] | null | null | null | tests/test_preprocessing.py | chiragjn/laserembeddings | 37f2aaf723966f24fe0a8d473241725fba46f691 | [
"BSD-3-Clause"
] | null | null | null | tests/test_preprocessing.py | chiragjn/laserembeddings | 37f2aaf723966f24fe0a8d473241725fba46f691 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from laserembeddings import Laser
from laserembeddings.preprocessing import Tokenizer, BPE
def test_tokenizer():
    """Tokenizer: basic tokenization, unsupported/invalid options, descaping."""
    assert Tokenizer('en').tokenize("Let's do it!") == "let 's do it !"
    # Romanization is not implemented for this tokenizer.
    with pytest.raises(NotImplementedError):
        Tokenizer(romanize=True)
    # descape=True unescapes HTML entities (&amp;) before tokenizing.
    assert Tokenizer(
        'en', descape=True).tokenize("Let's do it &amp; pass that test!"
                                     ) == "let 's do it & pass that test !"
    # Upper-case output is not supported.
    with pytest.raises(AssertionError):
        Tokenizer(lower_case=False)
def test_bpe():
    """BPE: encoding with the default codes/vocab splits rare subwords."""
    with open(Laser.DEFAULT_BPE_VOCAB_FILE, 'r', encoding='utf-8') as f_vocab:
        bpe = BPE(Laser.DEFAULT_BPE_CODES_FILE, f_vocab)
        assert bpe.encode_tokens(
            "the tests are passing") == 'the test@@ s are passing'
| 30.192308 | 78 | 0.643312 |
73f18503c8e32506b63fd04d9956c916f3709ae9 | 3,363 | py | Python | Gamle scripts/old tests/systematic_L2.py | MadsAW/machine-learning-on-materials | 6101c7e3d12be54b12391c78442294198a39cc9b | [
"MIT"
] | 2 | 2018-10-10T09:32:34.000Z | 2019-03-28T08:42:31.000Z | Gamle scripts/old tests/systematic_L2.py | MadsAW/machine-learning-on-materials | 6101c7e3d12be54b12391c78442294198a39cc9b | [
"MIT"
] | null | null | null | Gamle scripts/old tests/systematic_L2.py | MadsAW/machine-learning-on-materials | 6101c7e3d12be54b12391c78442294198a39cc9b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 9 09:57:18 2018
@author: Simon
"""
import os
from createLargerFeatureMatrix import simpleLargeMatrix
import pickle
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers
import numpy as np
# --- Load training data: feature matrix + pickled formation energies ---
path = "Saved matrices/11-10-2018 11.36/sorted_Cutoff25_noSingleElementKrystals/"
featureMatrixFile = "train_featureMatrix.npy"
atomicSymbolsListFile = "train_pickledAtomicSymbolsList.txt"
energiesFile = "train_pickledEnergies.txt"
largeFeatureMatrix, mappedAtomicNumber = simpleLargeMatrix(path,featureMatrixFile, atomicSymbolsListFile)
with open(path+energiesFile, "rb") as pickleFile:
    energies = pickle.load(pickleFile)
# Flatten each sample to a 1-D feature vector for the dense network.
largeFeatureMatrix.shape = (largeFeatureMatrix.shape[0], -1)
X = largeFeatureMatrix
Y = np.array(energies)
#Load validation set
featureMatrixFileValidate = "validate_featureMatrix.npy"
atomicSymbolsListFileValidate = "validate_pickledAtomicSymbolsList.txt"
energiesFileValidate = "validate_pickledEnergies.txt"
largeFeatureMatrixValidate, mappedAtomicNumberValidate = simpleLargeMatrix(path,featureMatrixFileValidate, atomicSymbolsListFileValidate)
with open(path+energiesFileValidate, "rb") as pickleFile:
    energiesValidate = pickle.load(pickleFile)
largeFeatureMatrixValidate.shape = (largeFeatureMatrixValidate.shape[0], -1)
X_v = largeFeatureMatrixValidate
Y_v = np.array(energiesValidate)
def f(lam):
    """Train an L2-regularised dense net (800-400-1) on the module-level
    training data and return predictions on the training and validation sets.

    lam is the L2 regularisation strength applied to every layer.
    """
    net = Sequential()
    # 800 -> 400 -> 1 dense stack, ReLU hidden activations, L2 on all kernels.
    net.add(Dense(800, input_shape=np.shape(X)[1:], activation='relu',
                  kernel_regularizer=regularizers.l2(lam)))
    net.add(Dense(400, activation='relu',
                  kernel_regularizer=regularizers.l2(lam)))
    net.add(Dense(1, kernel_regularizer=regularizers.l2(lam)))
    net.compile(loss='mse', optimizer='adam', metrics=["mse"])
    print(net.summary())
    # Epochs = passes over the dataset; batch size = samples per weight update.
    net.fit(X, Y, epochs=40, batch_size=50)
    scores = net.evaluate(X, Y)
    print("\n%s: %.2f eV" % (net.metrics_names[1], scores[1]))
    return net.predict(X), net.predict(X_v)
# --- Sweep over L2 strengths and report train/validation RMSE for each ---
outs = []
lam_list = [5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]
for lam in lam_list:
    predictions, predictionsValidate =f(lam)
    print("Lambda = " + str(lam))
    # RMSE on the training set.
    a=0
    for i in range(len(predictions)):
        a+=(energies[i]-predictions[i])**2
    rmse=np.sqrt(a/len(energies))
    print("RMSE on training data "+str(rmse))
    #Make predictions on validation set
    a=0
    for i in range(len(predictionsValidate)):
        a+=(energiesValidate[i]-predictionsValidate[i])**2
    rmseValidate=np.sqrt(a/len(energiesValidate))
    print("RMSE on validation data "+str(rmseValidate))
    outs.append(["Lambda = " + str(lam),"RMSE on training data "+str(rmse),"RMSE on validation data "+str(rmseValidate)])
# Print the collected summary once the sweep is done.
for i in outs:
    print("")
    for j in i:
        print(j)
print("DONE")
73f195bec7d93819944b73e253d44348b8a38f7b | 5,406 | py | Python | userbot/plugins/afk.py | darklord9899/IndianBot | 03ca4d44e42c4d4bfa0a38d2f780e1c912bc8625 | [
"MIT"
] | null | null | null | userbot/plugins/afk.py | darklord9899/IndianBot | 03ca4d44e42c4d4bfa0a38d2f780e1c912bc8625 | [
"MIT"
] | null | null | null | userbot/plugins/afk.py | darklord9899/IndianBot | 03ca4d44e42c4d4bfa0a38d2f780e1c912bc8625 | [
"MIT"
] | null | null | null | """AFK Plugin for IndianBot
Syntax: .afk REASON"""
import asyncio
import datetime
from telethon import events
from telethon.tl import functions, types
from userbot.utils import admin_cmd
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
USER_AFK = {}
afk_time = None
last_afk_message = {}
@borg.on(events.NewMessage(outgoing=True))  # pylint:disable=E0602
async def set_not_afk(event):
    """Clear AFK state when the owner sends any outgoing non-.afk message."""
    global USER_AFK  # pylint:disable=E0602
    global afk_time  # pylint:disable=E0602
    global last_afk_message  # pylint:disable=E0602
    current_message = event.message.message
    # USER_AFK is the string "yes: <reason>" while AFK, so the substring
    # check detects the active state; an empty dict is skipped by `in`.
    if ".afk" not in current_message and "yes" in USER_AFK:  # pylint:disable=E0602
        try:
            await borg.send_message(  # pylint:disable=E0602
                Config.PLUGIN_CHANNEL,  # pylint:disable=E0602
                "Mine Owner has gone for some Important work he is very busy🥳🥳🥳"
            )
        except Exception as e:  # pylint:disable=C0103,W0703
            # PLUGIN_CHANNEL missing/invalid: tell the user in-chat instead.
            await borg.send_message(  # pylint:disable=E0602
                event.chat_id,
                "Please set `PLUGIN_CHANNEL` " + \
                "for the proper functioning of afk functionality " + \
                "in @IndianArMyGiveaway\n\n `{}`".format(str(e)),
                reply_to=event.message.id,
                silent=True
            )
        # Reset the shared AFK state.
        USER_AFK = {}  # pylint:disable=E0602
        afk_time = None  # pylint:disable=E0602
@borg.on(admin_cmd(pattern=r"afk ?(.*)"))
async def _(event):
    """Handle the `.afk [reason]` command: enable AFK mode."""
    if event.fwd_from:
        return
    global USER_AFK  # pylint:disable=E0602
    global afk_time  # pylint:disable=E0602
    global last_afk_message  # pylint:disable=E0602
    global reason
    USER_AFK = {}
    afk_time = None
    last_afk_message = {}
    reason = event.pattern_match.group(1)
    if not USER_AFK:  # pylint:disable=E0602
        last_seen_status = await borg(  # pylint:disable=E0602
            functions.account.GetPrivacyRequest(
                types.InputPrivacyKeyStatusTimestamp()
            )
        )
        # Only record the AFK start time when last-seen is publicly visible;
        # otherwise the "gone for X" duration is never shown.
        if isinstance(last_seen_status.rules, types.PrivacyValueAllowAll):
            afk_time = datetime.datetime.now()  # pylint:disable=E0602
        USER_AFK = f"yes: {reason}"  # pylint:disable=E0602
        if reason:
            await event.edit(f"My Master is Away, and Reason is {reason}")
        else:
            await event.edit(f"My Master is Away")
        await asyncio.sleep(5)
        await event.delete()
        try:
            await borg.send_message(  # pylint:disable=E0602
                Config.PLUGIN_CHANNEL,  # pylint:disable=E0602
                f"My Master is Away, and Reason is {reason}"
            )
        except Exception as e:  # pylint:disable=C0103,W0703
            logger.warn(str(e))  # pylint:disable=E0602
@borg.on(events.NewMessage(  # pylint:disable=E0602
    incoming=True,
    func=lambda e: bool(e.mentioned or e.is_private)
))
async def on_afk(event):
    """Auto-reply to mentions/DMs while AFK, keeping one reply per chat."""
    if event.fwd_from:
        return
    global USER_AFK  # pylint:disable=E0602
    global afk_time  # pylint:disable=E0602
    global last_afk_message  # pylint:disable=E0602
    afk_since = "**a while ago**"
    current_message_text = event.message.message.lower()
    if "afk" in current_message_text:
        # userbot's should not reply to other userbot's
        # https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
        return False
    if USER_AFK and not (await event.get_sender()).bot:  # pylint:disable=E0602
        if afk_time:  # pylint:disable=E0602
            # Break the elapsed time down into days/hours/minutes/seconds.
            # NOTE(review): timedelta.seconds ignores whole days, so `days`
            # computed from it is always 0 — confirm intent.
            now = datetime.datetime.now()
            datime_since_afk = now - afk_time  # pylint:disable=E0602
            time = float(datime_since_afk.seconds)
            days = time // (24 * 3600)
            time = time % (24 * 3600)
            hours = time // 3600
            time %= 3600
            minutes = time // 60
            time %= 60
            seconds = time
            if days == 1:
                afk_since = "**Yesterday**"
            elif days > 1:
                if days > 6:
                    date = now + \
                        datetime.timedelta(
                            days=-days, hours=-hours, minutes=-minutes)
                    afk_since = date.strftime("%A, %Y %B %m, %H:%I")
                else:
                    wday = now + datetime.timedelta(days=-days)
                    afk_since = wday.strftime('%A')
            elif hours > 1:
                afk_since = f"`{int(hours)}h{int(minutes)}m` **ago**"
            elif minutes > 0:
                afk_since = f"`{int(minutes)}m{int(seconds)}s` **ago**"
            else:
                afk_since = f"`{int(seconds)}s` **ago**"
        msg = None
        # `reason` is the global set by the .afk command handler.
        message_to_reply = f"My Master Has Been Gone For {afk_since}\nWhere He Is: Probably Alone in his Dreams " + \
            f"\n\n__ I'll back in a few hours__\n**REASON**: {reason}" \
            if reason \
            else f"**Important Notice**\n\n[This User Is Ded Forever...](https://telegra.ph//file/a53fa950ff31781d5930a.jpg) "
        msg = await event.reply(message_to_reply)
        await asyncio.sleep(5)
        # Keep only the most recent auto-reply per chat.
        if event.chat_id in last_afk_message:  # pylint:disable=E0602
            await last_afk_message[event.chat_id].delete()  # pylint:disable=E0602
        last_afk_message[event.chat_id] = msg  # pylint:disable=E0602
73f195cd3e7abad86c55285d4fc2cc5c530b71ba | 5,968 | py | Python | recipes/thesis_experiments_fsn/leakage_removal/fullsubnet_aec/fsn_informed_subband_baseline.py | wimmerb/SpeechEnhancers | 6b4bb708eb25901056615880d4e95d5fc5add48b | [
"MIT"
] | null | null | null | recipes/thesis_experiments_fsn/leakage_removal/fullsubnet_aec/fsn_informed_subband_baseline.py | wimmerb/SpeechEnhancers | 6b4bb708eb25901056615880d4e95d5fc5add48b | [
"MIT"
] | null | null | null | recipes/thesis_experiments_fsn/leakage_removal/fullsubnet_aec/fsn_informed_subband_baseline.py | wimmerb/SpeechEnhancers | 6b4bb708eb25901056615880d4e95d5fc5add48b | [
"MIT"
] | null | null | null | import torch
from torch.nn import functional
from audio_zen.acoustics.feature import drop_band
from audio_zen.model.base_model import BaseModel
from audio_zen.model.module.sequence_model import SequenceModel
class Model(BaseModel):
    """Subband-only FullSubNet variant with an extra "informed" input.

    Per frequency bin, the subband sequence model receives the noisy
    magnitude neighborhood concatenated with the matching background-music
    (bgm) magnitude neighborhood, and predicts a 2-channel mask (cIRM).
    There is no fullband branch in this variant.
    """
    def __init__(self,
                 num_freqs,
                 look_ahead,
                 sequence_model,
                 sb_num_neighbors,
                 sb_output_activate_function,
                 sb_model_hidden_size,
                 norm_type="offline_laplace_norm",
                 num_groups_in_drop_band=2,
                 weight_init=True,
                 variation=None,
                 ):
        """
        FullSubNet model (cIRM mask)

        Args:
            num_freqs: Frequency dim of the input
            sb_num_neighbors: Number of the neighbor frequencies in each side
            look_ahead: Number of use of the future frames
            sequence_model: Chose one sequence model as the basic model (GRU, LSTM)
            sb_output_activate_function: activation applied to the subband output
            sb_model_hidden_size: hidden size of the subband sequence model
            norm_type: name of the normalization wrapper (see BaseModel)
            num_groups_in_drop_band: groups for the drop_band speedup
            weight_init: apply BaseModel.weight_init to all submodules
            variation: unused here; stored for subclass/experiment switching
        """
        super().__init__()
        assert sequence_model in ("GRU", "LSTM"), f"{self.__class__.__name__} only support GRU and LSTM."
        # Input per frequency: noisy neighborhood + bgm neighborhood,
        # each (2 * sb_num_neighbors + 1) wide.
        self.sb_model = SequenceModel(
            input_size= (sb_num_neighbors * 2 + 1) + (sb_num_neighbors * 2 + 1),
            output_size=2,
            hidden_size=sb_model_hidden_size,
            num_layers=2,
            bidirectional=False,
            sequence_model=sequence_model,
            output_activate_function=sb_output_activate_function
        )
        self.sb_num_neighbors = sb_num_neighbors
        self.look_ahead = look_ahead
        self.norm = self.norm_wrapper(norm_type)
        self.num_groups_in_drop_band = num_groups_in_drop_band
        self.variation=variation
        if weight_init:
            self.apply(self.weight_init)
    def forward(self, noisy_mag, bgm_mag):
        """
        Args:
            noisy_mag: noisy magnitude spectrogram
            bgm_mag: background-music magnitude spectrogram (same shape)

        Returns:
            The real part and imag part of the enhanced spectrogram

        Shapes:
            noisy_mag: [B, 1, F, T]
            bgm_mag: [B, 1, F, T]
            return: [B, 2, F, T]
        """
        assert noisy_mag.shape == bgm_mag.shape #when this is assured, we can check derive the below info just from noisy_mag
        assert noisy_mag.dim() == 4
        # Pad T with look_ahead future frames; the output trims them again.
        bgm_mag = functional.pad(bgm_mag, [0, self.look_ahead])  # Pad the look ahead
        noisy_mag = functional.pad(noisy_mag, [0, self.look_ahead])  # Pad the look ahead
        batch_size, num_channels, num_freqs, num_frames = noisy_mag.size()
        assert num_channels == 1, f"{self.__class__.__name__} takes the mag feature as inputs."
        # Unfold noisy input, [B, N=F, C, F_s, T]
        noisy_mag_unfolded = self.unfold(noisy_mag, num_neighbor=self.sb_num_neighbors)
        noisy_mag_unfolded = noisy_mag_unfolded.reshape(batch_size, num_freqs, self.sb_num_neighbors * 2 + 1, num_frames)
        noisy_mag_unfolded = self.norm(noisy_mag_unfolded)
        # Unfold bgm input, [B, N=F, C, F_s, T]
        bgm_mag_unfolded = self.unfold(bgm_mag, num_neighbor=self.sb_num_neighbors)
        bgm_mag_unfolded = bgm_mag_unfolded.reshape(batch_size, num_freqs, self.sb_num_neighbors * 2 + 1, num_frames)
        bgm_mag_unfolded = self.norm(bgm_mag_unfolded)
        # Concatenation, [B, F, (F_s + F_s), T]
        sb_input = torch.cat([noisy_mag_unfolded, bgm_mag_unfolded], dim=2)
        #sb_input = self.norm(sb_input) #TODO maybe this helps
        # Speeding up training without significant performance degradation. These will be updated to the paper later.
        if batch_size > 1:
            sb_input = drop_band(sb_input.permute(0, 2, 1, 3), num_groups=self.num_groups_in_drop_band)  # [B, (F_s + F_s), F//num_groups, T]
            num_freqs = sb_input.shape[2]
            sb_input = sb_input.permute(0, 2, 1, 3)  # [B, F//num_groups, (F_s + F_s), T]
        sb_input = sb_input.reshape(
            batch_size * num_freqs,
            (self.sb_num_neighbors * 2 + 1) + (self.sb_num_neighbors * 2 + 1),
            num_frames
        )
        # [B * F, (F_s + F_s), T] => [B * F, 2, T] => [B, F, 2, T]
        sb_mask = self.sb_model(sb_input)
        sb_mask = sb_mask.reshape(batch_size, num_freqs, 2, num_frames).permute(0, 2, 1, 3).contiguous()
        # Drop the look-ahead padding so output T matches the input T.
        output = sb_mask[:, :, :, self.look_ahead:]
        return output
if __name__ == "__main__":
    # Smoke test: build the model and push random spectrograms through it.
    #
    # BUGFIX: the previous version passed fullband-only kwargs
    # (fb_num_neighbors, fb_output_activate_function, fb_model_hidden_size)
    # that Model.__init__ does not accept, and called model(ipt) with a
    # single argument although forward() requires (noisy_mag, bgm_mag) --
    # both raised TypeError.
    with torch.no_grad():
        model = Model(
            num_freqs=257,
            look_ahead=2,
            sequence_model="LSTM",
            sb_num_neighbors=15,
            sb_output_activate_function=None,
            sb_model_hidden_size=384,
            weight_init=False,
            norm_type="offline_laplace_norm",
            num_groups_in_drop_band=2,
        )
        # [B, 1, F, T] magnitude spectrograms for the noisy mix and the
        # informed background-music reference.
        noisy_mag = torch.rand(3, 1, 257, 200)
        bgm_mag = torch.rand(3, 1, 257, 200)
        print(model(noisy_mag, bgm_mag).shape)
| 40.324324 | 141 | 0.602379 |
73f1c8361b2c932e26c4b0aa51c5c23ca620ae03 | 63,412 | py | Python | Tools/msi/msi.py | bendmorris/static-python | 2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473 | [
"PSF-2.0"
] | 164 | 2015-01-04T07:04:07.000Z | 2022-01-06T03:18:56.000Z | Tools/msi/msi.py | bendmorris/static-python | 2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473 | [
"PSF-2.0"
] | 3 | 2015-08-27T07:35:26.000Z | 2016-04-07T16:35:39.000Z | Tools/msi/msi.py | bendmorris/static-python | 2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473 | [
"PSF-2.0"
] | 35 | 2015-06-11T05:35:55.000Z | 2022-01-11T19:32:00.000Z | # Python MSI Generator
# (C) 2003 Martin v. Loewis
# See "FOO" in comments refers to MSDN sections with the title FOO.
import msilib, schema, sequence, os, glob, time, re, shutil, zipfile
import subprocess, tempfile
from msilib import Feature, CAB, Directory, Dialog, Binary, add_data
import uisample
from win32com.client import constants
from distutils.spawn import find_executable
# Settings can be overridden in config.py below
# 0 for official python.org releases
# 1 for intermediate releases by anybody, with
# a new product code for every package.
snapshot = 1
# 1 means that file extension is px, not py,
# and binaries start with x
testpackage = 0
# Location of build tree
srcdir = os.path.abspath("../..")
# Text to be displayed as the version in dialogs etc.
# goes into file name and ProductCode. Defaults to
# current_version.day for Snapshot, current_version otherwise
full_current_version = None
# Is Tcl available at all?
have_tcl = True
# path to PCbuild directory
PCBUILD="PCbuild"
# msvcrt version
MSVCR = "100"
# Name of certificate in default store to sign MSI with
certname = None
# Make a zip file containing the PDB files for this build?
pdbzip = True
try:
from config import *
except ImportError:
pass
# Extract current version from Include/patchlevel.h
# Mapping from the PY_RELEASE_LEVEL macro names to the hex digit used in
# the hex version / doc file name (0xA alpha, 0xB beta, 0xC rc, 0xF final).
levels = {
    'PY_RELEASE_LEVEL_ALPHA':0xA,
    'PY_RELEASE_LEVEL_BETA': 0xB,
    'PY_RELEASE_LEVEL_GAMMA':0xC,
    'PY_RELEASE_LEVEL_FINAL':0xF
}
major = minor = micro = level = serial = None
# Use a context manager so the header's file handle is closed promptly
# (previously the file object was left for the garbage collector).
with open(srcdir + "/Include/patchlevel.h") as patchlevel_file:
    lines = patchlevel_file.readlines()
for line in lines:
    if not line.startswith("#define"):
        continue
    parts = line.split()
    if len(parts) != 3:
        continue
    _, name, value = parts
    if name == 'PY_MAJOR_VERSION': major = value
    if name == 'PY_MINOR_VERSION': minor = value
    if name == 'PY_MICRO_VERSION': micro = value
    if name == 'PY_RELEASE_LEVEL': level = levels[value]
    if name == 'PY_RELEASE_SERIAL': serial = value
short_version = major+"."+minor
# See PC/make_versioninfo.c: FIELD3 = micro*1000 + releaselevel*10 + serial
FIELD3 = 1000*int(micro) + 10*level + int(serial)
current_version = "%s.%d" % (short_version, FIELD3)
# This should never change. The UpgradeCode of this package can be
# used in the Upgrade table of future packages to make the future
# package replace this one. See "UpgradeCode Property".
# upgrade_code gets set to upgrade_code_64 when we have determined
# that the target is Win64.
upgrade_code_snapshot='{92A24481-3ECB-40FC-8836-04B7966EC0D5}'
upgrade_code='{65E6DE48-A358-434D-AA4F-4AF72DB4718F}'
upgrade_code_64='{6A965A0C-6EE6-4E3A-9983-3263F56311EC}'
if snapshot:
    # Snapshot builds encode the build day so each snapshot sorts newer.
    current_version = "%s.%s.%s" % (major, minor, int(time.time()/3600/24))
if full_current_version is None:
    full_current_version = current_version
# C extension modules (.pyd) shipped by the installer; each entry becomes
# a File record placed in the DLLs feature of the MSI.
extensions = [
    'pyexpat.pyd',
    'select.pyd',
    'unicodedata.pyd',
    'winsound.pyd',
    '_bz2.pyd',
    '_elementtree.pyd',
    '_socket.pyd',
    '_ssl.pyd',
    '_testcapi.pyd',
    '_tkinter.pyd',
    '_msi.pyd',
    '_ctypes.pyd',
    '_ctypes_test.pyd',
    '_sqlite3.pyd',
    '_hashlib.pyd',
    '_multiprocessing.pyd',
    '_lzma.pyd',
    '_decimal.pyd',
    '_testbuffer.pyd',
    '_sha3.pyd',
    '_testimportmultiple.pyd',
]
# Well-known component UUIDs
# These are needed for SharedDLLs reference counter; if
# a different UUID was used for each incarnation of, say,
# python24.dll, an upgrade would set the reference counter
# from 1 to 2 (due to what I consider a bug in MSI)
# Using the same UUID is fine since these files are versioned,
# so Installer will always keep the newest version.
# NOTE: All uuids are self generated.
pythondll_uuid = {
    "24":"{9B81E618-2301-4035-AC77-75D9ABEB7301}",
    "25":"{2e41b118-38bd-4c1b-a840-6977efd1b911}",
    "26":"{34ebecac-f046-4e1c-b0e3-9bac3cdaacfa}",
    "27":"{4fe21c76-1760-437b-a2f2-99909130a175}",
    "30":"{6953bc3b-6768-4291-8410-7914ce6e2ca8}",
    "31":"{4afcba0b-13e4-47c3-bebe-477428b46913}",
    "32":"{3ff95315-1096-4d31-bd86-601d5438ad5e}",
    "33":"{f7581ca4-d368-4eea-8f82-d48c64c4f047}",
    "34":"{7A0C5812-2583-40D9-BCBB-CD7485F11377}",
    } [major+minor]
# Compute the name that Sphinx gives to the docfile
# e.g. "python341rc2.chm" for 3.4.1rc2, "python341.chm" for the final.
docfile = micro
if level < 0xf:
    if level == 0xC:
        # release candidate: "rc<serial>"
        docfile += "rc%s" % (serial,)
    else:
        # alpha/beta: release level as a lowercase hex digit plus serial
        docfile += '%x%s' % (level, serial)
docfile = 'python%s%s%s.chm' % (major, minor, docfile)
# Build the mingw import library, libpythonXY.a
# This requires 'nm' and 'dlltool' executables on your PATH
def build_mingw_lib(lib_file, def_file, dll_file, mingw_lib):
    """Build the mingw import library *mingw_lib* for *dll_file*.

    Exported symbols are scraped from *lib_file* with 'nm', written as a
    module-definition file to *def_file*, then fed to 'dlltool'.  Prints a
    warning and returns False on any failure; returns True on success.
    """
    warning = "WARNING: %s - libpythonXX.a not built"
    # shutil.which replaces the deprecated distutils.spawn.find_executable
    # (shutil is already imported at module level).
    nm = shutil.which('nm')
    dlltool = shutil.which('dlltool')
    if not nm or not dlltool:
        print(warning % "nm and/or dlltool were not found")
        return False
    nm_command = '%s -Cs %s' % (nm, lib_file)
    dlltool_command = "%s --dllname %s --def %s --output-lib %s" % \
          (dlltool, dll_file, def_file, mingw_lib)
    # nm prints lines like "_imp__PyFoo in pythonXY.dll"; keep the symbol.
    export_match = re.compile(r"^_imp__(.*) in python\d+\.dll").match
    nm_pipe = os.popen(nm_command)
    # with-statement guarantees the .def file is closed even on error.
    with open(def_file, 'w') as f:
        f.write("LIBRARY %s\n" % dll_file)
        f.write("EXPORTS\n")
        for line in nm_pipe.readlines():
            m = export_match(line)
            if m:
                f.write(m.group(1) + "\n")
    status = nm_pipe.close()  # non-None/non-zero means nm failed
    if status:
        print(warning % "nm did not run successfully")
        return False
    if os.system(dlltool_command) != 0:
        print(warning % "dlltool did not run successfully")
        return False
    return True
# Target files (.def and .a) go in PCBuild directory
lib_file = os.path.join(srcdir, PCBUILD, "python%s%s.lib" % (major, minor))
def_file = os.path.join(srcdir, PCBUILD, "python%s%s.def" % (major, minor))
dll_file = "python%s%s.dll" % (major, minor)
mingw_lib = os.path.join(srcdir, PCBUILD, "libpython%s%s.a" % (major, minor))
have_mingw = build_mingw_lib(lib_file, def_file, dll_file, mingw_lib)
# Determine the target architecture
# msisupport.mak builds msisupport.dll; its PE type (32/64-bit) must match
# the python DLL being packaged, otherwise the custom actions cannot run.
if os.system("nmake /nologo /c /f msisupport.mak") != 0:
    raise RuntimeError("'nmake /f msisupport.mak' failed")
dll_path = os.path.join(srcdir, PCBUILD, dll_file)
msilib.set_arch_from_file(dll_path)
if msilib.pe_type(dll_path) != msilib.pe_type("msisupport.dll"):
    raise SystemError("msisupport.dll for incorrect architecture")
if msilib.Win64:
    upgrade_code = upgrade_code_64
if snapshot:
    # Snapshots get a fresh ProductCode for every package.
    product_code = msilib.gen_uuid()
else:
    # official release: generate UUID from the download link that the file will have
    import uuid
    product_code = uuid.uuid3(uuid.NAMESPACE_URL,
                    'http://www.python.org/ftp/python/%s.%s.%s/python-%s%s.msi' %
                    (major, minor, micro, full_current_version, msilib.arch_ext))
    product_code = '{%s}' % product_code
if testpackage:
    ext = 'px'
    testprefix = 'x'
else:
    ext = 'py'
    testprefix = ''
if msilib.Win64:
    SystemFolderName = "[System64Folder]"
    registry_component = 4|256    # msidbComponentAttributes: registry key path | 64-bit
else:
    SystemFolderName = "[SystemFolder]"
    registry_component = 4
msilib.reset()
# condition in which to install pythonxy.dll in system32:
# a) it is Windows 9x or
# b) it is NT, the user is privileged, and has chosen per-machine installation
sys32cond = "(Windows9x or (Privileged and ALLUSERS))"
def build_database():
    """Generate an empty database, with just the schema and the
    Summary information stream."""
    # Snapshots and official releases use distinct upgrade codes.
    uc = upgrade_code_snapshot if snapshot else upgrade_code
    productsuffix = " (64-bit)" if msilib.Win64 else ""
    # schema represents the installer 2.0 database schema.
    # sequence is the set of standard sequences
    # (ui/execute, admin/advt/install)
    msiname = "python-%s%s.msi" % (full_current_version, msilib.arch_ext)
    db = msilib.init_database(msiname,
                  schema, ProductName="Python "+full_current_version+productsuffix,
                  ProductCode=product_code,
                  ProductVersion=current_version,
                  Manufacturer=u"Python Software Foundation",
                  request_uac = True)
    # The default sequencing of the RemoveExistingProducts action causes
    # removal of files that got just installed. Place it after
    # InstallInitialize, so we first uninstall everything, but still roll
    # back in case the installation is interrupted
    msilib.change_sequence(sequence.InstallExecuteSequence,
                           "RemoveExistingProducts", 1510)
    msilib.add_tables(db, sequence)
    # ALLUSERS cannot live in the Property table, because it could then not
    # be reset for a per-user installation.  The UI instead maintains
    # WhichUsers ("ALL" or "JUSTME") and sets ALLUSERS when execution starts.
    properties = [
        ("UpgradeCode", uc),
        ("WhichUsers", "ALL"),
        ("ProductLine", "Python%s%s" % (major, minor)),
    ]
    add_data(db, "Property", properties)
    db.Commit()
    return db, msiname
def remove_old_versions(db):
    "Fill the upgrade table."
    start = "%s.%s.0" % (major, minor)
    # Forward feature-selection states from the older installation; this
    # requires old and new installs to both be per-machine or both per-user.
    migrate_features = 1
    # See "Upgrade Table".  A snapshot only replaces earlier snapshots; a
    # release replaces all snapshots plus earlier releases of this line.
    if snapshot:
        rows = [(upgrade_code_snapshot, start,
                 current_version,
                 None,                # Ignore language
                 migrate_features,
                 None,                # Migrate ALL features
                 "REMOVEOLDSNAPSHOT")]
        props = "REMOVEOLDSNAPSHOT"
    else:
        rows = [(upgrade_code, start, current_version,
                 None, migrate_features, None, "REMOVEOLDVERSION"),
                (upgrade_code_snapshot, start, "%s.%d.0" % (major, int(minor)+1),
                 None, migrate_features, None, "REMOVEOLDSNAPSHOT")]
        props = "REMOVEOLDSNAPSHOT;REMOVEOLDVERSION"
    add_data(db, "Upgrade", rows)
    props += ";TARGETDIR;DLLDIR;LAUNCHERDIR"
    # Installer collects the product codes of the earlier releases in
    # these properties. In order to allow modification of the properties,
    # they must be declared as secure. See "SecureCustomProperties Property"
    add_data(db, "Property", [("SecureCustomProperties", props)])
class PyDialog(Dialog):
    """Dialog with a fixed layout: controls at the top, then a ruler,
    then a row of back/next/cancel buttons.  Optionally a bitmap at the
    left."""

    def __init__(self, *args, **kw):
        """Dialog(database, name, x, y, w, h, attributes, title, first,
        default, cancel, bitmap=true)"""
        Dialog.__init__(self, *args)
        ruler = self.h - 36
        if kw.get("bitmap", True):
            # The bitmap fills the left strip, scaled to the ruler height.
            self.bitmap("Bitmap", 0, 0, 152 * ruler / 328, ruler, "PythonWin")
        self.line("BottomLine", 0, ruler, self.w, 0)

    def title(self, title):
        "Set the title text of the dialog at the top."
        # flags 0x30003 = Visible|Enabled|Transparent|NoPrefix,
        # rendered in VerdanaBold10
        self.text("Title", 135, 10, 220, 60, 0x30003,
                  r"{\VerdanaBold10}%s" % title)

    def back(self, title, next, name = "Back", active = 1):
        """Add a back button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.
        Return the button, so that events can be associated"""
        # 3 = Visible|Enabled, 1 = Visible only
        return self.pushbutton(name, 180, self.h - 27, 56, 17,
                               3 if active else 1, title, next)

    def cancel(self, title, next, name = "Cancel", active = 1):
        """Add a cancel button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.
        Return the button, so that events can be associated"""
        return self.pushbutton(name, 304, self.h - 27, 56, 17,
                               3 if active else 1, title, next)

    def next(self, title, next, name = "Next", active = 1):
        """Add a Next button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.
        Return the button, so that events can be associated"""
        return self.pushbutton(name, 236, self.h - 27, 56, 17,
                               3 if active else 1, title, next)

    def xbutton(self, name, title, next, xpos):
        """Add a button with a given title, the tab-next button,
        its name in the Control table, giving its x position as a fraction
        of the dialog width; the y-position is aligned with the other
        buttons.  Return the button, so that events can be associated"""
        return self.pushbutton(name, int(self.w * xpos - 28), self.h - 27,
                               56, 17, 3, title, next)
def add_ui(db):
x = y = 50
w = 370
h = 300
title = "[ProductName] Setup"
# see "Dialog Style Bits"
modal = 3 # visible | modal
modeless = 1 # visible
track_disk_space = 32
add_data(db, 'ActionText', uisample.ActionText)
add_data(db, 'UIText', uisample.UIText)
# Bitmaps
if not os.path.exists(srcdir+r"\PC\python_icon.exe"):
raise RuntimeError("Run icons.mak in PC directory")
add_data(db, "Binary",
[("PythonWin", msilib.Binary(r"%s\PCbuild\installer.bmp" % srcdir)), # 152x328 pixels
("py.ico",msilib.Binary(srcdir+r"\PC\py.ico")),
])
add_data(db, "Icon",
[("python_icon.exe", msilib.Binary(srcdir+r"\PC\python_icon.exe"))])
# Scripts
# CheckDir sets TargetExists if TARGETDIR exists.
# UpdateEditIDLE sets the REGISTRY.tcl component into
# the installed/uninstalled state according to both the
# Extensions and TclTk features.
add_data(db, "Binary", [("Script", msilib.Binary("msisupport.dll"))])
# See "Custom Action Type 1"
if msilib.Win64:
CheckDir = "CheckDir"
UpdateEditIDLE = "UpdateEditIDLE"
else:
CheckDir = "_CheckDir@4"
UpdateEditIDLE = "_UpdateEditIDLE@4"
add_data(db, "CustomAction",
[("CheckDir", 1, "Script", CheckDir)])
if have_tcl:
add_data(db, "CustomAction",
[("UpdateEditIDLE", 1, "Script", UpdateEditIDLE)])
# UI customization properties
add_data(db, "Property",
# See "DefaultUIFont Property"
[("DefaultUIFont", "DlgFont8"),
# See "ErrorDialog Style Bit"
("ErrorDialog", "ErrorDlg"),
("Progress1", "Install"), # modified in maintenance type dlg
("Progress2", "installs"),
("MaintenanceForm_Action", "Repair")])
# Fonts, see "TextStyle Table"
add_data(db, "TextStyle",
[("DlgFont8", "Tahoma", 9, None, 0),
("DlgFontBold8", "Tahoma", 8, None, 1), #bold
("VerdanaBold10", "Verdana", 10, None, 1),
("VerdanaRed9", "Verdana", 9, 255, 0),
])
compileargs = r'-Wi "[TARGETDIR]Lib\compileall.py" -f -x "bad_coding|badsyntax|site-packages|py2_|lib2to3\\tests|venv\\scripts" "[TARGETDIR]Lib"'
lib2to3args = r'-c "import lib2to3.pygram, lib2to3.patcomp;lib2to3.patcomp.PatternCompiler()"'
# See "CustomAction Table"
add_data(db, "CustomAction", [
# msidbCustomActionTypeFirstSequence + msidbCustomActionTypeTextData + msidbCustomActionTypeProperty
# See "Custom Action Type 51",
# "Custom Action Execution Scheduling Options"
("InitialTargetDir", 307, "TARGETDIR",
"[WindowsVolume]Python%s%s" % (major, minor)),
("SetDLLDirToTarget", 307, "DLLDIR", "[TARGETDIR]"),
("SetDLLDirToSystem32", 307, "DLLDIR", SystemFolderName),
("SetLauncherDirToTarget", 307, "LAUNCHERDIR", "[TARGETDIR]"),
("SetLauncherDirToWindows", 307, "LAUNCHERDIR", "[WindowsFolder]"),
# msidbCustomActionTypeExe + msidbCustomActionTypeSourceFile
# See "Custom Action Type 18"
("CompilePyc", 18, "python.exe", compileargs),
("CompilePyo", 18, "python.exe", "-O "+compileargs),
("CompileGrammar", 18, "python.exe", lib2to3args),
])
# UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
# Numbers indicate sequence; see sequence.py for how these action integrate
add_data(db, "InstallUISequence",
[("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
("InitialTargetDir", 'TARGETDIR=""', 750),
# In the user interface, assume all-users installation if privileged.
("SetDLLDirToSystem32", 'DLLDIR="" and ' + sys32cond, 751),
("SetDLLDirToTarget", 'DLLDIR="" and not ' + sys32cond, 752),
("SetLauncherDirToWindows", 'LAUNCHERDIR="" and ' + sys32cond, 753),
("SetLauncherDirToTarget", 'LAUNCHERDIR="" and not ' + sys32cond, 754),
("SelectDirectoryDlg", "Not Installed", 1230),
# XXX no support for resume installations yet
#("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
("ProgressDlg", None, 1280)])
add_data(db, "AdminUISequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToTarget", 'DLLDIR=""', 751),
("SetLauncherDirToTarget", 'LAUNCHERDIR=""', 752),
])
# Prepend TARGETDIR to the system path, and remove it on uninstall.
add_data(db, "Environment",
[("PathAddition", "=-*Path", "[TARGETDIR];[~]", "REGISTRY.path")])
# Execute Sequences
add_data(db, "InstallExecuteSequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToSystem32", 'DLLDIR="" and ' + sys32cond, 751),
("SetDLLDirToTarget", 'DLLDIR="" and not ' + sys32cond, 752),
("SetLauncherDirToWindows", 'LAUNCHERDIR="" and ' + sys32cond, 753),
("SetLauncherDirToTarget", 'LAUNCHERDIR="" and not ' + sys32cond, 754),
("UpdateEditIDLE", None, 1050),
("CompilePyc", "COMPILEALL", 6800),
("CompilePyo", "COMPILEALL", 6801),
("CompileGrammar", "COMPILEALL", 6802),
])
add_data(db, "AdminExecuteSequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToTarget", 'DLLDIR=""', 751),
("SetLauncherDirToTarget", 'LAUNCHERDIR=""', 752),
("CompilePyc", "COMPILEALL", 6800),
("CompilePyo", "COMPILEALL", 6801),
("CompileGrammar", "COMPILEALL", 6802),
])
#####################################################################
# Standard dialogs: FatalError, UserExit, ExitDialog
fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
fatal.title("[ProductName] Installer ended prematurely")
fatal.back("< Back", "Finish", active = 0)
fatal.cancel("Cancel", "Back", active = 0)
fatal.text("Description1", 135, 70, 220, 80, 0x30003,
"[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
fatal.text("Description2", 135, 155, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c=fatal.next("Finish", "Cancel", name="Finish")
# See "ControlEvent Table". Parameters are the event, the parameter
# to the action, and optionally the condition for the event, and the order
# of events.
c.event("EndDialog", "Exit")
user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
user_exit.title("[ProductName] Installer was interrupted")
user_exit.back("< Back", "Finish", active = 0)
user_exit.cancel("Cancel", "Back", active = 0)
user_exit.text("Description1", 135, 70, 220, 80, 0x30003,
"[ProductName] setup was interrupted. Your system has not been modified. "
"To install this program at a later time, please run the installation again.")
user_exit.text("Description2", 135, 155, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = user_exit.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
exit_dialog.title("Complete the [ProductName] Installer")
exit_dialog.back("< Back", "Finish", active = 0)
exit_dialog.cancel("Cancel", "Back", active = 0)
exit_dialog.text("Acknowledgements", 135, 95, 220, 120, 0x30003,
"Special Windows thanks to:\n"
" Mark Hammond, without whose years of freely \n"
" shared Windows expertise, Python for Windows \n"
" would still be Python for DOS.")
c = exit_dialog.text("warning", 135, 200, 220, 40, 0x30003,
"{\\VerdanaRed9}Warning: Python 2.5.x is the last "
"Python release for Windows 9x.")
c.condition("Hide", "NOT Version9X")
exit_dialog.text("Description", 135, 235, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = exit_dialog.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Return")
#####################################################################
# Required dialog: FilesInUse, ErrorDlg
inuse = PyDialog(db, "FilesInUse",
x, y, w, h,
19, # KeepModeless|Modal|Visible
title,
"Retry", "Retry", "Retry", bitmap=False)
inuse.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Files in Use")
inuse.text("Description", 20, 23, 280, 20, 0x30003,
"Some files that need to be updated are currently in use.")
inuse.text("Text", 20, 55, 330, 50, 3,
"The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
None, None, None)
c=inuse.back("Exit", "Ignore", name="Exit")
c.event("EndDialog", "Exit")
c=inuse.next("Ignore", "Retry", name="Ignore")
c.event("EndDialog", "Ignore")
c=inuse.cancel("Retry", "Exit", name="Retry")
c.event("EndDialog","Retry")
# See "Error Dialog". See "ICE20" for the required names of the controls.
error = Dialog(db, "ErrorDlg",
50, 10, 330, 101,
65543, # Error|Minimize|Modal|Visible
title,
"ErrorText", None, None)
error.text("ErrorText", 50,9,280,48,3, "")
error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
#####################################################################
# Global "Query Cancel" dialog
cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
"No", "No", "No")
cancel.text("Text", 48, 15, 194, 30, 3,
"Are you sure you want to cancel [ProductName] installation?")
cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
"py.ico", None, None)
c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
c.event("EndDialog", "Exit")
c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Global "Wait for costing" dialog
costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
"Return", "Return", "Return")
costing.text("Text", 48, 15, 194, 30, 3,
"Please wait while the installer finishes determining your disk space requirements.")
costing.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
"py.ico", None, None)
c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
c.event("EndDialog", "Exit")
#####################################################################
# Preparation dialog: no user input except cancellation
prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel")
prep.text("Description", 135, 70, 220, 40, 0x30003,
"Please wait while the Installer prepares to guide you through the installation.")
prep.title("Welcome to the [ProductName] Installer")
c=prep.text("ActionText", 135, 110, 220, 20, 0x30003, "Pondering...")
c.mapping("ActionText", "Text")
c=prep.text("ActionData", 135, 135, 220, 30, 0x30003, None)
c.mapping("ActionData", "Text")
prep.back("Back", None, active=0)
prep.next("Next", None, active=0)
c=prep.cancel("Cancel", None)
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Target directory selection
seldlg = PyDialog(db, "SelectDirectoryDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
seldlg.title("Select Destination Directory")
c = seldlg.text("Existing", 135, 25, 235, 30, 0x30003,
"{\VerdanaRed9}This update will replace your existing [ProductLine] installation.")
c.condition("Hide", 'REMOVEOLDVERSION="" and REMOVEOLDSNAPSHOT=""')
seldlg.text("Description", 135, 50, 220, 40, 0x30003,
"Please select a directory for the [ProductName] files.")
seldlg.back("< Back", None, active=0)
c = seldlg.next("Next >", "Cancel")
c.event("DoAction", "CheckDir", "TargetExistsOk<>1", order=1)
# If the target exists, but we found that we are going to remove old versions, don't bother
# confirming that the target directory exists. Strictly speaking, we should determine that
# the target directory is indeed the target of the product that we are going to remove, but
# I don't know how to do that.
c.event("SpawnDialog", "ExistingDirectoryDlg", 'TargetExists=1 and REMOVEOLDVERSION="" and REMOVEOLDSNAPSHOT=""', 2)
c.event("SetTargetPath", "TARGETDIR", 'TargetExists=0 or REMOVEOLDVERSION<>"" or REMOVEOLDSNAPSHOT<>""', 3)
c.event("SpawnWaitDialog", "WaitForCostingDlg", "CostingComplete=1", 4)
c.event("NewDialog", "SelectFeaturesDlg", 'TargetExists=0 or REMOVEOLDVERSION<>"" or REMOVEOLDSNAPSHOT<>""', 5)
c = seldlg.cancel("Cancel", "DirectoryCombo")
c.event("SpawnDialog", "CancelDlg")
seldlg.control("DirectoryCombo", "DirectoryCombo", 135, 70, 172, 80, 393219,
"TARGETDIR", None, "DirectoryList", None)
seldlg.control("DirectoryList", "DirectoryList", 135, 90, 208, 136, 3, "TARGETDIR",
None, "PathEdit", None)
seldlg.control("PathEdit", "PathEdit", 135, 230, 206, 16, 3, "TARGETDIR", None, "Next", None)
c = seldlg.pushbutton("Up", 306, 70, 18, 18, 3, "Up", None)
c.event("DirectoryListUp", "0")
c = seldlg.pushbutton("NewDir", 324, 70, 30, 18, 3, "New", None)
c.event("DirectoryListNew", "0")
#####################################################################
# SelectFeaturesDlg
features = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal|track_disk_space,
title, "Tree", "Next", "Cancel")
features.title("Customize [ProductName]")
features.text("Description", 135, 35, 220, 15, 0x30003,
"Select the way you want features to be installed.")
features.text("Text", 135,45,220,30, 3,
"Click on the icons in the tree below to change the way features will be installed.")
c=features.back("< Back", "Next")
c.event("NewDialog", "SelectDirectoryDlg")
c=features.next("Next >", "Cancel")
c.mapping("SelectionNoItems", "Enabled")
c.event("SpawnDialog", "DiskCostDlg", "OutOfDiskSpace=1", order=1)
c.event("EndDialog", "Return", "OutOfDiskSpace<>1", order=2)
c=features.cancel("Cancel", "Tree")
c.event("SpawnDialog", "CancelDlg")
# The browse property is not used, since we have only a single target path (selected already)
features.control("Tree", "SelectionTree", 135, 75, 220, 95, 7, "_BrowseProperty",
"Tree of selections", "Back", None)
#c=features.pushbutton("Reset", 42, 243, 56, 17, 3, "Reset", "DiskCost")
#c.mapping("SelectionNoItems", "Enabled")
#c.event("Reset", "0")
features.control("Box", "GroupBox", 135, 170, 225, 90, 1, None, None, None, None)
c=features.xbutton("DiskCost", "Disk &Usage", None, 0.10)
c.mapping("SelectionNoItems","Enabled")
c.event("SpawnDialog", "DiskCostDlg")
c=features.xbutton("Advanced", "Advanced", None, 0.30)
c.event("SpawnDialog", "AdvancedDlg")
c=features.text("ItemDescription", 140, 180, 210, 40, 3,
"Multiline description of the currently selected item.")
c.mapping("SelectionDescription","Text")
c=features.text("ItemSize", 140, 225, 210, 33, 3,
"The size of the currently selected item.")
c.mapping("SelectionSize", "Text")
#####################################################################
# Disk cost
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
"OK", "OK", "OK", bitmap=False)
cost.text("Title", 15, 6, 200, 15, 0x30003,
"{\DlgFontBold8}Disk Space Requirements")
cost.text("Description", 20, 20, 280, 20, 0x30003,
"The disk space required for the installation of the selected features.")
cost.text("Text", 20, 53, 330, 60, 3,
"The highlighted volumes (if any) do not have enough disk space "
"available for the currently selected features. You can either "
"remove some files from the highlighted volumes, or choose to "
"install less features onto local drive(s), or select different "
"destination drive(s).")
cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
None, "{120}{70}{70}{70}{70}", None, None)
cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
#####################################################################
# WhichUsers Dialog. Only available on NT, and for privileged users.
# This must be run before FindRelatedProducts, because that will
# take into account whether the previous installation was per-user
# or per-machine. We currently don't support going back to this
# dialog after "Next" was selected; to support this, we would need to
# find how to reset the ALLUSERS property, and how to re-run
# FindRelatedProducts.
# On Windows9x, the ALLUSERS property is ignored on the command line
# and in the Property table, but installer fails according to the documentation
# if a dialog attempts to set ALLUSERS.
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
"AdminInstall", "Next", "Cancel")
whichusers.title("Select whether to install [ProductName] for all users of this computer.")
# A radio group with two options: allusers, justme
g = whichusers.radiogroup("AdminInstall", 135, 60, 235, 80, 3,
"WhichUsers", "", "Next")
g.condition("Disable", "VersionNT=600") # Not available on Vista and Windows 2008
g.add("ALL", 0, 5, 150, 20, "Install for all users")
g.add("JUSTME", 0, 25, 235, 20, "Install just for me (not available on Windows Vista)")
whichusers.back("Back", None, active=0)
c = whichusers.next("Next >", "Cancel")
c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
c.event("EndDialog", "Return", order = 2)
c = whichusers.cancel("Cancel", "AdminInstall")
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Advanced Dialog.
advanced = PyDialog(db, "AdvancedDlg", x, y, w, h, modal, title,
"CompilePyc", "Ok", "Ok")
advanced.title("Advanced Options for [ProductName]")
# A radio group with two options: allusers, justme
advanced.checkbox("CompilePyc", 135, 60, 230, 50, 3,
"COMPILEALL", "Compile .py files to byte code after installation", "Ok")
c = advanced.cancel("Ok", "CompilePyc", name="Ok") # Button just has location of cancel button.
c.event("EndDialog", "Return")
#####################################################################
# Existing Directory dialog
dlg = Dialog(db, "ExistingDirectoryDlg", 50, 30, 200, 80, modal, title,
"No", "No", "No")
dlg.text("Title", 10, 20, 180, 40, 3,
"[TARGETDIR] exists. Are you sure you want to overwrite existing files?")
c=dlg.pushbutton("Yes", 30, 60, 55, 17, 3, "Yes", "No")
c.event("[TargetExists]", "0", order=1)
c.event("[TargetExistsOk]", "1", order=2)
c.event("EndDialog", "Return", order=3)
c=dlg.pushbutton("No", 115, 60, 55, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Installation Progress dialog (modeless)
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel", bitmap=False)
progress.text("Title", 20, 15, 200, 15, 0x30003,
"{\DlgFontBold8}[Progress1] [ProductName]")
progress.text("Text", 35, 65, 300, 30, 3,
"Please wait while the Installer [Progress2] [ProductName]. "
"This may take several minutes.")
progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
c.mapping("ActionText", "Text")
#c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
#c.mapping("ActionData", "Text")
c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
None, "Progress done", None, None)
c.mapping("SetProgress", "Progress")
progress.back("< Back", "Next", active=False)
progress.next("Next >", "Cancel", active=False)
progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
# Maintenance type: repair/uninstall
maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
maint.title("Welcome to the [ProductName] Setup Wizard")
maint.text("BodyText", 135, 63, 230, 42, 3,
"Select whether you want to repair or remove [ProductName].")
g=maint.radiogroup("RepairRadioGroup", 135, 108, 230, 60, 3,
"MaintenanceForm_Action", "", "Next")
g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
maint.back("< Back", None, active=False)
c=maint.next("Finish", "Cancel")
# Change installation: Change progress dialog to "Change", then ask
# for feature selection
c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
# Reinstall: Change progress dialog to "Repair", then invoke reinstall
# Also set list of reinstalled features to "ALL"
c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
# Uninstall: Change progress to "Remove", then invoke uninstall
# Also set list of removed features to "ALL"
c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
# Close dialog when maintenance action scheduled
c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
# See "Feature Table". The feature level is 1 for all features,
# and the feature attributes are 0 for the DefaultFeature, and
# FollowParent for all other features. The numbers are the Display
# column.
def add_features(db):
    """Populate the MSI Feature table for the installer database.

    Creates the default feature plus sub-features (CRT, extensions,
    Tcl/Tk, docs, tools, test suite, PATH prepend) and binds them into
    module-level globals used by add_files()/add_registry().

    :param db: an open msilib installer database
    """
    # feature attributes:
    #   msidbFeatureAttributesFollowParent == 2
    #   msidbFeatureAttributesDisallowAdvertise == 8
    # Features that need to be installed together with the main feature
    # (i.e. additional Python libraries) need to follow the parent feature.
    # Features that have no advertisement trigger (e.g. the test suite)
    # must not support advertisement
    global default_feature, tcltk, htmlfiles, tools, testsuite, ext_feature, private_crt, prepend_path
    default_feature = Feature(db, "DefaultFeature", "Python",
                              "Python Interpreter and Libraries",
                              1, directory = "TARGETDIR")
    # Both CRT features start at level 0 (disabled); the Condition rows
    # below enable exactly one of them depending on sys32cond.
    shared_crt = Feature(db, "SharedCRT", "MSVCRT", "C Run-Time (system-wide)", 0,
                         level=0)
    private_crt = Feature(db, "PrivateCRT", "MSVCRT", "C Run-Time (private)", 0,
                          level=0)
    add_data(db, "Condition", [("SharedCRT", 1, sys32cond),
                               ("PrivateCRT", 1, "not "+sys32cond)])
    # We don't support advertisement of extensions
    ext_feature = Feature(db, "Extensions", "Register Extensions",
                          "Make this Python installation the default Python installation", 3,
                          parent = default_feature, attributes=2|8)
    if have_tcl:
        tcltk = Feature(db, "TclTk", "Tcl/Tk", "Tkinter, IDLE, pydoc", 5,
                        parent = default_feature, attributes=2)
    htmlfiles = Feature(db, "Documentation", "Documentation",
                        "Python HTMLHelp File", 7, parent = default_feature)
    tools = Feature(db, "Tools", "Utility Scripts",
                    "Python utility scripts (Tools/)", 9,
                    parent = default_feature, attributes=2)
    testsuite = Feature(db, "Testsuite", "Test suite",
                        "Python test suite (Lib/test/)", 11,
                        parent = default_feature, attributes=2|8)
    # prepend_path is an additional feature which is to be off by default.
    # Since the default level for the above features is 1, this needs to be
    # at least level higher.
    prepend_path = Feature(db, "PrependPath", "Add python.exe to Path",
                           "Prepend [TARGETDIR] to the system Path variable. "
                           "This allows you to type 'python' into a command "
                           "prompt without needing the full path.", 13,
                           parent = default_feature, attributes=2|8,
                           level=2)
def extract_msvcr100():
    """Locate the VC100 CRT redistributable copy of msvcr100.dll.

    Returns a (filename, keywords) pair suitable for Directory.add_file:
    the keyword dict carries the source path plus the DLL's file version
    and language as reported by the installer object.
    """
    # Find the redistributable files; pick the architecture-specific folder.
    if msilib.Win64:
        arch = "x64"
    else:
        arch = "x86"
    # Renamed from `dir` to avoid shadowing the builtin; also dropped the
    # unused `result = []` local present in the original.
    redist_dir = os.path.join(os.environ['VS100COMNTOOLS'],
                              r"..\..\VC\redist\%s\Microsoft.VC100.CRT" % arch)
    installer = msilib.MakeInstaller()
    # At least for VS2010, manifests are no longer provided
    name = "msvcr100.dll"
    path = os.path.join(redist_dir, name)
    kw = {'src': path}
    kw['version'] = installer.FileVersion(path, 0)
    kw['language'] = installer.FileVersion(path, 1)
    return name, kw
def generate_license():
    """Write LICENSE.txt: the Python license followed by the CRT and
    bundled third-party (bzip2, openssl, Tcl, Tk, Tix) license texts.

    Raises ValueError if a bundled package directory cannot be found
    next to the source tree, or if multiple copies exist for a release
    (non-snapshot) build.
    """
    import shutil, glob
    # Use context managers so every file handle is closed even on error
    # (the original leaked the handles passed straight to copyfileobj).
    with open("LICENSE.txt", "w") as out:
        with open(os.path.join(srcdir, "LICENSE")) as f:
            shutil.copyfileobj(f, out)
        with open("crtlicense.txt") as f:
            shutil.copyfileobj(f, out)
        for name, pat, file in (("bzip2","bzip2-*", "LICENSE"),
                                ("openssl", "openssl-*", "LICENSE"),
                                ("Tcl", "tcl8*", "license.terms"),
                                ("Tk", "tk8*", "license.terms"),
                                ("Tix", "tix-*", "license.terms")):
            out.write("\nThis copy of Python includes a copy of %s, which is licensed under the following terms:\n\n" % name)
            dirs = glob.glob(srcdir+"/../"+pat)
            # Parenthesized raise form works on both Python 2 and 3
            # (the original `raise ValueError, msg` is Python-2-only syntax).
            if not dirs:
                raise ValueError("Could not find "+srcdir+"/../"+pat)
            if len(dirs) > 2 and not snapshot:
                raise ValueError("Multiple copies of "+pat)
            with open(os.path.join(dirs[0], file)) as f:
                shutil.copyfileobj(f, out)
class PyDirectory(Directory):
    """By default, all components in the Python installer
    can run from source."""
    def __init__(self, *args, **kw):
        # msidbComponentAttributesOptional: set only when the caller
        # did not choose explicit component flags.
        kw.setdefault('componentflags', 2)
        Directory.__init__(self, *args, **kw)
def hgmanifest():
    """Return the Mercurial manifest as a nested dict tree:
    directory names map to sub-dicts, file names map to None."""
    # Fetch file list from Mercurial
    proc = subprocess.Popen(['hg', 'manifest'], stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    # Create nested directories for file tree
    tree = {}
    for line in out.splitlines():
        parts = line.split('/')
        node = tree
        # Descend/create one dict per path component except the last.
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        # The final component is a file: mark it with None.
        node[parts[-1]] = None
    return tree
# See "File Table", "Component Table", "Directory Table",
# "FeatureComponents Table"
def add_files(db):
    """Populate the File/Component/Directory/FeatureComponents tables.

    Walks the source tree (driven by the Mercurial manifest) and adds
    executables, the launcher, DLLs, the standard library, headers,
    import libraries, Tcl/Tk, tools and documentation to the CAB and
    the installer database.

    :param db: an open msilib installer database
    """
    installer = msilib.MakeInstaller()
    hgfiles = hgmanifest()
    cab = CAB("python")
    tmpfiles = []
    # Add all executables, icons, text files into the TARGETDIR component
    root = PyDirectory(db, cab, None, srcdir, "TARGETDIR", "SourceDir")
    default_feature.set_current()
    root.add_file("README.txt", src="README")
    root.add_file("NEWS.txt", src="Misc/NEWS")
    generate_license()
    root.add_file("LICENSE.txt", src=os.path.abspath("LICENSE.txt"))
    root.start_component("python.exe", keyfile="python.exe")
    root.add_file("%s/python.exe" % PCBUILD)
    root.start_component("pythonw.exe", keyfile="pythonw.exe")
    root.add_file("%s/pythonw.exe" % PCBUILD)
    # msidbComponentAttributesSharedDllRefCount = 8, see "Component Table"
    dlldir = PyDirectory(db, cab, root, srcdir, "DLLDIR", ".")
    launcherdir = PyDirectory(db, cab, root, srcdir, "LAUNCHERDIR", ".")
    # msidbComponentAttributes64bit = 256; this disables registry redirection
    # to allow setting the SharedDLLs key in the 64-bit portion even for a
    # 32-bit installer.
    # XXX does this still allow to install the component on a 32-bit system?
    # Pick up 32-bit binary always
    launchersrc = PCBUILD
    if launchersrc.lower() == 'pcbuild\\x64-pgo':
        launchersrc = 'PCBuild\\win32-pgo'
    if launchersrc.lower() == 'pcbuild\\amd64':
        launchersrc = 'PCBuild'
    launcher = os.path.join(srcdir, launchersrc, "py.exe")
    launcherdir.start_component("launcher", flags = 8+256, keyfile="py.exe")
    launcherdir.add_file(launcher,
                         version=installer.FileVersion(launcher, 0),
                         language=installer.FileVersion(launcher, 1))
    launcherw = os.path.join(srcdir, launchersrc, "pyw.exe")
    launcherdir.start_component("launcherw", flags = 8+256, keyfile="pyw.exe")
    launcherdir.add_file(launcherw,
                         version=installer.FileVersion(launcherw, 0),
                         language=installer.FileVersion(launcherw, 1))
    pydll = "python%s%s.dll" % (major, minor)
    pydllsrc = os.path.join(srcdir, PCBUILD, pydll)
    dlldir.start_component("DLLDIR", flags = 8, keyfile = pydll, uuid = pythondll_uuid)
    pyversion = installer.FileVersion(pydllsrc, 0)
    if not snapshot:
        # For releases, the Python DLL has the same version as the
        # installer package.
        assert pyversion.split(".")[:3] == current_version.split(".")
    dlldir.add_file("%s/python%s%s.dll" % (PCBUILD, major, minor),
                    version=pyversion,
                    language=installer.FileVersion(pydllsrc, 1))
    DLLs = PyDirectory(db, cab, root, srcdir + "/" + PCBUILD, "DLLs", "DLLS|DLLs")

    # msvcr90.dll: Need to place the DLL and the manifest into the root directory,
    # plus another copy of the manifest in the DLLs directory, with the manifest
    # pointing to the root directory
    root.start_component("msvcr90", feature=private_crt)
    # Results are ID,keyword pairs
    crtdll, kwds = extract_msvcr100()
    root.add_file(crtdll, **kwds)
    # Copy the manifest
    # Actually, don't do that anymore - no DLL in DLLs should have a manifest
    # dependency on msvcr90.dll anymore, so this should not be necessary
    #manifest_dlls = manifest[0]+".root"
    #open(manifest_dlls, "w").write(open(manifest[1]['src']).read().replace("msvcr","../msvcr"))
    #DLLs.start_component("msvcr90_dlls", feature=private_crt)
    #DLLs.add_file(manifest[0], src=os.path.abspath(manifest_dlls))

    # Now start the main component for the DLLs directory;
    # no regular files have been added to the directory yet.
    DLLs.start_component()

    # Check if _ctypes.pyd exists
    have_ctypes = os.path.exists(srcdir+"/%s/_ctypes.pyd" % PCBUILD)
    if not have_ctypes:
        print("WARNING: _ctypes.pyd not found, ctypes will not be included")
        extensions.remove("_ctypes.pyd")

    # Add all .py files in Lib, except tkinter, test
    dirs = []
    pydirs = [(root, "Lib", hgfiles["Lib"], default_feature)]
    while pydirs:
        # Commit every now and then, or else installer will complain
        db.Commit()
        parent, dir, files, feature = pydirs.pop()
        if dir.startswith("plat-"):
            continue
        if dir in ["tkinter", "idlelib", "turtledemo"]:
            if not have_tcl:
                continue
            feature = tcltk
            tcltk.set_current()
        elif dir in ('test', 'tests'):
            feature = testsuite
        elif not have_ctypes and dir == "ctypes":
            continue
        feature.set_current()
        lib = PyDirectory(db, cab, parent, dir, dir, "%s|%s" % (parent.make_short(dir), dir))
        dirs.append(lib)
        has_py = False
        for name, subdir in files.items():
            if subdir is None:
                # Leaf entry in the manifest tree: a regular file.
                assert os.path.isfile(os.path.join(lib.absolute, name))
                if name == 'README':
                    lib.add_file("README.txt", src="README")
                else:
                    lib.add_file(name)
                    has_py = has_py or name.endswith(".py") or name.endswith(".pyw")
            else:
                # Sub-dict: a directory, queued for a later iteration.
                assert os.path.isdir(os.path.join(lib.absolute, name))
                pydirs.append((lib, name, subdir, feature))

        if has_py:
            lib.remove_pyc()

    # Add DLLs
    default_feature.set_current()
    lib = DLLs
    lib.add_file("py.ico", src=srcdir+"/PC/py.ico")
    lib.add_file("pyc.ico", src=srcdir+"/PC/pyc.ico")
    dlls = []
    tclfiles = []
    for f in extensions:
        if f=="_tkinter.pyd":
            continue
        if not os.path.exists(srcdir + "/" + PCBUILD + "/" + f):
            print("WARNING: Missing extension", f)
            continue
        dlls.append(f)
        lib.add_file(f)
    lib.add_file('python3.dll')
    # Add sqlite
    if msilib.msi_type=="Intel64;1033":
        sqlite_arch = "/ia64"
    elif msilib.msi_type=="x64;1033":
        sqlite_arch = "/amd64"
        tclsuffix = "64"
    else:
        sqlite_arch = ""
        tclsuffix = ""
    lib.add_file("sqlite3.dll")
    if have_tcl:
        if not os.path.exists("%s/%s/_tkinter.pyd" % (srcdir, PCBUILD)):
            print("WARNING: Missing _tkinter.pyd")
        else:
            lib.start_component("TkDLLs", tcltk)
            lib.add_file("_tkinter.pyd")
            dlls.append("_tkinter.pyd")
            tcldir = os.path.normpath(srcdir+("/../tcltk%s/bin" % tclsuffix))
            for f in glob.glob1(tcldir, "*.dll"):
                lib.add_file(f, src=os.path.join(tcldir, f))
    # check whether there are any unknown extensions
    for f in glob.glob1(srcdir+"/"+PCBUILD, "*.pyd"):
        if f.endswith("_d.pyd"): continue # debug version
        if f in dlls: continue
        print("WARNING: Unknown extension", f)

    # Add headers
    default_feature.set_current()
    lib = PyDirectory(db, cab, root, "include", "include", "INCLUDE|include")
    lib.glob("*.h")
    lib.add_file("pyconfig.h", src="../PC/pyconfig.h")
    # Add import libraries
    lib = PyDirectory(db, cab, root, PCBUILD, "libs", "LIBS|libs")
    for f in dlls:
        lib.add_file(f.replace('pyd','lib'))
    lib.add_file('python%s%s.lib' % (major, minor))
    lib.add_file('python3.lib')
    # Add the mingw-format library
    if have_mingw:
        lib.add_file('libpython%s%s.a' % (major, minor))
    if have_tcl:
        # Add Tcl/Tk: iterative walk over the tcltk lib tree.
        tcldirs = [(root, '../tcltk%s/lib' % tclsuffix, 'tcl')]
        tcltk.set_current()
        while tcldirs:
            parent, phys, dir = tcldirs.pop()
            lib = PyDirectory(db, cab, parent, phys, dir, "%s|%s" % (parent.make_short(dir), dir))
            if not os.path.exists(lib.absolute):
                continue
            for f in os.listdir(lib.absolute):
                if os.path.isdir(os.path.join(lib.absolute, f)):
                    tcldirs.append((lib, f, f))
                else:
                    lib.add_file(f)
    # Add tools
    tools.set_current()
    tooldir = PyDirectory(db, cab, root, "Tools", "Tools", "TOOLS|Tools")
    for f in ['i18n', 'pynche', 'Scripts']:
        lib = PyDirectory(db, cab, tooldir, f, f, "%s|%s" % (tooldir.make_short(f), f))
        lib.glob("*.py")
        lib.glob("*.pyw", exclude=['pydocgui.pyw'])
        lib.remove_pyc()
        lib.glob("*.txt")
        if f == "pynche":
            x = PyDirectory(db, cab, lib, "X", "X", "X|X")
            x.glob("*.txt")
        if os.path.exists(os.path.join(lib.absolute, "README")):
            lib.add_file("README.txt", src="README")
        if f == 'Scripts':
            lib.add_file("2to3.py", src="2to3")
            lib.add_file("pydoc3.py", src="pydoc3")
            lib.add_file("pyvenv.py", src="pyvenv")
            if have_tcl:
                lib.start_component("pydocgui.pyw", tcltk, keyfile="pydocgui.pyw")
                lib.add_file("pydocgui.pyw")
    # Add documentation
    htmlfiles.set_current()
    lib = PyDirectory(db, cab, root, "Doc", "Doc", "DOC|Doc")
    lib.start_component("documentation", keyfile=docfile)
    lib.add_file(docfile, src="build/htmlhelp/"+docfile)

    cab.commit(db)

    for f in tmpfiles:
        os.unlink(f)
# See "Registry Table", "Component Table"
def add_registry(db):
    """Populate the Registry, Component, FeatureComponents, Environment,
    Directory, RemoveFile and Shortcut tables.

    Registers file extensions, verbs, icons, MIME types, the DropHandler,
    PythonCore registry keys and Start-menu shortcuts.

    :param db: an open msilib installer database
    """
    # File extensions, associated with the REGISTRY.def component
    # IDLE verbs depend on the tcltk feature.
    # msidbComponentAttributesRegistryKeyPath = 4
    # -1 for Root specifies "dependent on ALLUSERS property"
    tcldata = []
    if have_tcl:
        tcldata = [
            ("REGISTRY.tcl", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
             "py.IDLE")]
    add_data(db, "Component",
             # msidbComponentAttributesRegistryKeyPath = 4
             [("REGISTRY", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
               "InstallPath"),
              ("REGISTRY.doc", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
               "Documentation"),
              ("REGISTRY.path", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
               None),
              ("REGISTRY.def", msilib.gen_uuid(), "TARGETDIR", registry_component,
               None, None)] + tcldata)
    # See "FeatureComponents Table".
    # The association between TclTk and pythonw.exe is necessary to make ICE59
    # happy, because the installer otherwise believes that the IDLE and PyDoc
    # shortcuts might get installed without pythonw.exe being installed. This
    # is not true, since installing TclTk will install the default feature, which
    # will cause pythonw.exe to be installed.
    # REGISTRY.tcl is not associated with any feature, as it will be requested
    # through a custom action
    tcldata = []
    if have_tcl:
        tcldata = [(tcltk.id, "pythonw.exe")]
    add_data(db, "FeatureComponents",
             [(default_feature.id, "REGISTRY"),
              (htmlfiles.id, "REGISTRY.doc"),
              (prepend_path.id, "REGISTRY.path"),
              (ext_feature.id, "REGISTRY.def")] +
             tcldata
             )
    # Extensions are not advertised. For advertised extensions,
    # we would need separate binaries that install along with the
    # extension.
    pat = r"Software\Classes\%sPython.%sFile\shell\%s\command"
    ewi = "Edit with IDLE"
    pat2 = r"Software\Classes\%sPython.%sFile\DefaultIcon"
    pat3 = r"Software\Classes\%sPython.%sFile"
    pat4 = r"Software\Classes\%sPython.%sFile\shellex\DropHandler"
    tcl_verbs = []
    if have_tcl:
        tcl_verbs=[
            ("py.IDLE", -1, pat % (testprefix, "", ewi), "",
             r'"[TARGETDIR]pythonw.exe" "[TARGETDIR]Lib\idlelib\idle.pyw" -e "%1"',
             "REGISTRY.tcl"),
            ("pyw.IDLE", -1, pat % (testprefix, "NoCon", ewi), "",
             r'"[TARGETDIR]pythonw.exe" "[TARGETDIR]Lib\idlelib\idle.pyw" -e "%1"',
             "REGISTRY.tcl"),
        ]
    add_data(db, "Registry",
             [# Extensions
              ("py.ext", -1, r"Software\Classes\."+ext, "",
               "Python.File", "REGISTRY.def"),
              ("pyw.ext", -1, r"Software\Classes\."+ext+'w', "",
               "Python.NoConFile", "REGISTRY.def"),
              ("pyc.ext", -1, r"Software\Classes\."+ext+'c', "",
               "Python.CompiledFile", "REGISTRY.def"),
              ("pyo.ext", -1, r"Software\Classes\."+ext+'o', "",
               "Python.CompiledFile", "REGISTRY.def"),
              # MIME types
              ("py.mime", -1, r"Software\Classes\."+ext, "Content Type",
               "text/plain", "REGISTRY.def"),
              ("pyw.mime", -1, r"Software\Classes\."+ext+'w', "Content Type",
               "text/plain", "REGISTRY.def"),
              #Verbs
              ("py.open", -1, pat % (testprefix, "", "open"), "",
               r'"[LAUNCHERDIR]py.exe" "%1" %*', "REGISTRY.def"),
              ("pyw.open", -1, pat % (testprefix, "NoCon", "open"), "",
               r'"[LAUNCHERDIR]pyw.exe" "%1" %*', "REGISTRY.def"),
              ("pyc.open", -1, pat % (testprefix, "Compiled", "open"), "",
               r'"[LAUNCHERDIR]py.exe" "%1" %*', "REGISTRY.def"),
              ] + tcl_verbs + [
              #Icons
              ("py.icon", -1, pat2 % (testprefix, ""), "",
               r'[DLLs]py.ico', "REGISTRY.def"),
              ("pyw.icon", -1, pat2 % (testprefix, "NoCon"), "",
               r'[DLLs]py.ico', "REGISTRY.def"),
              ("pyc.icon", -1, pat2 % (testprefix, "Compiled"), "",
               r'[DLLs]pyc.ico', "REGISTRY.def"),
              # Descriptions
              ("py.txt", -1, pat3 % (testprefix, ""), "",
               "Python File", "REGISTRY.def"),
              ("pyw.txt", -1, pat3 % (testprefix, "NoCon"), "",
               "Python File (no console)", "REGISTRY.def"),
              ("pyc.txt", -1, pat3 % (testprefix, "Compiled"), "",
               "Compiled Python File", "REGISTRY.def"),
              # Drop Handler
              ("py.drop", -1, pat4 % (testprefix, ""), "",
               "{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
              ("pyw.drop", -1, pat4 % (testprefix, "NoCon"), "",
               "{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
              ("pyc.drop", -1, pat4 % (testprefix, "Compiled"), "",
               "{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
              ])
    # PATHEXT
    add_data(db, "Environment",
             [("PathExtAddition", "=-*PathExt", "[~];.PY", "REGISTRY.def")])
    # Registry keys
    prefix = r"Software\%sPython\PythonCore\%s" % (testprefix, short_version)
    add_data(db, "Registry",
             [("InstallPath", -1, prefix+r"\InstallPath", "", "[TARGETDIR]", "REGISTRY"),
              ("InstallGroup", -1, prefix+r"\InstallPath\InstallGroup", "",
               "Python %s" % short_version, "REGISTRY"),
              ("PythonPath", -1, prefix+r"\PythonPath", "",
               r"[TARGETDIR]Lib;[TARGETDIR]DLLs", "REGISTRY"),
              ("Documentation", -1, prefix+r"\Help\Main Python Documentation", "",
               "[TARGETDIR]Doc\\"+docfile , "REGISTRY.doc"),
              ("Modules", -1, prefix+r"\Modules", "+", None, "REGISTRY"),
              ("AppPaths", -1, r"Software\Microsoft\Windows\CurrentVersion\App Paths\Python.exe",
               "", r"[TARGETDIR]Python.exe", "REGISTRY.def"),
              ("DisplayIcon", -1,
               r"Software\Microsoft\Windows\CurrentVersion\Uninstall\%s" % product_code,
               "DisplayIcon", "[TARGETDIR]python.exe", "REGISTRY")
              ])
    # Shortcuts, see "Shortcut Table"
    add_data(db, "Directory",
             [("ProgramMenuFolder", "TARGETDIR", "."),
              ("MenuDir", "ProgramMenuFolder", "PY%s%s|%sPython %s.%s" % (major,minor,testprefix,major,minor))])
    add_data(db, "RemoveFile",
             [("MenuDir", "TARGETDIR", None, "MenuDir", 2)])
    tcltkshortcuts = []
    if have_tcl:
        tcltkshortcuts = [
            ("IDLE", "MenuDir", "IDLE|IDLE (Python GUI)", "pythonw.exe",
             tcltk.id, r'"[TARGETDIR]Lib\idlelib\idle.pyw"', None, None, "python_icon.exe", 0, None, "TARGETDIR"),
            ("PyDoc", "MenuDir", "MODDOCS|Module Docs", "pythonw.exe",
             tcltk.id, r'"[TARGETDIR]Tools\scripts\pydocgui.pyw"', None, None, "python_icon.exe", 0, None, "TARGETDIR"),
            ]
    add_data(db, "Shortcut",
             tcltkshortcuts +
             [# Advertised shortcuts: targets are features, not files
              ("Python", "MenuDir", "PYTHON|Python (command line)", "python.exe",
               default_feature.id, None, None, None, "python_icon.exe", 2, None, "TARGETDIR"),
              # Advertising the Manual breaks on (some?) Win98, and the shortcut lacks an
              # icon first.
              #("Manual", "MenuDir", "MANUAL|Python Manuals", "documentation",
              # htmlfiles.id, None, None, None, None, None, None, None),
              ## Non-advertised shortcuts: must be associated with a registry component
              ("Manual", "MenuDir", "MANUAL|Python Manuals", "REGISTRY.doc",
               "[#%s]" % docfile, None,
               None, None, None, None, None, None),
              ("Uninstall", "MenuDir", "UNINST|Uninstall Python", "REGISTRY",
               SystemFolderName+"msiexec", "/x%s" % product_code,
               None, None, None, None, None, None),
              ])
    db.Commit()
def build_pdbzip():
    """Zip the non-debug PDB files from the build directory into
    python-<version><arch>-pdb.zip in the current directory."""
    # Helper binaries whose symbols are not shipped to users
    pdbexclude = ['kill_python.pdb', 'make_buildinfo.pdb',
                  'make_versioninfo.pdb']
    path = "python-%s%s-pdb.zip" % (full_current_version, msilib.arch_ext)
    pdbzip = zipfile.ZipFile(path, 'w')
    try:
        for f in glob.glob1(os.path.join(srcdir, PCBUILD), "*.pdb"):
            # Skip excluded helpers and debug (_d) builds
            if f not in pdbexclude and not f.endswith('_d.pdb'):
                pdbzip.write(os.path.join(srcdir, PCBUILD, f), f)
    finally:
        # Close the archive even if writing a member raised
        # (the original leaked the handle on error).
        pdbzip.close()
# Main build sequence: create the database, fill its tables, commit,
# then release the handle so the file can be merged below.
db,msiname = build_database()
try:
    add_features(db)
    add_ui(db)
    add_files(db)
    add_registry(db)
    remove_old_versions(db)
    db.Commit()
finally:
    # Drop the reference so the database file is closed before merging.
    del db

# Merge CRT into MSI file. This requires the database to be closed.
mod_dir = os.path.join(os.environ["ProgramFiles"], "Common Files", "Merge Modules")
if msilib.Win64:
    modules = ["Microsoft_VC100_CRT_x64.msm"]
else:
    modules = ["Microsoft_VC100_CRT_x86.msm"]
# Expand each merge-module name to its full path.
for i, n in enumerate(modules):
    modules[i] = os.path.join(mod_dir, n)
def merge(msi, feature, rootdir, modules):
    """Merge the given merge modules (.msm) into the MSI database.

    Step 1 merges each module and extracts its CAB; step 2 appends the
    extracted CABs as new Media streams and removes the ALLUSERS
    property the merge modules set.

    :param msi: path to the MSI file
    :param feature: feature name the merged components attach to
    :param rootdir: directory the modules are rooted at
    :param modules: list of merge-module paths
    """
    cab_and_filecount = []
    # Step 1: Merge databases, extract cabfiles
    m = msilib.MakeMerge2()
    m.OpenLog("merge.log")
    m.OpenDatabase(msi)
    for module in modules:
        print module
        m.OpenModule(module,0)
        m.Merge(feature, rootdir)
        # Report any merge conflicts for diagnosis.
        print "Errors:"
        for e in m.Errors:
            print e.Type, e.ModuleTable, e.DatabaseTable
            print " Modkeys:",
            for s in e.ModuleKeys: print s,
            print
            print " DBKeys:",
            for s in e.DatabaseKeys: print s,
            print
        cabname = tempfile.mktemp(suffix=".cab")
        m.ExtractCAB(cabname)
        cab_and_filecount.append((cabname, len(m.ModuleFiles)))
        m.CloseModule()
    m.CloseDatabase(True)
    m.CloseLog()
    # Step 2: Add CAB files
    i = msilib.MakeInstaller()
    db = i.OpenDatabase(msi, constants.msiOpenDatabaseModeTransact)
    # Find the highest existing media sequence number.
    v = db.OpenView("SELECT LastSequence FROM Media")
    v.Execute(None)
    maxmedia = -1
    while 1:
        r = v.Fetch()
        if not r: break
        seq = r.IntegerData(1)
        if seq > maxmedia:
            maxmedia = seq
    print "Start of Media", maxmedia
    # Append each extracted CAB as an embedded (#stream) media entry.
    for cabname, count in cab_and_filecount:
        stream = "merged%d" % maxmedia
        msilib.add_data(db, "Media",
                        [(maxmedia+1, maxmedia+count, None, "#"+stream, None, None)])
        msilib.add_stream(db, stream, cabname)
        os.unlink(cabname)
        maxmedia += count
    # The merge module sets ALLUSERS to 1 in the property table.
    # This is undesired; delete that
    v = db.OpenView("DELETE FROM Property WHERE Property='ALLUSERS'")
    v.Execute(None)
    v.Close()
    db.Commit()
# Merge the CRT merge modules into the finished MSI, then optionally
# sign the installer and package the PDB files.
merge(msiname, "SharedCRT", "TARGETDIR", modules)

# certname (from config.py) should be (a substring of)
# the certificate subject, e.g. "Python Software Foundation"
if certname:
    os.system('signtool sign /n "%s" '
              '/t http://timestamp.verisign.com/scripts/timestamp.dll '
              '/d "Python %s" '
              '%s' % (certname, full_current_version, msiname))

if pdbzip:
    build_pdbzip()
| 44.499649 | 196 | 0.601873 |
73f1d4d802bc7b0ac1fa717d2a19529b691ca757 | 3,274 | py | Python | vscoscrape/constants.py | mvabdi/vsco-scraper | c95bcc0a8f434144dd80754e9ee0524a916a6141 | [
"MIT"
] | 94 | 2018-01-24T20:49:04.000Z | 2022-03-20T12:26:30.000Z | vscoscrape/constants.py | mvabdi/VSCOScrape | 190a6a167b8aed2b4cfff9dcd5e4c92238a78345 | [
"MIT"
] | 22 | 2018-04-09T21:33:20.000Z | 2022-03-28T23:22:11.000Z | vscoscrape/constants.py | mvabdi/VSCOScrape | 190a6a167b8aed2b4cfff9dcd5e4c92238a78345 | [
"MIT"
] | 25 | 2018-02-14T01:38:56.000Z | 2021-12-27T22:11:21.000Z | #!/usr/bin/env python3
# This file holds the constants used in the scraper object
import random
user_agents = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
"Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1)",
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
"Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)",
"Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)",
]
visitvsco = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,en;q=0.9",
"Connection": "keep-alive",
"Host": "vsco.co",
"Upgrade-Insecure-Requests": "1",
"User-Agent": random.choice(user_agents),
}
visituserinfo = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,en;q=0.9",
"Connection": "keep-alive",
"Host": "vsco.co",
"Referer": "http://vsco.co/bob/images/1",
"User-Agent": random.choice(user_agents),
}
media = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,en;q=0.9",
"Connection": "keep-alive",
"Host": "vsco.co",
"Referer": "http://vsco.co/bob/images/1",
"User-Agent": random.choice(user_agents),
"X-Client-Build": "1",
"X-Client-Platform": "web",
}
| 52.806452 | 134 | 0.648137 |
73f22609ed8a51b3bb692b848e8ff427af4f3471 | 13 | py | Python | v0.90/dc/models/__init__.py | iaiting/Flask-and-pywebview-followup-application-gui | b665334403b4a8471b5f28054ee2dc7adda7d9fc | [
"MIT"
] | 53 | 2015-01-15T04:38:29.000Z | 2022-01-26T12:34:14.000Z | script.module.ytpsearch/resources/__init__.py | muaddibttv/tantrumrepo | f5529dfb072cfe0c621db22374daf1456fe00c12 | [
"Beerware"
] | 2 | 2016-11-24T09:24:34.000Z | 2018-10-29T06:55:07.000Z | script.module.ytpsearch/resources/__init__.py | muaddibttv/tantrumrepo | f5529dfb072cfe0c621db22374daf1456fe00c12 | [
"Beerware"
] | 8 | 2018-04-03T15:46:42.000Z | 2020-07-17T18:12:11.000Z | #Nothing here | 13 | 13 | 0.846154 |
73f22b024976c68d28f46e927100e8915f3c9f4b | 14,679 | py | Python | ninecms/admin.py | Wtower/django-ninecms | e500010fb11f06c8dfe8d8c9c4d2aab0b15bc127 | [
"BSD-3-Clause"
] | 49 | 2015-11-17T16:18:02.000Z | 2021-12-10T15:01:03.000Z | ninecms/admin.py | Wtower/django-ninecms | e500010fb11f06c8dfe8d8c9c4d2aab0b15bc127 | [
"BSD-3-Clause"
] | 39 | 2015-11-18T12:20:14.000Z | 2022-01-19T10:47:19.000Z | ninecms/admin.py | Wtower/django-ninecms | e500010fb11f06c8dfe8d8c9c4d2aab0b15bc127 | [
"BSD-3-Clause"
] | 9 | 2016-04-07T12:12:05.000Z | 2021-02-04T15:42:02.000Z | """ Admin objects declaration for Nine CMS """
__author__ = 'George Karakostas'
__copyright__ = 'Copyright 2015, George Karakostas'
__licence__ = 'BSD-3'
__email__ = 'gkarak@9-dev.com'
from django.contrib import admin, messages
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from mptt.admin import MPTTModelAdmin
# noinspection PyPackageRequirements
from guardian.shortcuts import get_objects_for_user
from ninecms import models, forms, views
# noinspection PyMethodMayBeStatic
@admin.register(models.PageType)
class PageTypeAdmin(admin.ModelAdmin):
    """Admin change list and edit form for page types."""
    list_display = ('name', 'description', 'url_pattern', 'elements', 'operations')
    list_editable = ('description', 'url_pattern')
    search_fields = ['name']
    form = forms.PageTypeForm
    save_as = True

    def elements(self, obj):
        """Custom column: how many block elements the page type layout has.

        :param obj: a page type object
        :return: column output
        """
        return obj.pagelayoutelement_set.count()
    elements.short_description = "Blocks"

    def operations(self, obj):
        """Custom column with edit / permissions links for the row.

        :param obj: a page type object
        :return: column output
        """
        edit_link = '<a href="%s">%s</a>' % (
            reverse('admin:ninecms_pagetype_change', args=(obj.id,)), _("edit"))
        perms_link = '<a href="%s">%s</a>' % (
            reverse('admin:ninecms_pagetype_perms', args=(obj.id,)), _("permissions"))
        return ' | '.join((edit_link, perms_link))
    operations.allow_tags = True

    def get_urls(self):
        """Prepend the custom permissions view to the default admin urls.

        :return: urls list
        """
        perms_url = url(
            r'^(?P<type_id>\d+)/perms/$',
            self.admin_site.admin_view(views.ContentTypePermsView.as_view()),
            name='ninecms_pagetype_perms')
        return [perms_url] + super(PageTypeAdmin, self).get_urls()
class NodeRevisionInline(admin.StackedInline):
    """Stacked inline of node revisions, shown on the node form (NodeAdmin)."""
    model = models.NodeRevision
    extra = 0
class ImageInline(admin.StackedInline):
    """Stacked inline of images, shown on the node form (NodeAdmin)."""
    model = models.Image
    form = forms.ImageForm
    extra = 0
    template = 'admin/ninecms/image/stacked.html'
class FileInline(admin.StackedInline):
    """Stacked inline of files, shown on the node form (NodeAdmin)."""
    model = models.File
    form = forms.FileForm
    extra = 0
class VideoInline(admin.StackedInline):
    """Stacked inline of videos, shown on the node form (NodeAdmin)."""
    model = models.Video
    form = forms.VideoForm
    extra = 0
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
@admin.register(models.Node)
class NodeAdmin(admin.ModelAdmin):
    """Admin change list and edit form for nodes, with media/revision inlines."""
    list_display = ('title', 'page_type', 'language', 'alias', 'user', 'status', 'promote', 'sticky', 'created',
                    'changed', 'original_translation', 'redirect', 'operations')
    list_editable = ('status', 'promote', 'sticky', 'redirect')
    list_filter = ['page_type', 'created', 'changed']
    search_fields = ['title', 'summary', 'body', 'highlight']
    actions = ['node_publish', 'node_unpublish', 'node_promote', 'node_demote', 'node_sticky', 'node_unsticky',
               'node_reset_alias']
    date_hierarchy = 'created'
    form = forms.ContentNodeEditForm
    # fieldsets returned from overridden get_fieldsets method below
    inlines = [ImageInline, FileInline, VideoInline, NodeRevisionInline]

    def operations(self, obj):
        """Custom column with view / edit links for the row.

        :param obj: a node object
        :return: column output
        """
        view_link = '<a href="%s" target="_blank">%s</a>' % (obj.get_absolute_url(), _("view"))
        edit_link = '<a href="%s">%s</a>' % (
            reverse('admin:ninecms_node_change', args=(obj.id,)), _("edit"))
        return ' | '.join((view_link, edit_link))
    operations.allow_tags = True

    def node_publish(self, request, queryset):
        """Bulk action: set status True on every selected node.

        :param request: the request object
        :param queryset: the Node queryset
        :return: None
        """
        count = queryset.update(status=True)
        messages.success(request, _("%d nodes successfully updated as published.") % count)
    node_publish.short_description = _("Mark selected nodes status as published")

    def node_unpublish(self, request, queryset):
        """Bulk action: set status False on every selected node.

        :param request: the request object
        :param queryset: the Node queryset
        :return: None
        """
        count = queryset.update(status=False)
        messages.success(request, _("%d nodes successfully updated as not published.") % count)
    node_unpublish.short_description = _("Mark selected nodes status as not published")

    def node_promote(self, request, queryset):
        """Bulk action: set promote True on every selected node.

        :param request: the request object
        :param queryset: the Node queryset
        :return: None
        """
        count = queryset.update(promote=True)
        messages.success(request, _("%d nodes successfully updated as promoted.") % count)
    node_promote.short_description = _("Mark selected nodes as promoted")

    def node_demote(self, request, queryset):
        """Bulk action: set promote False on every selected node.

        :param request: the request object
        :param queryset: the Node queryset
        :return: None
        """
        count = queryset.update(promote=False)
        messages.success(request, _("%d nodes successfully updated as not promoted.") % count)
    node_demote.short_description = _("Mark selected nodes as not promoted")

    def node_sticky(self, request, queryset):
        """Bulk action: set sticky True on every selected node.

        :param request: the request object
        :param queryset: the Node queryset
        :return: None
        """
        count = queryset.update(sticky=True)
        messages.success(request, _("%d nodes successfully updated as sticky.") % count)
    node_sticky.short_description = _("Mark selected nodes as sticky")

    def node_unsticky(self, request, queryset):
        """Bulk action: set sticky False on every selected node.

        :param request: the request object
        :param queryset: the Node queryset
        :return: None
        """
        count = queryset.update(sticky=False)
        messages.success(request, _("%d nodes successfully updated as not sticky.") % count)
    node_unsticky.short_description = _("Mark selected nodes as not sticky")

    def node_reset_alias(self, request, queryset):
        """Bulk action: blank the url alias of every selected node.

        Each node is saved individually so alias regeneration logic in save() runs.
        :param request: the request object
        :param queryset: the Node queryset
        :return: None
        """
        for selected_node in queryset:
            selected_node.alias = ''
            selected_node.save()
        messages.success(request, _("%d nodes successfully updated.") % len(queryset))
    node_reset_alias.short_description = _("Reset url alias for all selected nodes")

    def check_perm(self, request, obj, perm):
        """Check whether the user holds the given permission for the node.

        :param request: the request object
        :param obj: the Node object, if any
        :param perm: the permission to check: has meaning for values 'change', 'delete'
        :return: bool
        """
        if not obj:
            return request.user.has_perm('ninecms.%s_node' % perm)
        allowed_types = get_objects_for_user(request.user, 'ninecms.%s_node_pagetype' % perm, klass=models.PageType)
        return obj.page_type in allowed_types

    def has_change_permission(self, request, obj=None):
        """Check user permission on Node change.

        :param request: the request object
        :param obj: the Node object
        :return: bool
        """
        return self.check_perm(request, obj, 'change')

    def has_delete_permission(self, request, obj=None):
        """Check user permission on Node delete.

        :param request: the request object
        :param obj: the Node object
        :return: bool
        """
        return self.check_perm(request, obj, 'delete')

    def get_actions(self, request):
        """Drop the bulk delete action for users lacking the delete permission.

        If the user sees the actions, then he sees the list, so he already has the change perm.
        :param request: the request object
        :return: actions list
        """
        available = super(NodeAdmin, self).get_actions(request)
        if not request.user.has_perm('ninecms.delete_node') and 'delete_selected' in available:
            del available['delete_selected']
        return available

    def get_queryset(self, request):
        """Limit the change list to nodes of page types the user may change.

        :param request: the request object
        :return: Node queryset
        """
        base = super(NodeAdmin, self).get_queryset(request)
        allowed_types = get_objects_for_user(request.user, 'ninecms.change_node_pagetype', klass=models.PageType)
        return base.filter(page_type__id__in=allowed_types.values_list('id'))

    def get_form(self, request, obj=None, **kwargs):
        """Attach the requesting user to the form class before returning it.

        :param request: the request object
        :param obj: the current node if any
        :param kwargs: keyword arguments
        :return: overridden form
        """
        node_form = super(NodeAdmin, self).get_form(request, obj, **kwargs)
        node_form.current_user = request.user
        return node_form

    def get_fieldsets(self, request, obj=None):
        """Provide different fieldsets depending on user level.

        Superusers additionally see the alias field and the redirect flag.
        :param request: the request object
        :param obj: the current node if any
        :return: a dictionary of fieldsets
        """
        if request.user.is_superuser:
            node_fields = ('page_type', 'language', 'alias', 'title')
            management_fields = ('status', 'promote', 'sticky', 'redirect', 'user',
                                 'created', 'original_translation', 'weight')
        else:
            node_fields = ('page_type', 'language', 'title')
            management_fields = ('status', 'promote', 'sticky', 'user',
                                 'created', 'original_translation', 'weight')
        return (
            ("Node", {'fields': node_fields}),
            ("Body", {'fields': ('highlight', 'summary', 'body', 'link')}),
            ("Node management", {'fields': management_fields}),
            ("Terms", {'fields': ('terms',)}),
        )

    def get_changeform_initial_data(self, request):
        """Set initial values for the add form.

        :param request: the request object
        :return: a dictionary with initial values
        """
        return {'user': request.user, 'promote': False, 'sticky': False, 'redirect': False}

    def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
        """Restrict the page type choices to permitted types; restrict the user
        field for non-superusers to themselves.

        :param db_field: the database field name
        :param request: the request object
        :param kwargs: keyword arguments such as the queryset
        :return: parent method return
        """
        if db_field.name == 'page_type':
            addable_types = get_objects_for_user(request.user, 'ninecms.add_node_pagetype', klass=models.PageType)
            if len(addable_types) < 1 and not request.user.is_superuser:
                raise PermissionDenied
            kwargs['queryset'] = addable_types
        elif db_field.name == 'user' and not request.user.is_superuser:
            kwargs['queryset'] = User.objects.filter(pk=request.user.pk)
        return super(NodeAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)

    def formfield_for_choice_field(self, db_field, request=None, **kwargs):
        """Restrict the language choices to the languages enabled in settings.

        :param db_field: the database field name
        :param request: the request object
        :param kwargs: keyword arguments such as the queryset
        :return: parent method return
        """
        if db_field.name == 'language':
            kwargs['choices'] = (('', '---------'),) + settings.LANGUAGES
        return super(NodeAdmin, self).formfield_for_choice_field(db_field, request, **kwargs)
@admin.register(models.MenuItem)
class MenuItemAdmin(MPTTModelAdmin):
    """Admin change list for menu items (MPTT tree)."""
    list_display = ('title', 'language', 'path', 'disabled', 'weight')
    search_fields = ['path', 'title']

    def formfield_for_choice_field(self, db_field, request=None, **kwargs):
        """Restrict the language choices to the languages enabled in settings.

        :param db_field: the database field name
        :param request: the request object
        :param kwargs: keyword arguments such as the queryset
        :return: parent method return
        """
        if db_field.name == 'language':
            kwargs['choices'] = (('', '---------'),) + settings.LANGUAGES
        return super(MenuItemAdmin, self).formfield_for_choice_field(db_field, request, **kwargs)
# noinspection PyMethodMayBeStatic
@admin.register(models.ContentBlock)
class ContentBlockAdmin(admin.ModelAdmin):
    """Admin change list for content blocks."""
    list_display = ('name', 'type', 'node', 'menu_item', 'signal', 'page_types_list')
    list_filter = ['type']
    filter_vertical = ('page_types', )

    def page_types_list(self, obj):
        """Custom column linking every page type that uses this block as an element.

        :param obj: a block object
        :return: column output
        """
        links = [
            '<a href="%s">%s</a>' % (reverse('admin:ninecms_pagetype_change', args=(page_type.id,)), page_type)
            for page_type in obj.page_types.all()
        ]
        return ', '.join(links)
    page_types_list.allow_tags = True
    page_types_list.short_description = _("Page types")
@admin.register(models.TaxonomyTerm)
class TaxonomyTermAdmin(MPTTModelAdmin):
    """Admin change list for taxonomy terms (MPTT tree)."""
    list_display = ('name', 'description_node', 'weight')
    filter_vertical = ('nodes', )
# Global admin-site branding for 9cms: header, window title and index template.
admin.site.site_header = _("9cms administration")
admin.site.site_title = "9cms"
admin.site.index_template = 'admin/ninecms/index.html'
| 41.349296 | 116 | 0.637373 |
73f23b53888497de3bab3a71d9bd788505b06db6 | 118,239 | py | Python | resources/gethisco.py | LvanWissen/saa-bevolkingsregisters | 07beb9b5a7a171c1aa07c5a86d691ada51b5fef4 | [
"MIT"
] | null | null | null | resources/gethisco.py | LvanWissen/saa-bevolkingsregisters | 07beb9b5a7a171c1aa07c5a86d691ada51b5fef4 | [
"MIT"
] | null | null | null | resources/gethisco.py | LvanWissen/saa-bevolkingsregisters | 07beb9b5a7a171c1aa07c5a86d691ada51b5fef4 | [
"MIT"
] | null | null | null | """sparql
SERVICE <https://api.druid.datalegend.net/datasets/iisg/HISCO/services/HISCO/sparql> {
?hiscoOccupation a schema:Occupation ;
schema:name ?occName ; # NB Dit matcht op hoofdletters + taal.
schema:occupationalCategory ?hiscoCategory .
?hiscoCategory schema:codeValue ?hiscoCode ;
schema:name ?hiscoCategoryName .
}
"""
occupations = ["occupation",
"oppasster",
"conducteur en tapper",
"houtfactor",
"naaimoeder",
"groentewinkelier",
"verenzoekster",
"nachtwacht",
"sjouwer aan de stad",
"kookster",
"secretaris Portugees-Israëlietische Gemeente",
"boekhouder",
"chirurgijnsleerling",
"sledewerker",
"handschoenwaster",
"scheepssmid",
"korsettennaaister",
"klerk bij de spoorwegen",
"mattenventer",
"koopman in bloemen",
"kermisreiziger",
"siroopsuikerbakker",
"bewaarster tweede klasse",
"onderwijzer",
"kunstbloemenfabrikant",
"huidenvetter",
"kantoorloopster",
"pachter tapper en biljart",
"commissionair in veren",
"minnekind",
"diamantsnijdersleerling",
"gepensioneerd eerste luitenant",
"gepensioneerd kok bij de Marine",
"loopknecht",
"logement en koffiehuis",
"wasman",
"adjunct-commies",
"leerling Marine",
"pandjeshoudster",
"fabrikant in witte zeep",
"smid en kleermaker",
"kruier en schoorsteenveger",
"ezellinnenbaas",
"aardappelwinkelier",
"dekenstikster",
"arbeider spoorwegen",
"baardscheerdersknecht",
"koopman in vee",
"in effecten",
"geldophaler",
"kantenwasvrouw",
"student te Utrecht",
"commissionair in manufacturen",
"aanspreker en bode",
"brouwersknecht",
"boekwinkelierster",
"bediende-pupil",
"scheepsbouwmeester",
"winkelierster in komenij",
"oudroestman",
"spoorbediende",
"handelsleerling",
"doctor in de genees- heel- en verloskunde",
"oud-ijzerverkoper",
"kruier en huisbewaarder",
"pandhuishoudster",
"olieslijter",
"kalkmeester",
"postbeambte",
"kousenwever",
"commies-ontvanger",
"geëmployeerde bij de Rhijnspoorweg (Rijnwaag)",
"besteller bij de Hollandsche Spoorweg",
"papierdozenmaker",
"lakenkoper",
"in bokking",
"bestuurder van het Gesticht",
"koopman in drogerijen",
"derde stuurman",
"bierbrouwersknecht",
"groenwinkel",
"secretaris O.I. Mij. van Admin. in lijfrenten",
"negotieur",
"bagagemeester",
"bakkersleerling",
"diamantslijper en tapper",
"pluimenfabriek",
"boekhouder van een rederij",
"brieventassenmaker",
"militair gepensioneerd",
"wijnkoper en tapper",
"gasblusser",
"aardappelverkoopster",
"oppasser van de kaas",
"winkelierster en baker",
"roosjesslijper",
"blokwerker",
"sigarenwinkelier",
"sergeant-majoor",
"beddenkoper",
"mangelhoudster",
"smid bij de Rhijnspoorweg",
"ziekenknecht",
"tapper en molenaar",
"pleistergieter",
"gouvernante",
"schoenwinkelier",
"assistent-commies",
"muzikant",
"gepensioneerd brievenbesteller",
"vrachtrijder Hollandsche Spoorwegmaatschappij",
"turf- en houtverkoopster",
"architect en makelaar",
"waagmaker",
"portier bij een suikerfabriek",
"verzorgster",
"meubelstoffeerder",
"militair",
"muzikante",
"turfloodsknecht",
"hoofdcommies bij de rijksbelastingen",
"wolwerker",
"professor",
"magazijnbewaarder",
"commissionair en reiziger",
"directeur stadsdrukkerij",
"winkelier in mosterd",
"glanzer",
"pottenbakkersknecht",
"dokterskoetsier",
"plaatzager",
"secretaris Amstelland",
"bode bij de burgerlijke stand",
"schoenpoetser",
"winkelier in lakens",
"spiegelmakersknecht",
"schoenmakersgezel",
"bordenverkoper",
"aan de korenmarkt",
"vroeger zeeman",
"kerkelijk geëmployeerde",
"kinderjuffrouw",
"goudsmidsleerling",
"rijksschatter en conciërge",
"controleur bij de Bank van Lening",
"gegageerd turfdrager",
"turfstekersknecht",
"apotheker en koopman",
"timmermans- en molenknecht",
"supercarga",
"oppasser",
"werkman",
"baanwerker",
"draagbandenmaker",
"koopman en kantoorbediende",
"rookverdrijver",
"pontpachter Westerdok",
"krantenbrenger",
"marinier",
"interne geneesheer",
"marktknecht",
"muziekonderwijzer",
"glasfabrikant",
"bode bij Artis",
"zagerij en houtkoperij",
"meubelhandelaar",
"kurkensnijdster",
"leerling in de bouwkunst",
"knecht bij de Hollandsche Spoorwegmaatschappij",
"slijter",
"brouwersmolenaar",
"logementhouder en tapper",
"gasfabriek",
"blekerij",
"in goud en zilver",
"huisonderwijzeres",
"oudroestkoper en verkoper",
"slijter in drank",
"trompetter bij de dragonders",
"vergulder en winkelier",
"winkelbediende en schoenmaker",
"haringventer",
"metselaar en makelaar",
"kruierij",
"hout- en turfkoopman",
"waterschepperij",
"instructeur zwemschool",
"handwerkster",
"slepersbaas",
"geweermakersleerling",
"verschieter",
"vleeshouwer",
"mr. scheepssloper",
"melkverkoper",
"opzichter van de landsgebouwen",
"huisbaas",
"behanger",
"landmeter",
"tapijtfabrikant",
"opzichter waterleiding",
"secretaris van het Grootboek",
"binnenvaarder",
"schoenbewerkster",
"spijsmoeder",
"officier der infanterie",
"broodweger",
"kleermaker en scheerder",
"assuradeur en koopman",
"koopman in eau de Cologne",
"laarzenmaker en herenknecht",
"ambtenaar bij de Nederlandsche Handelmaatschappij",
"vleesinkoper",
"commissionair spiegelfabriek",
"aardappelschillenloopster",
"steendraagster",
"parser",
"wasserij",
"bouwopzichter",
"stijfmoeder",
"mangelstrijkster",
"stovenmaker",
"waarnemend rijksontvanger",
"sjouwerman en zeeman",
"redacteur",
"verfkopersbediende",
"kapitein en plaatsvervangend majoor",
"stadsbaggerman",
"opzichter fabriek Van Gend & Loos",
"winkelier en fabrikant",
"uitgediende knecht",
"naaistersleerlinge",
"weefster",
"koopman in tingoed",
"verificateur",
"zaagselkoopman",
"turf- en houtwinkeltje",
"aandraagster",
"machinist rookverdrijver",
"catechismuslerares",
"employé bij het loodswezen",
"gezondheidsbakker",
"courantenverkoper",
"besteller bij de Rhjnspoorweg",
"leerling timmerman",
"weeskind",
"sjouwer landswerf",
"hondenkoopster",
"bieraffaire",
"spijkersmid",
"scheepsagent",
"raffinadeursknecht",
"eerste luitenant",
"pakhuisbediende",
"procuratie",
"turf en hout potten en pannen",
"groenteventer",
"waterschepper",
"verkopersknecht",
"bediende in Artis",
"scheepsbeschieter",
"boekopruimer",
"confituur",
"waarneemster",
"groentemansknecht",
"logementknecht",
"boekbindersmaat",
"slaapstee- en sjouwerman",
"smederij",
"strohandelaar",
"hoedenfabrikeur",
"geelgieter",
"stokviskoper",
"handelsessayeur",
"gruttersleerling",
"verlakkersknecht",
"beambte bij het Ministerie van Financiën Wenen",
"in water en vuur",
"tweede luitenant der infanterie Oostindisch leger",
"korenmeester",
"kunstkoper",
"suikermaler",
"uitdrager en barbier",
"banketbakkersknecht",
"gaarkeukenhoudster",
"tuinman",
"onderwijzer aan het gymnasium",
"toezichthouder Nederlands Hervormde Gemeente",
"kledenmaker",
"lakei",
"scheepsheelmeester",
"kamferstoker",
"koopman in stokken",
"kleerkoopster",
"bediende van de handel",
"kuiper",
"stroopbrander",
"stoomwerkersknecht",
"azijnmaker",
"onderwijzeres in talen",
"kantoorloper",
"knecht bij een aardappelwinkel",
"ziekenvader",
"pakhuiskistenmaker",
"laarzenwinkel",
"directeur asssurantie",
"Rhijnspoorweg",
"waagknecht op de varkenmarkt",
"roeier van het Zeerecht",
"puinwerker",
"zilversmid en kashouder",
"visventster",
"baardscheerster",
"gistkoopster",
"kapitein bij de generale staf",
"visvrouw",
"slachtersknecht",
"krankbezoeker",
"metselaarsleerling",
"koopman in tabak",
"leesbibliotheekhoudster",
"visman",
"goudslagersknecht",
"proponent",
"binnenvader",
"schuitenmakersknecht",
"in manufacturen",
"banketbakker",
"steenkolenknecht",
"tabaksmaker",
"dampbadhouder",
"kastelein in een sociëteit",
"koffieverlezer",
"koekjesbakker",
"directrice",
"in appelen",
"waagdrager en tabakswinkelier",
"reizend kleermaker",
"mr. stukadoor",
"landbouwersleerling",
"zaakwaarnemer en boekhouder",
"beerman",
"pianostemmer",
"lid Ged.Staten Noord-Holland",
"winkelier in papier",
"baakster",
"olie en traan",
"tappersbediende",
"ondernemer van publieke verkopingen",
"stovenman",
"administrateur van een plantage",
"voorzanger",
"gruttenkoopman",
"koopman en reiziger",
"in loterij",
"tinnegieter",
"winkelier en postbeambte",
"oudroest",
"behangseldrukker",
"voddenverkoopster",
"in aardappelen",
"kamenier",
"koopman in garens",
"lopend koopman",
"winkelier in hoeden",
"aardappels",
"intendant bij de quarantaine",
"viltbereider",
"veerman",
"stoombootbediende",
"ijzerwerker",
"ijker der zandschepen",
"groenmansknecht",
"diamantzetter",
"huismeester",
"fabriek van brandstof",
"assurantiën",
"koffiemolenmaker",
"rijkscommies",
"gagement",
"vleesdrager",
"ambtenaar bij het postkantoor",
"pensioen van de stad",
"venter in kunstbloemen",
"soldaat",
"kandidaat-notaris",
"cantante",
"reizend op de kermis",
"koopman in gist",
"officier van administratie bij de Marine",
"politieambtenaar",
"danseuse",
"pettenwinkelierster",
"in zoute vis",
"gepensioneerd ritmeester",
"leerkoper en schachtenmaker",
"in turf en hout",
"muilenmaker",
"kantenwasser",
"zout- en zeepgrossier",
"tabaksfabriek",
"goud- en zilverkashoudster",
"vlotter en tapper",
"kuipster",
"bestedeling",
"dienstbaar",
"lavementzetter",
"spekslagersknecht",
"sloeproeier",
"muziekinstrumentmaker",
"stoker op 's rijks boot",
"leermeester diaconieschool",
"mutsenmaker",
"kruier en aanspreker",
"koopman in boter en turf",
"winkelierster in aardewerk",
"spoorwegknecht",
"magazijnbediende",
"geëmployeerde bij de posterijen",
"touwverkoper",
"vismarkt",
"fastkopersknecht",
"ambtenaar en gedelegeerde loterij",
"theologant",
"rooms-katholiek pastoor",
"kopergraveur",
"rijstkoper",
"stuurman Zaanse stoomboot",
"volmolenaar",
"paardenhaarspinner",
"hoedenmakersknecht",
"zangmeester",
"militaire dienst als zeeman",
"commies bij het bureau Waarborg",
"leerling bouwkunde",
"sjouwer",
"kleine-kinderschoolhoudster",
"borduurwerk",
"kledingwinkelierster",
"courantenvrouw",
"directeur raffinaderij",
"koopman in oude kleren",
"gepensioneerd smidsbaas",
"winkelier in matten en schuiers",
"boek-inbinder",
"winkelierster en dienstbode",
"kunsthandelaar",
"kapelmeester",
"stukadoorsknecht",
"naaister en dienstbaar",
"commissaris der stoomboot",
"kapitein der mariniers",
"directeur van een paardenpret",
"schoenmakersleerling",
"turf en hout",
"molenaarster",
"gaarster",
"oud-ijzerkoper",
"bontwerker en winkelier",
"stoombootwerker",
"bokkenhouder",
"schoolhoudster",
"waker",
"agent bij de Javasche Bank",
"leerling tabaksaffaire",
"rooimeester",
"rector",
"kastdraaideurmaker",
"rijkswijnroeier",
"mattenmaker",
"koekbakker",
"rustend turfdrager",
"soldaat Oostindisch leger",
"rooms-katholiek geestelijke",
"scheepspomp- en blokkenmaker",
"vleesslager",
"lijkendrager",
"verschietster",
"wijnkoper (wijnkuiper)",
"kunstkoper en schilder",
"mechanist",
"schoenmaker en grondeigenaar",
"aan de turfmarkt",
"fabrikant in lakwerken",
"zuster van liefde",
"knecht bij een loodgieterij",
"commissionair in effecten",
"directeur rijksbelastingen",
"onderwijzeres",
"eerste stuurman",
"winkelier en kantoorbediende",
"postmeester",
"wisselaar",
"leerlinge",
"wiskunde-onderwijzer",
"tapper en hovenier",
"kok in de kweekschool",
"inbrenger",
"zilverkashouder",
"diamantsnijder en sjouwer",
"rijkstelegrafist",
"bakster",
"eigenaar van percelen",
"behangseldrukster",
"zakkennaaier",
"voerman",
"vogelkoper",
"groenteman",
"tafelbediende",
"stoker bij de Hollandsche Spoorwegmaatschappij",
"winkelier in manufacturen",
"stoelenmaker",
"diamantwerker",
"glaskoper",
"gasfabrieksbediende",
"broodbakker",
"adjudant",
"geëmployeerde bij het kadaster",
"notarisleerling",
"schuurgoed",
"portier aan de stoomfabriek",
"gealimenteerd in Brentano",
"politiedienaar",
"courantenloper",
"suppoost",
"hovenier",
"begrafenisfondsbode",
"aardwerker",
"kwekeling",
"straatreiniger",
"stukadoor en winkelier",
"bode bij de rechtbank",
"paardenhaarbereider",
"filtreermaker",
"zandschipper",
"boterkopersknecht",
"koopman in aardewerk",
"waagdragersknecht",
"commandeur op het Matrozen-Instituut",
"pantoffelvlechtster",
"chirurgijn en dentist",
"opperman en nachtwaker",
"kantoorboekwinkel en -verkoper",
"ordinaris",
"verhuurder",
"kurkenstekersknecht",
"zeepmaker",
"emeritus predikant",
"leerling",
"diamantsnijder",
"adelborst eerste klasse",
"verkoopster",
"bezoekmoeder der bestedelingen",
"vuilniswerker",
"majoor der rijkspolitie",
"mangelvrouw",
"jollenvaarder",
"dentist",
"bibliotheekhouder en winkelier",
"in komenijswaren",
"meesterknecht sigaren en tabak[...]",
"professor in talen",
"hondendokter",
"commies bij de Marine",
"klederwasser",
"koek- en banketbakker",
"waakster",
"gepasporteerd",
"muziekgraveur",
"landbouwarbeider",
"bakker",
"strohoedenfabrikant",
"in juwelen",
"sluiswachter",
"houtzaagmolenaarsknecht",
"directeur der W.I. Mij.",
"stoker",
"bakkersbehulp",
"pettenverkoper",
"kleermakersjongen",
"geëmployeerde bij het Ministerie van Financiën",
"jolleman",
"aan de academie",
"helpster",
"koorddraaier",
"huisopzichter",
"knecht in een slagerij",
"boekwinkel",
"gemeente-oppasser",
"stedelijk geëmployeerd",
"vleesrookster",
"werkman in tabak",
"hoefsmid",
"schepersknecht",
"commissaris veer",
"conducteur bij de posterijen",
"werkman bij de stoombootmaatschappij",
"letterkundige en predikant",
"tractaatrondbrenger",
"zeegezel",
"knopenmaker",
"knecht bij een steenhouwer",
"kandidaat",
"houtbewerker",
"platenkleurder",
"loodwitwerker",
"nagelmaker",
"verfhoutmaler",
"inspecteur bij de Marine",
"turf- en houtaffaire",
"opperstuurman",
"werkzaam bij de gasfabriek",
"directeur Onderlinge.Brand Waarborg Maatschappij",
"horlogier",
"turfvulster",
"boekkopersknecht",
"winkelier in schuiers",
"commissionair in drogerijen",
"luitenant der mariniers",
"tabakswerkster",
"kuiper en kistenmaker",
"arbeider Hollandsche IJzeren Spoorwegmaatschappij",
"opzichter stadsbestrating",
"aanspreker en tapper",
"ambtenaar bij de stedelijke belastingen",
"vuurwerkmaker",
"bakkersnoodhulp",
"werkster",
"koetsier",
"stedelijk heelmeester",
"winkelier in scheepsbehoeften",
"paardrijder",
"barbiersbediende",
"pantoffelvlechter",
"kruideniersbediende",
"vleeshouwster",
"klerk bij de Houtvesterij",
"timmermansjongen",
"tweede luitenant der artillerie",
"assistent-ziekenvader",
"veehandelaar",
"scheepsjager",
"fruitverkoopster",
"kleermakersgezel",
"hout- en turfverkoper",
"secretaris der Nederlandsche Bank",
"mr. metselaar",
"plaatslijper",
"ambtenaarster",
"pettenwinkel",
"snoepkelder",
"spekslagersgezel",
"lid van het gerechtshof",
"scholiere",
"bladschrijver",
"slachtersleerling",
"kleine negotiatie",
"muziekhandelaar",
"tapper en stalhouder",
"inlands kraamster",
"winkelier in linnengoed",
"manufactuurverver",
"groenknecht",
"makelaar cargadoor enz.",
"koopman in aardappelen",
"landeigenaar",
"werktuigkundige",
"handschoenmaker",
"scheepsgezagvoerder",
"korenwerkster",
"surnumerair Handelmaatschappij",
"oppasser in een bakkerij",
"turf- en houtnering",
"geëmployeerde bij 's Rijks Schatkist",
"ambtenaar en makelaar",
"kelderknecht",
"tuinleerling",
"secondant",
"tegelaar",
"polderwerker",
"artsenijmenger",
"kok",
"snarenmaker",
"bode bij een tractaatgenoodschap",
"pottenwinkel",
"kuipersaffaire",
"papierverkoper",
"broekenmaker",
"boekverkoper",
"stedelijk opzichter",
"klein-pandhuishoudster",
"courantier",
"snijder",
"koekjeswerker",
"stadsbestedeling",
"naaischool",
"timmerman en slotenmaker",
"gasfitter",
"gepensioneerd officier",
"modelleur",
"ellenmaker (n. ellenmaker)",
"gepensioneerd opzichter",
"gepensioneerd majoor der artillerie",
"goudsmidsgezel",
"kolenwerker",
"jongensmoeder",
"substituut-griffier rechtbank",
"gepensioneerd visser",
"kleermaker en aanspreker",
"aardappelschilster",
"ziekenmeester",
"schuurder van diamantschijven",
"kosthoudster van meisjes",
"sigarennering",
"schavenmaker",
"wijnkopersbediende",
"orgelist",
"koophandel",
"rijksveearts",
"bierhuishoudster",
"schoolleerling",
"brigadier",
"hoofdcommies bij het Entrepot",
"reiziger in straatnegotie",
"assuradeur en wethouder",
"op het matrozeninstituut",
"ophaler van ziekenbussen",
"zeevaartkwekeling",
"grutterijhoudster en winkel",
"cipiersknecht",
"water- en vuuraffaire",
"kunstdraaier",
"toetuiger",
"beddenverhuurder",
"logementbeheerder",
"pottenventer",
"directeur Hollandsche Schouwburg",
"loodgieter en militair",
"rijder",
"luitenant",
"gevangenisbewaarder",
"dagbladuitgever",
"stalhouder",
"schildersbaas",
"sigarenpakker",
"zaalmoeder meisjes",
"pottenbakster",
"aan de posterij",
"fabrikant van stoomwerktuigen",
"lappenkramer",
"slaapbaas",
"kantoorbediende",
"schoenmakersjongen",
"bij de Bank van Lening",
"dienstmeisje",
"repareerder van uurwerken",
"in manufacturen garen enz.",
"leerverwerker",
"melkslijter en winkelier",
"dienstbaar bewaarschool",
"schavenslijper",
"passementmaker",
"boterverkopersbediende",
"vijlenkapper",
"stadsmodderman",
"koopman en sigarenmaker",
"spanen-tabaksdozenmaker",
"kwekeling bij het onderwijs",
"ordinaris tafelhouder",
"naaister voor gezelschap",
"boekverkoopster",
"logementbediende",
"arts-assistent",
"turfkopersknecht",
"commies bij de stad",
"verbandvader Binnengasthuis",
"rustend geneesheer",
"voermansknecht (voormansknecht)",
"turfsjouwer",
"tabaksverkopersbediende",
"binnenmoeder van het weeshuis",
"fabrikant in lampenkousen",
"instrumentmakersleerling",
"blikslagersknecht",
"courantenbrenger",
"opporder",
"schouwerman",
"wagenmaker",
"kuikenmester",
"rijkscommies in- en uitgaande rechten",
"katoenwerkster",
"instrumentmaker",
"rentenier",
"boekhouder en commissionair",
"directeur van het armkantoor",
"bleekster",
"binnenlands commissionair",
"kruier en nachtwaker",
"adelborst",
"oliekoekbakster",
"kantoorboekbinder",
"banketwerker",
"werkman bij het Rhijnspoor",
"student en militair",
"stoommolenaar",
"looiersknecht",
"scheepskok",
"glanster",
"chirurg",
"gasfabriek De Bruin & zoon",
"directeur Nederlandsche Rhijnspoorweg",
"puinvaarder",
"logement",
"militiecommissaris",
"tekenmeester",
"winkelierster in hout en turf",
"ijzergieter",
"hovenierster en kroeghoudster",
"kantoorwerker",
"kastelein in het militaire logement",
"portier Buitengasthuis",
"tabakskoopster",
"eigenaar",
"schoenmakersbediende",
"schuitenvoerder en tapper",
"rentmeester",
"directeur der zwemschool",
"koopman in manufacturen",
"zetter",
"verse-waarverkoopster",
"schildersleerling",
"broekdraagbandenmaakster",
"ritmeester tweede regiment dragonders",
"in melk en boter",
"gepensioneerd tamboer",
"zadelmakersjongen",
"bilder",
"wattenfabriek",
"leerling in de koophandel",
"inspecteur gasfabriek",
"fabrikant van sigaren",
"hoer",
"stoomstoker",
"ambtenaar Publieke Werken",
"likdoornsnijder",
"stadstimmertuinwerker",
"uitdragerij",
"besloten winkel",
"chirurgijn-majoor",
"officier",
"gasfabriekwerker",
"gepensioneerd commies stedelijke belastingen",
"wattenfabrikant",
"zeilenmaker en korendrager",
"werkman bij de Marine",
"wattenwerkster",
"oppasser van heren",
"kostschoolhouderes",
"grenadier",
"besteedster",
"geëmployeerde bij de politie",
"stadswerker",
"in Z.M. zeedienst",
"steenhouwer",
"grafdelver",
"snoeptafel",
"lapideurgezel",
"goud- en zilverwinkelierster",
"goud- en zilversmid",
"filiaalhoudster",
"hulp- en godsdienstonderwijzer",
"catechiseermeester",
"binnenvader weeshuis",
"chemisch leerling",
"bij de Genie",
"tapper en slijter",
"commissaris bij een veer",
"jagersknecht",
"wegarbeider",
"koopman in parfumerieën",
"zadelmakerij",
"schildersaffaire",
"Rijnschipper",
"kapelaan",
"koopman en winkelier in olie",
"boerin",
"schippersmatroos",
"stotteraar-geneesheer",
"besloten winkelierster in manufacturen",
"werkster molen De Nachtegaal",
"mr. smid",
"open-tafelhouder",
"keukenmoeder",
"kolonel",
"melkboer",
"zeeschout",
"lepelgieter",
"scheepsmakelaar",
"bruidsmutsmaker",
"oliefabriek",
"cargadoor en makelaar",
"gasthouder",
"wagenverhuurster",
"kleermaker",
"controleur der belastingen",
"koopman en assuradeur",
"venter en diamantslijper",
"winkelierster",
"in negotie",
"onderwijzer stadsarmenschool",
"bierstekerij",
"stadsmetselaarbaas",
"broodverkoper",
"kruidenman",
"leemvormer",
"besteller van dienstboden",
"dienstbode bij Moldsen",
"fabrieksknecht",
"kapitein",
"eigen middelen",
"reizend kunstenaar",
"kwartiermaker bij de Marine",
"zevenmaker",
"groentewerker",
"kleermaker bij de Marine",
"tweede luitenant-paardenarts",
"opzichter bij de stadswerken",
"assistent-scherprechter",
"kapitein zeevarend",
"oud-zeeofficier",
"hulponderwijzeres bewaarschool",
"turf- en houtwinkel",
"lampenmaker",
"fruitwinkel",
"kunsttandinzetter",
"komenijswaren",
"klerenwasvrouw",
"milicien",
"machinist tweede klasse",
"kamerbehangersknecht",
"portretmaker en koopman",
"huisleerling",
"garen lint en houtwaren",
"schoenenbaas",
"nachtwacht en arbeider",
"schilderswinkel",
"viltmaker",
"aan het hospitaal",
"hoofdcommies",
"tabakshandelaar en winkelier",
"koopman in parapluies",
"costumière",
"suikerbakker",
"zadelmaker en winkelier",
"loonzager",
"griffier van het kantongerecht",
"voddenkoper",
"in garen en band",
"fabriek in olie",
"hovenier en bloemist",
"koopman in olie",
"scholier",
"huisbewaardersknecht",
"winkelierster in fruit",
"boekhoudersbediende",
"gasthuismin",
"haarwever",
"koopman in beestenvoer",
"toneelzanger",
"koopman in goederen",
"assistent bij stadsagent",
"steendrukkersknecht",
"boekbindersbediende",
"scheepsarbeider",
"sigarenzaak",
"schoenboorster",
"scheepslakker",
"expert",
"kolonel en rentenier",
"bureau Handelmaatschappij",
"taalschoolmeester",
"kistenmakersknecht",
"beeldsnijder",
"directeur der hoedenfabriek",
"kolonel in het Oostindisch leger",
"bakkerin",
"schafthuishoudster",
"in tabak",
"schoenmaker en koopman",
"stadsloodgieter",
"mouter",
"tuinknecht",
"meubelbekleder",
"fabrikant van eau de Cologne",
"linnenhandelaar",
"mattenmaakster",
"kippen",
"Schermmeester",
"onderwijzersassistent",
"negotie in allerlei",
"heelkundige",
"militaire dienst",
"graveur en directeur Kunstacademie",
"bewaarschoolhoudster",
"ventster met haring",
"assistent-onderwijzer",
"taalmeester en translateur",
"tekenaar",
"turfwerker",
"rechtsgeleerde",
"notaris",
"turfhever en koopman",
"landbouwersknecht",
"naaister en werkster",
"werker op vlotten",
"venter in groenten",
"koopmanswinkel",
"schoolonderwijzeres",
"koningsdienst",
"verfkopersknecht",
"sjouwer en opperman",
"cachetsnijder",
"koopman in ijzer",
"stoker in de diamantslijpfabriek",
"korsettenmaker",
"smid en kachelmaker",
"pijpenmaakster",
"boerenmeid",
"aardappelhandel",
"suikermaler en kandijstoker",
"tweede luitenant-kwartiermeester",
"koopman in schoenen",
"generale regisseur",
"mr. bakker",
"koekjeskelder",
"graveur en lithograaf",
"winkelier in drogerijen en verfwaren",
"stadshorlogemaker",
"tapper en likeurstoker",
"blokkenmakersbaas bij de Marine",
"in steenkolen",
"gepensioneerd grensdouanier",
"gepensioneerd griffier",
"entrepotdok",
"bontwerknaaister",
"mandenmakersknecht",
"bank van lening",
"koffiewinkelierster",
"opperbesteller aan de stoomboot",
"officier der administratie eerste klas bij Marine",
"korendraagster",
"kammoeder voor de jongens",
"kamerknecht",
"pleegmoeder",
"koopvrouw in hooi en stro",
"fruitnering",
"kunstplaatdrukker",
"wachter spoorweg",
"houtgraveur",
"oliekoopman",
"doodgraver der gemeente",
"kroeghouder",
"klein beleenhuis",
"nachtronder Rhijnspoorweg",
"tabakswinkelierster",
"huishouder",
"gepensioneerd marktmeester",
"in haring en zoute vis",
"beestenkoopman",
"schatter",
"woont op schip",
"vertinner",
"surnumerair",
"schildersknecht",
"geattacheerd aan de redactie van het Handelsblad",
"in koffie en thee",
"handwerksman",
"kamerbehanger",
"scheepstuier",
"zeevaartscholier",
"substituut-griffier van justitie",
"in behangselpapieren",
"schaatsenmaker",
"deegkneder",
"bibliotheek",
"markeur",
"goudsmid en winkelier",
"winkel lederwaren",
"antiekwerker",
"juwelier",
"planter op Java",
"rijstpelmolenaar",
"lichtmatroos",
"rijkscommissaris",
"slagersgezel",
"directeur blindeninstituut",
"draadwerker",
"weger",
"dienstmaagd",
"loopjongen",
"hoveniersknecht",
"touwpluister",
"vaste middelen",
"koffiekelder",
"schoftkelder",
"huidenwerker",
"gravenmaker",
"vrouwenmoeder",
"oud-luitenant der genie",
"aspirant-notaris",
"kalkmeter",
"zeekapitein",
"spekslagersbediende",
"holdraaier",
"waagwerker",
"klerk bij Publieke Werken",
"witwerkersknecht",
"broodslijterij",
"krantenbezorgster",
"zwavelstokkenverkoopster",
"rustend zeeman",
"negotiant",
"stempelmaker",
"graankoper",
"lettergieter",
"groentekopersknecht",
"schilder en glazenwasser",
"portretteur",
"stoelenmaker en zeevarend",
"mangelhouder",
"winkel en fabrikant",
"groenboersknecht",
"boekverkopersleerling",
"koopvrouw in afval",
"botenmaker",
"lantaarnvulder",
"makelaar en convooiloper",
"vatter",
"directeur begrafenisonderneming",
"gegageerd van de stad Amsterdam",
"veenwerker",
"varkensslachtersknecht",
"onderbaas Publieke Werken",
"facteur",
"bij het gymnasium",
"in tabak en manufacturen",
"pakhuisbaas",
"sluiswachtersknecht",
"koopman en sjouwerman (snippermaker)",
"bibliothecaris",
"schouwer",
"marineofficier",
"gepensioneerd smid",
"zetbaas komenijswinkel",
"aan de stadsschouwburg",
"buitenlands commissionair",
"oesterkoper",
"lantaarnmaker",
"biljartmaker",
"kantoor- en reisbediende",
"koopman en platenzager",
"ambtenaresse",
"aanspreker en rouwwinkelier",
"fabrieksvader",
"reiziger en winkelier",
"baleinfabriek",
"kistenmaker",
"turfkruier",
"loodshulp",
"overhalersknecht",
"marktkruier",
"matrassenmaakster",
"student godgeleerdheid",
"kaarsenmaker en winkelier",
"lijfbediende",
"tegelbakker",
"lithograaf",
"translateur",
"zijdewindster",
"expediteur in zeezaken",
"galanteriekoopman",
"eierenkoopman",
"korenknecht",
"buffethoudster",
"artsenijmengersbediende",
"commies bij het Rijk",
"in fortepiano's",
"verlakker",
"viskoper en tapper",
"groenteboer",
"kruiwagenmaker",
"negotie in garen en band",
"gepensioneerd korendrager",
"werker bij de Handelmaatschappij",
"kunstlakkersknecht",
"slijtster van roggebrood",
"doekenwaster",
"augurkjesman",
"stroopbereider",
"gymnastiekonderwijzer",
"winkel in goud en zilver",
"zeejongen",
"plooier",
"stroopmakersknecht",
"wijnkeldersknecht",
"opperman en metselaar",
"boekbinder en aanspreker",
"commies bij de posterijen",
"ronder der verlichting",
"waterboerin",
"leerling tabak",
"bokkenslachter",
"schoenmaakster",
"courantenrondbrenger",
"koffiewerker",
"Nederlands consul te Marseille",
"horlogereparateur",
"fabrikant en winkelier van borstelwerk",
"koekbakster",
"suikerwerker",
"motboer",
"winkel in groenten",
"winkelierster in water en vuur",
"gebakverkoopster",
"koperslagersknecht",
"provenier",
"vrachtschipper",
"kastenmaker en aanspreker",
"bijouteriefabrikant",
"winkelier in tabak",
"eerste sluisman",
"metselaar en klompenmaker",
"logement reisbediende",
"stoombootmachinist",
"slachtster",
"suppoost bij de Huiszittende Armen",
"zeevarend",
"lichtmatroos Nieuwe Diep",
"stadsdrukkerij",
"horlogemaker",
"blikmaker",
"directrice verpleegzuster",
"tagrijn",
"belasting",
"gravenmakersknecht",
"blikslagersleerling",
"heelmeester",
"hellebaardier bij het Entrepot",
"klinisch student",
"sluisknecht",
"adjunct-commissaris",
"geneeskundige",
"karremansknecht",
"zeilenmakersleerling",
"omloper met klein gebak",
"pianist",
"zeilenmaker en commissionair",
"molenaar en houtkoper",
"restauratiehouder",
"in een fruitwinkel",
"collegehouder",
"kanaalloods",
"melkaffaire",
"goudkashouder",
"schrijnwerkersleerling",
"redemptorist priester",
"behangersleerling",
"koopman in schuurgoed",
"venter in zuurwaren",
"in petten",
"beeldhouwer en timmerman",
"gepensioneerd kapitein",
"voddeninkoper",
"koopman in wol",
"reiziger in manufacturen",
"korenfactor en makelaar",
"schermmeester",
"horlogemakersknecht",
"mandenmaker",
"barbierster",
"koekvrouw",
"turfsteker",
"koopman en koffiehuis",
"boer",
"vormmaker",
"werkmeisje",
"courantenbezorgster",
"matrozeninstituut",
"rijksroeier",
"leraar",
"directeur",
"koopman in hout en turf",
"knecht bij de verkopingen",
"bediende in het gasthuis",
"makelaar in steenkolen",
"boekhoudersgezel",
"eerste geneesheer",
"sigarenmaker en tapper",
"spinster",
"koopman en fabrikant",
"werkman in parapluies",
"hoedenschoonmaker",
"aan het nachtwezen",
"werktuigbediener",
"commissaris van politie",
"koopman in kaas",
"handelsagent",
"vleesjongen",
"papierhandelaar",
"melkslijter",
"militair op zee",
"koopman en schipper",
"zandkramer",
"muziekspeelster",
"rondbrenger van nieuwsbode",
"tabakspakker",
"decorateur",
"haarplukster",
"rustbewaarder",
"kistjesmaker",
"ontvanger van het geslacht",
"deurwaarder en aanspreker",
"straatwerker",
"strohoedenmaker",
"boerenleerling",
"zetschipper",
"tuigmaker",
"clown",
"nachtwacht en sjouwerman",
"gepensioneerd landswacht",
"koopman en stalhouder",
"houtsnijder",
"mastenmakersknecht",
"zerkenmaker",
"koffiebrander",
"groennering",
"in tuinzaden",
"kostgangster",
"reiziger in kramerijen",
"conciërge Handelmaatschappij",
"pantoffelmaker en aanspreker",
"arbeider Hollandsche Spoorweg",
"danser",
"schrijfmeester",
"warmoezierster",
"zeepziedersknecht",
"tamboer",
"lettergietersknecht",
"huisdoctor",
"directeur meelfabriek",
"gepensioneerd kapitein der infanterie",
"moddermolen",
"werker",
"dienstbode bij Kruse",
"manegeknecht",
"kappersknecht",
"pelterijwerker",
"beschuitwinkelierster",
"agent bij de Rhijnspoorweg",
"waterboer",
"oude-klerenkoopman",
"landswerf",
"lijnslager",
"restaurateur",
"boekbindersjongen",
"viskoopman",
"bijoutier",
"taalmeesteres",
"gistverkoopster",
"korenmeter en dansmeester",
"biertapster",
"diamantsnijder en koopman",
"meisje van plezier",
"kettingzager",
"viskoper",
"commandeur op het droogdok",
"melkhandelaar",
"bediende bij de Marine",
"stedelijk commies",
"kantoorbediende en voorzanger",
"gepensioneerd soldaat",
"kettingscheerder",
"roggebroodverkoper",
"kassier bij de Rhijnspoorwegmaatschappij",
"bestellersknecht",
"moeder van het Bestedelingenhuis",
"winkelier in tabak enz.",
"wattenfabriekswerkster",
"marmerwerker",
"student chirurgie",
"directeur droogbakken",
"gepensioneerd sergeant-majoor",
"askarrenmaker",
"stokkenmaker",
"catechiseermeesteres",
"Rhijnspoorwegklerk",
"katoendrukker",
"scheepsdokter",
"bierhandel",
"brandspuitmakersknecht",
"tolk",
"pianofabrikant",
"modelwerker landswerf",
"kweekschoolonderwijzer",
"spoorweg",
"leesbibliotheekhouderes",
"graver",
"dienaar van politie",
"bouwman",
"houtkoper",
"aardewerkverkoper (aardeverkoper)",
"looier en huidenzouter",
"timmerman en baardscheerder",
"schoftenaffaire",
"fabrikant van kartonwerken",
"conducteur bij de spoorweg",
"loodsmatroos",
"polkadanser",
"president Nederlandsche Handelmaatschappij",
"assistent-klerk",
"manufactuurster",
"magazijnhouder",
"kruideniersvrouw",
"wachter",
"tapper en kuiper",
"publieke vrouw",
"veehouder en melkslijter",
"mastenmakersleerling",
"lantaarnaansteker",
"vleesverkoper",
"winkelierster in mutsen",
"secondante",
"beurtschipper op Zutphen",
"varensgezel",
"leerbewerker",
"soldaat te Harderwijk",
"gedelegeerde",
"winkelier in lappen",
"commissionair",
"aan de spoorwegen",
"kasbediende",
"scheepsboorder",
"gepensioneerd provoost",
"lakenfabrikant",
"winkelier en broodslijter",
"werkmeester",
"president van de rechtbank",
"koopman en winkelier",
"boekhoudersleerling",
"sloper",
"harder",
"kaasmaker",
"godsdienstonderwijzeres",
"koopman in bezems",
"perserij",
"postleerling",
"vormsnijder",
"koperwerker",
"figurant",
"nachtwerker",
"koekbakkersleerling",
"plaatkoekbakker",
"reizend koopman",
"inbrenger in de Bank",
"groenteventster",
"chanteuse",
"zilverkas",
"gepensioneerd kolonel",
"aspirant-ingenieur",
"boekhouder en kapper",
"officier bij het achtste regiment infanterie",
"commissaris bij het Utrechtse Veer",
"schooldienster",
"groenteboerknecht",
"gegageerd sergeant",
"tabakshandelsknecht",
"stoelenmatster",
"kapitein der stoomboot",
"mutsenmaakster",
"colporteur in de boekhandel",
"verfkoper",
"genootschapbode",
"suikersteker",
"geneesheer",
"koopman",
"bontwerkersknecht",
"winkelierster en mangelkelder",
"zeehandelaar",
"zilversmid",
"koffievrouw",
"venter in turf en hout",
"breister",
"vioolmaker",
"regisseur theater",
"smid en slotenmaker",
"boekwinkelbediende",
"hoofdinspecteur van politie",
"sigarenkistenmaker",
"koper in turf en hout",
"arlequin",
"stadswerker en huisbewaarder",
"architect makelaar en timmerman",
"souffleur",
"rijksschatter",
"horlogemaker en smid",
"kleine nering",
"beunhaas",
"kinderschoenmaker",
"Westindisch ambtenaar",
"bureau-oppasser",
"lid der Tweede Kamer en directeur van politie",
"fruitkelder",
"slijterij",
"ondernemer van vermakelijkheden",
"koopman in papier",
"procureurklerk",
"diamantmolendraaier",
"reparateur",
"koetsiersknecht",
"stadsdrukker",
"pettenfabriek",
"assistent-loods",
"tabaksverkopersleerling",
"kwekeling stadsscholen",
"kostleerling",
"korenmeter",
"roosjesversteller",
"verhuurster van boeken",
"liedjeszanger",
"graanhandelaar",
"tabakskeurder",
"koopman en consul",
"in tabak en sigaren",
"koopman in kramerijen",
"schoenmakersknecht",
"hoofdonderwijzeres",
"marktverkoper",
"orgelmakersknecht",
"administrateur der stadsbestedingen",
"fitter gasfabriek",
"winkelierster in turf en hout",
"koopman in agaatsteen",
"ganzenkoper",
"affaire in sterke dranken",
"knecht",
"draadvlechter",
"bediende in tabak",
"bankhouder en koopman",
"bierstekersknecht",
"tabakskoopman",
"stadsrooimeester",
"sociëteitshoudster",
"spijkerman",
"wijnkoper",
"veekoopman",
"verse-waarverkoper",
"kopergieter",
"breukbandenbekleder",
"kousenmaker",
"tabakskopersleerling",
"handelsreiziger",
"ambtenaar van het Grootboek",
"winkelier in modes",
"blekersknecht",
"schouwermansknecht",
"lector gymnasium",
"kassier",
"bouwknecht",
"depothouder broodfabriek",
"krijgsdienst",
"kleermakersbaas",
"marine",
"schipper op de Oost",
"lakstoker",
"neringdoenster",
"uitroeier van wandluizen",
"lakenwinkelier",
"catechismusleermeester",
"nachtwacht bij de Rhijnspoorweg",
"kantoorloper en koopman",
"koopman in galanterieën",
"dienstbaar bij dag",
"filt(reermaker)",
"hulponderwijzeres",
"luitenant bij het korps mariniers",
"speelkaartenmaker",
"conrector Stads-Gymnasium",
"kunstschrijver",
"voddenkoopster",
"stoomketelmaker",
"koopvrouw in turf en hout",
"winkelier en koopman",
"raseerder",
"groenvrouw",
"verhuurster",
"steenkolenweger",
"melkventster",
"vrachtrijder",
"balansenmaker",
"zeevarend als stuurman",
"kasteleines",
"stroopmaker",
"aanspreker en barbier",
"garen en lint",
"drogerijen",
"bode Rhijnvaart",
"likeurstoker",
"smid en koperwerker",
"pettenmaakster",
"groentekelder",
"agent in assurantiën",
"jager",
"geëmployeerde derde klas in het hospitaal",
"bediende bij het veer",
"jalouzieënmaker",
"koopman in vaatwerk",
"paraplufabrikant",
"mangelster",
"ijzerkoper",
"tapper en metselaar",
"pijpenbrander",
"varende",
"mechanicus",
"werkman en militair",
"agent bij de Rhijnspoorweg (RSMSp)",
"opzichter bij de bestrating",
"huisgouverneur",
"bierhuis",
"dienster",
"substituut-officier van Justitie",
"koopman in matten",
"spekslagersleerling",
"in de muziek",
"koopman en kuiper",
"biersteker",
"in aardappelen en groenten",
"winkelierster in thee",
"zeeman",
"goochelaar",
"steenkoper",
"timmerbedrijf",
"drogist",
"droge-gistverkoopster",
"besteller en boekbinder",
"huisbewaarder",
"kosthuis",
"metselaar",
"kinderschilder",
"venter in bloemen",
"opwinder van uurwerken",
"catechiseermeesteres (kattige leermetres)",
"tehuis",
"schachtenmakersknecht",
"boekverkoper en boekbinder",
"in wissel",
"horlogeversteller en barbier",
"groentewagen",
"bank van leninghoudster",
"melkvrouw",
"opkorter",
"bokkingroker",
"spoorwegwerker",
"inlegster",
"assuradeur en commissionair",
"messenmakersknecht",
"water- en vuurnering",
"apotheekloopjongen",
"pantoffelmaker en metselaar",
"koksmaat",
"consul-generaal van Portugal",
"glazenmaker",
"korenlader",
"referendaris financiën",
"hofmeester",
"schoenenschoonmaker",
"pakhuis",
"turfdraagster",
"kleermaker en nachtwacht",
"geldontvanger",
"stroopbrandersknecht",
"venter",
"in muziek",
"broodslijtster",
"smeersmelter",
"visserman",
"oude-vrouwenmoeder",
"koopman in goud",
"koopman in zemelen",
"stoker op de grote vaart",
"stoker op een stoomboot",
"modewinkelierster",
"pantoffelmaakster",
"stoelenzetster",
"in dienst bij de Marine",
"kastenmakersleerling",
"loterijkoopman",
"asbaas",
"kinderwinkel",
"tapper en loterijkoopman",
"rijschoolhouder",
"koopman in antiek",
"handelaar in drogerijen",
"op een wolfabriek",
"mestboerknecht",
"katoenpluizer",
"architect en gepensioneerd officier",
"oliekopersknecht en aardappelen",
"keurmeester van het brood",
"geëmployeerde bij de Rhijnspoorweg",
"drogistknecht",
"koopman in oudroest",
"koekslijtster",
"buitenvaart",
"postbode",
"sluisdichter",
"knecht bij een chocoladefabriek",
"scheepsknecht",
"fabriek in tabak",
"commissionair in wijnen",
"godsdienstonderwijzer",
"mazer",
"horlogekastenmaker",
"schilder vergulder en verfwinkel",
"student rechten",
"besloten winkelier",
"wollennaaister",
"kastenmaker en houtzaagmolenaar",
"sigarenmaker",
"docent aan het gymnasium",
"kaartenmakersleerling",
"zeemansfonds",
"venter en winkelier",
"landhuishoudkundige",
"predikant",
"meelverkoper",
"kleingebakbakker",
"matrassenmakersknecht",
"loper van het tractaatgenootschap",
"leerling loodgieter",
"zilversmelter",
"bediende gasfabriek",
"werkman bij de gasfabriek",
"franjemaker",
"plantsoenwachter",
"winkelmeisje",
"aardappelkoopvrouw",
"luitenant-adjudant",
"assistent-moeder",
"bouwmeester",
"touwbaasknecht",
"kunsthandelaar en schilder",
"winkelier in galanterieën",
"meubelstoffeerder en behanger",
"eerste luitenant der dragonders",
"steenhouwersknecht",
"mangelaarster",
"glasslijper",
"kooinering",
"tonster",
"bewaarschoolhulp",
"kaartenmaker",
"essayeursbediende",
"filoloog",
"broodslijter",
"schoenverkoopster",
"linnenjuffrouw",
"bleker",
"oliekoekenkraam",
"stoker bij de spoorwegen",
"oppasser der paarden",
"bewaarschool",
"drogistleerling",
"snuifwerker",
"koekbakkersknecht",
"commandeur bij de Marine",
"rustend kapitein",
"reiziger in sigaren",
"specerij- en korenmolenaar",
"kwekeling aan de stadarmenschool",
"gepensioneerd majoor bij het Oostindisch leger",
"knecht bij het Goudse Veer",
"bediende bij de IJk",
"garen en band",
"geëmployeerde bij het stadhuis",
"commissionair in effecten en wijnkoper",
"winkeliersknecht",
"koopman binnenland",
"brigadier rijksveldwacht",
"paraplufabriek",
"tabaksfabrikant",
"grutterswinkelier",
"opzichtster in het Oudemanhuis",
"winkelknecht",
"zeeman en tapper",
"preparateur",
"danseres",
"overhaler",
"gasstoker",
"controleur der rijksbelasting",
"kaslopersknecht",
"onderhofmeester Kamperboot",
"notarisbediende",
"schoenmaker en kruier",
"stadsgepensioneerd",
"moutersknecht",
"kraanknecht",
"werkzaam bij zijn vader",
"kandidaat in de geneeskunde",
"machinemaker",
"gaswerker",
"koopvrouw in schoenen",
"kapitein zeeman",
"mr. spekslager",
"belastingcontroleur",
"huistimmmerman",
"behangseldrukkerij",
"blind speelman",
"koopman in effecten",
"portionstafel",
"boerenarbeider",
"griffier bij de arrondissementsrechtbank",
"aanspreker en schuurgoedverkoper",
"oud-ijzer- en voddenverkoper",
"gepensioneerd blinde militair",
"waarnemend griffier",
"winkel",
"groentewinkel",
"bode",
"fabrikant in matten",
"korenmolaarsknecht",
"houtzagersknecht",
"stationschef",
"werker aan de pelmolen",
"werkt bij de gasfabriek",
"sjouwerman",
"drogerijen en verfwaren",
"horlogemakersgezel",
"bureaulist",
"werkman zilverfabriek",
"pandjeshuishoudster",
"liedjeszangster",
"kantoorschrijver",
"voddenventersbediende",
"bakkersgezel",
"kerkbediende",
"eerste luitenant-ingenieur",
"koperplaatdrukker",
"boekhouder Bank van Lening",
"tafelmaker",
"bleekvrouw",
"suikerfabriek",
"zandvaarder",
"directeur Associatie Cassa",
"mutsenwasster",
"lijfknecht",
"kleermaker en dienstbaar",
"in potten",
"officier der dragonders",
"oud-kapitein bij het Oostindisch leger",
"koopman in paardenhaar",
"plantagewerker",
"stadsopperman",
"pakkettenknecht",
"kleerbleker",
"rechterlijk ambtenaar",
"machinestoker",
"koopman in hoeden",
"tapper en winkelier",
"houderes van een bewaarschool",
"korenloper",
"grutter koopman en winkelier",
"scheepstimmermansknecht",
"letterzetter en winkelier",
"makelaar en metselaar",
"azijnmakersknecht",
"beddenwinkelier",
"redacteur Handelsblad",
"ambtenaar bij de stedelijke accijnzen",
"visbakster",
"zandvaarster",
"muziekmeester",
"groentekoper",
"rijtuigschildersknecht",
"geëmployeerde bij het militair hospitaal",
"bleker en winkelier",
"commies bij het waterkantoor (commandeur -)",
"rabbinaal assessor",
"pleisterwerker",
"versteller",
"provoost",
"ijzerkoper en ijzerdraaier",
"stukadoor en havenmeester",
"suikerraffinadeursknecht",
"negotiehoudster",
"makelaar en assuradeur",
"stovenzetster",
"veteraan",
"loodwitfabrikant",
"justitie",
"handschoenwasser",
"fruitventster",
"stadsambtenaar",
"gemaintineerde",
"schouwburgbedienster",
"pomp- en blokkenmaker",
"meesterknecht in de stokviskoperij",
"onderwijzer en leraar",
"oud-raadsheer",
"siroopmaker",
"spoorwegbediende",
"assistent-vader",
"rafactiemeester van de tabak",
"aan de Nederlandsche Bank",
"in sigaren",
"resident op Java",
"suikermonstersteker",
"turfraapster",
"steenpolijster",
"smidsleerling",
"taalmeester",
"stadswerkman",
"waterstaatsopzichter",
"logé",
"ordinarishouder",
"verbandmoeder",
"leerling boekverkoper",
"vuilnisvaarder",
"danseresje",
"knecht in een wattenfabriek",
"beambte bij de spoorwegen",
"op de haarfabriek",
"zakkenplakster",
"kantoorbediende en reiziger",
"kapitein bij de troepen",
"med. ambtenaar",
"rijksdeurwaarder",
"werkman aan de schouwburg",
"nettenmaker",
"helpster Sophiaschool",
"huisbediende",
"rijksveldwachter",
"gepensioneerd korporaal",
"korenmolenknecht",
"scheerdersbediende",
"boekbinder en behanger",
"kost- en dagschoolhouder",
"kunstenmaker",
"officier ter zee",
"goudsmidsknecht",
"employé bij de genie",
"smidsaffaire",
"goud- en zilverfabrikant",
"boodschapper",
"tekenmeester en kunstschilder",
"knecht bij een commissionair",
"steenkoper en metselaar",
"chocolademaker",
"loodsman",
"tapper en verhuurder van matrozen",
"brouwerskuiper",
"groenteverkoop",
"deurwaarder bij de directe belastingen",
"kuiper en tapper",
"reder en assuradeur",
"gepensioneerd koopman",
"diamantsnijdersknecht",
"knecht bij het Algemeen Ziekenfonds",
"groentekoopman",
"boommaker",
"oude-kindermeid",
"diender",
"veekoper",
"stoker bij de gasfabriek",
"koorddanser",
"logementwaarnemer",
"buitenvaarder",
"koffermaker",
"timmermansaffaire",
"kolfballenfabrikant",
"beeldhouwer commensaal",
"klerk bij de rijksbelastingen",
"fabrikant in olie",
"onderwijzer in de godsdienst",
"goudstikker",
"beambte bij de Rhijnspoorweg",
"loopmeisje",
"waskaarsenmaker en aanspreker",
"oud-ijzer",
"korenverkoper",
"zanger",
"glasverkoper",
"lopend arbeider",
"werkend in de fabriek",
"hoofdcommies militaire zaken",
"linnenwever",
"bewaarder op 's Lands werf",
"ondermeester bij de diaconie",
"koffiekleurder",
"sluitersknecht",
"beurtschipper",
"klerk ter stedelijke secretarie",
"conducteur",
"keursmaker",
"ondernemer van publieke vermakelijkheden",
"vuurstoker",
"bierverkoper",
"med. doctor en professor",
"timmerman en militair",
"ambulant muzikant",
"rietvlechter",
"genees- en heelmeester",
"touwloper",
"secretaris Nederlandsche Handelmaatschappij",
"tabakswerksman",
"beeldenmaker",
"stratenmaker",
"orgelvrouw",
"ontvanger der directe belastingen",
"winkelier in sigaren",
"geldwisselaar",
"kashoudster",
"dentiste",
"diamantbewerker",
"controleur bij de Rhijnspoorweg",
"groentenering",
"koekmaker",
"plaatzaagster",
"zemelenkoper",
"zijdewaster",
"fruitkoopster",
"kaarsenmakersleerling",
"meubelmaker",
"punchschenkster",
"directrice pleegzuster",
"melkslijtersknecht",
"geëmployeerde tweede klas garnizoenshospitaal",
"equipagemeester",
"commissaris van het veer op 's-Hertogenbosch",
"winkel en kleermaker",
"schaftkelderhoudster",
"winkel in tabak en kruidenier",
"weger aaan de Keulse waag",
"schuierverkoper",
"leerkoper en winkelier",
"houtwerker",
"letterzetter",
"kolenweger",
"élève",
"beddenkoopman",
"onderbaas",
"pandjeshuishouder",
"bloemist",
"koppenzetster",
"roosjesslijpersknecht",
"metselaarsbaas bij de Marine",
"haarspinner",
"koopman kaarsenmaker en winkelier",
"koopman in ijzerwaren",
"etuikartonmaker",
"stadsmolenaar",
"blokkenmaker",
"slaapsteehouder",
"entrepotwerker",
"koopman in vlees",
"lapster",
"veerknecht",
"fabrieksarbeider",
"zilversmidsleerling",
"winkelier in bedden",
"wattenmaker",
"winkelier in brandstoffen",
"winkel en schuitenvoerder",
"kapitein der infanterie",
"winkelmeid",
"groenteverkoper",
"strohoedenfabriek",
"geëmployeerde bij de artillerie",
"schouwknecht",
"balletmeester",
"papierkoopman",
"sjouwer en zeeman",
"turfhandel",
"drukkersknecht",
"controleur der in- en uitgaande rechten",
"agent",
"looier",
"goudsmid",
"in Neurenberger kramerijen",
"bruggeman",
"steenbakster",
"tweede meid",
"assuradeur",
"schipper op de West",
"kwekeling zeevaartschool",
"besloten winkel in theeën",
"keukenmeid",
"olieknecht",
"korsettenwever",
"in Utrechts water",
"eerste luitenant-kwartiermeester",
"winkel in garen en band",
"schipper op Groningen",
"olieverkoper",
"lithografiedrukker",
"stokviskopersknecht",
"knoppendraaier",
"tabaksnijder",
"beenzwartbrander",
"koffiehuisknecht",
"kleintapper",
"aan de appelmarkt",
"pettenkoopman",
"timmermansleerling",
"fruitverkopersknecht",
"theekopersknecht",
"priester",
"tuinmansknecht",
"bediende in een steenkoperij",
"tandmeester",
"schepenaftekenaar",
"schoensmeerder",
"knecht in kramerijen",
"grootmajoor",
"visventer",
"vlotter",
"weesvader",
"knecht bij een aardappelnering",
"kindermeid",
"plaatwerker",
"stokkendraaier",
"portionshouder",
"gedelegeerde loterij",
"steenkolenkoper",
"lector",
"koopvaardij",
"steenkolenmeter en stucadoor",
"noodhulp korenmeter",
"ziekenmoeder",
"karnemelkboer (kernmaker)",
"officier derde klas",
"water- en vuurverkoopster",
"fabriek sigaren",
"directeur brandmaatschappij",
"koperslagersjongen",
"sociëteitshouder",
"beeldhouwersjongen",
"commissionair en assuradeur",
"kurkwerker",
"sleperijhoudster",
"consulaat-secretaris",
"tafelhoudster",
"kapitein-kwartiermeester",
"verhuurkantoor",
"bottelier",
"pakhuisknecht",
"botenbouwersknecht",
"kinderschoolhoudster",
"bode bij de Nederlandsche Handelmaatschappij",
"zuster",
"water en vuur",
"tabakswinkelbediende",
"stadsgraver",
"belastingontvanger",
"aan de schouwburg",
"zwavelstokkenmaker",
"opzichter te Meerenberg",
"winkelier en behanger",
"visschoonmakersknecht",
"sociëteitsknecht",
"boekbindersgezel",
"werkzaam bij de Nederlandsche Bank",
"letterzettersknecht",
"machinist en stoker",
"kunstenaar",
"kadet",
"smelter",
"noodhulp",
"werkman bij de Nederlandsche Handelmaatschappij",
"koopman fabrikant en apotheker",
"zeevarend chirurgijn",
"reisbediende",
"kantoorbediende en commissionair",
"stopster",
"parapluventer",
"eierenkoper",
"vuilnisknecht",
"portier aan de werf",
"stedelijk ambtenaar",
"scheepsoptuiger",
"in bieren",
"hulploods",
"tweede geneesheer",
"gasthuishoudster",
"beestenkoper",
"watersleper",
"uit naaien",
"timmerknecht",
"kapitein ter koopvaardij",
"goudsmidse",
"koperpletter",
"redacteur courant",
"touwpluizer",
"oppasser te Haarlem",
"directeur van een maatschappij",
"vleeshouwersknecht",
"commandant",
"ordinarishoudster",
"boekhouder en varkensmester",
"zandschippersknecht",
"priester buiten bediening",
"winkelier in garen en band",
"fijnschilder",
"knecht in een koffiehuis",
"op een handelskantoor",
"reisgezellin",
"mastenmaker",
"leerling landbouw",
"karwerker",
"zetbaas",
"steendraagster (steendraaister)",
"lampenwinkelier",
"directeur wasinrichting",
"courantenombrenger en aanspreker",
"mannenvader",
"kok op de Lemmerboot",
"wijnkopersleerling",
"knecht in Artis",
"dienstbaar en huishoudster",
"kaarsenmaker bij Brandon",
"groentehandel",
"slijter in klompen",
"noodhulp loods",
"aardappelverkoper",
"militair kapitein",
"dienstknecht",
"lid der Tweede Kamer",
"leersnijder",
"suikerstamper",
"president van een bank",
"matrassenmaker",
"kleermakersaffaire",
"kweekschool",
"verswaterschipper",
"graveerder",
"kantoorboekverkoper",
"kapitein plaatsvervangend majoor",
"onbekwaam",
"oudskoop",
"student",
"directeur der Marine",
"dienaar",
"melkverkoopster",
"ijzerkopersknecht",
"paraplukoopman",
"askarman",
"geëmployeerde bij de Nederlandsche Rhijnspoorweg",
"schilderijmaker",
"koopman in mest",
"tapijtweefster",
"visiteur bij het rijk",
"koopman in oudroest en metselknecht",
"gepensioneerd militair",
"pontman in Natura",
"costumeur",
"visiteur",
"muziekwerker",
"kapitein luitenant-ter-zee",
"weversknecht",
"tulenaaister",
"toonkunstenaar",
"wolkammer",
"handelt in kinderspeelgoed",
"in verfwaren",
"diamantslijpersleerling",
"geëmployeerde bij de groenmarkt",
"knechtje",
"redemptorist broeder",
"sieraadschilder",
"tabaksverkoper",
"apothekersgezel",
"pianomeester",
"volontair handelskantoor",
"oliestoker",
"boekbindster",
"koopman in meubelen",
"ambtenaar derde klasse",
"conciërge",
"slijter in sterke drank",
"visiteur bij 's Rijks middelen",
"pakker",
"notarisklerk",
"vellenbewerker",
"schoenmaker en nachtwacht",
"heelmeester en verloskundige",
"mosselvrouw",
"groenboer",
"leerling in de schilderkunst",
"courantenrondbrengster",
"in blaasbalgen",
"schoorsteenvegersleerling",
"politieagent",
"Grieks priester",
"zilversmidsgezel",
"baker",
"plaatsbewaarder",
"klerk bij de thesaurie",
"brander",
"balletjes- en stroopmaker",
"landbouwer",
"gietersknecht",
"beambte",
"agent van handel",
"zeeofficier",
"plaatsbewaarster",
"gepensioneerd sergeant",
"korenschieter",
"pettenventer",
"eerste luitenant-adjudant",
"procuratehouder fa. Westerman",
"zeemansknecht",
"schoenmaker en muzikant",
"fabrikant van strohoeden",
"waarnemend besteller",
"directeur van een assurantiemaatschappij",
"boekhoudersknecht",
"chirurgijn",
"directeur du Theatre Automat.",
"scharenslijper",
"sigarenmakersknecht",
"koopman (schuimer)",
"rijtuigknecht",
"groenwagen",
"blind zonder beroep",
"modewerkster student",
"verhuurder van diensboden",
"brievenbesteller en loterij",
"groentehandelaar",
"handelaar en siroopmaker",
"assuradeur en buitenlands commissionair",
"winkelier in eieren",
"stokkendraaiersknecht",
"reizend met een wafelkraam",
"kostschoolhouder",
"rector gymnasium",
"makelaar en steenhouwer",
"landsambtenaar",
"waterslijter",
"haven- en dokmeester",
"korist",
"tabaksslijter",
"rijksbelasting",
"commissionair in verfwaren",
"lector op wachtgeld Marine",
"pikkok",
"student letteren",
"officier van gezondheid tweede klasse",
"huurkoetsier",
"koopman in pianofortes",
"kassier bij de Nationale Schuld",
"houtkoper en houtzaagmolenaar",
"kleerkoper",
"officier van justitie",
"vader in het Oud-Roomse Weeshuis",
"kassiersbediende",
"majoor bij het derde regiment dragonders",
"bewaarster eerste klasse",
"korporaal-tamboer",
"lodenhagelgieterij",
"broodbakkersgezel",
"lakker",
"houtvlotter",
"politiebeambte",
"leerbereider",
"kaarsenmakersknecht",
"kunstdrijver",
"juweliersgezel",
"gaarkeukenhouder",
"rustend predikant",
"huishoudster",
"boekenkoper",
"juffrouw van gezelschap",
"besteller van het Rotterdammerveer",
"stadsbode",
"directeur levensverzekeringsmaatschappij",
"schoenmaker en winkelier",
"koopman in metalen",
"kolendrager",
"landwerker",
"ijzervormer",
"geëmployeerde",
"rijtuigsloper",
"ondermeester stadsarmenschool",
"pandjeshuis",
"roggebroodbakker",
"builenmaakster",
"makelaar en commissionair",
"ambachtsman",
"klompenmaker",
"koopvrouw in vodden",
"koopvrouw in fruit",
"boek- en plaatdrukker",
"garenwinder",
"schilder en korendrager",
"bierleverancier",
"heel- en verloskundige",
"koperslaagster",
"secretaris van de schouwburg",
"koffieneringhoudster",
"poelierster",
"kruidenzoekster",
"vleeskoper",
"leerfabrikant",
"eigenaar van vaartuigen",
"geelgietersknecht",
"groenventster",
"zeemtouwer",
"apothekersknecht",
"burgemeester",
"koopman in boeken",
"lid van de arrondissementsrechtbank",
"meubelmakersknecht",
"geweermakersknecht",
"catechiseermeester en aanspreker",
"staalgraveur",
"provisor",
"naaileerling",
"korenwerker",
"viskaker",
"bestelster in boter",
"krammer",
"roggebroodbakkerij",
"hoofdcontroleur",
"leermeester",
"koekslijter",
"optuiger",
"oplager stadsbank",
"makelaar en steenkoper",
"bottelier bij de Marine",
"beeldhouwer",
"stoker bij de heer De Bruin",
"werkman Entrepotdok",
"leerinkoper",
"directeur in assurantie",
"diamantkloversknecht",
"groenteverkoopster",
"droogschuurder",
"directeur van een stoomsleepbootmaatschappij",
"zijdewerkster",
"gepensioneerd turfdrager",
"gasstoker en -blusser",
"verfster",
"glaswerker",
"kramer en turfdrager",
"knopenmaakster",
"geëmployeerde bij het grootboek",
"huisvrouw",
"koopvrouw in groenten",
"water- en vuurverkoper",
"machinist op de stoomboot",
"kunstlakker",
"koopman in teer",
"dienstbaar in huis",
"ijzerwinkelier",
"suppoost Armhuis",
"ingenieur",
"spijkermaker",
"waterverkoper",
"lakenkoopman",
"barbier en aanspreker",
"staatsloterij",
"behangselpapierdrukker",
"steenzetter",
"winkelier in lakens en kleermaker",
"baker en huisbewaarster",
"aardappelmarktknecht",
"leerkopersknecht",
"schoenenkoopvrouw",
"boekverkopersknecht",
"keurmeester aan de turfmarkt",
"optisch glasslijper",
"waagdrager",
"slijter en tapper",
"spoorwegwachter",
"rijksarts",
"zangeres",
"controleur en koopman",
"loods",
"visbakker",
"wolwerkster",
"wagenverhuurder",
"instrumentdraaier",
"kapper en barbier",
"cementmolenaar",
"koffiehuishouder en tapper",
"letterzetter en boekdrukker",
"macadam-steenklopper",
"knecht in een slijterij",
"commies bij de stedelijke belastingen",
"timmerman en kastenmaker",
"zwepenmaker",
"broodventster",
"handelsknecht",
"balletjesmaker",
"korenfactor en koopman",
"postiljon",
"scheepvaarder",
"tapperij",
"kurkensnijder",
"baardscheerder",
"glasschilder",
"gepensioneerd officier van gezondheid",
"leerling sigarenmaker",
"waagmeester",
"tuinman en nachtwacht",
"handel in sigaren",
"oppasser diergaarde",
"kruidenierswinkel",
"koffie- en theenering",
"schuierwerkster",
"privaatdocent",
"pottenbakker",
"parfumier",
"korenfactor",
"schildersjongen",
"pepermuntwerkster",
"rustend turfwerkster",
"muziekmeester en tapper",
"mattenverkoper",
"tapper",
"foktuiger",
"ziekenoppasser",
"hoedenmakersleerling",
"rouwwinkelier en aanspreker",
"turfhever",
"smid",
"lampenist",
"gepensioneerd postiljon",
"gepensioneerd artilleriemagazijn",
"architect",
"krantenmaker",
"regisseur-generaal",
"straatmuzikant",
"opzichter landontginning",
"zaagselvoerder",
"leerjongen",
"hofmechanicus",
"schilder en koopman",
"koffiehandel",
"korporaal bij het nachtwezen",
"behanger en beddenwinkel",
"korenmolenaarsknecht",
"fruitknecht",
"assistent-karman",
"assistent-linnenvrouw",
"schuitenvoerder en korendrager",
"koperwarenkoopman",
"gepensioneerd Oostindisch ambtenaar",
"klokkenmaker",
"dienstbaar en min",
"commissionair in suiker",
"tijdelijk zonder",
"vetermaker",
"brooddepot",
"steendrager",
"bladkeerder",
"boekhandelaar en boekdrukker",
"toneelspeler",
"lettersnijder",
"bouwmansknecht",
"conducteur bij de Rhijnspoorweg",
"boekhandelsleerling",
"commissaris (veer op) Amersfoort",
"speelgoedmaker",
"verguldersleerling",
"touwwinkelierster",
"kurkenmakersknecht",
"schilder en glazenmaker",
"waakvrouw",
"melkzaak",
"winkelier in glaswerk",
"schoppenmaker",
"knecht van de vitrioolfabriek",
"commies derde klas",
"roggebroodbakkersknecht",
"kapitein-ter-zee",
"agent der Zwolse Stoombootmaatschappij",
"tabaksbediende",
"hout en turf",
"kuiperswinkel",
"dassenmaker",
"stellenmaker",
"assistent bij het gerechtshof",
"bloemen",
"commies bij de rijksbelastingen",
"hoefsmid en kachelmaker",
"kramer",
"handelaar",
"koksaffaire",
"brillenslijper",
"schoftkelderhoudster",
"in fruit",
"militair in Oost-Indië",
"lid Prov.St. Gelderland en gemeenteraad Nijkerk",
"pakkistenmaker",
"modemaakster",
"sluisdieper",
"sjouwer van de vismanden",
"zaagmolenaarsknecht",
"veehouder",
"modelwerker",
"koffiehuishouder",
"dienstbode bij dag",
"winkelhoudster",
"kleerlapper",
"vleesroker",
"inspecteur van politie",
"literateur",
"ventster in groenten",
"paardenkoper",
"conducteur bij de Hollandsche IJzeren Spoorwegm.",
"ketelmaker",
"oud-ijzerverkoopster",
"leerling in tabak",
"biljart- en meubelmaker",
"koopman in brandstof",
"houtzager",
"koopman en houtzager",
"consul van Noorwegen",
"schoenboorder",
"commies van aanneming bij de Marine",
"koopman en apotheek",
"leestenmaker",
"biersteker (bierstoker)",
"spekslager",
"melknering",
"goudborduurder",
"kunstdraaiersknecht",
"zeilenmakersknecht",
"in krijgsdienst",
"bretelmaker",
"commissionair in garen en band",
"pikeur",
"logement koopman en rijksschatter",
"kruister in appels",
"joods onderwijzer",
"Rhijnspoorwegarbeider",
"kaaskopersknecht",
"huisknecht en timmerman",
"kleppermaker",
"roeier van natte waren",
"melkboerin",
"schrijnwerker",
"kassierhouder",
"kamerdienaar",
"werkman in steenkolen",
"confiturier",
"geneesheer en verloskundige",
"brigadier-veldwachter",
"schavenmakersknecht",
"bierbrouwer",
"houtvestersknecht",
"stoelenmaker en aanspreker",
"collecteur",
"bij de stadsbestrating",
"vroedvrouw",
"eigenaresse",
"kuipersknecht",
"negotiante",
"controleur der directe belastingen",
"apprenti-mecanicien",
"mangel",
"visiteur rijksbelastingen",
"tailleur",
"gepensioneerd commies",
"directeur stoombootonderneming",
"zwemmeester",
"geëmployeerde bij de Bank van Lening",
"modegoedmaakster",
"fabrikant",
"godgeleerde",
"visiteur der in- en uitgaande rechten",
"sloepenmaker",
"gepensioneerd majoor der cavalerie",
"winkeldochter",
"boomwachter",
"magazijnmeester",
"kapitein van het zesde regiment infanterie",
"op 's Rijks lijnbaan",
"knecht in het gasthuis",
"fabriekswerker",
"straatnegotiante",
"scheerdersaffaire",
"zilversmidsknecht",
"gistkoper",
"pakhuismeester",
"doctor in de letteren",
"water- en vuurwinkelier",
"vlottersaffaire",
"koopvrouw in kant",
"opzichter in Artis",
"kastenwerker",
"boterkoper",
"grofsmid",
"steenzager",
"majoor bij het derde regiment infanterie",
"vader in het Leprozenhuis",
"hoornblazer",
"lampen",
"op de fabriek bij de haven",
"gistslijter",
"boekhoudster",
"visbereider",
"veerschipper op Naarden",
"broodhaalster",
"winkelier in goud en zilver",
"commies",
"boorder",
"drukker",
"stadsvroedvrouw",
"handelaar in sigaren",
"toneelschilder",
"kraambewaarster",
"schuitenjager",
"steenkolendrager",
"gepensioneerd luitenant",
"drogistbediende",
"verkoper",
"beddenverkoper",
"schilder en sergeant",
"in landsdienst",
"gardenier",
"dijkwerker",
"houtkoper en kastenmaker",
"bediende in een apotheek",
"werktuigfabrikant",
"wijnkopersknecht (wijnkuipersknecht)",
"advocaat en pakhuismeester",
"fruitverkoper",
"raffineerder",
"winkelierster in tabak",
"timmerman en tapper",
"stoffenperser",
"vruchtverkoopster",
"werkman bij de Handelmaatschappij (H.M.S.)",
"beddenmakerswinkel",
"gegageerd",
"leerling kweekschool",
"suikermonstertrekker",
"steenkolenfactor",
"tafelhouder",
"scheepstuigmaker",
"wijnhandelaar",
"bakkerij",
"controleur",
"pleegzuster",
"schoonschrijver",
"kwekeling diaconieschool",
"wasverwerker en bloemenkweker",
"aspirant-telegrafist",
"negotievrouw",
"kapitein-directeur van het hospitaal",
"portionstafelhoudster",
"winkelier in spiegels",
"galanterie",
"vervaardiger van gastoestellen",
"in turf",
"schout-bij-nacht",
"molenaresse",
"stedelijk ontvanger",
"tijdelijk commissaris",
"koopmansbediende",
"baleinfabrikant",
"helpster in de bewaarschool",
"assistent-bode",
"gezagvoerder",
"poorter",
"beddenmakersknecht",
"kantoorknecht",
"behangselgronder",
"blekersmeid",
"koopvrouw in poppengoed",
"kostuumnaaister",
"haarwerkster",
"muilenverkoopster",
"steenkolenmeter",
"metselaar en winkelier",
"galonwerker",
"stadskorendrager",
"kolfballenmakersknecht",
"directeur van een theater",
"gouddraadwerker",
"zeekapitein Californië",
"fruit- en visverkoper",
"koopman confiturier en winkelier",
"kleermaakster",
"linnenwinkelierster",
"kanselier consulaat",
"broodventer",
"nachtronder der straatverlichting",
"courantenombrengster",
"oliekoper",
"kapitein om de Oost",
"bediende in een boekhandel",
"diamantslijper",
"tinnegieter en turfdrager",
"gepensioneerd matroos",
"turf",
"stedelijk pensioen",
"boedelschatter",
"schofthuishoudster",
"touwslagersknecht",
"baas timmerman op zee",
"zaagmolenaar",
"draagbaarmaker",
"ontvanger",
"chocoladefabriek",
"werker in tabak",
"rooms-katholiek priester",
"wieldraaier",
"bloemenventer",
"boekdrukker -verhuurder en -verkoper",
"winkelier in ijzerwaren",
"fruitventer",
"aanspreker en schilder",
"ezelinnenhouder",
"kleinhandelaar",
"gistverkoper",
"kleermaker en lakenkoper",
"lijnbaander",
"handelsman in fruit",
"schuitknecht",
"onderwijzeres in de godsdienst",
"in schuurgoed",
"in het malehuis (N.Isr. Krankzinnigengesticht)",
"blauwselfabrikant",
"bestuurder onderwijsinrichting",
"zinkwerker",
"schuitenvoerder",
"stadswaldieper",
"commissaris der Keulse Vaart",
"officier in Nederlandse dienst",
"beambte bij het hospitaal",
"water- en vuurnegotie",
"kuiperij",
"in houtwaren",
"bode bij de kweekschool",
"koster van de Oude Kerk",
"pettenmaker",
"schooltje",
"lopend koopvrouw",
"reizend koopvrouw",
"aan de Nederlandsche Handelmaatschappij",
"balletjesmakersknecht",
"landbouwster",
"ambtenaar bij de accijnzen",
"wattenfabrieksarbeider",
"schoenmaker en aanspreker",
"werkman in de handel",
"winkel in drogisterijen",
"met lappen katoen en kanten",
"schoorsteenvegersknecht",
"suikerbakker en werkman",
"zeeman en zeilenmaker",
"scheepstimmerman",
"hout- en turfaffaire",
"wijnkoopman",
"bewaarster",
"werkbode",
"aan het toneel",
"liefdezuster",
"vismarktvrouw",
"lampenmaker en blikslager",
"leerling in de fabriek van Van Vlissingen",
"paardenhaarvlechter",
"doctor in de rechten",
"jager aan het Haarlemmerveer",
"zilverdrijver",
"dekenwerkster",
"stuurman",
"broodbakkersknecht",
"sigarenwerker",
"mattenkoper",
"ijzerwaren",
"waterslijtster",
"pakhuizen",
"gepensioneerd zeeman",
"vleeskoopster",
"ketellapper",
"bankbewaarder",
"turfventster",
"kindermoeder",
"slagersjongen",
"kleine-kindermeid",
"artist",
"meesterknecht in het dok",
"bloemistknecht",
"vogelkooitjesmaker",
"tapper en muzikant",
"kantensnijder",
"bijbeldrukker",
"veearts eerste klasse",
"meid",
"agent in manufacturen",
"brouwmeester",
"officier der cavalerie",
"teerkopersknecht",
"palingroker",
"kerver",
"broodbakkershulp",
"geëmployeerde bij de stadsdrukkerij",
"geëmployeerde bij de Nederlandsche Bank",
"pasteibakker",
"dragonder",
"zetkasteleinse",
"gegageerd nachtwaker",
"goud- en zilverhandelaar",
"glasblazer",
"bierslijter",
"zilverwerker",
"gepensioneerd commissaris",
"blokkenmaker bij de Marine",
"winkelier in fruit",
"metselaarsknecht",
"weesmoeder",
"reizend",
"groente- en aardappelenkoopman",
"élève volontair bij Van Vlissingen",
"opzichter van ziekenhuizen",
"vensterglaskoopman",
"veehoudersknecht",
"zijdeverver",
"borduurster",
"koopman in vensterglas",
"schilder (peintre)",
"modiste",
"groenwinkelier",
"kachelmaker",
"baggerman",
"vruchtennatfabrikant",
"eerste luitenant der infanterie",
"hovenier en bloemkweker",
"procureur",
"apotheekloper",
"loodgieter",
"kleermaker en tapper",
"kolenfactor",
"suppoost R.C. Armkantoor",
"in oud ijzer",
"karnemelkboer",
"leerling horlogerie",
"kolonel plaatsvervangend commandant",
"fruitkoper",
"kuipersjongen",
"rijksadvocaat",
"kaashandelaar",
"brandspuiter",
"pijpenuitbrander",
"oliekopersknecht",
"directeur Ned. Handelmaatschappij (de N.Holl.)",
"kunstwerker",
"nering in allerlei",
"collecteur staatsloterij",
"deurwaarder bij de rechtbank",
"gomelastiekwerker",
"catechist en aanspreker",
"bibliotheekhouder",
"huisschilder",
"officier in het Oostindisch leger",
"asophaler",
"wagenmakersbaas",
"winkelier in thee",
"brievenbesteller",
"pastoor",
"chocoladefabrikant",
"pianomaker",
"schoenenventster",
"opzichter",
"assuradeur-directeur",
"dienstbodehulp",
"wijnkoopmansknecht",
"zadelmaker",
"ballastschipper",
"handschoenmakersknecht",
"tabakswinkelier",
"kramster",
"bij de Marine",
"postambtenaar",
"horlogemakersleerling",
"toneelzangster",
"naaldewerker",
"suikercontractant",
"plooister",
"chirurg en vroedmeester",
"wagendrijver",
"in schuurgerei",
"advocaat en secretaris Rhijnvaart",
"bediende bij de Nederlandsche Bank",
"beltslechter",
"passementwerker",
"strijkijzermaker",
"oud-officier van justitie",
"gerechtsdienaar",
"plaatkleurder",
"kapitein der artillerie",
"oudroestverkoopster",
"schrijfster aan de markt",
"huisbewaker",
"oliekopersknecht (oliekuipersknecht)",
"rijtuigmaker",
"kantwerker",
"pandhoudster",
"klerk bij het gerechtshof",
"likeurstoker en tapper",
"luitenant-ter-zee tweede klasse",
"tapper en schilder",
"turf- en houtkoper",
"stadsdieper",
"organist",
"wijnkoper en kuiper",
"cargadoorsbediende",
"employé Rijksschatkist",
"schoorsteenveger",
"eerste leraar diaconieschool",
"borstelfabrikant",
"schildknaap",
"kinderschooljuffrouw",
"mangelhuis",
"vlotter en commissionair",
"strijkster",
"machinist stoommolen",
"geëmployeerde bij de heer ontvanger",
"koksknecht",
"bij de Nederlandsche Handelmaatschappij",
"inlands reiziger",
"employé Nederlandsche Handelmaatschappij",
"pensionaris",
"stoker bij de Engelse Gasfabriek",
"linieerder",
"grafdelfster",
"ketelsmid",
"scheepsjongen",
"koopman in bedden",
"courantenwijk",
"gepensioneerd magazijnmeester bij de Marine",
"ivoorwerker",
"commies bij het Entrepotdok",
"koopmansleerling",
"wolspinster",
"ijzerfabriek",
"assurantie-commissionair",
"dozenmaakster",
"wijnknecht",
"leerling handel en industrie",
"groentevrouw",
"fabrikant in lampen",
"directeur der Nederlandsche Handelmaatschappij",
"zaakwaarnemer",
"leerling telegrafist",
"gepensioneerd",
"lakmaker",
"perkwachter",
"korenopzichter",
"stukadoor en instrumentmaker",
"leerling letterzetter",
"pikschraper",
"apothekersjongen",
"slager en koopman",
"vormschilder",
"winkelier horlogeglazen",
"wijnkoopster",
"negotiekraam",
"lid Tweede Kamer Staten-Generaal",
"actrice",
"scheepstimmerm.aff. verhuurster van waterleggers",
"koekjeswinkel",
"spoorwachter",
"knecht op de varkenmarkt",
"koopman in verfwaren",
"winkeljuffrouw",
"commandant van de stadswacht",
"schrijfpennenbereider",
"bloemenmaakster",
"kanselier",
"vlottersknecht",
"breionderwijzeres",
"orgelspeler",
"waagknecht",
"gezelschapsjuffrouw",
"graveur",
"koopman in petten",
"koffiepakhuisknecht",
"koopman in kalk en steen",
"schoenschoonmaakster",
"turf- en houtnegotie",
"broodbakkersaffaire",
"opziender",
"machineknecht",
"kaasdrager",
"timmerman en kistenmaker",
"olieslager",
"instituut",
"lampenmeester",
"koopman in zand",
"olieslagersknecht",
"doodgraver",
"trompetter",
"kapitein bij de Rijn-IJssel Stoomboot Mij",
"orgeldraaier",
"koralenslijper",
"commies bij het postkantoor",
"op de Landswerf",
"tabakskoper",
"controleur der stadsbelastingen",
"kunstenares",
"kwastenmaker",
"tabaksaffaire",
"stadskalkmeester",
"binnenvaartsknecht",
"kosthuishouder",
"raadslid te Semarang",
"cargadoor en convooiloper",
"rustend",
"luitenant der infanterie",
"waagdrager en schuitenvoerder",
"kapitein bij een regiment infanterie",
"meesterknecht",
"turfschipper",
"commissionair en koopman",
"zuurverkoper",
"verbandvader",
"wetschrijver",
"kartonwerker",
"kraanzager",
"groentenaffaire",
"tariefzager",
"huurknecht",
"schoen- en laarzenmaker",
"zakjesplakster",
"Clinische School",
"koriste",
"bottier",
"ontvanger der accijnzen",
"wagenexpeditieknecht",
"bankwerker",
"molenmaker en molenaar",
"balletspeler",
"mattenwinkelierster",
"katoenmeter",
"klerk",
"sigarenmaker en militair",
"chef de Bureau Nederlandsche Handelmaatschappij",
"vlaggennaaister",
"vormer",
"winkelier en melkslijter",
"bloemenkoopman",
"tabaksnering",
"strohoedenmakersknecht",
"steendrukker",
"baleinsnijdersknecht",
"bediende",
"in vensterglas",
"stadsvisiteur",
"tolgaarder",
"chocolademolenaar",
"speelman",
"bestelster",
"kroegbaas",
"waardin",
"handelaar in papier",
"scheepsinstrumentmaker",
"handelaarster",
"kabinetwerker",
"gepensioneerd modderman",
"beddenwinkel",
"timmerman",
"koopmansknecht",
"in muziekinstrumenten",
"aardewerkverkoper",
"bediende in manufacturen",
"metselaar en steenkoper",
"foerier",
"schofthuis",
"hoofdonderwijzer rooms-katholieke armenschool",
"drukkersgezel",
"diamantleerling",
"beddenmaker",
"boterverkopersknecht",
"venter in zwavelstokken",
"boterwinkelier",
"hortulanus",
"legger",
"medailleur",
"grutter",
"ontvanger der successie",
"huisschildersleerling",
"depothouder",
"arbeider",
"colporteur",
"polderknecht",
"woont in keet",
"veercommissaris",
"smid bij de Marine",
"lijstenmakersknecht",
"aan de Marine",
"behangseldrukkersknecht",
"pottenwinkelierster",
"kastenmakersbediende",
"turfverkoopster",
"kraamster",
"Engels taalmeester",
"kunstvuurwerker",
"werker bij de Hollandsche Spoorweg",
"zettapster",
"korenmolenaar",
"huistimmerman en tapper",
"omroepster",
"politieinspecteur",
"gruttersknecht",
"sigarenkokermaker",
"stukadoor",
"handschoenreiziger",
"porseleinschilder",
"kolenmeter",
"gedelegeerde eerste klas",
"broodbakster",
"scharenmaker",
"varkensslager",
"stadsviskeurder",
"omroeper",
"commies bij het loodswezen",
"leerling in stoomfabriek",
"machinist ter zee",
"boekverkopersbediende",
"mangelkelder",
"ambtenaar bij de Bank van Lening",
"courantenman",
"muziekdraaier",
"leerkoopman",
"fabrikant van kachels",
"bierhuisknecht",
"kousenaffaire",
"kunsthaarverkoper",
"kleinschoolhouder",
"koffiestand",
"schouwburgmedewerker",
"schoolonderwijzer",
"vader",
"turfleverancier",
"veeman",
"porster",
"wasmoeder",
"mangelwinkel",
"in hout en turf",
"wagenmakersleerling",
"leerling seminarium",
"beddenverhuurster",
"blikslager",
"boekbindersknecht",
"hoofdboekhouder stedelijk [...]",
"vogelkoopman",
"aardappel- en gistverkoper",
"aardappelslijter",
"handelaar in vogels",
"oude-klerenkoper",
"beurtschipper op Rotterdam",
"boekdrukker",
"gepensioneerd landwacht",
"koper- en blikslager",
"venter in manufacturen",
"koopman commissionair en assuradeur",
"laarzenmaker",
"boekwinkelier",
"werker van den kansel",
"brandmeester",
"papierdrukker",
"werkmoeder",
"karrenmaker",
"winkelaffaire",
"eigenaar van waterschepen",
"watervrouw",
"horlogerepareerder",
"ijzerkramer",
"in zwavelstokken",
"winkelier in prenten",
"diamantklover",
"vetverkoper",
"poeliersbediende",
"bewaarder tweede klasse",
"stukadoor en steenkolenweger",
"eerste onderwijzer stadsscholen",
"poelier en f[...]",
"houtdraaier",
"bootsmansmaat",
"in lompen",
"student heelkunde",
"orgelmakersleerling",
"zijdeverversknecht",
"ingenieur bij de Marine",
"opperman",
"rentenier en grondeigenaar",
"postrijder",
"familjevader op Nederlands Mettray te Bijsselt",
"hoedenwinkelier",
"makelaar commissionair en kantoorbediende",
"blokken- en pompenmaker",
"winkel in behangselpapier",
"pleisterbeeldenmaker",
"negotiant in lappen",
"palingverkoper",
"koedrijfster",
"toneelmeester",
"winkel in petten",
"garen- en lintwinkel",
"chirurgisch student",
"commissaris bij het Hoornse Veer",
"kartonmaker",
"kaartenwasser",
"dokter ter zee",
"korenwachter",
"buidelmaker",
"nachtwaker bij de Rhijnspoorweg",
"geëmployeerde bij het hospitaal",
"huisonderwijzer",
"heropvoeding",
"koffiehuishoudster",
"korenverschietster",
"opzichter bij de stads Publieke Werken",
"aan de vismarkt",
"huurster",
"zoutevisverkoper",
"winkelier in speelgoed",
"poeliersknecht",
"barbier",
"nachtportier",
"graandroger",
"entrepot",
"leerling schoenmaker",
"knecht in aardewerk",
"geheimschrijver",
"marktmeester",
"schotelmaker",
"papierwinkel",
"pensioen",
"winkeliersdochter",
"publiek huis",
"geëmployeerde bij de gasfabriek",
"courantenombrenger",
"beeldhouwersknecht",
"houthakker",
"moeder van het Leprozenhuis",
"stalhouder en koopman",
"matroos",
"boswachter",
"winkelleerling",
"koekverkoopster",
"tabak en sigaren",
"officier der administratie",
"kleermakersleerling",
"witter",
"grondbezitter",
"kuiper en kastenmaker",
"kostvrouw",
"timmerman en makelaar",
"commissiereiziger",
"pakknecht",
"lid van de rechtbank",
"pedel bij het Geneeskundig Toevoorzicht",
"ambtenaar Rijks Entrepot",
"bediende bij de koster",
"bankbediende",
"epaulettenmaker",
"kluwtjeswinkeljuffrouw",
"katoendrukkersknecht",
"tuinarbeider",
"melkkoopster",
"voermansknecht",
"hout- en turfnering",
"nachtwaker",
"aardappelvrouw",
"groentekruier",
"kranenmaker",
"glasgraveur",
"goud en zilver",
"klerenhandelaar",
"bloemenmaker",
"student aan het seminarium",
"grutter en winkelier",
"aardappelknecht",
"majoor der infanterie",
"pakkersknecht",
"schuiter",
"kleermaker en winkelier",
"cargadoor",
"suikerfabrikant",
"handelaar in tabak",
"doolhofhouder",
"koopman in oud ijzer",
"landmeter eerste klas bij het kadaster",
"koopman in winkelier",
"factor",
"aardappelwinkel",
"slijterijbediende",
"kapitein van het achtste regiment",
"directeur van een assurantiefirma",
"bouwkundige",
"loodwitmaker",
"lid der zetters der stad Amsterdam",
"stijfster",
"agent bij de stoombootmaatschappij",
"tweede luitenant",
"school",
"melkslijter en modderman",
"doctor",
"élève onderwijs",
"koperplaatsnijder",
"in haring",
"stratenmaker en sjouwer",
"timmerman op zee",
"aanspreker en kruier",
"tabakskerver",
"arrondissementsijker",
"groentehandelaresse",
"in dienst",
"koopman in komenijswaren",
"catechisante",
"handelskantoorbediende",
"varende op de stoomboot",
"molenaarsknecht",
"fruithandelaar",
"azijnbrouwersknecht",
"pastoor rooms-katholieke kerk",
"slijter in gist",
"rijksconducteur",
"vader in gesticht Brentano",
"lid van het gymnasium",
"hoedenhandelaar",
"commissionair en winkelier",
"timmerman en korendrager",
"oud-commissionair",
"ballastvaarder",
"agent van brandassuranties",
"blokkenmakersknecht",
"stoker bij de Rhijnspoormaatschappij",
"zijdewerker",
"kunstschilder",
"slachter",
"houtzaagmolenknecht",
"schoenmaker en matroos",
"ambtenaar bij de Marine te Medemblik",
"stadstimmerman",
"beheerster",
"eerste luitenant der artillerie",
"baander",
"belastingdeurwaarder",
"draagbandenmaakster",
"waakster en baker",
"telegrafist",
"hoedenwinkel",
"kartonier",
"koperslager",
"leger Oost-Indië",
"puinhaler",
"krantenombrenger",
"knecht bij het Huiszittenhuis",
"slagersleerling",
"wijnverkoper",
"kruisenmaker",
"pottenslijper",
"buikspreker",
"gepensioneerd majoor",
"mattenmaker en mattenverkoper",
"koopman in hooi en stro",
"trekt van de armen",
"militair koloniste",
"koffieverkoper",
"aannemer",
"debitant (debutant)",
"portefeuillemaker",
"stadsschildersbaas",
"ellenmaker",
"commies bij de stedelijke accijnzen",
"in onroerend goed",
"azijnverkoopster",
"ritmeester",
"ijzerverkoper",
"turfkoopman",
"antiek-meubelmaker",
"buitenlands zeevarend",
"blaasbalgmaker",
"pachter",
"doodgraversknecht",
"gipswerker",
"rustend loodsman",
"dragersknecht",
"leerling smid",
"tabakshandelaar",
"papierhandel",
"hoedenfabrikant",
"kantonrechter",
"inbrenger Bank van Lening",
"machinist",
"lijstentrekker",
"curator Clinische School",
"looiersleerling",
"gepensioneerd onderwijzer",
"reizend kramer",
"in bedden",
"koopman in hout",
"uitdrager",
"pandjesbaas",
"tapijtwever",
"kunsthaarwerker",
"schoenmaker en laarzenmaker",
"diamantmolendraaister",
"tweede luitenant der infanterie",
"steentekenaar",
"lijfrenten",
"koopman in gevogelte",
"pruikenmaker",
"reizend visvrouw",
"handel in groente",
"portier der Nederlandsche Bank",
"sigarenfabrikant",
"zandkoper",
"buitengewoon ambtenaar",
"apotheker en drogist",
"werktuigmaker",
"kofschipper",
"kok op de stoomboot",
"ambtenaar spoorwegadministratie",
"havenmeester",
"scheepsdiepgangmeter",
"directeur van een assurantiecompagnie",
"kruier",
"waterbode",
"regisseur",
"besteller van het Waalwijker Veer",
"machinedrijver",
"vleesknecht",
"president van het gerechtshof",
"scheepsprovisie",
"eerste leermeester stadsarmenscholen",
"luitenant-kolonel in het Oostindisch leger",
"zaalmoeder jongens",
"klein-pandjeshuis",
"inbrenger van de lommerd",
"kistenlakker",
"inspecteur van administratie bij de Marine",
"politoerwerker",
"beheerder",
"makelaar en timmerman",
"kleine-kinderschool",
"landschapsschilder",
"molenmaker",
"employé Nederlandsche Bank",
"baardknipper",
"vurenmaker",
"pennenbereider",
"winkeljongen",
"zwartselbrander",
"stuurman ter zee",
"dokter en chirurgijn",
"scheerder",
"koopman in lompen",
"poorder",
"festonier",
"houder van een museum",
"voddenzoeker",
"stoker op de spoorweg",
"gepensioneerd predikant",
"gruttersbediende",
"meisjesmoeder",
"koopman in granen",
"linnenmoeder",
"linieerder en boekbinder",
"mangelwerkster",
"toneelbediende",
"bediende en tabakswinkelier",
"waterverkoopster",
"zandwerker",
"publieke verkoper",
"steenwerker",
"aanspreker en winkelier",
"wolspinner",
"handel in horloges",
"melkman",
"wolkamster",
"loper aan het telegraafkantoor",
"hoofdcommies bij de posterijen",
"visiteur der accijnzen",
"sloepenveger",
"schilder en kunstlakker",
"roeier",
"aanspreker en kerkdienaar",
"koopman fabrikant en winkelier",
"winkelier en kleermaker",
"Buitengasthuis",
"sociëteitskastelein",
"volwerker",
"raffinadeur",
"koek",
"kleermakersknecht",
"schoenmaker en militair",
"graveur en winkelier",
"industrieel",
"oudroestkramer",
"spoorwegbeambte en portier",
"schuitenverhuurder",
"surnumerair bij de Registratie",
"stadssmid",
"boekhandelaar",
"verificateur in- en uitgaande rechten",
"commandeur en scheepstimmerman",
"commissionair in handschoenen",
"ebonist",
"keurmeester van de haring",
"portier Oudemannenhuis",
"klerk bij de Marine",
"scheepstoetuiger",
"koperdraadtrekker",
"acrobatisch kunstenaar",
"rijknecht",
"expediteur",
"diamantversteller",
"stalloper",
"directeur der Koninklijke Academie",
"visverkopersknecht",
"schoenverkoper",
"fruitman",
"huisknecht",
"polderarbeider",
"zetter der belasting",
"timmermansknecht",
"grutmolenaar",
"turf- en houtkoopman",
"brillenmaker",
"aanspreker en schoenmaker",
"zeevarend bij de Marine",
"directeur der Domeinen",
"drogerijenmolenaar",
"metaaldraaier",
"draaier",
"bode van de Waalse Kerk",
"kerkknecht",
"bediende in een fabriek",
"confiseur",
"mestspecie",
"kosteres",
"plaatdrukker",
"naaister",
"poldermeester",
"bediende in een tabaksfabriek",
"fabrikant en koopman",
"bode tot het Nut iedereen",
"modewerkster",
"student medicijnen",
"houtplaatzager",
"militair hoefsmid",
"kassier bij de Nederlandsche Bank",
"commissaris van het Zaandammer Veer",
"hoedenwinkelierster",
"koekverkoper",
"pellersknecht",
"hoofdsuppoost Huiszittenhuizen",
"negotiante in groente",
"fabrieksleerling",
"geëmployeerde bij de schouwburg",
"geelgietersleerling",
"mosterdfabriek",
"horlogeleerling",
"brandspuitslangenmaker",
"slaapsteehoudster",
"mineur",
"koopman in koekgoed",
"kistenmakersaffaire",
"koopman in juwelen",
"gepensioneerd schout-bij-nacht",
"pettenverkoopster",
"warmoeziersknecht",
"pakker voor de handel",
"Oostindisch ambtenaar met verlof",
"scheepmaker",
"reder",
"spoorwegman",
"graanboersknecht",
"bij de belastingen",
"deurwaarder bij de belastingen",
"onderwijzer en organist",
"inleghuis",
"waterschippersknecht",
"inktfabrikant",
"opzichter rijksweg",
"oubliebakster",
"gepensioneerd luitenant kapitein-ter-zee",
"koopman en commissionair in effecten",
"gealimenteerd",
"verlakschilder",
"stangenmaker",
"sergeant",
"sigarenmaakster",
"handelsman",
"modenaaister",
"in bloemen",
"winkelierster in aardappelen",
"mr. broodbakker",
"rijksboekhouder",
"aan boord Z.M. fregat Prins Hendrik",
"afvalverkoper",
"koperslager en stadskorendrager",
"agent van politie",
"suikerbakkersknecht",
"advocaat-generaal",
"zwartster",
"koorzanger",
"paraplumaker",
"hooiwerker",
"orgelstemmer",
"grutterij",
"groenteknecht",
"scheerder en aanspreker",
"bij de Rhijnspoorweg",
"bediende derde klasse in het hospitaal",
"in kant",
"kastenmaker",
"commissionair in assurantiën",
"verswaterslijtster",
"vijlenkapster",
"zakkenplakker",
"schipper",
"zittende onder de brug",
"kolfbaanmatmaker",
"winkelierster in garen en band",
"koetsier en turfdrager",
"speelgoedmaakster",
"winkeldienster",
"fruitvrouw",
"klein commissionair",
"juwelier en diamantslijper",
"arts",
"leerling stuurman",
"mr. schilder",
"pettenwinkelier",
"handelaar in aardappelen",
"bediende bij de Bank van Lening",
"kunstrijder",
"kleinschooltje",
"pluimenwasser",
"krantenvrouw",
"daghuurder",
"bakkersjongen",
"metaalgieter",
"werker in de handel",
"in groente",
"med. student",
"koffienering",
"luitenant-ter-zee",
"koopman in specerijen",
"danskunstmeester",
"boekdrukkersbediende",
"houtzaagmolenaar",
"kantoorhouder",
"kantenwaster",
"violenreparateur",
"procuratiehouder",
"geëmployeerde bij een kruidenier",
"lakstokersknecht",
"koopvrouw in aardappelen",
"controleur bij het kadaster",
"rijtuigarbeider",
"geëmployeerde bij de Hollandsche Gasfabriek",
"bezemmaker",
"kartonwerkster",
"muziekleraar",
"op de vismarkt",
"schoenmaker en turfdrager",
"kleermaker en aardappelen",
"biljartmakersknecht",
"breimoeder",
"deurwaarder Rijksbelastingdienst",
"karreman",
"bontwerker",
"sergeant-majoor hoornblazer",
"glasbuiger",
"rentenierster",
"kaartenschilder",
"assistente-ziekenmoeder",
"huis- en scheepssmid",
"visiteur belasting",
"kantoorknaap",
"lithograficus",
"ventster",
"manufacturen",
"bankopsluiter",
"tapijtwerker",
"gepensioneerd loodsman",
"postiljon en tapper",
"suikerraffinadeur",
"wachter Rhijnspoorweg",
"koopman in ganzen",
"binder",
"beestenslachter",
"ankersmid",
"fabrikant in borstels",
"oliefabrikant",
"schipper bij de Marine",
"farmaceut",
"scheepskapitein",
"koopman in steenkool",
"hofbeambte",
"directeur van paardrijders",
"koffiehandelaar",
"scheepssloper",
"assistent",
"touwspinner",
"scheepsbouwer",
"schijvenschuurder",
"commissionair in granen",
"fabrikante van patentolie",
"stoomfabriek",
"papierdrukkersknecht",
"boekdrukkersgezel",
"commissionairsdienaar",
"winkelier in tabaksartikelen",
"onderwijzersleerling",
"loodgietersjongen",
"hoogleraar",
"theewerker",
"boedelschikkersknecht",
"werkzaam tractaatgenootschap",
"koster van de Engelse kerk",
"theoloog",
"verguldersknecht",
"koopman in eieren",
"zeepzieder",
"winkeldame",
"makelaar",
"wagendrager",
"zadel- en rijtuigmaker",
"gezelschapsdame",
"rietwerker",
"zijden-koordmaker",
"surnumerair bij de Registratie en Domeinen",
"papierkoper",
"kastenmakersjongen",
"verver",
"drager",
"collecteur loterij",
"goudwerker",
"koloniaal",
"onderwijzer en binnenvader",
"gezel",
"kuiper bij de koopvaardij",
"passementerie",
"verstelster",
"timmerman en molenmaker",
"luitenant der artillerie",
"commissaris",
"pachter van het veer Blauwhoofd",
"koopman en makelaar",
"leerlooister",
"kaartenfabrikant",
"lappenkoper",
"hellebaardier",
"steenbikster",
"timmerman en houtkoper",
"buksjager",
"dekenwasvrouw",
"sjouwersknecht",
"fijnsmid",
"gepensioneerd kapitein luitenant-ter-zee",
"instrumentmakersknecht",
"mutsenwinkel",
"kalkwerker",
"mandenmaakster",
"dienstbode en naaister",
"leesbibliotheekhouder",
"tuinier",
"stoker op een boot",
"assistent-stadsarchitect",
"griffier",
"rijksvisiteur",
"hoeden- en pettenwinkel",
"vrijman",
"beeldhouwersleerling",
"dagloner",
"koffermakersknecht",
"komediant",
"katoenventster",
"eerste luitenant bij de mariniers",
"draaister",
"varkensslachter",
"winkelier en leestenmaker",
"winkelier in turf en hout",
"in ijzerwaren",
"gepensioneerd assistent",
"pompenmaker",
"leerlooier",
"oud-Oostindisch ambtenaar",
"sigaren",
"wattenwerker",
"rafactiemeester",
"beurtschipper op Arnhem",
"zeevarend timmerman",
"studieus",
"kruideniersknecht",
"debitant",
"juweliersknecht",
"werkmeid",
"marmerpolitoerder",
"wasvrouw",
"winkelier in blaasbalgen",
"fabrikant in paardenhaar",
"koopman in sigaren",
"fungerend onderbaas",
"koopman in poppengoed",
"koopman in fruit",
"scheepsreder",
"wasser",
"koffiemaker",
"steen- en beeldhouwer",
"koorddanseres",
"mr. mouter",
"houtverkoper",
"koopvrouw in brandstof",
"commissionair en besteller",
"bloemenfabrikant",
"ambtenaar bij de belastingen",
"reizend commissionair",
"winkelierster in lint",
"aan de askar",
"varkensslagersleerling",
"goudsmederes",
"stoelenbekleder",
"steenkopersknecht",
"kastenmakersknecht",
"klerk ter griffie",
"fabrikant van filtreermachines",
"bosjesmaker",
"krijgsman",
"voorlezer en voorzanger Zuiderkerk",
"doctor in de natuurkunde",
"kruiersaffaire",
"stadsopkorter",
"stadsgagement",
"groenman",
"mechanicus en horlogemaker",
"beendraaier",
"glasdecorateur",
"ijker",
"kindje in huis (doorgehaald: dienstmeid)",
"loodgietersleerling",
"loterij",
"glasblazersknecht",
"smidsknecht en Oost-Indisch militair",
"doekenwasser",
"huidenzouter",
"schoolmeester",
"in kousen",
"vader van het Diaconessenhofje",
"winkelier in glas",
"kapper en aanspreker",
"hoenderkoper",
"luitenant-kolonel",
"behangster",
"diamantslijper en koopman",
"leidekker",
"esseyeursknecht",
"zeesjouwer",
"winkelierster in groenten",
"assurantiebezorger",
"tabakszaken",
"pleetwerker",
"kantoorbediende en winkelier",
"bonne",
"melkventer",
"acteur",
"mastenmaakster",
"boter- en kaasbediende",
"koopman en tapper",
"medicus",
"commies bij het Ministerie van Financiën",
"kantoor Bank van Lening",
"korsettenmaakster",
"modewinkel",
"koopman in brandhout en turf",
"patentoliefabriek",
"zaagselkoper",
"voerman in turf en hout",
"mr. loodgieter",
"winkelier en aanspreker",
"schoonmaakknecht",
"gebrande-stroopmakersknecht",
"draaier en winkelier",
"opzichter bij de Marine",
"koopman en reder",
"staatsraad",
"wekster",
"klavierstemmer",
"geëmployeerde bij het ziekenhuis",
"koopman in turf en hout",
"oliewerker",
"pianomakersknecht",
"koffiebrandster",
"schoorsteenvegersbaas",
"rechter arrondissementsrechtbank",
"leermeester in school",
"keurmeester",
"Franse-kerkbode",
"Rijnkapitein",
"brugwachter",
"koopman in poppen",
"schoonmaker",
"schoenmaker bij de Marine",
"opticus",
"ingenieur van 's rijks stoommachine",
"touwslager",
"garnizoen Den Haag",
"eerste luitenant deljud't (?)",
"blind",
"besteller",
"waldieper",
"galanteriewerker",
"kuipersgezel",
"mr. zadelmaker",
"kroeghoudster",
"fabrikant in koper",
"scheepstuiger",
"beurtschipper op Amersfoort",
"melkkoper",
"oudroestverkoper",
"diamantjongen",
"kantoorbediende en tapper",
"expert in zeezaken",
"bakkersgereedschapmaker",
"stoffeerder",
"zeeman aan boord Prins Frederik der Nederlanden",
"galanteriekramer",
"leerkoopster",
"winkelier in schoenen",
"stoker in chemicaliën",
"in kurken enz.",
"kindermeisje",
"leerling spekslager",
"tabakswerkersbediende",
"verguldersjongen",
"mr. goud- en zilversmid",
"berger aan het Zeerecht",
"behanger en matrassenmaker",
"winkelier in verfwaren",
"kapper",
"schilder en sergeant nachtwacht",
"zeelader",
"pottendraaier",
"hospitaalknecht",
"ossenslager",
"stadsschuitbaas",
"goederenverhuurder",
"machinewerker",
"brugophaalster",
"koffiesiroopbrander",
"loterijdebitant",
"stadsarmenschool kwekeling",
"toneelspeelster",
"augurkjeskoper",
"tabaksknecht",
"slijter in wijnen",
"rijtuigschildersaffaire",
"brandersknecht",
"gepensioneerd visiteur",
"snuiffabriek",
"portier",
"winkelier in pijpen",
"wagendraaier",
"kandidaat-procureur",
"rijksbeambte",
"professor in de geneeskunde",
"essayeursknecht",
"waarzegster",
"winkelier in hout en turf",
"winkelier in linnen",
"Brits consul",
"graanboer",
"korendrager",
"schippersknecht",
"Weidenthal",
"paasbroodbakker",
"kunsthaarwerkster",
"onderwijzer in de zeevaartkunde",
"varkenmarkt",
"stalknecht",
"directeur der Posterijen",
"chef de bureau",
"bijbelbinder",
"glaskopersknecht",
"verpleegzuster",
"draaier en drogist",
"directeur Brandwaarborg",
"hypotheekbewaarder",
"voorlezer",
"kruideniersleerling",
"ambtenaar gevangenissen",
"koopman in turf",
"geëmployeerde bij Natura Artis Magistra",
"mr. schilder en winkelier",
"oud-ijzer- en voddenkoper",
"kunstschildersleerling",
"meubelmakersgezel",
"veehoudersleerling",
"fabrieksarbeidster",
"winkelier in komenij",
"tweede stuurman",
"steenbakker",
"schilderes",
"in marinedienst",
"werkman en dagloner",
"hoogleraar aan het Atheneum Illustre",
"employé",
"entrepotknecht",
"kunsthandelbediende",
"karknecht",
"aan de stadsverlichting",
"luitenant-kwartiermeester",
"arbeider Rhijnspoorweg",
"krameres",
"timmerman en huisbewaarder",
"koopman in vogels",
"stalhoudster",
"tapper en suikerbakker",
"melkknecht",
"ijzerdraaier",
"slepersknecht",
"koster van de Sionskapel",
"reizend kantoorbediende",
"baangr't (?)",
"koffiezoekster",
"in augurken",
"pantoffelmaker",
"fruitwinkelier",
"leerling metselaar",
"landman",
"toneliste",
"bloemenverkoper",
"uitdraagster",
"naaldenmaker",
"employé gasfabriek",
"directeur sociëteit",
"voorman",
"korenwacht",
"roestkoopman",
"rijstpellersknecht",
"directeur paardenspel",
"koopman in vodden",
"korporaal",
"inbrenger stadsbank",
"begrafenisbode",
"gouddraadtrekker",
"tabakswinkel",
"tappersknecht",
"suikerbakster",
"kastenmaker en mandenmaker",
"buffetjuffrouw",
"kielenmaker",
"vroedmeester",
"metselaar en nachtwacht",
"azijnstokersknecht",
"orkestmeester",
"burgerlijk ingenieur",
"kastelein",
"restauratiebediende",
"scheepmakersleerling",
"koopman in brandhout",
"koek- en banketbakkersbediende",
"fruitkopersknecht",
"fabrikant in inkt",
"hervormd krankbezoeker",
"knecht bij een zolderbaas",
"buitengewoon commies",
"kramerijen",
"boomkweker",
"heibaas",
"chemist",
"moeder Ned. Port. Isr. Weeshuis",
"doofstom",
"restauratie in de Hollandsche Schouwburg",
"leerling telegrafie",
"scheepsbouwer en werktuigkundige",
"klerk bij de Rhijnspoorweg",
"kelner",
"officier bij de Marine",
"melkslijtster",
"kruidenier",
"consul van Portugal",
"krantenbezorger",
"loodgietersknecht",
"huisbesteedster",
"advocaat en secretaris",
"houthandelaar",
"geëmployeerde tweede klas bij het garnizoen",
"molenaar",
"gerechtsdeurwaarder",
"nachtwaakster",
"politiediender",
"timmermansgezel",
"ossendrijver",
"marokijnwerker",
"azijnbrouwer",
"preceptor gymnasium",
"dagwerker",
"witwerker",
"leerling varkensslager",
"gepensioneerd majoor-apotheker",
"koopman en huisbewaarder",
"kapitein van het derde regiment",
"haarsnijder",
"aannemer van Publieke Werken",
"groot zilverwerker",
"schuitensloper",
"planter",
"kammenmaker",
"turfknecht",
"boerenknecht",
"inspecteur der registratie",
"bakker en aanspreker",
"turfkoper",
"waterschipper",
"telegrafie élève Hollandsche Spoorwegmaatschappij",
"commissaris van een veer",
"schuitenvoerster",
"prentmaker",
"poppenmaker",
"roeier bij de Havendienst",
"instrumentmaker en chirurg",
"kleerkoopman",
"convooiloper",
"aswerkster",
"poffertjesbakker",
"haringpakker",
"smidsknecht en noodhulp nachtwacht",
"directeur Nederlandsche Bank",
"zalenverhuurder",
"gepensioneerd dienaar politie",
"scheerdersleerling",
"zeeman en timmerman",
"winkelier in grutterswaren",
"brouwerssleper",
"cichoreifabrikant",
"schoen- en mandenmaker",
"kok bij de cellulaire gevangenis",
"beschuitwinkel",
"kleinschoolhoudster",
"opzichter bij de gasfabriek",
"eerste luitenant derde regiment dragonders",
"winkelier en touwslager",
"metselaar enz.",
"geagreëerde bij de rijksaccijnzen",
"steenkolenwerker",
"Oostindisch ambtenaar",
"slopersknecht",
"wachtmeester dragonders",
"koekenkoopvrouw",
"schipper en koopman",
"roosjessnijdersknecht",
"brandwacht op het stadhuis",
"melkkoopman",
"zeilenmaker",
"water- en vuurvrouw",
"winkelier in aardewerk",
"suppooste",
"klokkenmakersknecht",
"hoedenmaakster",
"schoenenverkoopster",
"theaterdirecteur",
"modestikster",
"azijnfabrikant",
"zeevaarder",
"agent der stoomboot en commissionair",
"onderwijzer in de muziek",
"huiswachter",
"heelsvrouwe",
"verkoper van potten",
"genees- en heelkundige",
"depothouder van bijbels",
"klerk bij de in- en uitgaande rechten",
"viskopersknecht",
"handel",
"houtvlottersknecht",
"tuinierster",
"werkjongen",
"in lakens en manufacturen",
"omloper met gebak",
"huidenzoutersknecht",
"bediende in een magazijn",
"maakt papieren dozen",
"geëmployeerde bij de Hollandsche Spoorwegmaatsch.",
"dresseur",
"reizend met een koekkraam",
"betaalmeester Nederlandse schuld",
"strohoedenmaker en barbiersknecht",
"kinderspeelgoedmaker",
"diamantmolen",
"fabrikant en winkelier",
"gezelschapsjuffer",
"magazijnmeester bij de spoorwegen",
"handelsvolontair",
"apotheker",
"loodasgieter",
"ontvanger der belastingen",
"aardappelhandelaar",
"in turf en hout (doorgehaald: boekhandelaar)",
"manufacturenzaak",
"opzichter bij het Paleis voor Volksvlijt",
"koordmaker",
"stadskarreman",
"besloten winkel in thee",
"tuinder en koopman",
"lijstenmaker en letterzetter",
"portionstafelhouder",
"poffertjesbakster",
"wagenmakersknecht",
"knecht in Natura",
"geëmployeerde bij het dok",
"komenijs",
"zemelenkoper en pandjeshuishouder",
"secretaris Hollandsche Spoorwegmaatschappij",
"negotieman",
"zeevarend in landsdienst",
"dame van gezelschap",
"boekverkopersjongeling",
"agent bij de Maatschappij van Landbouw",
"lijstenmaker",
"scheepslegger",
"lombardhoudster",
"kapitein-adjudant",
"meisje",
"winkelier in turf",
"jollenvoerder",
"assurantie",
"filtreermaker (filter)",
"gepensioneerd luitenant-kolonel",
"stadsarbeider",
"bouwmachinist",
"viskruier",
"zadelmakersknecht",
"scheepsleerling",
"gegageerd militair",
"tapper en kleermaker",
"adjunct-kamerbewaarder stadhuis",
"orgelman",
"kosthouder",
"vertaler",
"prentenkleurder",
"raadsheer",
"parfumeur",
"poelier",
"kermisreizigster",
"onderwijzer in de wiskunde",
"Walenkerkbode",
"gepensioneerd 's Lands werf",
"instrumentslijper",
"schuitenvoermansknecht",
"poldergast",
"student theologie",
"commies bij de directe belastingen",
"moeder",
"naaister en breister",
"inlegger",
"schuiermaker",
"vervaardiger van [...]",
"korenkopersknecht",
"brouwersleerling",
"boekenverhuurster",
"kleinhandel in snoepgoed",
"commies-opziener",
"tabaksmeester",
"augurkjesinleggerij",
"chirurg en dentist",
"conducteur bij het Hollandsche Spoor",
"oogheelkundige",
"koster",
"handel in drogerijen",
"timmerman en winkelier",
"onderwijzer in de wis- en zeevaartkunde",
"registratieontvanger",
"leerlooiersknecht",
"directeur Algemene Expeditie Maatschappij",
"spoorwegarbeider",
"korenverscheper",
"orgelmaker",
"belastingambtenaar",
"metselaar en muzikant",
"azijnmakersbediende",
"werkman bij de Keulse Waag",
"gegageerd stedelijke dienst",
"inspecteur",
"schrijnwerkersknecht",
"koper- blik- en zinkwerker",
"justitiedienaar",
"leerkoper schoenen- en laarzenmaker",
"sergeant der infanterie",
"keurder van brood en meel",
"in aardewerk",
"stadsaanplakker",
"ambtenaar bij de Stads Bank van Lening",
"zagenvijler",
"schilder en aanspreker",
"koffiebereider",
"huisnaaister",
"besteller bij de spoorwegen",
"schoolhouder",
"wasster",
"badmeester",
"tegelklopper",
"chirurgijn en vroedmeester",
"houtdrager",
"herenoppasser",
"bureaucommies bij de Waarborg",
"geëmployeerde bij het stedelijk loodswezen",
"petjesmaakster",
"bode en aanspreker",
"tuiger",
"hoofdonderwijzer",
"suppoost in Artis",
"linnennaaister",
"schilder en tabaksverkoper",
"stalbaas",
"haarbereider en wolkamster",
"keukenvader",
"molenaarsleerling",
"tabaksgrossier",
"klerk bij de belastingen",
"geëmployeerde bij de waterleiding",
"gouverneur",
"schoenlapper",
"strijkijzermakersknecht",
"siroopfabrikant",
"aan de gasfabriek",
"dienaar sloeproeier",
"in brandstoffen",
"leerling koopmansknecht",
"tabaksbereider",
"koopman en venter",
"stoker aan boord",
"machinesteller",
"houtkopersknecht",
"buitengewoon opziener",
"komenijswinkel",
"bij de posterijen",
"spiegelkoper",
"negotie in aardappelen",
"peltenbereider",
"eerste klerk van hypotheken",
"gepensioneerd controleur der belastingen",
"geweermaker",
"stadsdrukkerijbediende",
"kantoor van administratie",
"smidsgezel",
"bewaarder",
"gordijnenwaster",
"gistventer",
"lithograaf en tekenmeester",
"was op doen",
"zeedienst",
"aardappelaffaire",
"directeur ambachtsschool",
"venter in vruchten",
"straatveegster",
"employé bij de Marine",
"ondernemer postwagendiensten wagenmaker enz.",
"doodkistenmaker",
"dienstmeid",
"schipper in aardappels",
"koopman in zakken en pakmatten",
"dagbladschrijver",
"sociëteitsbediende",
"loterijgedelegeerde",
"ambtenaar bij de Marine",
"slepersjongen",
"reder en cargadoor",
"museum anatomische voorwerpen",
"broodverkoopster",
"ambtenaar rijksmiddelen",
"in wijnen",
"klokkenschilder",
"linieerder en boekdrukker",
"koffiehuisbediende",
"kassier en commissionair",
"schilder",
"stalhouder en tapper",
"vuurwerker",
"stadsschermmeester",
"apothekersbediende",
"klerk van een begrafenisonderneming",
"consul",
"knecht bij de publieke verkopingen",
"korenverschieter",
"sleepbootkapitein",
"adjunct-landmeter",
"venter in sigaren",
"werkvrouw",
"bode bij een doodgraverscollege",
"schilder en spekslager",
"redemptorist",
"borstelmaker",
"onderwijzeres aan het Blindeninstuut",
"griffier bij het provinciaal gerechtshof",
"inspecteur stadsasbelt",
"goud- en zilverwinkelier",
"tafelbereider",
"bediende in kaas",
"portretmaker",
"tapper en gistverkoper",
"mr. in de rechten",
"dekennaaister",
"vergulder op zijde",
"agent der Spaanse schatkamer",
"winkelhouder",
"vetsmelter",
"modelmaker",
"beambte bij de Rhijnspoorweg (R.S.S.)",
"dozenmaker",
"nering in water en vuur",
"kantoorbewaarder",
"portretschilderes",
"negotie",
"mandenmakersaffaire",
"advocaat",
"apotheekhouder",
"huiswerker",
"ontvanger der registratie",
"huisbewaarster",
"scheppenmaker",
"likeurstokersknecht",
"timmerman bij de Marine",
"aanplakker",
"assistent-broodweger",
"marskramer",
"bankhouder",
"kruidenierster",
"foedraalmaker",
"apotheek",
"mangelaffaire",
"architectleerling",
"in modeartikelen",
"muziekmaker",
"zwavelstokkenmaakster",
"scharensliep",
"secretaris Kamer van Koophandel",
"kruister",
"matroos op het schip Sambre",
"asfaltwerker",
"organist en onderwijzer",
"op 's Rijkswerf",
"kooienmaker",
"magazijnknecht",
"bestuurster",
"rechter",
"dagueriticus",
"in hoeden",
"noodhulp besteller",
"aardappelnegotie",
"deurwaarder",
"beambte bij de Rhijnspoorweg (R.C.S.)",
"steenkolenmeter en -weger",
"boorster",
"steenkolenverkoper",
"plaatzagersknecht",
"hoedenmaker",
"verfwaren",
"kraamwaakster",
"kapitein in het Oostindisch leger",
"aanspreker en courantombrenger",
"bewaringhoudster",
"koopman in zaagsel",
"zemelenkopersknecht",
"kapster",
"redacteur der Amsterdamsche Courant",
"geëmployeerde bij het postkantoor",
"schoolbovenmeester",
"schaftkelder",
"minister financiën Oostenrijk",
"nachtwacht en werkman",
"hoedjesmaakster",
"deurwaarder bij de arrondissementsrechtbank",
"bediende in een drukkerij",
"tabaksbinder",
"hooiweger",
"siroopbrander",
"turffactor",
"gepensioneerd luitenant-kolonel der artillerie",
"bierhandelaar",
"suikerbakkersjongen",
"diamantslijpersknecht",
"visiteur uit- en invoerrechten",
"commissionair in tabak",
"kantoor- en boekbindersbediende",
"boodschaploper",
"apotheker en chemist",
"warmoezier",
"spinner",
"knecht bij een bakker",
"koopman in honden",
"brandspuitmaker",
"leerknecht",
"sleperij",
"belast met het onderwijs",
"in galanterieën",
"kantoorboekwinkel",
"vleeshuis",
"groentewinkelierster",
"bode van het werkhuis",
"geëmployeerde bij het garnizoenshospitaal",
"schuitenknecht",
"scheepstouwer",
"boterkopersbediende",
"mr. kleermaker",
"scheepsjagersknecht",
"directeur Assurantie Comp.",
"dokter in de medicijnen",
"violist van Z.M. de Koning",
"winkelier in kramerijen",
"rustend wagenmaker",
"foerier regiment Jagers",
"pottenkoopman",
"boekhandelsknecht",
"draaiersleerling",
"agent van Van Gend en Loos",
"kruiwagenverhuurder",
"klein-pandjeshuishoudster",
"plafonneur",
"ijzersmelter",
"winkelier in groenten",
"lampenmakersknecht",
"venter in turf en hooi",
"handwerkonderwijzeres",
"bediende bij de Israëlitische gemeente",
"senator",
"verversglazenmaker",
"intern",
"sigarenmakersleerling",
"kashouder",
"haarwerker",
"gasthuis",
"daggelder",
"veerschipper",
"leerkoper",
"dagbladredacteur",
"schuiermakersknecht",
"duimstokkenmaker",
"luitenant bij de Marine",
"commies bij de Nederlandsche Bank",
"commandeur",
"handelsbediende",
"opziener van orgels",
"oppasser bij de kerk",
"in mode",
"kleerkamer",
"schuiermaakster",
"aanspreker",
"aanspreker en kleermaker",
"oud-chirurgijn",
"directeur salon varieté",
"gepensioneerd Westindisch hoofdambtenaar",
"pachter der tolbrug",
"koopman in schoensmeer",
"directrice blindeninstituut",
"officier van gezondheid",
"stokvisbeuker",
"koopman in verse waar",
"behangersknecht",
"slijtster",
"kaasventer",
"opzichter van pakhuizen",
"kunstschilder en graveur",
"machine-katoenspinner",
"geweermaker bij de Marine",
"boomsluiter",
"beëdigd landmeter",
"schuitenjagersknecht",
"knecht in een turfloods",
"slotenmaker",
"slijkster",
"boedelschikker",
"broodbakkerijhoudster",
"ordonnans",
"ijzersmid",
"bezemmaakster",
"geëmployeerd ambtenaar",
"pandhuishouder",
"directeur Franse opera",
"draaiersknecht",
"fruitkoopvrouw",
"opzichter bij de Rhijnspoorweg",
"winkelier in pillen",
"schachtenfabrikant",
"brouwer",
"muziekoplegger",
"Rhijnspoorwegpoetser",
"klerk bij de griffie",
"tabakswerker",
"houtmolen",
"beurtschipper op Middelburg",
"directeur der Registratie van Noord-Holland en Utr",
"genees- heel- en verloskundige",
"in balcostumes",
"koksbediende",
"artsenijbereider",
"dokter",
"gasfabriekstoker",
"consul van Pruisen",
"employé der telegraaf",
"kolonel der schutterij",
"winkelbediende",
"herborist",
"onderwijzer Blindeninstituut",
"baleinmaker",
"klerk bij de rechtbank",
"aspirant-ingenieur bij de Marine",
"kaarsenmaker",
"pepermuntmaker",
"tweede schipper op de stoomboot",
"corrector",
"tabaksspinner",
"behanger en tapper",
"buitenbezorger",
"werker in de gasfabriek",
"machinist-leerling",
"kalkdrager",
"commissionair koopman",
"praktizijn",
"commissaris bij het Delftse en Haagse Veer",
"zwepenmakersknecht",
"koopvrouw in lompen",
"marinepensioen",
"koopvrouw",
"porder",
"politie",
"marinedienst",
"dienstbode",
"borduurwerker",
"wever",
"grootwerker",
"ambtenaar op wachtgeld",
"pianohandelaar",
"voetbode",
"secretaris",
"logementbediende en bode",
"koopvaardijkapitein",
"advocaat en assuradeur",
"muziekmeesteres",
"pettennaaister",
"wafelbakster",
"dessertwerker",
"artiste",
"sergeant der mariniers",
"instructeur",
"spoorwegexpediteur",
"courantenbezorger",
"surnumerair bij de belastingen",
"dansmeester",
"kuiper en koopman",
"smidsknecht",
"groentenopkoper",
"mutsenwasvrouw",
"gepensioneerd ambtenaar",
"zetkastelein",
"portretschilder",
"in gouvernementsbetrekking",
"kurkenmaker",
"besteder",
"courant",
"visverkoopster",
"auteur",
"winkelierster in modes",
"schippersvrouw",
"zuurverkoopster",
"viltwerker",
"varkensslagersknecht",
"commensalenhouder",
"ondermeester",
"verfwarenkoopman",
"ambtenaar Rhijnspoorweg",
"kantoor",
"directrice werkhuis",
"dienstbaar stoombootmij.",
"knecht in het scheikundig laboratorium",
"winkelier in matten",
"fruit",
"visverkoper",
"knecht in turf",
"loodasbrander",
"vader Ned. Port. Isr. Weeshuis",
"opkoopster",
"kruiersknecht",
"glanzer en aanspreker",
"kosthuishoudster",
"heel- en vroedmeester",
"diamantdraaier",
"hulponderwijzer",
"glanzersknecht",
"banketbakkersbediende",
"messenmaker",
"timmerman en metselaar",
"geëmployeerde bij het Gesticht",
"horlogemakersbediende",
"geëmployeerde bij het Rijksarchief",
"koper",
"varensman",
"fitter",
"straatveger",
"bewaarder bij de Marine",
"geëmployeerde bij de Nederlandsche Handelmaatsch.",
"pachter der stadsstallen",
"modeleerling",
"komenijsaffaire",
"krijgsdienst West-Indië",
"koopman en kruier",
"bierkoper",
"wasvrouw en tapster",
"makelaarster",
"vernisser",
"zwaardveger",
"polderman",
"zanddrager",
"winkelierster in manufacturen",
"koopman en schilder",
"steenkopersbediende",
"staljongen",
"boekverkoper en pianohandelaar",
"matroos derde klas",
"oud-ijzerhandelaar",
"mr. ijzergieter",
"visschoonmaker",
"koopman in kleren",
"geëmployeerde op een kantoor",
"min",
"venter in katoenen",
"kruier en schoenmaker",
"glasverkoopster",
"chocoladewinkelier",
"brandspuit en winkelknecht",
"ornamentmaker",
"schepenloper",
"afslager",
"tentenverhuurder",
"scheepstimmeman",
"ziekenfondsknecht",
"dienstbaar Nederlandsche Bank",
"scheepvaartkapitein",
"spiegelverfoeliër",
"koopvrouw in aardewerk",
"spuiter",
"gepensioneerd schipper",
"koekbakkersbediende",
"linnenkopersknecht",
"muzikant en kleermaker",
"in dienst van de diaconie",
"kanonnier",
"scheepsmatroos",
"havenknecht",
"handelscommies",
"winkelier en goudsmid",
"in hengelstokken",
"boekbinder",
"geëmployeerde bij de spoorwegen",
"in zeedienst",
"opperman en nachtwacht",
"zoutwerker",
"wapenfabrikant",
"spoorwegwerkman",
"scheepsbevrachter",
"cipier der gijzeling",
"molmvervoerder",
"pottenmaker",
"in parfumerie",
"ambtenaar-leerlingbediende",
"turfnering",
"koopman en drogist",
"confituurwerker",
"kammoeder voor de meisjes",
"slijtersknecht",
"muziekonderwijzeres",
"deurwaarder bij de rijksbelastingen",
"directeur der Amsterdamsche Courant",
"wasserij van zijden stoffen",
"commandeur op 's Rijks werf",
"huisbewaarder en aanspreker",
"aan de politie",
"apothekersleerling",
"commissionair en expediteur",
"groenboerman",
"inbrenger Stads Bank van Lening",
"makelaarsbediende",
"majoorsweduwe",
"verversknecht",
"magazijnbeheerder",
"manufacturier",
"grondeigenaar",
"werkt bij de Handelmaatschappij",
"slepersaffaire",
"ornamentsnijder",
"zeemansleerling",
"nering in vodden",
"pantoffelwerkster",
"vice-consul van Rusland",
"diamantslijpster",
"adjunct-boekhouder Nederlandsche Bank",
"boekhandel",
"kurkenmonteerder",
"kazerneknecht",
"oliekoekbakker",
"dienstbaar als min",
"neringdoende",
"plaatsnijder",
"horlogemaker en koopman",
"inlands kramer",
"biksteenklopper",
"lompen- en oudijzerkoopster",
"hoenderplukker",
"schoenmaker",
"koopvrouw in manufacturen",
"koloniale dienst",
"loper",
"touwbaander",
"suikerraffineerder",
"peller",
"tweede luitenant bij de mariniers",
"verhuurder van boeken",
"assuradeur en reder",
"opzichter bij het Rijksmuseum",
"behanger en aanspreker",
"kruiersvrouw",
"haarfabriek",
"vuilniskarreman",
"assistent bij de stadsaccijnzen",
"majoor",
"beëdigd vertaler",
"apotheker bij het Oostindisch leger",
"slachtwezen",
"diamant",
"steenkolenman",
"landswerfopzichter",
"blikschilder",
"wijnkopersbaas",
"commissionair gebakken steen in minerale wateren",
"marktschrijfster",
"volontair bij regiment",
"koperdraaier",
"water- en vuurwinkel",
"wielmaker",
"in gebak",
"diamantsnijdster",
"koperslagersgezel",
"administrateur",
"blank officier",
"arbeidster",
"vuilnisman",
"zwavelstokken",
"besteedster en winkelierster",
"in augurken en eieren",
"telegrafieseiner spoormaatschappij",
"kappersbediende",
"gildeknecht",
"volontair",
"kolenman",
"vogelkoopster",
"klerk bij de burgerlijke stand",
"kopergietersknecht",
"potter",
"oppasman",
"kopersmid",
"kachelsmid",
"aardappelkoper",
"houtkoopman",
"wattenkaardster",
"ondernaaimoeder",
"mangelwinkeltje",
"kalkmaker",
"machinist bij Kooij",
"brievenbesteller en debitant",
"student bouwkunde",
"slepersschipper",
"kassiersknecht",
"commissionairsknecht",
"baandersknecht",
"reizend voor handelszaken",
"inlands omloper",
"heiwerker",
"stoker bij de Haarlemmer spoorweg",
"palfrenier",
"broodfabrikant",
"mutsennaaister",
"toneeldirecteur",
"bomenmaker",
"binnenmoeder",
"besteller van goederen",
"aanlegster",
"commissionair en kantoorbediende",
"stadsopzichter",
"gepensioneerd bij de Marine",
"wachtmeester",
"slepersknecht en tapper",
"schippersknecht en korendrager",
"herenknecht",
"commissionair wisselzaken",
"banketbakkersleerling",
"fabrikant in parapluies",
"boekbinder en boekverkoper",
"diamantschijvenmaker",
"officier van gezondheid derde klasse",
"aanspreker en rouwwinkel",
"fabrikant in bessenwijn",
"ambtenaar hospitaal",
"bankhoudster",
"boomplanter",
"winkel in hout",
"diamantdraaister",
"nering",
"schouwman",
"rijtuigschilder",
"zoogster",
"perster",
"hoedenmakersbediende",
"houtzaagmolenaar en houtkoper",
"geoctroyeerde",
"verversgezel",
"boom- en mastenmaker",
"zager",
"tonelist",
"werkman op de ossenmarkt",
"sigarenhandelaar",
"goudwerkgezel",
"geëmployeerde ter directie politie",
"employé bij de spoorweg",
"wafelbakker",
"porseleinkraamster",
"spiegelmaker",
"binnendienst",
"breukbandenmaker",
"tabak",
"grutterswaren",
"reiziger en kantoorbediende",
"ondernemer",
"schrijver",
"kaaskoper",
"spoorbeambte",
"op een magazijn",
"kleurder",
"pottenventster",
"kleine-schoolhouderesse",
"portierster",
"oliekoekvrouw",
"schilder en huisbewaarder",
"commies bij het Rijks Entrepot",
"rijswerker",
"schilder en winkelier",
"palingvisser",
"zaalmoeder",
"aardappelkoopman",
"kandidaat tot de Heilige Dienst",
"spoorwegemployé",
"schoonmaakster",
"slagersknecht",
"linnenmeid",
"rijksontvanger",
"cipier",
"jongen",
"distillateur",
"geëmployeerde bij de Handelmaatschappij",
"koffermakersgezel",
"koopman in wandelstokken",
"voedster",
"geestelijke liefdezuster",
"logementhouder",
"bediende in een koffiehuis",
"opzichter Publieke Werken",
"zoutevisverkoopster",
"stadsboomplanter",
"assistent-bode der Nederlandsche Handelmaatsch.",
"koksleerling",
"bode bij de drenkeling-maatschappij",
"pakhuiswerker",
"gepasporteerd militair en kruier",
"violiste",
"reiziger",
"op de gasfabriek",
"kerkelijke bediening",
"winkelier in oud ijzer",
"haarkunstwerkster",
"in grof aardewerk",
"laarzenmaakster",
"wachtmeester tweede regiment dragonders",
"kappersleerling",
"strosnijder",
"musicus",
"papierfabrikant",
"apotheker en kuiper",
"groentekoopvrouw",
"kachelkoper",
"strofabrikant",
"sjouwer en nachtwaker",
"ijzergietersknecht",
"geëmployeerde bij de bank",
"claviger ? (clarigu)",
"metselwarenwinkelier",
"tapper en timmerman",
"koopman in kinderspeelgoed",
"kleermaker en zaakwaarnemer",
"boekverkoper en leesbibiliotheek",
"palfrenier en huisbediende",
"winkelier in koek",
"bakkersknecht",
"med. doctorandus",
"seinwachter",
"gepensioneerd hoofdambtenaar",
"luitenant-ter-zee eerste klasse",
"herenbediende",
"stedelijk boekhouder",
"koopvrouw in vogels",
"kandidaat tot de Heilige Kerk",
"koopman in glas",
"mr. koperslager",
"opzichter stadslantaarnverlichting",
"tapper en diamantsnijder",
"koopman en scheepsreder",
"stalmeester",
"commissionair en boekhouder",
"mr. kuiper",
"logementhoudster",
"taalonderwijzer",
"rouwwinkel",
"aan de stoombootmaatschappij",
"pottenmaakster",
"handelwerker",
"in goud- en zilverwerken",
"aswerker",
"koopvrouw in turf",
"bode bij een begrafenisfonds",
"waterbrenger",
"ambtenaar",
"geëmployeerde bij de havenmeester",
"stedelijke betrekking",
"in vaders affaire werkzaam",
"schatster",
"ter zee",
"melkster",
"goud- en zilverkashouder",
"turfdrager",
"redemptorist kwekeling",
"balletjesmaakster",
"tandarts",
"sigarenkistjesmaker",
"slijper",
"bij de Holl. Gasfabriek (onderingen bij de - ?)",
"ambtenaar bij de posterijen",
"tekenmeester en timmerman",
"zakkenmaker",
"lompenkoopster",
"geneeskundige en kaarsenmaker",
"zwakkenmoeder",
"schaftkantoorhoudster",
"werkman bij de pelmolen",
"duivenpost",
"in flessen",
"oudroestkoopster",
"kandidaat in de rechten",
"vissersknecht",
"stoelenmatter",
"in garen en lint",
"lantaarnopsteker",
"marktmeester korenmarkt",
"decoratieschilder",
"opzichter aan de Korenbeurs",
"mr. suikerbakker",
"winkelier in leer",
"directeur van het administratiekantoor",
"directeur stadswaterwerken",
"kastrandmaker",
"sjouwster",
"kerkdienaar",
"molenaarsbaas",
"schuitenmaker",
"lakenwinkel en winkelier",
"koopman in leder",
"bekleder",
"wagenknecht",
"werkman bij de Handelmaatschappij",
"manufacturist",
"marktbewaarder",
"geëmployeerde bij de Hollandsche IJzeren Spoorwegm",
"oud-ambtenaar",
"besloten winkelierster",
"in stokvis",
"leerling zeevaartschool",
"scheepmakersknecht",
"gaslantaarnopsteker",
"boekdrukkersknecht",
"bediende in het hospitaal",
"zaakgelastigde",
"kastenmaker en behanger",
"drogist en verfwinkel",
"koopman in stokvis",
"koffie-affaire",
"koopman in zuurwaren",
"kookvrouw",
"spoorwegbeambte",
"winkelier in ijzerwaren en scheepsbehoeften",
"konstapelsmaat",
"koopman en commissionair",
"stationwerker",
"stoelenmakersknecht",
"oud-officier",
"beëdigd translateur",
"neringwinkel",
"scheerdersknecht",
"mr. moutersknecht",
"machinist bij de Marine",
"grasmaaier",
"koopvrouw in haring",
"brandstofkoopman",
"koopman en ondernemer",
"plaatsvervangend rechter",
"meesterknecht bij de stadsdrukkerij",
"student botanie",
"kunstgraveur",
"tapisserie",
"koopman in beesten",
"steenhouwersleerling",
"pennenfabriek",
"aan de spoorwegexpeditie",
"bewaarder eerste klasse",
"magnetiseur",
"bierhuishouder",
"blikslager en aanspreker",
"adjunct-intendant",
"velkoper",
"waterdrager",
"bloemenkweekster",
"schuitenvoerdersknecht",
"lampenpittenmaker",
"reizend kantwerker",
"priester rooms-katholieke kerk",
"bediende tabakkoopnering",
"smidsjongen",
"winkelier",
"geëmployeerde bij de stoomvaartmaatschappij",
"koopman in rottingen",
"rijksambtenaar",
"knecht bij het Grootboek",
"kistenmaakster",
"zandvormer",
"magazijnloper",
"tegelbewerker",
"opperwachtmeester",
"commies bij de belastingen",
"plaatdrukkersknecht",
"werker bij de spoorwegen",
"scheepsarts",
"waternering",
"baggeraar",
"mr. timmerman",
"kinderschool",
"zwartbrander",
"opticien",
"bronzer",
"schaver",
"civiel ingenieur",
"beestensnijder",
"leerling stoombootwerf",
"papierwerker",
"brugophaler",
"winkelnering",
"winkel in pleisterbeeld",
"archivaris",
"letterzetter en aanspreker",
"werkman bij de handel",
"stalhoudersknecht",
"oud-korendrager",
"mutsenwasser",
"med. doctor",
"barbiersleerling",
"slijpster",
"stikster",
"winkel in manufacturen",
"afslager der Bank van Lening",
"passementwerkster",
"visser",
"commissionair in sigaren",
"ivoorzandverkoper",
"handelmeester",
"vergulder",
"broodbakkersleerling",
"behanger en koopman",
"politoerder",
"stadsbaas",
"gepensioneerd hoofdofficier",
"knecht in een fabriek",
"effectenhandelaar",
"chemisch student",
"commissionair boekhouder en grondeigenaar",
"gereedschapmaker",
"porseleinkramer",
"barometer- en thermometermaker",
"groenventer",
"baleinwerker",
"schoenpoetsster",
"koffieknecht",
"goudslager",
"muziekleerling",
"directeur stadswerken",
"koopman en kapitein",
"bank van leninghouder",
"bode bij de gemeente",
"winkelierster in bezems",
"oudroestkoper",
"suikerwaterstoker",
"strohoedenmaakster",
"venter en koopman",
"sigarenverkoper",
"zaagselboer",
"molenmakersknecht",
"koopvrouw in verf",
"directeur ziekenfonds",
"in valhoeden",
"stuurman ter koopvaardij",
"knecht bij de gasfabriek",
"vellenbereider",
"fabrikant in tabak",
"leesbibliotheek",
"veenknecht",
"zilversmeder",
"werkman molen De Nagtegaal",
"gootschepster",
"journalist",
"steenkoper en makelaar",
"tabaksleerling",
"prentschilder",
"gepasporteerd militair",
"chef in een verkoophuis",
"in vodden",
"kantoorleerling",
"roommaker",
"kantoorbediende en boekverkoper",
"fabrikant in brons",
"tinnegietersknecht",
"commissaris op Haarlem",
"winkelier in ijzer",
"raffinadeursmeesterknecht",
"boekbindersleerling",
"steenbilder",
"geëmployeerde bij de Maatschappij van Landbouw",
"debitant loterij",
"gepensioneerd onderofficier",
"boekhandelsbediende",
"sjouwer en schoenmaker",
"hoofdsuppoost",
"broodsnijder",
"visschoonmaakster",
"politieman",
"winkelier in aardappelen",
"bankier",
"gepensioneerd wachtmeester",
"commissaris van het Harlinger Veer",
"boomsnoeier",
"scheepssjouwer",
"subtituut-griffier Provinciale Hof Noord-Holland",
"stamper",
"penningmeester Entrepot",
"kruidenuitzoeker",
"scheepstimmerman op zee",
"kapitein der infanterie in het Ooostindisch leger",
"bloemenman",
"in papier en kantoorbehoeften",
"slijtersbaas",
"rietdekker",
"boekettenmaker",
"consul van het koninkrijk Württemberg",
"letterkundige",
"controleur van de Waarborg",
"controleur der accijnzen",
"begeleider",
"bootsman",
"zegelklopper",
"minnemoeder",
"siroopmakersknecht",
"sleper",
"sigarenmakersjongen",
"schachtenmaker",
"gepensioneerd bij het Oostindisch leger",
"koster in de zeemanskerk",
"barbiersknecht",
"kapitein van het achtste regiment infanterie",
"apotheker en commissionair",
"veearts",
"literator",
"kruier en doodgraver",
"bij de ijzerbaan",
"timmeraffaire",
"geestelijke",
"leverancier in bier",
"kandidaat in de theologie",
"tuinbaas",
"chocoladewerker",
"houtkoper en kistenmaker",
"suikerbakker en sigarenmaker",
"essayeur",
"slager",
"tapper en koopman",
"boekdrukkersleerling",
"melkhaalster",
"scheepsmastenmaker",
"kolonist",
"op de wattenfabriek",
"kunstschilder en tekenmeester",
"tapster",
"kantoorklerk",
"commies de comptoir",
"koopvrouw in stro",
"gepensioneerd kapitein bij het Oostindisch leger",
"waker op de schepen",
"veedrijver",
"schoenenboenster",
"onderneemster van verkopingen",
"gaswerksmid",
"koopman in groenten",
"grondeigenaresse",
"betaalmeester en fabrikant",
"mestboer",
"slapershuis",
"beurtschipper op Franeker",
"kastenmaker en timmerman",
"institutrice",
"vleeskopersknecht",
"directeur brouwerij",
"wijnkopersknecht",
"gepensioneerd rijksambtenaar",
"mandenmakersleerling",
"gieter",
"gepensioneerd ontvanger",
"werkknecht",
"godsdienstleraar",
"particulier",
"timmerman en beeldhouwer",
"teerwerker",
"evangelisch-luthers krankenbezoeker",
"in gist",
"winkelier in kruidenierswaren",
"eerste luitenant achtste regiment infanterie",
"remplaçant",
"koekjesverkoper",
"lombardhouder",
"opzichtster",
"grimeur",
"kanselier van het Franse consulaat",
"huistimmerman",
"kalfsslachter",
"tabakskopersbediende",
"blikslager en lampenier",
"majoor der mariniers",
"chirurgijnsbediende",
"ingenieur Oost-Indië",
"aardappelnering",
"commies bij het Entrepot",
"modderman",
"instituteur",
"voddenhuis",
"makelaar en boekhouder",
"historieschilder",
"turfvuller",
"ivoordraaier",
"buitengewoon commies bij de stedelijke accijnzen",
"lithografist",
"mattenkoopman",
"behangersaffaire",
"was op doeker",
"directiesecretaris"]
import json
from SPARQLWrapper import SPARQLWrapper, TURTLE, JSON
from rdflib import Graph
def getHisco(occupation):
    """Query the IISG HISCO SPARQL endpoint for an occupation title.

    :param occupation: Dutch occupation title; matched case-insensitively
        against the Dutch (``nl``) labels of HISCO occupations.
    :return: list of SPARQL result bindings (possibly empty), each with
        ``hiscoOccupation``, ``occName``, ``hiscoCategory``, ``hiscoCode``
        and ``hiscoCategoryName``.
    """
    print(f"Querying {occupation}")
    # Escape backslashes and double quotes so the title cannot terminate the
    # quoted SPARQL string literal in the FILTER and break the query.
    escaped = occupation.replace("\\", "\\\\").replace('"', '\\"')
    q = f"""
    PREFIX schema: <http://schema.org/>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT * WHERE {{
    ?hiscoOccupation a schema:Occupation ;
    schema:name ?occName ;
    schema:occupationalCategory ?hiscoCategory .
    ?hiscoCategory schema:codeValue ?hiscoCode ;
    schema:name ?hiscoCategoryName .
    FILTER ( LCASE(STR(?occName)) = "{escaped}" )
    FILTER ( LANG(?occName) = 'nl')
    }}
    """
    sparql = SPARQLWrapper(
        "https://api.druid.datalegend.net/datasets/iisg/HISCO/services/HISCO/sparql"
    )
    sparql.setQuery(q)
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return results['results']['bindings']
if __name__ == "__main__":
    # Map every (lower-cased) occupation title to its HISCO query results
    # and persist the mapping as JSON.
    results_by_occupation = dict()
    for title in occupations:
        title = title.lower()
        results_by_occupation[title] = getHisco(title)
    with open('occupations2hisco.json', 'w') as outfile:
        json.dump(results_by_occupation, outfile)
| 21.354344 | 86 | 0.765847 |
73f263129a82f143d758fcbf71d3d23beb3dfb73 | 1,246 | py | Python | generation/pycocoevalcap/bleu/bleu.py | yangliuy/Hybrid-Neural-Conversation-Model | 329a9310b2608ed1c04d17285a16a7170d6d4703 | [
"Apache-2.0"
] | 22 | 2019-09-23T20:14:54.000Z | 2022-02-24T20:49:35.000Z | generation/pycocoevalcap/bleu/bleu.py | yangliuy/Hybrid-Neural-Conversation-Model | 329a9310b2608ed1c04d17285a16a7170d6d4703 | [
"Apache-2.0"
] | 2 | 2019-08-31T08:41:14.000Z | 2020-10-03T21:10:34.000Z | generation/pycocoevalcap/bleu/bleu.py | yangliuy/Hybrid-Neural-Conversation-Model | 329a9310b2608ed1c04d17285a16a7170d6d4703 | [
"Apache-2.0"
] | 3 | 2019-11-07T03:03:53.000Z | 2020-10-23T15:50:44.000Z | #!/usr/bin/env python
#
# File Name : bleu.py
#
# Description : Wrapper for BLEU scorer.
#
# Creation Date : 06-01-2015
# Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT
# Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu>
from bleu_scorer import BleuScorer
class Bleu:
    """Wrapper around :class:`BleuScorer` computing corpus BLEU up to n-grams."""

    def __init__(self, n=4):
        # default compute Bleu score up to 4-grams
        self._n = n
        self._hypo_for_image = {}
        self.ref_for_image = {}

    def compute_score(self, gts, res):
        """Score the hypotheses in ``res`` against the references in ``gts``.

        Both arguments map the same image ids to lists; each ``res`` entry
        must contain exactly one hypothesis string.
        Returns ``(score, scores)`` as produced by ``BleuScorer``.
        """
        assert(gts.keys() == res.keys())

        scorer = BleuScorer(n=self._n)
        for img_id in gts.keys():
            hypothesis = res[img_id]
            references = gts[img_id]

            # Sanity check: one hypothesis, a list of references.
            assert(type(hypothesis) is list)
            assert(len(hypothesis) == 1)
            assert(type(references) is list)

            scorer += (hypothesis[0], references)

        score, scores = scorer.compute_score(option='closest', verbose=0)
        return score, scores

    def method(self):
        return "Bleu"
| 25.958333 | 79 | 0.58427 |
73f2939dd8bda2255e3aa7d2b351df488d5e0e83 | 5,027 | py | Python | datawinners/project/views/create_questionnaire.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | datawinners/project/views/create_questionnaire.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | datawinners/project/views/create_questionnaire.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | import json
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _, ugettext
from django.views.decorators.csrf import csrf_exempt
from datawinners import settings
from datawinners.accountmanagement.decorators import session_not_expired, is_not_expired
from datawinners.accountmanagement.models import NGOUserProfile
from datawinners.activitylog.models import UserActivityLog
from datawinners.common.constant import CREATED_QUESTIONNAIRE
from datawinners.main.database import get_database_manager
from datawinners.project import helper
from datawinners.project.helper import associate_account_users_to_project
from datawinners.project.wizard_view import get_preview_and_instruction_links, create_questionnaire, \
update_questionnaire
from datawinners.utils import get_organization
from mangrove.datastore.entity_type import get_unique_id_types
from mangrove.errors.MangroveException import QuestionCodeAlreadyExistsException, QuestionAlreadyExistsException, EntityQuestionAlreadyExistsException
from mangrove.transport.xforms.xform import xform_for
@login_required
@session_not_expired
@csrf_exempt
@is_not_expired
def create_project(request):
    """Render the questionnaire creation wizard (GET) or create one (POST).

    GET renders the wizard template with a freshly generated questionnaire
    code and the unique-id types known to the account's database.
    POST delegates to ``_create_project_post_response`` and returns its
    result dict serialised as a JSON ``HttpResponse``.
    """
    manager = get_database_manager(request.user)
    if request.method == 'GET':
        # Cancel returns to the dashboard when the wizard was entered from
        # there (``?prev=dash``), otherwise to the questionnaire list.
        cancel_link = reverse('dashboard') if request.GET.get('prev', None) == 'dash' else reverse('alldata_index')
        return render_to_response('project/create_project.html',
                                  {'preview_links': get_preview_and_instruction_links(),
                                   'questionnaire_code': helper.generate_questionnaire_code(manager),
                                   'is_edit': 'false',
                                   'active_language': request.LANGUAGE_CODE,
                                   'post_url': reverse(create_project),
                                   'unique_id_types': json.dumps([unique_id_type.capitalize() for unique_id_type in
                                                                  get_unique_id_types(manager)]),
                                   'cancel_link': cancel_link}, context_instance=RequestContext(request))
    if request.method == 'POST':
        response_dict = _create_project_post_response(request, manager)
        return HttpResponse(json.dumps(response_dict))
def _validate_questionnaire_name_and_code(questionnaire):
    """Check uniqueness of the questionnaire's form code and project name.

    :return: ``(code_has_errors, error_message, name_has_errors)`` where
        ``error_message`` maps ``"code"``/``"name"`` to a translated message
        for each conflict that was found.
    """
    error_message = {}
    code_has_errors = not questionnaire.is_form_code_unique()
    if code_has_errors:
        error_message["code"] = _("Form with same code already exists.")
    name_has_errors = not questionnaire.is_project_name_unique()
    if name_has_errors:
        error_message["name"] = _("Form with same name already exists.")
    return code_has_errors, error_message, name_has_errors
def _is_open_survey_allowed(request, is_open_survey):
    """Open surveys are only allowed for Pro-SMS organizations."""
    organization_is_pro_sms = get_organization(request).is_pro_sms
    return organization_is_pro_sms and is_open_survey
def _create_project_post_response(request, manager):
    """Create a questionnaire from the POSTed wizard data.

    Returns a dict suitable for JSON serialisation: on success
    ``{'success': True, 'project_id': ...}``, otherwise ``success=False``
    with error details for the client-side form.
    """
    project_info = json.loads(request.POST['profile_form'])
    try:
        ngo_admin = NGOUserProfile.objects.get(user=request.user)
        is_open_survey_allowed = _is_open_survey_allowed(request, request.POST.get('is_open_survey'))
        questionnaire = create_questionnaire(post=request.POST, manager=manager, name=project_info.get('name'),
                                             language=project_info.get('language', request.LANGUAGE_CODE),
                                             reporter_id=ngo_admin.reporter_id,
                                             is_open_survey=is_open_survey_allowed)
    except (QuestionCodeAlreadyExistsException, QuestionAlreadyExistsException,
            EntityQuestionAlreadyExistsException) as ex:
        # Duplicate question definitions abort creation entirely.
        return {'success': False, 'error_message': _(ex.message), 'error_in_project_section': False}
    code_has_errors, error_message, name_has_errors = _validate_questionnaire_name_and_code(questionnaire)
    if not code_has_errors and not name_has_errors:
        associate_account_users_to_project(manager, questionnaire)
        questionnaire.update_doc_and_save()
        # Optionally convert the builder project to an XLSForm (brand feature).
        if settings.BRAND_FEATURES.get('DW_BUILDER_PROJECT_TO_XLSFORMS', False):
            questionnaire.xform = xform_for(get_database_manager(request.user), questionnaire.id, request.user.get_profile().reporter_id)
            questionnaire.update_doc_and_save()
        UserActivityLog().log(request, action=CREATED_QUESTIONNAIRE, project=questionnaire.name,
                              detail=questionnaire.name)
        return {'success': True, 'project_id': questionnaire.id}
    # Name/code conflicts: report per-field errors back to the wizard.
    return {'success': False,
            'error_message': error_message,
            'error_in_project_section': False,
            'code_has_errors': code_has_errors,
            'name_has_errors': name_has_errors}
73f2aa3ca4ceda465bc9892c2741471244e7173e | 759 | py | Python | sample/common.py | camilastock/opendxl-nmap-client-python | 2221adcb154a412c14925935159afc67ed9ba7a5 | [
"Apache-2.0"
] | 1 | 2017-10-11T22:26:16.000Z | 2017-10-11T22:26:16.000Z | sample/common.py | ndcolter-mcafee/opendxl-threatevent-client-python | b6a3fe8621ae2809f1c7c718a2c4350e868e830f | [
"Apache-2.0"
] | null | null | null | sample/common.py | ndcolter-mcafee/opendxl-threatevent-client-python | b6a3fe8621ae2809f1c7c718a2c4350e868e830f | [
"Apache-2.0"
] | 1 | 2018-02-12T18:20:18.000Z | 2018-02-12T18:20:18.000Z | """
Common definitions for the samples.
This includes the defining the path to the configuration file used to initialize the DXL client
in addition to setting up the logger appropriately.
"""
import os
import logging
# Config file name.
CONFIG_FILE_NAME = "dxlclient.config"
# Resolve the config file next to this module; os.path.join is used instead
# of manual "/" concatenation so the path is built portably.
CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), CONFIG_FILE_NAME)

# Enable logging, this will also direct built-in DXL log messages.
# See - https://docs.python.org/2/howto/logging-cookbook.html
log_formatter = logging.Formatter('%(asctime)s %(name)s - %(levelname)s - %(message)s')

console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)

# Configure the root logger so library messages are shown as well.
logger = logging.getLogger()
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
| 31.625 | 95 | 0.778656 |
73f2b7fc414ae7cdc185458ae691be0018e15043 | 5,321 | py | Python | n_utils/cloudfront_utils.py | NitorCreations/nitor-deploy-tools | 93dd86d5e6586520f72dd38319f55849bf4a5bc5 | [
"Apache-2.0"
] | 10 | 2017-02-12T17:04:56.000Z | 2018-11-10T16:46:57.000Z | n_utils/cloudfront_utils.py | NitorCreations/nitor-deploy-tools | 93dd86d5e6586520f72dd38319f55849bf4a5bc5 | [
"Apache-2.0"
] | 22 | 2017-04-06T11:41:01.000Z | 2019-09-18T05:45:03.000Z | n_utils/cloudfront_utils.py | NitorCreations/nitor-deploy-tools | 93dd86d5e6586520f72dd38319f55849bf4a5bc5 | [
"Apache-2.0"
] | 1 | 2020-08-26T07:32:02.000Z | 2020-08-26T07:32:02.000Z | #!/usr/bin/env python
# Copyright 2017 Nitor Creations Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from __future__ import print_function
from builtins import str
import time
import boto3
def distributions():
    """Yield the Id of every CloudFront distribution in the account.

    Uses the ``list_distributions`` paginator so accounts with many
    distributions are handled correctly.
    """
    # NOTE: leftover debug print() calls were removed here; one of them even
    # created and printed a second, unused PageIterator.
    pages = boto3.client("cloudfront").get_paginator('list_distributions')
    for page in pages.paginate():
        distribution_list = page.get('DistributionList')
        for distribution in distribution_list['Items']:
            yield distribution['Id']
def distribution_comments():
    """Yield the Comment of every CloudFront distribution in the account."""
    paginator = boto3.client("cloudfront").get_paginator('list_distributions')
    for page in paginator.paginate():
        for entry in page.get('DistributionList')['Items']:
            yield entry['Comment']
def get_distribution_by_id(distribution_id):
    """Fetch one distribution's config, with Id and DomainName injected.

    Returned as a single-element list so callers can treat it uniformly
    with ``get_distribution_by_comment``.
    """
    cloudfront = boto3.client("cloudfront")
    distribution = cloudfront.get_distribution(Id=distribution_id)['Distribution']
    config = distribution['DistributionConfig']
    config['Id'] = distribution_id
    config['DomainName'] = distribution['DomainName']
    return [config]
def get_distribution_by_comment(comment):
    """Return all distributions whose Comment equals *comment*.

    :raises Exception: when no distribution matches.
    """
    paginator = boto3.client("cloudfront").get_paginator('list_distributions')
    matches = [
        distribution
        for page in paginator.paginate()
        for distribution in page.get('DistributionList')['Items']
        if distribution['Comment'] == comment
    ]
    if not matches:
        raise Exception("Failed to find distribution with comment " + comment)
    return matches
def hosted_zones():
    """Yield every Route53 hosted zone in the account."""
    paginator = boto3.client('route53').get_paginator('list_hosted_zones')
    for page in paginator.paginate():
        for zone in page.get('HostedZones', []):
            yield zone
def upsert_cloudfront_records(args):
    """UPSERT Route53 records for every alias of the selected distribution(s).

    Distributions are selected either by ``args.distribution_id`` or by
    ``args.distribution_comment``.  For each alias a record change is built
    against the longest-matching hosted zone, the changes are batched per
    zone, and submitted.  With ``args.wait`` the function polls every two
    seconds until all change requests report INSYNC.
    """
    # NOTE: this local deliberately shadows the module-level distributions()
    # generator within this function.
    distributions = None
    if args.distribution_id:
        distributions = get_distribution_by_id(args.distribution_id)
    else:
        distributions = get_distribution_by_comment(args.distribution_comment)
    zones = list(hosted_zones())
    # Maps HostedZoneId -> list of record changes for that zone.
    changes = {}
    for distribution in distributions:
        if 'Aliases' in distribution:
            print("Upserting records for " + distribution['Id'] + " (" + distribution['Comment'] + ")")
            for alias in distribution['Aliases']['Items']:
                change = get_record_change(alias, distribution['DomainName'], distribution['Id'], zones)
                if not change['HostedZoneId'] in changes:
                    changes[change['HostedZoneId']] = []
                changes[change['HostedZoneId']].append(change['Change'])
    route53 = boto3.client('route53')
    requests = []
    # One change batch per hosted zone; keep the ChangeInfo for polling.
    for req in changes:
        requests.append(route53.change_resource_record_sets(HostedZoneId=req,
                                                            ChangeBatch={
                                                                'Changes': changes[req]
                                                            })['ChangeInfo'])
    if args.wait:
        not_synced_count = 1
        while not_synced_count > 0:
            not_synced_count = 0
            for req in requests:
                if not route53.get_change(Id=req['Id'])['ChangeInfo']['Status'] == 'INSYNC':
                    not_synced_count = not_synced_count + 1
            if not_synced_count > 0:
                print("Waiting for requests to sync - " + str(not_synced_count) + " not synced")
                time.sleep(2)
            else:
                print(str(len(requests)) + " requests INSYNC")
def longest_matching_zone(alias, hosted_zones):
    """Pick the hosted zone whose name is the longest suffix of *alias*.

    Zone names are expected in FQDN form (trailing dot).  When no zone
    matches, a sentinel ``{'Name': ''}`` is returned.
    """
    fqdn = alias + "."
    best = {'Name': ''}
    for candidate in hosted_zones:
        if fqdn.endswith(candidate['Name']) and len(candidate['Name']) > len(best['Name']):
            best = candidate
    return best
def get_record_change(alias, dns_name, distribution_id, hosted_zones):
    """Build a Route53 UPSERT change pointing *alias* at the distribution.

    An apex alias (alias equals the zone name) becomes an A-alias record to
    CloudFront's fixed hosted zone; anything else becomes a CNAME with a
    300 second TTL.
    Returns ``{'HostedZoneId': ..., 'Change': ...}``.
    """
    zone = longest_matching_zone(alias, hosted_zones)
    if zone:
        print(alias + " => " + dns_name + "(" + distribution_id + ") in " + zone['Name'])
        record_type = "A" if alias + "." == zone['Name'] else "CNAME"
        record_set = {
            'Name': alias,
            'Type': record_type,
        }
        if record_type == "A":
            # Z2FDTNDATAQYW2 is the fixed hosted zone id for CloudFront aliases.
            record_set['AliasTarget'] = {
                'HostedZoneId': 'Z2FDTNDATAQYW2',
                'DNSName': dns_name,
                'EvaluateTargetHealth': False
            }
        else:
            record_set['ResourceRecords'] = [{
                'Value': dns_name
            }]
            record_set['TTL'] = 300
        change = {
            'Action': 'UPSERT',
            'ResourceRecordSet': record_set,
        }
        return {'HostedZoneId': zone['Id'], 'Change': change}
| 37.737589 | 104 | 0.607029 |
73f312d21e6fb6971fe47e26f6090204f1835f5b | 1,849 | py | Python | gwtarget/DESI_mainInjector/Main-Injector-master/python/kasen_modelspace.py | rknop/timedomain | d3e3c43dfbb9cadc150ea04024d9b4132cb9ca17 | [
"MIT"
] | 1 | 2021-06-18T05:25:27.000Z | 2021-06-18T05:25:27.000Z | gwtarget/DESI_mainInjector/Main-Injector-master/python/kasen_modelspace.py | MatthewPortman/timedomain | b9c6c2e6804d7dde56311d9402769be545d505d0 | [
"MIT"
] | null | null | null | gwtarget/DESI_mainInjector/Main-Injector-master/python/kasen_modelspace.py | MatthewPortman/timedomain | b9c6c2e6804d7dde56311d9402769be545d505d0 | [
"MIT"
] | null | null | null | import os
import numpy as np
from knlc import kn_brightness_estimate
def run_ap_mag_for_kasen_models(filter, distance, dist_err, days_since_burst,
                                kasen_fraction, data_dir="./", fast=False,
                                doPlots=True):
    """Estimate a kilonova's apparent magnitude in *filter* from Kasen models.

    :param filter: band name, one of "g", "r", "i", "z"
    :param distance: luminosity distance estimate
    :param dist_err: uncertainty on the distance
    :param days_since_burst: time since the burst (days)
    :param kasen_fraction: percentile of Kasen models to report
    :param data_dir: directory for report/answer files
    :param fast: if False, shell out to the knlc helper script; if True, call
        the knlc library directly
    :param doPlots: also request magnitude/exposure plots (slow path only)
    :return: apparent magnitude in the requested band
    """
    report_file = data_dir + "/kn_report"
    if not fast:
        # Slow path: run kn_brightness_estimate.py as a subprocess, which
        # writes <report_file>.txt, then parse the per-band magnitudes.
        knlc_dir = os.getenv("DESGW_DIR", "./") + "/knlc/"
        code = knlc_dir + "kn_brightness_estimate.py"
        cmd = "python {} --distance {} --distance_err {} --time_delay {} ".format(
            code, distance, dist_err, days_since_burst)
        cmd = cmd + "--fraction {} ".format(kasen_fraction)
        if doPlots:
            cmd = cmd + "--magplot_file kn_mag_plot.png "
            cmd = cmd + "--expplot_file kn_exp_plot.png "
        cmd = cmd + "--report_file {} ".format(report_file)
        os.system(cmd)

        file = report_file + ".txt"
        # Close the report file deterministically instead of leaking the
        # handle; the magnitudes live on line 17 of the report.
        with open(file, "r") as fd:
            for i in range(0, 16):
                fd.readline()
            line = fd.readline().split()
        apparent_mag = dict()
        # np.float was a deprecated alias of the builtin float and was
        # removed in NumPy 1.24; use float directly (identical behavior).
        apparent_mag["g"] = float(line[0])
        apparent_mag["r"] = float(line[3])
        apparent_mag["i"] = float(line[6])
        apparent_mag["z"] = float(line[9])
        ap_mag = apparent_mag[filter]
    else:
        # Fast path: use the knlc library in-process.
        kn_calc = kn_brightness_estimate.KNCalc(distance, dist_err, days_since_burst)
        percentile_dict = kn_brightness_estimate.calc_mag_fractions(kn_calc.template_df_full)
        # Result intentionally unbound; the call is kept from the original
        # in case it has side effects inside knlc.
        kn_brightness_estimate.mags_of_percentile(
            kasen_fraction, percentile_dict)
        kn_brightness_estimate.make_output_csv(
            np.linspace(0., 100., 101),
            percentile_dict,
            write_answer=True,
            flt=filter,
            fraction=kasen_fraction)
        file = data_dir + "/answer_{}.txt".format(filter)
        ap_mag = np.genfromtxt(file)
    return ap_mag
| 39.340426 | 93 | 0.608437 |
73f33c018d551c0327e64f5c322c6b826b0969c3 | 1,868 | py | Python | Executor/lib/conn_device.py | DPham-Xyk/CoreNet | 4d36529ae6e51d5fa8cf95be58704ddab10a54c0 | [
"MIT"
] | null | null | null | Executor/lib/conn_device.py | DPham-Xyk/CoreNet | 4d36529ae6e51d5fa8cf95be58704ddab10a54c0 | [
"MIT"
] | null | null | null | Executor/lib/conn_device.py | DPham-Xyk/CoreNet | 4d36529ae6e51d5fa8cf95be58704ddab10a54c0 | [
"MIT"
] | null | null | null | import logging
import yaml
from jnpr.junos import Device
from jnpr.junos.exception import ConnectError
# Logging
logger = logging.getLogger(__name__)
class ConnDevice(object):
    """Common interface for connecting to Junos network devices."""

    def __init__(self, config_path='config/devices.yaml'):
        """Load device credentials and connect to every device.

        :param config_path: Location of the credentials for each network device
        :type config_path: str
        """
        self.network_devices = {}
        self.connected_devices = {}
        self._import_network_devices(config_path)

    def _import_network_devices(self, network_device_file):
        """Import the hostnames, username and password for each network device
        and connect to all of them.

        :param network_device_file: Location of the credentials for each
            network device
        :type network_device_file: str
        """
        logger.debug('Loading network devices into JunosCollector')
        with open(network_device_file, 'r') as f:
            # safe_load: never construct arbitrary Python objects from the
            # YAML file (yaml.load without a Loader is unsafe and deprecated).
            import_devices = yaml.safe_load(f.read())

        for device in import_devices['devices']:
            self.network_devices[device['name']] = device
            logger.debug('Imported credentials for %s', device['name'])

        for _, device in self.network_devices.items():
            self._connect_to_device(device)

    def _connect_to_device(self, device):
        """Connects to the network device via Netconf.

        Devices that fail to connect are logged and skipped; previously the
        failure path fell through to the registration line and raised a
        NameError on the unbound ``dev``.

        :param device: Contains the necessary information to connect to the
            device (keys: ``name``, ``ip``, ``user``, ``password``)
        :type device: dict
        """
        try:
            logger.debug('Connecting to %s', device['ip'])
            dev = Device(host=device['ip'], user=device['user'], password=device['password'])
            dev.open()
            logger.info('Successfully connected to %s', device['ip'])
        except ConnectError as e:
            logger.error('%s', str(e))
            return

        self.connected_devices[device['name']] = dev
73f33df3382aa07570398b499fd966281c1dd7c0 | 2,527 | py | Python | src/null_player.py | c3n21/hidamari | 776e9ca0415a80759fdf9feab53d883b56d868d5 | [
"Apache-2.0"
] | 89 | 2020-12-29T02:10:47.000Z | 2022-03-31T05:50:41.000Z | src/null_player.py | c3n21/hidamari | 776e9ca0415a80759fdf9feab53d883b56d868d5 | [
"Apache-2.0"
] | 42 | 2020-12-29T02:14:03.000Z | 2022-03-29T08:34:21.000Z | src/null_player.py | c3n21/hidamari | 776e9ca0415a80759fdf9feab53d883b56d868d5 | [
"Apache-2.0"
] | 13 | 2020-12-29T02:10:49.000Z | 2022-03-05T21:45:47.000Z | import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Gdk
from base_player import BasePlayer
from commons import *
class NullPlayer(BasePlayer):
    """Player backend that renders nothing.

    It still creates one transparent desktop-type window per monitor so the
    user can right-click the desktop for the context menu, but it never plays
    any media.
    """

    def __init__(self, config):
        super().__init__(config)
        self.start_all_monitors()
        self.menu = self._build_context_menu()
        for child in self.menu.get_children():
            # Remove unsupported action
            if child.get_label() not in ["Show Hidamari", "Quit Hidamari", "GNOME Settings"]:
                self.menu.remove(child)
        self.menu.show_all()

        # Welcome dialog
        dialog = Gtk.MessageDialog(text="Welcome to Hidamari 🤗", message_type=Gtk.MessageType.INFO,
                                   secondary_text="<b>Right click</b> on the desktop to access the Main Menu",
                                   secondary_use_markup=True,
                                   buttons=Gtk.ButtonsType.OK)
        dialog.run()
        dialog.destroy()

    # The properties below simply proxy the shared config dictionary.

    @property
    def mode(self):
        return self.config[CONFIG_KEY_MODE]

    @mode.setter
    def mode(self, mode):
        self.config[CONFIG_KEY_MODE] = mode

    @property
    def data_source(self):
        return self.config[CONFIG_KEY_DATA_SOURCE]

    @data_source.setter
    def data_source(self, data_source: str):
        self.config[CONFIG_KEY_DATA_SOURCE] = data_source

    @property
    def volume(self):
        return self.config[CONFIG_KEY_VOLUME]

    @volume.setter
    def volume(self, volume):
        self.config[CONFIG_KEY_VOLUME] = volume

    @property
    def is_mute(self):
        return self.config[CONFIG_KEY_MUTE]

    @property
    def is_playing(self):
        # A null player never plays anything.
        return False

    def pause_playback(self):
        # No-op: there is no playback to pause.
        pass

    def start_playback(self):
        # No-op: there is no playback to start.
        pass

    def start_all_monitors(self):
        """Create a transparent desktop window on every uninitialized monitor."""
        for monitor in self.monitors:
            if monitor.is_initialized:
                continue
            # Window settings
            window = Gtk.Window()
            window.set_type_hint(Gdk.WindowTypeHint.DESKTOP)
            window.set_size_request(monitor.width, monitor.height)
            window.move(monitor.x, monitor.y)
            # Button event
            window.connect("button-press-event", self._on_button_press_event)
            window.show_all()
            # Fully transparent: the window only exists to catch clicks.
            window.set_opacity(0.0)
            monitor.initialize(window)

    def _on_monitor_added(self, _, gdk_monitor, *args):
        # Hot-plugged monitors also get a click-catcher window.
        super(NullPlayer, self)._on_monitor_added(_, gdk_monitor, *args)
        self.start_all_monitors()
| 28.393258 | 110 | 0.617333 |
73f355fa31f66730c6f5e10e7354ad9d4e5129c8 | 15,663 | py | Python | src/hades/bin/dhcp_script.py | agdsn/hades | 78782831b840188b14e37c0673a6d6e9712f64ce | [
"MIT"
] | 8 | 2015-04-27T00:41:03.000Z | 2021-11-14T17:15:26.000Z | src/hades/bin/dhcp_script.py | agdsn/hades | 78782831b840188b14e37c0673a6d6e9712f64ce | [
"MIT"
] | 84 | 2015-04-27T12:15:00.000Z | 2021-11-28T19:06:23.000Z | src/hades/bin/dhcp_script.py | agdsn/hades | 78782831b840188b14e37c0673a6d6e9712f64ce | [
"MIT"
] | 4 | 2015-11-14T16:20:40.000Z | 2017-09-05T00:27:49.000Z | from __future__ import annotations
import argparse
import codecs
import grp
import itertools
import logging
import os
import pwd
from contextlib import closing
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Any, Callable, Dict, Iterable, Optional, Tuple, TypeVar, Sequence, TextIO, \
Mapping
import netaddr
from sqlalchemy import text
from sqlalchemy.engine.base import Connection, Engine
from sqlalchemy.engine.result import RowProxy
from hades import constants
from hades.common.cli import (
ArgumentParser,
parser as parent_parser,
setup_cli_logging,
)
from hades.common.db import (
auth_dhcp_lease,
create_engine,
get_all_auth_dhcp_leases,
)
from hades.common.privileges import drop_privileges
from hades.config.loader import load_config
logger = logging.getLogger(__name__)
def engine_from_config(filename: str) -> Engine:
    """Create a SERIALIZABLE-isolation database engine from a Hades config file."""
    loaded_config = load_config(filename)
    return create_engine(loaded_config, isolation_level="SERIALIZABLE")
def generate_leasefile_lines(
    leases: Iterable[
        Tuple[
            datetime,
            netaddr.EUI,
            netaddr.IPAddress,
            Optional[str],
            Optional[bytes],
        ]
    ]
) -> Iterable[str]:
    """
    Generate lines in dnsmasq leasefile format from an iterable.

    This is a generator; the previous ``-> str`` annotation was wrong.

    :param leases: An iterable that yields (ExpiresAt, MAC, IPAddress,
        Hostname, ClientID)-tuples
    :return: An iterable of newline-terminated leasefile lines
    """
    for expires_at, mac, ip, hostname, client_id in leases:
        mac = netaddr.EUI(mac)
        # dnsmasq expects colon-separated, zero-padded lowercase MACs.
        mac.dialect = netaddr.mac_unix_expanded
        if client_id is None:
            client_id = "*"
        else:
            # Render the raw client-id bytes as colon-separated hex pairs.
            it = iter(client_id.hex())
            client_id = ":".join(a + b for a, b in zip(it, it))
        yield "{expires_at:d} {mac} {ip} {hostname} {client_id}\n".format(
            expires_at=int(expires_at.timestamp()),
            mac=mac,
            ip=ip,
            hostname=hostname if hostname is not None else "*",
            client_id=client_id,
        )
# noinspection PyUnusedLocal
def print_leases(
    args,
    context: Context,
    engine: Engine,
) -> int:
    """Print all leases in dnsmasq leasefile format (``init`` action).

    :param args: parsed CLI arguments (unused)
    :param context: process I/O streams and environment
    :param engine: engine for the lease database
    :return: ``os.EX_OK``
    """
    # closing() releases the connection when done; the original leaked it.
    with closing(engine.connect()) as connection:
        with connection.begin():
            leases = get_all_auth_dhcp_leases(connection)
            context.stdout.writelines(generate_leasefile_lines(leases))
    return os.EX_OK
def get_env_safe(environ: Mapping[str, str], name: str) -> Optional[str]:
    """
    Try to get a string value from the environment and replace illegal
    characters using backslashreplace.

    See `here http://lucumr.pocoo.org/2013/7/2/the-updated-guide-to-unicode/`_
    for details.

    :param environ: environment mapping to look the name up in
    :param name: variable name
    :return: the sanitised value, or None when the variable is absent
    """
    raw = environ.get(name)
    if raw is None:
        return None
    return raw.encode("utf-8", "backslashreplace").decode("utf-8")
T = TypeVar("T")


def obtain_and_convert(
    environ: Mapping[str, str],
    name: str,
    func: Callable[[Any], T],
) -> Optional[T]:
    """
    Obtain a value from the environment and convert it with *func*.

    Returns None when the variable is absent; raises ValueError (chained to
    the conversion error) when the conversion fails.
    """
    raw = get_env_safe(environ, name)
    if raw is None:
        return None
    try:
        return func(raw)
    except ValueError as e:
        raise ValueError(
            "Environment variable {} contains an illegal value {}".format(
                name, raw
            )
        ) from e
def obtain_user_classes(environ: Mapping[str, str]) -> Iterable[str]:
    """Gather all user classes from environment variables.

    Yields ``DNSMASQ_USER_CLASS0``, ``DNSMASQ_USER_CLASS1``, … and stops at
    the first missing index.  This is a generator; the previous ``-> str``
    annotation was wrong.
    """
    for number in itertools.count():
        user_class = get_env_safe(environ, "DNSMASQ_USER_CLASS" + str(number))
        if user_class is None:
            return
        yield user_class
def obtain_tuple(
    environ: Mapping[str, str],
    name: str,
    sep: str,
    func: Callable[[Any], T] = lambda x: x,
) -> Optional[Tuple[T, ...]]:
    """Obtain a tuple of values from the environment.

    The variable's value is split on *sep*; empty fragments are skipped and
    each remaining fragment is converted with *func*.  Returns None when the
    variable is absent.  The annotation is ``Tuple[T, ...]`` because the
    tuple has arbitrary length (``Tuple[T]`` would mean a 1-tuple).

    :raises ValueError: when any fragment fails conversion
    """
    value = get_env_safe(environ, name)
    if value is not None:
        try:
            value = tuple(func(v) for v in value.split(sep) if v)
        except ValueError as e:
            raise ValueError(
                "Environment variable {} contains illegal value {}".format(
                    name, value
                )
            ) from e
    return value
@dataclass
class LeaseArguments:
    """Positional CLI arguments shared by the add/del/old lease commands."""

    mac: netaddr.EUI
    ip: netaddr.IPAddress
    hostname: Optional[str]

    @classmethod
    def from_anonymous_args(cls, args):
        """Build a LeaseArguments from an argparse-style namespace."""
        return cls(mac=args.mac, ip=args.ip, hostname=args.hostname)
def obtain_lease_info(
    args: LeaseArguments,
    context: Context,
    *,
    missing_as_none: bool,
) -> Dict[str, Any]:
    """Obtain lease information from the CLI arguments and the environment.

    The IPAddress, MAC, Tags, Client-ID and ExpiresAt keys are always present
    in the returned dictionary, because these values should be known by the
    the DHCP Server during every client interaction. The Hostname key is present
    if the ``DNSMASQ_OLD_HOSTNAME`` environment variable is available.

    For other values, the ``missing_as_none`` parameter specifies if a missing
    environment variable should result in the corresponding key being present
    with value of None in the resulting dict or if the key should be absent.
    """
    expires_at = obtain_and_convert(context.environ, "DNSMASQ_LEASE_EXPIRES", int)
    time_remaining = obtain_and_convert(context.environ, "DNSMASQ_TIME_REMAINING", int)
    if time_remaining is None:
        time_remaining = 0
    # Prefer the absolute expiry timestamp; otherwise derive it from now plus
    # the remaining lease time. Both paths produce an aware UTC datetime.
    if expires_at is None:
        now = datetime.utcnow().replace(tzinfo=timezone.utc)
        expires_at = now + timedelta(seconds=time_remaining)
    else:
        expires_at = datetime.utcfromtimestamp(expires_at).replace(
            tzinfo=timezone.utc
        )
    # The client-id arrives as colon-separated hex; decode to raw bytes.
    client_id = context.environb.get(b"DNSMASQ_CLIENT_ID")
    if client_id is not None:
        try:
            client_id = codecs.decode(client_id.replace(b":", b""), "hex")
        except ValueError as e:
            raise ValueError(
                "Environment variable DNSMASQ_CLIENT_ID contains "
                f"illegal value {context.environ.get('DNSMASQ_CLIENT_ID')}"
            ) from e
    values = {
        "IPAddress": args.ip,
        "MAC": args.mac,
        "ClientID": client_id,
        "ExpiresAt": expires_at,
    }

    # Honors missing_as_none: optional keys are either stored as None or
    # left out of the dict entirely.
    def set_value(key, value):
        if value is not None or missing_as_none:
            values[key] = value

    hostname = args.hostname
    if hostname is not None or "DNSMASQ_OLD_HOSTNAME" in context.environ:
        values["Hostname"] = hostname
    set_value(
        "SuppliedHostname", get_env_safe(context.environ, "DNSMASQ_SUPPLIED_HOSTNAME")
    )
    set_value("Tags", obtain_tuple(context.environ, "DNSMASQ_TAGS", " "))
    set_value("Domain", get_env_safe(context.environ, "DNSMASQ_DOMAIN"))
    set_value("CircuitID", context.environb.get(b"DNSMASQ_CIRCUIT_ID"))
    set_value("SubscriberID", context.environb.get(b"DNSMASQ_SUBSCRIBER_ID"))
    set_value("RemoteID", context.environb.get(b"DNSMASQ_REMOTE_ID"))
    set_value("VendorClass", get_env_safe(context.environ, "DNSMASQ_VENDOR_CLASS"))
    # An empty tuple of user classes is normalised to None ("not supplied").
    user_classes = tuple(obtain_user_classes(context.environ))
    user_classes = user_classes if user_classes != () else None
    set_value("UserClasses", user_classes)
    set_value(
        "RelayIPAddress",
        obtain_and_convert(context.environ, "DNSMASQ_RELAY_ADDRESS", netaddr.IPAddress),
    )
    set_value(
        "RequestedOptions",
        obtain_tuple(context.environ, "DNSMASQ_REQUESTED_OPTIONS", ",", int),
    )
    return values
def query_lease_for_update(
    connection: Connection,
    ip: netaddr.IPAddress,
) -> Optional[RowProxy]:
    """Fetch the lease row for *ip* with SELECT … FOR UPDATE, if it exists.

    Warns when the supposedly unique IP matches more than one row; the first
    row is returned in that case.
    """
    stmt = auth_dhcp_lease.select(
        auth_dhcp_lease.c.IPAddress == ip
    ).with_for_update()
    with closing(connection.execute(stmt)) as result:
        first = result.fetchone()
        if result.fetchone() is not None:
            logger.warning(
                "Querying database for lease with IP %s "
                "returned more than one row",
                ip,
            )
        return first
def perform_lease_update(
    connection: Connection,
    ip: netaddr.IPAddress,
    mac: netaddr.EUI,
    old: RowProxy,
    new: Dict[str, Any],
):
    """Write only the columns of *new* that differ from the stored *old* row.

    Returns None when nothing changed, otherwise the update's result proxy.
    Warns when the update affected an unexpected number of rows.
    """
    delta = {column: value for column, value in new.items() if old[column] != value}
    if not delta:
        return None
    stmt = auth_dhcp_lease.update(values=delta).where(
        auth_dhcp_lease.c.IPAddress == ip
    )
    result = connection.execute(stmt)
    if result.rowcount != 1:
        logger.warning(
            "Unexpected row count %d while updating lease for IP %s "
            "and MAC %s",
            result.rowcount,
            ip,
            mac,
        )
    return result
def add_lease(
    args,
    context: Context,
    engine: Engine,
) -> int:
    """Handle the dnsmasq ``add`` action: insert a lease row for the IP.

    If a row for the IP already exists (it should not on ``add``), a warning
    is logged and the row is updated in place instead.
    """
    connection = engine.connect()
    values = obtain_lease_info(
        LeaseArguments.from_anonymous_args(args),
        context,
        missing_as_none=True
    )
    # Unknown values fall back to the column defaults.
    values = {k: (v if v is not None else text('DEFAULT'))
              for k, v in values.items()}
    ip, mac = values["IPAddress"], values["MAC"]
    logger.debug(
        "Inserting new lease for IP %s and MAC %s",
        ip,
        mac,
    )
    with connection.begin():
        # TODO: Use INSERT ON CONFLICT UPDATE on newer SQLAlchemy (>= 1.1)
        old_values = query_lease_for_update(connection, ip)
        if old_values is None:
            connection.execute(auth_dhcp_lease.insert(values=values))
        else:
            logger.warning("Lease for IP %s and MAC %s already exists", ip, mac)
            perform_lease_update(connection, ip, mac, old_values, values)
    return os.EX_OK
def delete_lease(
    args,
    context: Context,
    engine: Engine,
) -> int:
    """Handle the dnsmasq ``del`` action: remove the lease row for the IP.

    A missing row is only logged as a warning; the exit status is still
    ``os.EX_OK``.
    """
    connection = engine.connect()
    lease_info = obtain_lease_info(
        LeaseArguments.from_anonymous_args(args),
        context,
        missing_as_none=False,
    )
    ip, mac = lease_info["IPAddress"], lease_info["MAC"]
    logger.debug("Deleting lease for IP %s and MAC %s", ip, mac)
    stmt = auth_dhcp_lease.delete().where(auth_dhcp_lease.c.IPAddress == ip)
    with connection.begin():
        result = connection.execute(stmt)
        if result.rowcount != 1:
            logger.warning(
                "Unexpected row count %d while deleting lease for IP %s and MAC %s",
                result.rowcount,
                ip,
                mac,
            )
    return os.EX_OK
def update_lease(
    args,
    context: Context,
    engine: Engine,
) -> int:
    """Handle the dnsmasq ``old`` action: refresh an existing lease.

    Inserts the row if it is unexpectedly missing, otherwise updates only
    the changed columns.
    """
    connection = engine.connect()
    values = obtain_lease_info(
        LeaseArguments.from_anonymous_args(args),
        context,
        missing_as_none=False
    )
    # Always touch UpdatedAt (database default) unless a value was supplied.
    values.setdefault('UpdatedAt', text('DEFAULT'))
    ip, mac = values["IPAddress"], values["MAC"]
    logger.debug("Updating lease for IP %s and MAC %s", ip, mac)
    with connection.begin():
        # TODO: Use INSERT ON CONFLICT UPDATE on newer SQLAlchemy (>= 1.1)
        old_values = query_lease_for_update(connection, ip)
        if old_values is None:
            connection.execute(auth_dhcp_lease.insert(values=values))
        else:
            perform_lease_update(connection, ip, mac, old_values, values)
    return os.EX_OK
# noinspection PyUnusedLocal
def do_nothing(
    args,
    context: Context,
    engine: Engine,
) -> int:
    """Fallback handler for dnsmasq actions this script does not implement.

    dnsmasq requires dhcp-scripts to tolerate unknown commands, so this just
    logs the original command and reports success.
    """
    logger.warning("Unknown command %s", args.original_command)
    return os.EX_OK
def add_lease_command(sub_parsers, action, action_help):
    """Register a lease sub-command taking MAC, IP and optional hostname."""
    sub_parser = sub_parsers.add_parser(action, help=action_help)
    positionals = (
        ("mac", {"type": netaddr.EUI, "help": "MAC address"}),
        ("ip", {"type": netaddr.IPAddress, "help": "IP address"}),
        ("hostname", {"nargs": "?", "help": "Hostname"}),
    )
    for arg_name, options in positionals:
        sub_parser.add_argument(arg_name, **options)
    return sub_parser
def create_parser(standalone: bool = True) -> ArgumentParser:
    """Build the argument parser for the dhcp-script.

    :param standalone: when False (embedded use), ``exit()`` is suppressed
        and the common Hades parent options are omitted.
    """
    class Parser(ArgumentParser):
        def parse_known_args(self, args=None, namespace=None):
            if namespace is None:
                namespace = argparse.Namespace()

            # The dnsmasq man page states, that the dhcp-script should handle
            # unknown commands, we therefore have to convert unknown commands
            # into the no-op command, for argparse to parse it properly.
            # argparse uses the type parameter of actions to convert values
            # before parsing it, but in the case of sub-parsers it parses all
            # positional arguments.
            def type_func(x):
                commands.type = None
                namespace.original_command = x
                return x if x in commands.choices else "no-op"

            commands.type = type_func
            return super().parse_known_args(args, namespace)

        def exit(self, *a, **kw):
            # Embedded mode must never terminate the host process.
            if standalone:
                return super().exit(*a, **kw)
            logger.warning("Unexpected call to argparsers exit(args=%r, kwargs=%r)", a, kw)

    parser = Parser(
        description="dnsmasq leasefile dhcp-script to store leases in the "
        "Hades database",
        parents=[parent_parser] if standalone else [],
        exit_on_error=standalone,
    )
    commands = parser.add_subparsers(metavar="COMMAND", dest="command")
    commands.required = True
    commands.add_parser(
        "init",
        help="Print all leases in dnsmasq leasefile format"
    )
    # Hidden catch-all target that unknown commands are rewritten to.
    commands.add_parser("no-op", help=argparse.SUPPRESS)
    add_lease_command(commands, "add", "Add a lease")
    add_lease_command(commands, "del", "Delete a lease")
    add_lease_command(commands, "old", "Update a lease")
    return parser
@dataclass
class Context:
    """Information relevant to the communication of the program"""
    # Standard input stream of the invocation (leases are read from here
    # by the init command's helpers).
    stdin: TextIO
    # Standard output stream (leasefile-format output).
    stdout: TextIO
    # Standard error stream.
    stderr: TextIO
    # Text environment variables of the process.
    environ: Mapping[str, str]
    # Bytes environment variables (os.environb) for raw, undecoded values.
    environb: Mapping[bytes, bytes]
def main():
    """Standalone entry point: drop privileges if root, parse args, dispatch.

    Returns an ``os.EX_*`` exit status suitable for sys.exit().
    """
    import sys
    logger.warning("Running in standalone mode. This is meant for development purposes only.")
    # When dnsmasq starts, it calls init before dropping privileges
    if os.geteuid() == 0:
        # Running as root: resolve the dedicated service user/group and
        # drop privileges before touching the database.
        try:
            passwd = pwd.getpwnam(constants.AUTH_DHCP_USER)
        except KeyError:
            logger.critical("No such user: {}".format(constants.AUTH_DHCP_USER))
            return os.EX_NOUSER
        try:
            group = grp.getgrgid(passwd.pw_gid)
        except KeyError:
            logger.critical("No such group: {:d}".format(passwd.pw_gid))
            return os.EX_NOUSER
        drop_privileges(passwd, group)
    parser = create_parser(standalone=True)
    args = parser.parse_args()
    setup_cli_logging(parser.prog, args)
    engine = engine_from_config(args.config)
    return dispatch_commands(
        args,
        Context(
            stdin=sys.stdin,
            stdout=sys.stdout,
            stderr=sys.stderr,
            environ=os.environ,
            environb=os.environb,
        ),
        engine,
    )
def dispatch_commands(
    args,
    context: Context,
    engine: Engine,
) -> int:
    """Dispatch the parsed sub-command to its handler function.

    :param args: Parsed arguments; ``args.command`` selects the handler.
    :param context: I/O streams and environment of the invocation.
    :param engine: SQLAlchemy engine for the lease database.
    :return: The handler's ``os.EX_*`` status, or ``os.EX_USAGE`` on
        a ValueError raised by the handler.
    """
    # type: Dict[str, Callable[[Any, Context, Engine], int]]
    funcs = {
        "init": print_leases,
        "add": add_lease,
        "del": delete_lease,
        "old": update_lease,
        "no-op": do_nothing,
    }
    try:
        return funcs[args.command](args, context, engine)
    except ValueError as e:
        logger.fatal(str(e), exc_info=e)
        return os.EX_USAGE
# Script entry point: forward main()'s os.EX_* status to the shell.
if __name__ == "__main__":
    import sys
    sys.exit(main())
| 30.711765 | 95 | 0.634297 |
73f3902b17cc1a34e9178bd9c760c6d389418ea4 | 734 | py | Python | modelConfigs/ModelN.py | jesisca-tandi/video-actions-classification | c90aafc9d212982368565fc373ac7fd0ff08e1d6 | [
"MIT"
] | null | null | null | modelConfigs/ModelN.py | jesisca-tandi/video-actions-classification | c90aafc9d212982368565fc373ac7fd0ff08e1d6 | [
"MIT"
] | null | null | null | modelConfigs/ModelN.py | jesisca-tandi/video-actions-classification | c90aafc9d212982368565fc373ac7fd0ff08e1d6 | [
"MIT"
] | null | null | null | from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, Conv1D, Conv2D, MaxPooling2D, Embedding, LSTM, TimeDistributed, Masking, Lambda, GRU, Bidirectional, LeakyReLU
from keras.optimizers import Adam
def createModel(modelName='ModelN'):
opt = Adam(lr=0.005)
model = Sequential()
model.add(Masking(mask_value=-1, input_shape=(None, 400)))
model.add(Bidirectional(LSTM(128, input_shape= (None, 400), return_sequences = True)))
model.add(Bidirectional(LSTM(128, return_sequences = True)))
model.add(TimeDistributed(Dense(48, activation='softmax')))
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model, modelName | 40.777778 | 160 | 0.747956 |
73f3b3c2c4a8524c61972a7a6fd62caf64dad099 | 11,641 | py | Python | avalon/mongodb.py | simonebarbieri/avalon-core | cfd4191e364b47de7364096f45d9d9d9a901692a | [
"MIT"
] | null | null | null | avalon/mongodb.py | simonebarbieri/avalon-core | cfd4191e364b47de7364096f45d9d9d9a901692a | [
"MIT"
] | null | null | null | avalon/mongodb.py | simonebarbieri/avalon-core | cfd4191e364b47de7364096f45d9d9d9a901692a | [
"MIT"
] | null | null | null | import os
import sys
import time
import functools
import logging
import pymongo
import ctypes
from uuid import uuid4
from avalon import schema
def extract_port_from_url(url):
if sys.version_info[0] == 2:
from urlparse import urlparse
else:
from urllib.parse import urlparse
parsed_url = urlparse(url)
if parsed_url.scheme is None:
_url = "mongodb://{}".format(url)
parsed_url = urlparse(_url)
return parsed_url.port
def requires_install(func):
func_obj = getattr(func, "__self__", None)
@functools.wraps(func)
def decorated(*args, **kwargs):
if func_obj is not None:
_obj = func_obj
else:
_obj = args[0]
if not _obj.is_installed():
if _obj.auto_install:
_obj.install()
else:
raise IOError(
"'{}.{}()' requires to run install() first".format(
_obj.__class__.__name__, func.__name__
)
)
return func(*args, **kwargs)
return decorated
def auto_reconnect(func):
    """Handling auto reconnect in 3 retry times"""
    retry_times = 3
    reconnect_msg = "Reconnecting..."
    # For already-bound methods the instance is captured at decoration time.
    func_obj = getattr(func, "__self__", None)
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        if func_obj is not None:
            _obj = func_obj
        else:
            # Unbound case: `self` arrives as the first positional argument.
            _obj = args[0]
        for retry in range(1, retry_times + 1):
            try:
                return func(*args, **kwargs)
            except pymongo.errors.AutoReconnect:
                # Prefer the object's logger when it has one.
                if hasattr(_obj, "log"):
                    _obj.log.warning(reconnect_msg)
                else:
                    print(reconnect_msg)
                # Re-raise on the final attempt instead of returning None.
                if retry >= retry_times:
                    raise
                time.sleep(0.1)
    return decorated
SESSION_CONTEXT_KEYS = (
# Root directory of projects on disk
"AVALON_PROJECTS",
# Name of current Project
"AVALON_PROJECT",
# Name of current Asset
"AVALON_ASSET",
# Name of current silo
"AVALON_SILO",
# Name of current task
"AVALON_TASK",
# Name of current app
"AVALON_APP",
# Path to working directory
"AVALON_WORKDIR",
# Optional path to scenes directory (see Work Files API)
"AVALON_SCENEDIR"
)
def session_data_from_environment(context_keys=False):
    """Assemble the initial Session dictionary from environment variables.

    Arguments:
        context_keys (bool): When True, seed every context key from the
            environment (missing/empty values become ""); when False,
            context keys start out as None.

    Returns:
        dict: Context keys plus configuration values with their defaults.
    """
    if context_keys:
        session_data = {
            key: os.environ.get(key) or "" for key in SESSION_CONTEXT_KEYS
        }
    else:
        session_data = dict.fromkeys(SESSION_CONTEXT_KEYS)

    # Configuration keys paired with fallback values. A None fallback means
    # the key is only present when the environment actually provides it.
    config_defaults = (
        # Name of current Config
        # TODO(marcus): Establish a suitable default config
        ("AVALON_CONFIG", "no_config"),
        # Name of Avalon in graphical user interfaces
        # Use this to customise the visual appearance of Avalon
        # to better integrate with your surrounding pipeline
        ("AVALON_LABEL", "Avalon"),
        # Used during any connections to the outside world
        ("AVALON_TIMEOUT", "1000"),
        # Address to Asset Database
        ("AVALON_MONGO", "mongodb://localhost:27017"),
        # Name of database used in MongoDB
        ("AVALON_DB", "avalon"),
        # Address to Sentry
        ("AVALON_SENTRY", None),
        # Address to Deadline Web Service
        # E.g. http://192.167.0.1:8082
        ("AVALON_DEADLINE", None),
        # Enable features not necessarily stable, at the user's own risk
        ("AVALON_EARLY_ADOPTER", None),
        # Address of central asset repository, contains
        # the following interface:
        #   /upload
        #   /download
        #   /manager (optional)
        ("AVALON_LOCATION", "http://127.0.0.1"),
        # Boolean of whether to upload published material
        # to central asset repository
        ("AVALON_UPLOAD", None),
        # Generic username and password
        ("AVALON_USERNAME", "avalon"),
        ("AVALON_PASSWORD", "secret"),
        # Unique identifier for instances in working files
        ("AVALON_INSTANCE_ID", "avalon.instance"),
        ("AVALON_CONTAINER_ID", "avalon.container"),
        # Enable debugging
        ("AVALON_DEBUG", None),
    )
    for key, fallback in config_defaults:
        value = os.environ.get(key) or fallback
        if value is not None:
            session_data[key] = value
    return session_data
class AvalonMongoConnection:
    """Process-wide, shared MongoDB connection for AvalonMongoDB wrappers.

    Keeps a single pymongo client plus a registry of AvalonMongoDB objects
    that use it; the client is created on first install and closed once no
    registered database is installed anymore.
    """
    # Shared pymongo.MongoClient (lazily created).
    _mongo_client = None
    # Whether the shared client is currently established.
    _is_installed = False
    # Registry: dbcon.id -> {"object": AvalonMongoDB, "installed": bool}
    _databases = {}
    log = logging.getLogger("AvalonMongoConnection")
    @classmethod
    def register_database(cls, dbcon):
        """Add *dbcon* to the registry (no-op if already registered)."""
        if dbcon.id in cls._databases:
            return
        cls._databases[dbcon.id] = {
            "object": dbcon,
            "installed": False
        }
    @classmethod
    def database(cls):
        """Return the pymongo database named by the AVALON_DB env var."""
        return cls._mongo_client[os.environ["AVALON_DB"]]
    @classmethod
    def mongo_client(cls):
        """Return the shared pymongo client (None before install)."""
        return cls._mongo_client
    @classmethod
    def install(cls, dbcon):
        """Ensure the shared client exists and mark *dbcon* installed."""
        if not cls._is_installed or cls._mongo_client is None:
            cls._mongo_client = cls.create_connection()
            cls._is_installed = True
        cls.register_database(dbcon)
        cls._databases[dbcon.id]["installed"] = True
        cls.check_db_existence()
    @classmethod
    def is_installed(cls, dbcon):
        """Return True if *dbcon* is registered and marked installed."""
        info = cls._databases.get(dbcon.id)
        if not info:
            return False
        return cls._databases[dbcon.id]["installed"]
    @classmethod
    def _uninstall(cls):
        # Close the shared client; AttributeError covers the None case.
        try:
            cls._mongo_client.close()
        except AttributeError:
            pass
        cls._is_installed = False
        cls._mongo_client = None
    @classmethod
    def uninstall(cls, dbcon, force=False):
        """Mark *dbcon* uninstalled; close the client when none remain.

        With force=True every registered database is uninstalled and the
        client is closed unconditionally.
        """
        if force:
            for key in cls._databases:
                cls._databases[key]["object"].uninstall()
            cls._uninstall()
            return
        cls._databases[dbcon.id]["installed"] = False
        cls.check_db_existence()
        any_is_installed = False
        for key in cls._databases:
            if cls._databases[key]["installed"]:
                any_is_installed = True
                break
        if not any_is_installed:
            cls._uninstall()
    @classmethod
    def check_db_existence(cls):
        """Drop registry entries whose only reference is the registry itself.

        Reads the CPython reference count of each registered object via
        ctypes; a count of 1 means no outside reference remains.
        NOTE(review): this is CPython-implementation-specific and the
        threshold of 1 is flagged below as unverified by the author.
        """
        items_to_pop = set()
        for db_id, info in cls._databases.items():
            obj = info["object"]
            # TODO check if should check for 1 or more
            cls.log.info(ctypes.c_long.from_address(id(obj)).value)
            if ctypes.c_long.from_address(id(obj)).value == 1:
                items_to_pop.add(db_id)
        for db_id in items_to_pop:
            cls._databases.pop(db_id, None)
    @classmethod
    def create_connection(cls):
        """Create a pymongo client from AVALON_MONGO/AVALON_TIMEOUT env vars.

        Pings the server up to 3 times (growing the timeout by 1.5x per
        retry) and raises IOError when the server stays unreachable.
        """
        timeout = int(os.environ["AVALON_TIMEOUT"])
        mongo_url = os.environ["AVALON_MONGO"]
        kwargs = {
            "host": mongo_url,
            "serverSelectionTimeoutMS": timeout
        }
        port = extract_port_from_url(mongo_url)
        if port is not None:
            kwargs["port"] = int(port)
        mongo_client = pymongo.MongoClient(**kwargs)
        for _retry in range(3):
            try:
                t1 = time.time()
                # server_info() forces an actual round-trip to the server.
                mongo_client.server_info()
            except Exception:
                cls.log.warning("Retrying...")
                time.sleep(1)
                timeout *= 1.5
            else:
                break
        else:
            raise IOError((
                "ERROR: Couldn't connect to {} in less than {:.3f}ms"
            ).format(mongo_url, timeout))
        cls.log.info("Connected to {}, delay {:.3f}s".format(
            mongo_url, time.time() - t1
        ))
        return mongo_client
class AvalonMongoDB:
    """Per-session wrapper around the shared Avalon MongoDB connection.

    Unknown attribute access is proxied to the active project's collection
    (see __getattr__), so instances can be used much like a pymongo
    collection bound to Session["AVALON_PROJECT"].
    """

    def __init__(self, session=None, auto_install=True):
        """Create the wrapper.

        :param session: Session mapping; built from the environment when None.
        :param auto_install: Connect lazily on first use when True.
        """
        self._id = uuid4()
        self._database = None
        self.auto_install = auto_install
        if session is None:
            session = session_data_from_environment(context_keys=False)
        self.Session = session
        self.log = logging.getLogger(self.__class__.__name__)

    def __getattr__(self, attr_name):
        """Proxy unknown attributes to the active project's collection."""
        attr = None
        # BUGFIX: the check was inverted (`if self.is_installed() and ...`),
        # which never auto-installed; mirror the `database` property instead.
        if not self.is_installed() and self.auto_install:
            self.install()
        if self.is_installed():
            attr = getattr(
                self._database[self.active_project()],
                attr_name,
                None
            )
        if attr is None:
            # Reraise attribute error
            return self.__getattribute__(attr_name)
        # Decorate function
        if callable(attr):
            attr = auto_reconnect(attr)
        return attr

    @property
    def mongo_client(self):
        """Shared pymongo client (None before install)."""
        # BUGFIX: the original was missing `return` and always yielded None.
        return AvalonMongoConnection.mongo_client()

    @property
    def id(self):
        """Unique identifier of this wrapper instance."""
        return self._id

    @property
    def database(self):
        """The pymongo database, installing on demand when allowed."""
        if not self.is_installed() and self.auto_install:
            self.install()
        if self.is_installed():
            return self._database
        raise IOError(
            "'{}.database' requires to run install() first".format(
                self.__class__.__name__
            )
        )

    def is_installed(self):
        """Return True when this wrapper has an established connection."""
        return AvalonMongoConnection.is_installed(self)

    def install(self):
        """Establish a persistent connection to the database"""
        if self.is_installed():
            return
        AvalonMongoConnection.install(self)
        self._database = AvalonMongoConnection.database()

    def uninstall(self):
        """Close any connection to the database"""
        AvalonMongoConnection.uninstall(self)
        self._database = None

    @requires_install
    def active_project(self):
        """Return the name of the active project"""
        return self.Session["AVALON_PROJECT"]

    @requires_install
    @auto_reconnect
    def projects(self):
        """List available projects

        Returns:
            list of project documents
        """
        for project_name in self._database.collection_names():
            if project_name in ("system.indexes",):
                continue
            # Each collection will have exactly one project document
            document = self._database[project_name].find_one({
                "type": "project"
            })
            if document is not None:
                yield document

    @auto_reconnect
    def insert_one(self, item, *args, **kwargs):
        """Schema-validate *item* and insert it into the active project."""
        assert isinstance(item, dict), "item must be of type <dict>"
        schema.validate(item)
        return self._database[self.active_project()].insert_one(
            item, *args, **kwargs
        )

    @auto_reconnect
    def insert_many(self, items, *args, **kwargs):
        """Schema-validate all *items* and insert them in one call."""
        # check if all items are valid
        assert isinstance(items, list), "`items` must be of type <list>"
        for item in items:
            assert isinstance(item, dict), "`item` must be of type <dict>"
            schema.validate(item)
        return self._database[self.active_project()].insert_many(
            items, *args, **kwargs
        )

    def parenthood(self, document):
        """Return the chain of parent documents of *document*, nearest first.

        Follows the "parent" reference until it runs out; hero versions get
        their data replaced by the referenced real version's data.
        """
        assert document is not None, "This is a bug"
        parents = list()
        while document.get("parent") is not None:
            document = self.find_one({"_id": document["parent"]})
            if document is None:
                break
            if document.get("type") == "hero_version":
                _document = self.find_one({"_id": document["version_id"]})
                document["data"] = _document["data"]
            parents.append(document)
        return parents
| 27.585308 | 74 | 0.577012 |
73f3bd198dd98091e99fdec05b1c9b2d540b9dbe | 1,620 | py | Python | python/__init__.py | aska-0096/iGEMMgen | cff8507355d86e47f5b099cd9b8a81d94fab93d7 | [
"MIT"
] | 20 | 2020-04-14T14:39:24.000Z | 2022-02-23T19:37:04.000Z | python/__init__.py | aska-0096/iGEMMgen | cff8507355d86e47f5b099cd9b8a81d94fab93d7 | [
"MIT"
] | 38 | 2020-04-21T12:23:07.000Z | 2021-12-31T02:26:21.000Z | python/__init__.py | ROCmSoftwarePlatform/MISA | 7ced4086588377dabb347d8a21d7d3a704a28188 | [
"MIT"
] | 9 | 2020-04-20T06:34:16.000Z | 2022-02-23T19:37:06.000Z | ################################################################################
#
# MIT License
#
# Copyright (c) 2020-2021 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
# igemm module
import sys
from .codegen import *
from .igemm import *
from .operations import *
from .codegen_driver import *
from .sequence_driver import *
from .host_driver import *
# Refuse to run on interpreters older than 3.6.0 (0x30600f0 == 3.6.0 final).
if sys.hexversion < 0x30600f0:
    print("must use python 3.6+. current is {}".format(sys.version))
    sys.exit(-1)
| 41.538462 | 81 | 0.676543 |
73f3c82aa520a47f00c263255f2e453034ef9b7f | 5,040 | py | Python | sparv/modules/hunpos/morphtable_hist.py | heatherleaf/sparv-pipeline | 0fe5f27d0d82548ecc6cb21a69289668aac54cf1 | [
"MIT"
] | 17 | 2018-09-21T07:01:45.000Z | 2022-02-24T23:26:49.000Z | sparv/modules/hunpos/morphtable_hist.py | heatherleaf/sparv-pipeline | 0fe5f27d0d82548ecc6cb21a69289668aac54cf1 | [
"MIT"
] | 146 | 2018-11-13T19:13:25.000Z | 2022-03-31T09:57:56.000Z | sparv/modules/hunpos/morphtable_hist.py | heatherleaf/sparv-pipeline | 0fe5f27d0d82548ecc6cb21a69289668aac54cf1 | [
"MIT"
] | 5 | 2019-02-14T00:50:38.000Z | 2021-03-29T15:37:41.000Z | """Make morphtable for Swedish historical resources."""
import re
import sparv.util as util
from sparv import Model, ModelOutput, modelbuilder
# Constants
SALDO_TO_SUC = util.tagsets.mappings["saldo_to_suc"]
SALDO_TO_SUC["pm"] = {"PM.NOM"}
SALDO_TO_SUC["nl invar"] = {"NL.NOM"}
@modelbuilder("Hunpos morphtable for Swedish historical resources", language=["swe"])
def hist_morphtable(out: ModelOutput = ModelOutput("hunpos/hist/dalinm-swedberg_saldo_suc-tags.morphtable"),
                    swedberg: Model = Model("hunpos/hist/swedberg-gender.hunpos"),
                    dalin: Model = Model("hunpos/hist/dalinm.hunpos"),
                    saldosuc_morphtable: Model = Model("hunpos/saldo_suc-tags.morphtable")):
    """Read files and make a morphtable together with the information from SALDO (saldosuc_morphtable).

    Args:
        out (str, optional): Resulting morphtable file to be written.
            Defaults to ModelOutput("hunpos/hist/dalinm-swedberg_saldo_suc-tags.morphtable").
        swedberg (str, optional): Wordlist from Swedberg and corresponding SALDO MSD-tags.
            Defaults to Model("hunpos/hist/swedberg-gender.hunpos").
        dalin (str, optional): Wordlist from Dalin and corresponding SALDO MSD-tags.
            Defaults to Model("hunpos/hist/dalinm.hunpos").
        saldosuc_morphtable (str, optional): SALDO Hunpos morphtable.
            Defaults to Model("hunpos/saldo_suc-tags.morphtable").
    """
    words = {}
    _read_saldosuc(words, saldosuc_morphtable.path)
    for fil in [dalin, swedberg]:
        # Read via a context manager (the original open().readlines() never
        # closed the handle) and iterate lazily instead of readlines().
        with open(fil.path, encoding="utf-8") as wordlist:
            for line in wordlist:
                if not line.strip():
                    continue
                xs = line.split("\t")
                word, msd = xs[0].strip(), xs[1].strip()
                if " " in word:
                    if msd.startswith("nn"):  # We assume that the head of a noun mwe is the last word
                        word = word.split()[-1]
                    if msd.startswith("vb"):  # We assume that the head of a verbal mwe is the first word
                        word = word.split()[0]
                # If the tag is not present, we try to translate it anyway
                suc = SALDO_TO_SUC.get(msd, "")
                if not suc:
                    suc = _force_parse(msd)
                if suc:
                    # Record both lower- and title-case variants of the word.
                    words.setdefault(word.lower(), set()).update(suc)
                    words.setdefault(word.title(), set()).update(suc)
    # Use a distinct name for the output handle so the `out` parameter is
    # not shadowed (the original rebound `out` inside the with-statement).
    with open(out.path, encoding="UTF-8", mode="w") as out_file:
        for w, ts in list(words.items()):
            out_file.write("\t".join([w] + list(ts)) + "\n")
def _read_saldosuc(words, saldosuc_morphtable):
for line in open(saldosuc_morphtable, encoding="utf-8").readlines():
xs = line.strip().split("\t")
words.setdefault(xs[0], set()).update(set(xs[1:]))
def _force_parse(msd):
    """Try hard to translate a SALDO MSD tag into a set of SUC tags.

    Applies a series of heuristics (gender neutralization, parameter
    reordering for nouns, adding case for adjectives) and finally falls
    back to a regex-based mapping. Successful translations are memoized
    into the module-level SALDO_TO_SUC dict.

    :param msd: SALDO MSD tag string, parameters separated by spaces.
    :return: Set of SUC tags (possibly empty).
    """
    # This is a modification of _make_saldo_to_suc in utils.tagsets.py
    params = msd.split()
    # try ignoring gender, m/f => u
    for i, param in enumerate(params):
        if param.strip() in ["m", "f"]:
            params[i] = "u"
    new_suc = SALDO_TO_SUC.get(" ".join(params), "")
    if new_suc:
        # print "Add translation", msd,new_suc
        SALDO_TO_SUC[msd] = new_suc
        return new_suc
    # try changing place: nn sg n indef nom => nn n sg indef nom
    if params[0] == "nn":
        # NOTE(review): assumes exactly 5 parameters for "nn" tags —
        # fewer would raise IndexError; confirm against the tagset.
        new_suc = SALDO_TO_SUC.get(" ".join([params[0], params[2], params[1], params[3], params[4]]), "")
        if new_suc:
            # print "Add translation", msd,new_suc
            SALDO_TO_SUC[msd] = new_suc
            return new_suc
    # try adding case info: av pos def pl => av pos def pl nom/gen
    if params[0] == "av":
        new_suc = SALDO_TO_SUC.get(" ".join(params + ["nom"]), set())
        new_suc.update(SALDO_TO_SUC.get(" ".join(params + ["gen"]), set()))
        if new_suc:
            # print "Add translation", msd,new_suc
            SALDO_TO_SUC[msd] = new_suc
            return new_suc
    # Fallback: build a SUC-parameter string and match it against the
    # ordered (pattern, replacement) pairs from the tag mappings.
    paramstr = " ".join(util.tagsets.mappings["saldo_params_to_suc"].get(prm, prm.upper()) for prm in params)
    # NOTE(review): if _suc_tag_replacements were empty, `m` would be
    # unbound below (NameError); it appears to be assumed non-empty.
    for (pre, post) in util.tagsets.tagmappings._suc_tag_replacements:
        m = re.match(pre, paramstr)
        if m:
            break
    if m is None:
        return set()
    # Turn the replacement into a regex that matches whole SUC tags.
    sucfilter = m.expand(post).replace(" ", r"\.").replace("+", r"\+")
    new_suc = set(suctag for suctag in util.tagsets.tags["suc_tags"] if re.match(sucfilter, suctag))
    SALDO_TO_SUC[msd] = new_suc
    return new_suc
@modelbuilder("Swedberg wordlist", language=["swe"])
def download_swedberg_wordlist(out: ModelOutput = ModelOutput("hunpos/hist/swedberg-gender.hunpos")):
    """Download Swedberg wordlist."""
    # Fetched from the sparv-models repository into the model directory.
    out.download("https://github.com/spraakbanken/sparv-models/raw/master/hunpos/hist/swedberg-gender.hunpos")
@modelbuilder("Dalin wordlist", language=["swe"])
def download_dalin_wordlist(out: ModelOutput = ModelOutput("hunpos/hist/dalinm.hunpos")):
    """Download Dalin wordlist."""
    # Fetched from the sparv-models repository into the model directory.
    out.download("https://github.com/spraakbanken/sparv-models/raw/master/hunpos/hist/dalinm.hunpos")
| 41.652893 | 110 | 0.628175 |
73f3e906e1322bca38dc0df14f78b58884386c45 | 2,958 | py | Python | components/shelly/switch.py | tefinger/hass | 30b58902b887bdfc39f65783cbf964fb56bfb1ca | [
"MIT"
] | null | null | null | components/shelly/switch.py | tefinger/hass | 30b58902b887bdfc39f65783cbf964fb56bfb1ca | [
"MIT"
] | null | null | null | components/shelly/switch.py | tefinger/hass | 30b58902b887bdfc39f65783cbf964fb56bfb1ca | [
"MIT"
] | null | null | null | """
Shelly platform for the switch component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/shelly/
"""
import logging
from homeassistant.components.switch import SwitchDevice
from homeassistant.helpers.entity import ToggleEntity
# from .sensor import ShellySensor
from . import (ShellyDevice, get_device_from_hass,
ShellyBlock, get_block_from_hass)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, _config, add_devices, discovery_info=None):
    """Setup the Shelly Switch platform."""
    # Discovery info carrying a 'firmware' key denotes a firmware-update
    # pseudo-switch rather than a physical relay.
    if 'firmware' in discovery_info:
        block = get_block_from_hass(hass, discovery_info)
        add_devices([ShellyFirmwareUpdate(block, hass)])
        return
    dev = get_device_from_hass(hass, discovery_info)
    add_devices([ShellySwitch(dev, hass)])
class ShellySwitch(ShellyDevice, SwitchDevice):
    """Representation of an Shelly Switch."""
    def __init__(self, dev, hass):
        """Initialize an ShellySwitch."""
        ShellyDevice.__init__(self, dev, hass)
        self._state = None
        # Populate the initial state from the device.
        self.update()
    @property
    def current_power_w(self):
        """Return the current power usage in W."""
        # Only devices with power metering expose sensorValues.
        if hasattr(self._dev, 'sensorValues'):
            return self._dev.sensorValues['watt']
        return None
    @property
    def today_energy_kwh(self):
        """Return the today total energy usage in kWh (not supported)."""
        return None
    @property
    def is_on(self):
        """Get device state"""
        return self._state
    def turn_on(self, **_kwargs):
        """Turn on device"""
        self._dev.turn_on()
    def turn_off(self, **_kwargs):
        """Turn off device"""
        self._dev.turn_off()
    def update(self):
        """Fetch new state data for this switch."""
        self._state = self._dev.state
class ShellyFirmwareUpdate(ShellyBlock, SwitchDevice):
    """Switch entity that triggers a firmware upgrade on a Shelly block."""
    def __init__(self, block, hass):
        """Initialize the firmware-update pseudo-switch for *block*."""
        ShellyBlock.__init__(self, block, hass, "_firmware_update")
        self.entity_id = "switch" + self.entity_id
        self._name = "Upgrade firmware " + self._name
        # True while an update has been triggered.
        self._updating = False
        # Back-reference so the block can find its update switch.
        block.firmware_switch = self
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def is_on(self):
        """Return true if is on."""
        return self._updating
    async def async_turn_on(self, **_kwargs):
        """Trig the firmware update"""
        self._updating = True
        self.schedule_update_ha_state(False)
        self._block.update_firmware()
    async def async_turn_off(self, **_kwargs):
        """Do nothing"""
        self.schedule_update_ha_state(False)
    def remove(self):
        """Detach from the block and remove the entity."""
        self._block.firmware_switch = None
        ShellyBlock.remove(self)
73f3f75fea02f76c887322fb5db2ad6bd8dec9b2 | 16,889 | py | Python | ENCODE_controlled_by_backfill.py | T2DREAM/pyencoded-tools | 75fa636995bfc9fe181f9af490ce70dde3f6ce21 | [
"MIT"
] | null | null | null | ENCODE_controlled_by_backfill.py | T2DREAM/pyencoded-tools | 75fa636995bfc9fe181f9af490ce70dde3f6ce21 | [
"MIT"
] | null | null | null | ENCODE_controlled_by_backfill.py | T2DREAM/pyencoded-tools | 75fa636995bfc9fe181f9af490ce70dde3f6ce21 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: latin-1 -*-
import argparse
import os.path
import encodedcc
import sys
EPILOG = '''
Script to fix the controlled_by backfill problems
This is a dryrun default script, run with '--update' to PATCH data
Useage:
%(prog)s --infile MyFile.txt
%(prog)s --infile ENCSR000AAA
%(prog)s --infile ENCSR000AAA,ENCSR000AAB,ENCSR000AAC
%(prog)s --query "/search/?type=Experiment"
Script will take a file with single column list of accessions
Can also take a single accession or comma separated list of accessions
A query from which to gather accessions
%(prog)s --method single
%(prog)s --method multi
%(prog)s --method biosample
There are three methods to pick from
"single" assumes one replicate in the control
"multi" assumes one control with number of replicates equal to number of replicates in experiment
"biosample" assumes multiple controls that should be matched on biosample
***By NOT selecting the '--method' option the script will try to guess at what the correct method is***
%(prog)s --ignore_runtype
This makes the script ignore the value of the paired ends, default is off
%(prog)s --missing
Script will print out only the names of files missing controlled_by
For more details:
%(prog)s --help
'''
def getArgs():
    """Parse command-line arguments for the controlled_by backfill script.

    :return: argparse.Namespace with method/infile/query/key/keyfile and
        the boolean flags (ignore_runtype, debug, update, missing).
    """
    parser = argparse.ArgumentParser(
        description=__doc__, epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--method',
                        help="'single' = there is only one replicate in the control, \
                        'multi' = one control with same number of replicates as experiment has replicates, \
                        'biosample' = multiple controls should be matched on the biosample",
                        choices=["single", "multi", "biosample"])
    parser.add_argument('--ignore_runtype',
                        help="Ignores value of paired-end. Default is off",
                        default=False,
                        action='store_true')
    parser.add_argument('--infile',
                        help="file containing single column list of object accessions,\
                        single accession, or comma separated list of accessions")
    parser.add_argument('--query',
                        help="query of objects you want to process")
    parser.add_argument('--key',
                        default='default',
                        help="The keypair identifier from the keyfile. \
                        Default is --key=default")
    parser.add_argument('--keyfile',
                        default=os.path.expanduser("~/keypairs.json"),
                        help="The keypair file. Default is --keyfile=%s" % (os.path.expanduser("~/keypairs.json")))
    parser.add_argument('--debug',
                        default=False,
                        action='store_true',
                        help="Print debug messages. Default is off.")
    parser.add_argument('--update',
                        default=False,
                        action='store_true',
                        help="Let the script PATCH the data. Default is off")
    parser.add_argument('--missing',
                        default=False,
                        action='store_true',
                        help="Only print files that are missing controlled_by.\
                        Default is off")
    args = parser.parse_args()
    return args
class BackFill:
    """Compute (and optionally PATCH) controlled_by links for experiment files.

    Each matching strategy (single_rep, multi_rep, multi_control) appends
    records to self.dataList and, when update is True, PATCHes the matched
    experiment fastq files via the ENCODE API.
    """
    def __init__(self, connection, debug=False, missing=False, update=False, ignore_runtype=False):
        # ENC_Connection used for all API calls.
        self.connection = connection
        # Print diagnostic messages when True.
        self.DEBUG = debug
        # Only consider files lacking controlled_by when True.
        self.MISSING = missing
        # Actually PATCH data when True (otherwise dryrun).
        self.update = update
        # Ignore the paired_end value when matching files.
        self.ignore_runtype = ignore_runtype
        # Accumulated match records ({"ExpAcc", "Method", "ExpFile", "ConFile"}).
        self.dataList = []
    def updater(self, exp, con):
        ''' helper function runs the update step'''
        # Merge the new control file into the existing controlled_by list.
        temp = encodedcc.get_ENCODE(exp, self.connection).get("controlled_by", [])
        if con not in temp:
            control = temp + [con]
            patch_dict = {"controlled_by": control}
            print("patching experiment file {} with controlled_by {}".format(exp, con))
            encodedcc.patch_ENCODE(exp, self.connection, patch_dict)
        else:
            print("ERROR: controlled_by for experiment file {} already contains {}".format(exp, con))
    def single_rep(self, obj):
        '''one control with one replicate in control,
        multiple replicates in experiment'''
        # Every control fastq is linked to every experiment fastq.
        control_files = encodedcc.get_ENCODE(obj["possible_controls"][0]["accession"], self.connection, frame="embedded").get("files", [])
        if len(control_files) == 0:
            if self.DEBUG:
                print("Control object {} has no files".format(obj["possible_controls"][0]["accession"]), file=sys.stderr)
            return
        for c in control_files:
            if c.get("file_type", "") == "fastq":
                exp_list = []
                for e in obj["files"]:
                    if e.get("file_type", "") == "fastq":
                        if not self.MISSING or (self.MISSING and not e.get("controlled_by")):
                            exp_list.append(e["accession"])
                for exp in exp_list:
                    temp = {"ExpAcc": obj["accession"], "Method": "Single", "ExpFile": exp, "ConFile": c["accession"]}
                    self.dataList.append(temp)
                    if self.update:
                        self.updater(exp, c["accession"])
                    if self.DEBUG:
                        print("ExpFile: {}, ConFile: {}".format(temp["ExpFile"], temp["ConFile"]))
    def pair_dict_maker(self, x_data, x):
        ''' helper function makes the exp_data
        and con_data dictionaries'''
        # Maps file accession -> "<bio replicate number>-<paired_end>" key
        # used to match experiment files against control files.
        x_file_bio_num = x.get("biological_replicates")
        x_file_paired = x.get("paired_end")
        x_file_acc = x["accession"]
        if self.ignore_runtype:
            x_file_paired = None
        x_pair = str(x_file_bio_num[0]) + "-" + str(x_file_paired)
        x_data[x_file_acc] = x_pair
    def multi_rep(self, obj):
        '''one control, with one replicate in
        control per replicate in experiment'''
        control_files = encodedcc.get_ENCODE(obj["possible_controls"][0]["accession"], self.connection, frame="embedded").get("files", [])
        control_replicates = obj["possible_controls"][0].get("replicates", [])
        exp_data = {}
        con_data = {}
        # Replicate counts must agree for one-to-one matching.
        if len(control_replicates) != len(obj["replicates"]):
            if self.DEBUG:
                print("Control has {} replicates and experiment has {} replicates".format(len(control_replicates), len(obj["replicates"])), file=sys.stderr)
            return
        if len(control_files) == 0:
            if self.DEBUG:
                print("Control {} has no files".format(obj["possible_controls"][0]["accession"]), file=sys.stderr)
            return
        for e in obj["files"]:
            if e.get("file_type", "") == "fastq":
                if not self.MISSING or (self.MISSING and not e.get("controlled_by")):
                    self.pair_dict_maker(exp_data, e)
        for c in control_files:
            if c.get("file_type", "") == "fastq":
                self.pair_dict_maker(con_data, c)
        # Argument order decides which side drives the pairing in mini().
        if self.ignore_runtype:
            self.mini(exp_data, con_data, obj)
        else:
            self.mini(con_data, exp_data, obj)
    def mini(self, x_data, y_data, obj):
        ''' just a helper function
        does all the fancy sorting for multi rep
        '''
        # Pair files whose "replicate-pairedend" keys agree, then record
        # (and optionally PATCH) each experiment/control file pair.
        for x_key in x_data.keys():
            temp_list = []
            for y_key in y_data.keys():
                if x_data[x_key] == y_data[y_key]:
                    temp_list.append(y_key)
            if self.ignore_runtype:
                # Here x_key is the experiment file, t the control file.
                for t in temp_list:
                    temp = {"ExpAcc": obj["accession"], "Method": "Multi-runtype ignored", "ExpFile": x_key, "ConFile": t}
                    self.dataList.append(temp)
                    if self.update:
                        self.updater(x_key, t)
                    if self.DEBUG:
                        print("ExpFile: {}, ConFile: {}".format(temp["ExpFile"], temp["ConFile"]))
            else:
                # Here x_key is the control file, t the experiment file.
                for t in temp_list:
                    temp = {"ExpAcc": obj["accession"], "Method": "Multi", "ExpFile": t, "ConFile": x_key}
                    self.dataList.append(temp)
                    if self.update:
                        self.updater(t, x_key)
                    if self.DEBUG:
                        print("ExpFile: {}, ConFile: {}".format(temp["ExpFile"], temp["ConFile"]))
    def multi_control(self, obj):
        '''multiple controls, match on biosample'''
        # Build biosample accession -> control fastq accession mapping.
        con_data = {}
        val = True
        for con in obj["possible_controls"]:
            c = encodedcc.get_ENCODE(con["accession"], self.connection, frame="embedded")
            if c.get("replicates"):
                for rep in c["replicates"]:
                    if c.get("files"):
                        con_bio_acc = rep["library"]["biosample"]["accession"]
                        con_bio_num = rep["biological_replicate_number"]
                        for f in c["files"]:
                            if f.get("file_type", "") == "fastq":
                                con_file_bio_num = f["biological_replicates"]
                                if con_bio_num in con_file_bio_num:
                                    con_file_acc = f["accession"]
                                    con_data[con_bio_acc] = con_file_acc
                    else:
                        if self.DEBUG:
                            print("No files found for control {}".format(con["accession"]), file=sys.stderr)
                        val = False
            else:
                if self.DEBUG:
                    print("No replicates found in control {}".format(con["accession"]), file=sys.stderr)
                val = False
        # Only proceed when every control had replicates and files.
        if val:
            # Build biosample accession -> experiment fastq accession mapping.
            exp_data = {}
            for e in obj["replicates"]:
                exp_bio_acc = e["library"]["biosample"]["accession"]
                exp_bio_num = e["biological_replicate_number"]
                for f in obj["files"]:
                    if f.get("file_type", "") == "fastq":
                        if not self.MISSING or (self.MISSING and not f.get("controlled_by")):
                            exp_file_bio_num = f["biological_replicates"]
                            if exp_bio_num in exp_file_bio_num:
                                exp_file_acc = f["accession"]
                                exp_data[exp_bio_acc] = exp_file_acc
            # Join experiment and control files on the shared biosample.
            for key in exp_data.keys():
                if con_data.get(key):
                    temp = {"ExpAcc": obj["accession"], "Method": "Biosample", "ExpFile": exp_data[key], "ConFile": con_data[key]}
                    self.dataList.append(temp)
                    if self.update:
                        self.updater(exp_data[key], con_data[key])
                    if self.DEBUG:
                        print("Biosample: {}, ExpFile: {}, ConFile: {}".format(key, temp["ExpFile"], temp["ConFile"]))
def main():
    """Entry point: backfill "controlled_by" links between experiment and
    control FASTQ files.

    Accessions come either from ``--infile`` (a file with one accession per
    line, or a comma-separated string) or from an ENCODE ``--query``.  Each
    experiment is validated (must have replicates, files and
    possible_controls, and every control must have replicates and files),
    then dispatched to one of the BackFill pairing strategies -- either the
    one forced by ``--method`` or one chosen automatically from the
    replicate counts.  A tab-separated report of every pairing found is
    printed at the end.  Exits with status 1 if no accessions are resolved.
    """
    args = getArgs()
    key = encodedcc.ENC_Key(args.keyfile, args.key)
    connection = encodedcc.ENC_Connection(key)
    accessions = []
    if args.update:
        print("This is an UPDATE run data will be PATCHed")
    else:
        print("This is a dryrun, no data will be changed")
    if args.infile:
        if os.path.isfile(args.infile):
            # Use a context manager so the handle is closed deterministically
            # (the original inline open() leaked it).
            with open(args.infile) as infile:
                accessions = [line.rstrip('\n') for line in infile]
        else:
            # Not a file on disk: treat the argument as a comma-separated list.
            accessions = args.infile.split(",")
    elif args.query:
        if "search" in args.query:
            temp = encodedcc.get_ENCODE(args.query, connection).get("@graph", [])
        else:
            temp = [encodedcc.get_ENCODE(args.query, connection)]
        if any(temp):
            # Accept whichever identifier each object carries, in order of
            # preference: accession, uuid, @id, first alias.
            for obj in temp:
                if obj.get("accession"):
                    accessions.append(obj["accession"])
                elif obj.get("uuid"):
                    accessions.append(obj["uuid"])
                elif obj.get("@id"):
                    accessions.append(obj["@id"])
                elif obj.get("aliases"):
                    accessions.append(obj["aliases"][0])
    if len(accessions) == 0:
        # if something happens and we end up with no accessions stop
        print("ERROR: object has no identifier", file=sys.stderr)
        sys.exit(1)
    else:
        # Create ONE collector for the whole run.  Previously a new BackFill
        # was built per accession, so the final report only contained the
        # last experiment's pairings, and the name was undefined when no
        # accession passed validation.
        backfill = BackFill(connection, debug=args.debug, missing=args.missing, update=args.update, ignore_runtype=args.ignore_runtype)
        for acc in accessions:
            obj = encodedcc.get_ENCODE(acc, connection, frame="embedded")
            isValid = True
            check = ["replicates", "files"]
            for c in check:
                if not obj.get(c):
                    if args.debug:
                        print("Missing {} for {}".format(c, acc), file=sys.stderr)
                    isValid = False
            if obj.get("possible_controls"):
                for p in obj["possible_controls"]:
                    for c in check:
                        # Bug fix: validate the control object ``p`` itself;
                        # the original re-checked ``obj``, so controls missing
                        # replicates/files slipped through (the error message
                        # already reported p["accession"]).
                        if not p.get(c):
                            if args.debug:
                                print("Missing {} for {}".format(c, p["accession"]), file=sys.stderr)
                            isValid = False
            else:
                isValid = False
                if args.debug:
                    print("Missing possible_controls for {}".format(acc), file=sys.stderr)
            if isValid:
                if args.method == "single":
                    if args.debug:
                        print("SINGLE REP {}".format(acc))
                    backfill.single_rep(obj)
                elif args.method == "multi":
                    if args.debug:
                        print("MULTI REP {}".format(acc))
                    backfill.multi_rep(obj)
                elif args.method == "biosample":
                    if args.debug:
                        print("BIOSAMPLE {}".format(acc))
                    backfill.multi_control(obj)
                else:
                    # No method forced: choose from the replicate counts.
                    exp_rep = len(obj["replicates"])
                    exp_con = len(obj["possible_controls"])
                    if exp_con == 1:
                        # one possible control
                        con_rep = len(obj["possible_controls"][0]["replicates"])
                        if con_rep == exp_rep:
                            # same number experiment replicates as control replicates
                            # method is multi
                            if args.debug:
                                print("MULTI REP {}".format(acc))
                            backfill.multi_rep(obj)
                        elif con_rep == 1:
                            # one control replicate and multiple experiment replicates
                            # method is single
                            if args.debug:
                                print("SINGLE REP {}".format(acc))
                            backfill.single_rep(obj)
                        else:
                            if args.debug:
                                print("Experiment {} contains {} experiment replicates and {} control replicates and so does not fit the current pattern!".format(acc, exp_rep, con_rep))
                    elif exp_con > 1:
                        # more than one possible control
                        con_reps = 0
                        for con in obj["possible_controls"]:
                            if len(con["replicates"]) == 1:
                                con_reps += 1
                        if con_reps == exp_rep:
                            # same number of controls with one replicate as number of experiment replicates
                            # method is biosample
                            if args.debug:
                                print("BIOSAMPLE {}".format(acc))
                            backfill.multi_control(obj)
                        else:
                            # Bug fix: the message formatted ``con_rep``, which is
                            # undefined in this branch (NameError under --debug);
                            # the single-replicate control count is ``con_reps``.
                            if args.debug:
                                print("Experiment {} contains {} experiment replicates and {} control replicates between {} total controls and so does not fit the current pattern!".format(acc, exp_rep, con_reps, exp_con))
                    else:
                        if args.debug:
                            print("Experiment {} does not fit any of the current patterns!".format(acc))
        if len(backfill.dataList) > 0:
            print("Experiment\tMethod\tExperimentFile\tControlFile")
            for data in backfill.dataList:
                print("{ExpAcc}\t{Method}\t{ExpFile}\t{ConFile}".format(ExpAcc=data["ExpAcc"], Method=data["Method"], ExpFile=data["ExpFile"], ConFile=data["ConFile"]))
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| 46.019074 | 220 | 0.519214 |
73f3f970e787c02d1f35d2c8fa160399482e8fa5 | 2,029 | py | Python | spectrumdevice/__init__.py | KCL-BMEIS/spectrumdevice | f0eecbd1d3b58dc9a9f226c4c75d7f398b1ea551 | [
"MIT"
] | 4 | 2022-01-10T08:35:23.000Z | 2022-01-19T12:06:05.000Z | spectrumdevice/__init__.py | KCL-BMEIS/spectrumdevice | f0eecbd1d3b58dc9a9f226c4c75d7f398b1ea551 | [
"MIT"
] | 8 | 2022-01-05T14:07:10.000Z | 2022-03-17T20:03:27.000Z | spectrumdevice/__init__.py | KCL-BMEIS/spectrumdevice | f0eecbd1d3b58dc9a9f226c4c75d7f398b1ea551 | [
"MIT"
] | null | null | null | """
`spectrumdevice` is a high-level, object-oriented Python library for controlling Spectrum Instrumentation digitisers.
It can connect to individual digitisers or
[StarHubs](https://spectrum-instrumentation.com/en/m4i-star-hub) (e.g. the
[NetBox](https://spectrum-instrumentation.com/en/digitizernetbox)).
The main module `spectrumdevice` provides two classes `SpectrumCard` and `SpectrumStarHub` for controlling and receiving
data from individual digitisers and StarHubs respectively. Mock classes are also provided for testing software without
drivers installed or hardware connected.
The submodule `spectrumdevice.settings` provides Enums and Dataclasses wrapping the register values provided by the
Spectrum API, to be used for configuring hardware and interpreting responses received from hardware.
* [Source on GitHub](https://github.com/KCL-BMEIS/spectrumdevice)
* [README including quickstart](https://github.com/KCL-BMEIS/spectrumdevice/blob/main/README.md)
* [Examples](https://github.com/KCL-BMEIS/spectrumdevice/tree/main/example_scripts)
* [PyPi](https://pypi.org/project/spectrumdevice/)
* [API reference documentation](https://kcl-bmeis.github.io/spectrumdevice/)
"""
# Christian Baker, King's College London
# Copyright (c) 2021 School of Biomedical Engineering & Imaging Sciences, King's College London
# Licensed under the MIT. You may obtain a copy at https://opensource.org/licenses/MIT.
from .devices.measurement import Measurement
from .devices.spectrum_device import SpectrumDevice
from .devices.spectrum_card import SpectrumCard
from .devices.spectrum_channel import SpectrumChannel
from .devices.spectrum_star_hub import SpectrumStarHub
from spectrumdevice.devices.mocks import MockSpectrumCard, MockSpectrumStarHub
# Names exported by ``from spectrumdevice import *`` and treated as the
# package's public API by documentation tools.
__all__ = [
    "SpectrumDevice",
    "SpectrumCard",
    "SpectrumStarHub",
    "SpectrumChannel",
    "MockSpectrumCard",
    "MockSpectrumStarHub",
    "settings",
    "Measurement",
]

# The version string is generated at build time (see _version.py),
# so it is read dynamically rather than hard-coded here.
from . import _version

__version__ = _version.get_versions()["version"]  # type: ignore
| 42.270833 | 120 | 0.79448 |