{"repo": "openstate/open-raadsinformatie", "pull_number": 190, "instance_id": "openstate__open-raadsinformatie-190", "issue_numbers": "", "base_commit": "c633dec37c134359d19dac49630db71e4e315f00", "patch": "diff --git a/manage.py b/manage.py\n--- a/manage.py\n+++ b/manage.py\n@@ -3,24 +3,22 @@\n from datetime import datetime\n import json\n from glob import glob\n-import gzip\n from hashlib import sha1\n import os\n import requests\n import sys\n-from urlparse import urljoin\n \n+import redis\n import click\n from click.core import Command\n from click.decorators import _make_command\n \n-from elasticsearch import helpers as es_helpers\n from elasticsearch.exceptions import RequestError\n \n from ocd_backend.es import elasticsearch as es\n from ocd_backend.pipeline import setup_pipeline\n from ocd_backend.settings import SOURCES_CONFIG_FILE, \\\n- DEFAULT_INDEX_PREFIX, DUMPS_DIR, API_URL, LOCAL_DUMPS_DIR\n+ DEFAULT_INDEX_PREFIX, DUMPS_DIR, REDIS_HOST, REDIS_PORT\n from ocd_backend.utils.misc import load_sources_config\n \n \n@@ -171,11 +169,6 @@ def extract():\n \"\"\"Extraction pipeline\"\"\"\n \n \n-@cli.group()\n-def frontend():\n- \"\"\"Front-end API\"\"\"\n-\n-\n @cli.group()\n def dumps():\n \"\"\"Create and load dumps of indices\"\"\"\n@@ -360,7 +353,7 @@ def extract_start(source_id, subitem, entiteit, sources_config):\n \n # Check for old-style json sources\n if 'id' in source:\n- setup_pipeline(source)\n+ setup_pipeline.delay(source)\n return\n \n # New-style behaviour\n@@ -379,269 +372,104 @@ def extract_start(source_id, subitem, entiteit, sources_config):\n \n # Processing each item\n for source_id, source in selected_sources.items():\n+ click.echo('[%s] Start extract for %s' % (source_id, source_id))\n+\n+ selected_entities = []\n for item in source.get('entities'):\n if (not entiteit and item) or (entiteit and item.get('entity') in entiteit):\n+ selected_entities.append(item.get('entity'))\n+\n new_source = deepcopy(source)\n new_source.update(item)\n- setup_pipeline(new_source)\n-\n-\n-@command('runserver')\n-@click.argument('host', default='0.0.0.0')\n-@click.argument('port', default=5000, type=int)\n-def frontend_runserver(host, port):\n- \"\"\"\n- Run development server on ``host:port``.\n+ setup_pipeline.delay(new_source)\n \n- :param host: host to run dev server on (defaults to 0.0.0.0)\n- :param port: defaults to 5000\n- \"\"\"\n- from werkzeug.serving import run_simple\n- from ocd_frontend.wsgi import application\n- run_simple(host, port, application, use_reloader=True, use_debugger=True)\n+ click.echo('[%s] Processed pipelines: %s' % (source_id, ', '.join(selected_entities)))\n \n \n-@command('create')\n-@click.option('--index', default=None)\n-@click.pass_context\n-def create_dump(ctx, index):\n- \"\"\"\n- Create a dump of an index. If you don't provide an ``--index`` option,\n- you will be prompted with a list of available index names. 
Dumps are\n- stored as a gzipped txt file in ``settings.DUMPS_DIR//<\n- timestamp>_.gz``, and a symlink ``_latest.gz`` is\n- created, pointing to the last created dump.\n-\n- :param ctx: Click context, so we can issue other management commands\n- :param index: name of the index you want to create a dump for\n+@command('process')\n+@click.argument('modus')\n+@click.option('--source_path', default='*')\n+@click.option('--sources_config', default=SOURCES_CONFIG_FILE)\n+def extract_process(modus, source_path, sources_config):\n \"\"\"\n- if not index:\n- available_idxs = ctx.invoke(available_indices)\n- if not available_idxs:\n- return\n- index = click.prompt('Name of index to dump')\n-\n- if index not in available_idxs:\n- click.secho('\"%s\" is not an available index' % index, fg='red')\n- return\n-\n- match_all = {'query': {'match_all': {}}}\n-\n- total_docs = es.count(index=index).get('count')\n-\n- path = _create_path(path=os.path.join(DUMPS_DIR, index))\n- dump_name = '%(index_name)s_%(timestamp)s.gz' % {\n- 'index_name': index,\n- 'timestamp': datetime.now().strftime('%Y%m%d%H%M%S')\n- }\n- new_dump = os.path.join(path, dump_name)\n-\n- with gzip.open(new_dump, 'wb') as g:\n- with click.progressbar(es_helpers.scan(es, query=match_all, scroll='1m',\n- index=index),\n- length=total_docs) as documents:\n- for doc in documents:\n- g.write('%s\\n' % json.dumps(doc))\n-\n- click.secho('Generating checksum', fg='green')\n- checksum = _checksum_file(new_dump)\n- checksum_path = os.path.join(DUMPS_DIR, index, '%s.sha1' % dump_name)\n-\n- with open(checksum_path, 'w') as f:\n- f.write(checksum)\n+ Start extraction based on the flags in Redis.\n+ It uses the source_path in Redis db 1 to identify which municipalities should be extracted.\n+ A municipality can be set using 'SET ori.ibabs.arnhem enabled'.\n+ Currently, possible values are: enabled, disabled and archived.\n \n- click.secho('Created dump \"%s\" (checksum %s)' % (dump_name, checksum),\n- fg='green')\n-\n- latest = os.path.join(path, '%s_latest.gz' % index)\n- try:\n- os.unlink(latest)\n- except OSError:\n- click.secho('First time creating dump, skipping unlinking',\n- fg='yellow')\n- os.symlink(new_dump, latest)\n- click.secho('Created symlink \"%s_latest.gz\" to \"%s\"' % (index, new_dump),\n- fg='green')\n-\n- latest_checksum = os.path.join(os.path.dirname(checksum_path), '%s_latest.gz.sha1' % index)\n- try:\n- os.unlink(latest_checksum)\n- except OSError:\n- click.secho('First time creating dump, skipping unlinking checksum',\n- fg='yellow')\n- os.symlink(checksum_path, latest_checksum)\n- click.secho('Created symlink \"%s_latest.gz.sha1\" to \"%s\"' % (index, checksum_path),\n- fg='green')\n-\n-\n-@command('list')\n-@click.option('--api-url', default=API_URL)\n-def list_dumps(api_url):\n+ :param modus: the configuration to use for processing, e.g. enabled, archived or disabled. Settings for the chosen modus are looked up in Redis under the underscored key, e.g. _enabled.start_date\n+ :param source_path: path in redis to search, e.g. ori.ibabs.arnhem. Defaults to *\n+ :param sources_config: Path to file containing pipeline definitions. Defaults to the value of ``settings.SOURCES_CONFIG_FILE``\n \"\"\"\n- List available dumps of API instance at ``api_address``. Use this option to\n- obtain information about dumps available at other OpenCultuurData API\n- instances.\n+ redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=1)\n \n- :param api_url: URL of API location\n- \"\"\"\n- url = urljoin(api_url, 'dumps')\n+ available_sources = load_sources_config(sources_config)\n+ redis_sources = redis_client.keys(source_path)\n \n- try:\n- r = requests.get(url)\n- except:\n- click.secho('No OCD API instance with dumps available at {url}'\n- .format(url=url), fg='red')\n- return\n+ sources = []\n+ for redis_source in redis_sources:\n+ if redis_source[0:1] == '_':\n+ # Settings are underscored so we skip them\n+ continue\n \n- if not r.ok:\n- click.secho('Request on {url} failed'.format(url=url), fg='red')\n-\n- dumps = r.json().get('dumps', {})\n- for index in dumps:\n- click.secho(index, fg='green')\n- for dump in sorted(dumps.get(index, []), reverse=True):\n- click.secho('\\t{dump}'.format(dump=dump), fg='green')\n-\n-\n-@command('download')\n-@click.option('--api-url', default=API_URL, help='URL to API instance to fetch '\n- 'dumps from.')\n-@click.option('--destination', '-d', default=LOCAL_DUMPS_DIR,\n- help='Directory to download dumps to.')\n-@click.option('--collections', '-c', multiple=True)\n-@click.option('--all-collections', '-a', is_flag=True, expose_value=True,\n- help='Download latest version of all collections available')\n-def download_dumps(api_url, destination, collections, all_collections):\n- \"\"\"\n- Download dumps of OCD collections to your machine, for easy ingestion.\n+ source_value = redis_client.get(redis_source)\n+ if source_value.startswith('disabled'):\n+ # If value equals disabled we will not process the source\n+ continue\n+ elif modus in source_value:\n+ sources.append(redis_source)\n \n- :param api_url: URL to API instance to fetch dumps from. Defaults to ``ocd_frontend.settings.API_URL``, which is set to the API instance hosted by OpenCultuurData itself.\n- :param destination: path to local directory where dumps should be stored. Defaults to ``ocd_frontend.settings.LOCAL_DUMPS_DIR``.\n- :param collections: Names of collections to fetch dumps for. Optional; you will be prompted to select collections when not provided.\n- :param all_collections: If this flag is set, download all available dumps. Optional; you will be prompted to select collections when not provided.\n- \"\"\"\n- url = urljoin(api_url, 'dumps')\n- try:\n- r = requests.get(url)\n- except:\n- click.secho('Could not connect to API', fg='red')\n+ if not redis_sources:\n+ click.echo('No sources found in redis')\n return\n-\n- available_collections = [(i+1, index) for i, index in enumerate(r.json().get('dumps'))]\n- dumps = r.json().get('dumps')\n-\n- if all_collections:\n- # Download all the things\n- for collection in dumps:\n- _download_dump([d for d in dumps.get(collection)\n- if d.endswith('_latest.gz')][0],\n- collection=collection,\n- target_dir=destination)\n+ elif not sources:\n+ click.echo('Redis sources found but none match the modus %s' % modus)\n return\n \n- if not collections:\n- for i, dump in available_collections:\n- click.secho('{i}) {index}'.format(i=i, index=dump), fg='yellow')\n-\n- collection = click.prompt('For which collection do you want to download'\n- ' a dump? 
Please provide the number correspon'\n- 'ding to the collection that you want to down'\n- 'load', type=click.Choice([str(i[0]) for i in available_collections]))\n-\n- collection = dict(available_collections)[int(collection)]\n- for i, dump in enumerate(dumps[collection]):\n- click.secho('{i}) {dump}'.format(i=i+1, dump=dump), fg='yellow')\n-\n- dump_url = click.prompt('Which dump of the collection \"{collection}\" do'\n- ' you want to download? Please provide the numb'\n- 'er corresponding to the dump that you want to '\n- 'download',\n- type=click.Choice([str(j) for j in range(1, len(dumps[collection]) + 1)]))\n- dump_url = dumps[collection][int(dump_url)]\n-\n- _download_dump(dump_url=dump_url, collection=collection,\n- target_dir=destination)\n+ settings_path = '_%s.*' % modus\n+ setting_keys = redis_client.keys(settings_path)\n+ if not setting_keys:\n+ click.echo('No settings found in redis for %s' % settings_path)\n return\n \n- for collection in collections:\n- if collection not in dumps.keys():\n- click.secho('\"{}\" is not available as a dump, skipping'.format(collection),\n- fg='red')\n- continue\n+ settings = {}\n+ enabled_entities = []\n+ for key in setting_keys:\n+ _, _, name = key.rpartition('.')\n+ value = redis_client.get(key)\n+ if name == 'entities':\n+ enabled_entities = value.split(' ')\n+ else:\n+ settings[name] = value\n \n- for i, dump in enumerate(dumps[collection]):\n- click.secho('{i}) {dump}'.format(i=i+1, dump=dump), fg='yellow')\n+ for source in sources:\n+ try:\n+ project, provider, source_name = source.split('.')\n+ available_source = available_sources['%s.%s' % (project, provider)][source_name]\n \n- dump_url = click.prompt('Which dump of the collection \"{collection}\" do'\n- ' you want to download? Please provide the numb'\n- 'er corresponding to the dump that you want to '\n- 'download',\n- type=click.Choice([str(j) for j in range(1, len(dumps[collection]) + 1)]))\n- dump_url = dumps[collection][int(dump_url)]\n+ click.echo('[%s] Start extract for %s' % (source_name, source_name))\n \n- _download_dump(dump_url=dump_url, collection=collection,\n- target_dir=destination)\n+ selected_entities = []\n+ for entity in available_source['entities']:\n+ if not enabled_entities or entity.get('entity') in enabled_entities:\n+ selected_entities.append(entity.get('entity'))\n \n+ # Redis settings are overruled by source definitions, for some sources a start_date must be enforced\n+ new_source = deepcopy(settings)\n+ new_source.update(deepcopy(available_source))\n+ new_source.update(entity)\n \n-@command('load')\n-@click.option('--collection-dump', '-c', help='Path to dump of collection to load',\n- default=None, type=click.Path(exists=True))\n-@click.option('--collection-name', '-n', help='An index will be created with th'\n- 'is name. If left empty, collecti'\n- 'on name will be derived from dum'\n- 'p name.', default=None)\n-def load_dump(collection_dump, collection_name):\n- \"\"\"\n- Restore an index from a dump file.\n+ setup_pipeline.delay(new_source)\n \n- :param collection_dump: Path to a local gzipped dump to load.\n- :param collection_name: Name for the local index to restore the dump to. Optional; will be derived from the dump name, at your own risk. 
Note that the pipeline will add a \"ocd_\" prefix string to the collection name, to ensure the proper mapping and settings are applied.\n- \"\"\"\n- available_dumps = glob(os.path.join(LOCAL_DUMPS_DIR, '*/*.gz'))\n- if not collection_dump:\n- choices = []\n- for i, dump in enumerate(available_dumps):\n- choices.append(unicode(i+1))\n- click.secho('{i}) {dump}'.format(i=i+1, dump=dump), fg='green')\n- dump_idx = click.prompt('Choose one of the dumps listed above',\n- type=click.Choice(choices))\n- collection_dump = available_dumps[int(dump_idx) - 1]\n-\n- collection = os.path.abspath(collection_dump)\n- collection_id = '_'.join(collection.split('/')[-1].split('.')[0].split('_')[:2])\n-\n- if not collection_name:\n- collection_name = collection_id.replace('ocd_', '')\n-\n- source_definition = {\n- 'id': collection_id,\n- 'extractor': 'ocd_backend.extractors.staticfile.StaticJSONDumpExtractor',\n- 'transformer': 'ocd_backend.transformers.BaseTransformer',\n- 'loader': 'ocd_backend.loaders.ElasticsearchLoader',\n- 'cleanup': 'ocd_backend.tasks.CleanupElasticsearch',\n- 'item': 'ocd_backend.items.LocalDumpItem',\n- 'dump_path': collection,\n- 'index_name': collection_name\n- }\n-\n- click.secho(str(source_definition), fg='yellow')\n-\n- setup_pipeline(source_definition)\n-\n- click.secho('Queued items from {}. Please make sure your Celery workers'\n- ' are running, so the loaded items are processed.'.format(collection),\n- fg='green')\n+ click.echo('[%s] Started pipelines: %s' % (source_name, ', '.join(selected_entities)))\n+ except ValueError:\n+ click.echo('Invalid source format %s in redis' % source)\n+ except KeyError:\n+ click.echo('Source %s in redis does not exist in available sources' % source)\n \n \n # Register commands explicitly with groups, so we can easily use the docstring\n # wrapper\n-frontend.add_command(frontend_runserver)\n-\n-dumps.add_command(load_dump)\n-dumps.add_command(list_dumps)\n-dumps.add_command(create_dump)\n-dumps.add_command(download_dumps)\n-\n elasticsearch.add_command(es_put_template)\n elasticsearch.add_command(es_put_mapping)\n elasticsearch.add_command(create_indexes)\n@@ -650,6 +478,7 @@ def load_dump(collection_dump, collection_name):\n \n extract.add_command(extract_list_sources)\n extract.add_command(extract_start)\n+extract.add_command(extract_process)\n \n \n if __name__ == '__main__':\ndiff --git a/ocd_backend/__init__.py b/ocd_backend/__init__.py\n--- a/ocd_backend/__init__.py\n+++ b/ocd_backend/__init__.py\n@@ -6,17 +6,27 @@\n \n from ocd_backend.settings import CELERY_CONFIG\n \n-celery_app = Celery('ocd_backend', include=[\n- 'ocd_backend.models',\n- 'ocd_backend.models.definitions',\n- 'ocd_backend.extractors',\n- 'ocd_backend.extractors.ggm',\n- 'ocd_backend.transformers',\n- 'ocd_backend.transformers.ggm',\n- 'ocd_backend.enrichers.media_enricher',\n- 'ocd_backend.enrichers.media_enricher.static',\n- 'ocd_backend.loaders',\n- 'ocd_backend.tasks',\n+celery_app = Celery('ocd_backend',\n+ include=[\n+ 'ocd_backend.pipeline',\n+ 'ocd_backend.models',\n+ 'ocd_backend.models.definitions',\n+ 'ocd_backend.extractors',\n+ 'ocd_backend.transformers.goapi_committee',\n+ 'ocd_backend.transformers.goapi_meeting',\n+ 'ocd_backend.transformers.gv',\n+ 'ocd_backend.transformers.ibabs_committee',\n+ 'ocd_backend.transformers.ibabs_meeting',\n+ 'ocd_backend.transformers.ibabs_person',\n+ 'ocd_backend.transformers.notubiz_committee',\n+ 'ocd_backend.transformers.notubiz_meeting',\n+ 'ocd_backend.transformers.organizations',\n+ 
'ocd_backend.transformers.persons',\n+ 'ocd_backend.enrichers.media_enricher',\n+ 'ocd_backend.enrichers.media_enricher.static',\n+ 'ocd_backend.loaders.elasticsearch',\n+ 'ocd_backend.loaders.delta',\n+ 'ocd_backend.tasks',\n ])\n \n celery_app.conf.update(**CELERY_CONFIG)\ndiff --git a/ocd_frontend/__init__.py b/ocd_backend/alembic/__init__.py\nsimilarity index 100%\nrename from ocd_frontend/__init__.py\nrename to ocd_backend/alembic/__init__.py\ndiff --git a/ocd_backend/alembic/env.py b/ocd_backend/alembic/env.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/alembic/env.py\n@@ -0,0 +1,89 @@\n+# pylint: skip-file\n+\n+import os\n+import sys\n+from logging.config import fileConfig\n+\n+from sqlalchemy import create_engine\n+from sqlalchemy import pool\n+\n+from alembic import context\n+\n+# This needs to be added to allow the Alembic CLI to find the ocd_backend modules\n+parent_dir = os.path.abspath(os.path.join(os.getcwd(), \"../..\"))\n+sys.path.append(parent_dir)\n+\n+from ocd_backend.settings import POSTGRES_USERNAME, POSTGRES_PASSWORD, POSTGRES_HOST, POSTGRES_DATABASE\n+from ocd_backend.models.postgres_models import Base\n+\n+\n+# this is the Alembic Config object, which provides\n+# access to the values within the .ini file in use.\n+config = context.config\n+\n+# Interpret the config file for Python logging.\n+# This line sets up loggers basically.\n+fileConfig(config.config_file_name)\n+\n+# add your model's MetaData object here\n+# for 'autogenerate' support\n+# from myapp import mymodel\n+# target_metadata = mymodel.Base.metadata\n+target_metadata = Base.metadata\n+\n+# other values from the config, defined by the needs of env.py,\n+# can be acquired:\n+# my_important_option = config.get_main_option(\"my_important_option\")\n+# ... etc.\n+\n+\n+def get_url():\n+ return 'postgresql://%s:%s@%s/%s' % (POSTGRES_USERNAME, POSTGRES_PASSWORD, POSTGRES_HOST, POSTGRES_DATABASE)\n+\n+\n+def get_engine(*args, **kwargs):\n+ return create_engine(get_url(), *args, **kwargs)\n+\n+\n+def run_migrations_offline():\n+ \"\"\"Run migrations in 'offline' mode.\n+\n+ This configures the context with just a URL\n+ and not an Engine, though an Engine is acceptable\n+ here as well. 
By skipping the Engine creation\n+ we don't even need a DBAPI to be available.\n+\n+ Calls to context.execute() here emit the given string to the\n+ script output.\n+\n+ \"\"\"\n+ context.configure(\n+ url=get_url(), target_metadata=target_metadata, literal_binds=True\n+ )\n+\n+ with context.begin_transaction():\n+ context.run_migrations()\n+\n+\n+def run_migrations_online():\n+ \"\"\"Run migrations in 'online' mode.\n+\n+ In this scenario we need to create an Engine\n+ and associate a connection with the context.\n+\n+ \"\"\"\n+ connectable = get_engine(poolclass=pool.NullPool)\n+\n+ with connectable.connect() as connection:\n+ context.configure(\n+ connection=connection, target_metadata=target_metadata\n+ )\n+\n+ with context.begin_transaction():\n+ context.run_migrations()\n+\n+\n+if context.is_offline_mode():\n+ run_migrations_offline()\n+else:\n+ run_migrations_online()\ndiff --git a/ocd_backend/alembic/versions/03547c65804e_renamed_source_type_column.py b/ocd_backend/alembic/versions/03547c65804e_renamed_source_type_column.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/alembic/versions/03547c65804e_renamed_source_type_column.py\n@@ -0,0 +1,28 @@\n+\"\"\"renamed Source type column\n+\n+Revision ID: 03547c65804e\n+Revises: baeeac363d3d\n+Create Date: 2019-07-23 17:25:15.820503\n+\n+\"\"\"\n+from alembic import op\n+import sqlalchemy as sa\n+\n+\n+# revision identifiers, used by Alembic.\n+revision = '03547c65804e'\n+down_revision = 'baeeac363d3d'\n+branch_labels = None\n+depends_on = None\n+\n+\n+def upgrade():\n+ op.alter_column('source', 'type', new_column_name='entity_type')\n+ # ### commands auto generated by Alembic - please adjust! ###\n+ # ### end Alembic commands ###\n+\n+\n+def downgrade():\n+ op.alter_column('source', 'entity_type', new_column_name='type')\n+ # ### commands auto generated by Alembic - please adjust! ###\n+ # ### end Alembic commands ###\ndiff --git a/ocd_backend/alembic/versions/1afd444fda6b_change_property_order_column_to_.py b/ocd_backend/alembic/versions/1afd444fda6b_change_property_order_column_to_.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/alembic/versions/1afd444fda6b_change_property_order_column_to_.py\n@@ -0,0 +1,28 @@\n+\"\"\"change property order column to SmallInteger\n+\n+Revision ID: 1afd444fda6b\n+Revises: 03547c65804e\n+Create Date: 2019-07-31 12:14:20.967703\n+\n+\"\"\"\n+from alembic import op\n+import sqlalchemy as sa\n+\n+\n+# revision identifiers, used by Alembic.\n+revision = '1afd444fda6b'\n+down_revision = '03547c65804e'\n+branch_labels = None\n+depends_on = None\n+\n+\n+def upgrade():\n+ op.alter_column('property', 'order', type_=sa.SmallInteger)\n+ # ### commands auto generated by Alembic - please adjust! ###\n+ # ### end Alembic commands ###\n+\n+\n+def downgrade():\n+ # ### commands auto generated by Alembic - please adjust! 
###\n+ pass\n+ # ### end Alembic commands ###\ndiff --git a/ocd_backend/alembic/versions/24dbf579420e_switch_to_canonical_iri_or_id.py b/ocd_backend/alembic/versions/24dbf579420e_switch_to_canonical_iri_or_id.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/alembic/versions/24dbf579420e_switch_to_canonical_iri_or_id.py\n@@ -0,0 +1,34 @@\n+\"\"\"switch to canonical iri or id\n+\n+Revision ID: 24dbf579420e\n+Revises: 1afd444fda6b\n+Create Date: 2019-07-31 17:10:49.264005\n+\n+\"\"\"\n+from alembic import op\n+import sqlalchemy as sa\n+\n+\n+# revision identifiers, used by Alembic.\n+revision = '24dbf579420e'\n+down_revision = '1afd444fda6b'\n+branch_labels = None\n+depends_on = None\n+\n+\n+def upgrade():\n+ # ### commands auto generated by Alembic - please adjust! ###\n+ op.add_column('source', sa.Column('canonical_id', sa.String(), nullable=True))\n+ op.add_column('source', sa.Column('canonical_iri', sa.String(), nullable=True))\n+ op.drop_column('source', 'entity')\n+ op.drop_column('source', 'entity_type')\n+ # ### end Alembic commands ###\n+\n+\n+def downgrade():\n+ # ### commands auto generated by Alembic - please adjust! ###\n+ op.add_column('source', sa.Column('entity_type', sa.VARCHAR(), autoincrement=False, nullable=True))\n+ op.add_column('source', sa.Column('entity', sa.VARCHAR(), autoincrement=False, nullable=True))\n+ op.drop_column('source', 'canonical_iri')\n+ op.drop_column('source', 'canonical_id')\n+ # ### end Alembic commands ###\ndiff --git a/ocd_backend/alembic/versions/baeeac363d3d_first_migration.py b/ocd_backend/alembic/versions/baeeac363d3d_first_migration.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/alembic/versions/baeeac363d3d_first_migration.py\n@@ -0,0 +1,63 @@\n+\"\"\"first migration\n+\n+Revision ID: baeeac363d3d\n+Revises: \n+Create Date: 2019-07-23 11:43:16.531116\n+\n+\"\"\"\n+from alembic import op\n+import sqlalchemy as sa\n+from sqlalchemy_utils.types.uuid import UUIDType\n+\n+\n+# revision identifiers, used by Alembic.\n+revision = 'baeeac363d3d'\n+down_revision = None\n+branch_labels = None\n+depends_on = None\n+\n+\n+def upgrade():\n+ op.execute(sa.schema.CreateSequence(sa.schema.Sequence('ori_id_seq')))\n+ # ### commands auto generated by Alembic - please adjust! 
###\n+ op.create_table('resource',\n+ sa.Column('ori_id', sa.BigInteger(), nullable=False),\n+ sa.Column('iri', sa.String(), nullable=True),\n+ sa.PrimaryKeyConstraint('ori_id')\n+ )\n+ op.create_table('property',\n+ sa.Column('id', UUIDType(), nullable=False),\n+ sa.Column('resource_id', sa.BigInteger(), nullable=False),\n+ sa.Column('predicate', sa.String(), nullable=False),\n+ sa.Column('order', sa.BigInteger(), nullable=True),\n+ sa.Column('prop_resource', sa.BigInteger(), nullable=True),\n+ sa.Column('prop_string', sa.String(), nullable=True),\n+ sa.Column('prop_datetime', sa.DateTime(), nullable=True),\n+ sa.Column('prop_integer', sa.BigInteger(), nullable=True),\n+ sa.Column('prop_url', sa.String(), nullable=True),\n+ sa.CheckConstraint(u'NOT(prop_resource IS NULL AND prop_string IS NULL AND prop_datetime IS NULL AND prop_integer IS NULL AND prop_url IS NULL)'),\n+ sa.ForeignKeyConstraint(['prop_resource'], ['resource.ori_id'], ),\n+ sa.ForeignKeyConstraint(['resource_id'], ['resource.ori_id'], ),\n+ sa.PrimaryKeyConstraint('id')\n+ )\n+ op.create_table('source',\n+ sa.Column('id', sa.BigInteger(), nullable=False),\n+ sa.Column('iri', sa.String(), nullable=True),\n+ sa.Column('resource_ori_id', sa.BigInteger(), nullable=False),\n+ sa.Column('type', sa.String(), nullable=True),\n+ sa.Column('entity', sa.String(), nullable=True),\n+ sa.Column('used_file', sa.String(), nullable=True),\n+ sa.Column('created_at', sa.DateTime(), nullable=True),\n+ sa.Column('updated_at', sa.DateTime(), nullable=True),\n+ sa.ForeignKeyConstraint(['resource_ori_id'], ['resource.ori_id'], ),\n+ sa.PrimaryKeyConstraint('id')\n+ )\n+ # ### end Alembic commands ###\n+\n+\n+def downgrade():\n+ # ### commands auto generated by Alembic - please adjust! ###\n+ op.drop_table('source')\n+ op.drop_table('property')\n+ op.drop_table('resource')\n+ # ### end Alembic commands ###\ndiff --git a/ocd_backend/enrichers/__init__.py b/ocd_backend/enrichers/__init__.py\n--- a/ocd_backend/enrichers/__init__.py\n+++ b/ocd_backend/enrichers/__init__.py\n@@ -11,7 +11,7 @@\n class BaseEnricher(celery_app.Task):\n \"\"\"The base class that enrichers should inherit.\"\"\"\n \n- def run(self, *args, **kwargs):\n+ def start(self, *args, **kwargs):\n \"\"\"Start enrichment of a single item.\n \n This method is called by the transformer or by another enricher\n@@ -25,28 +25,29 @@ def run(self, *args, **kwargs):\n self.enricher_settings = kwargs['enricher_settings']\n \n for _, doc in iterate(args):\n- try:\n- for prop, value in doc.properties(props=True, rels=True):\n- try:\n- if not hasattr(value, 'enricher_task'):\n- continue\n- except AttributeError:\n+ for model in doc.traverse():\n+ try:\n+ if not hasattr(model, 'enricher_task'):\n continue\n+ except AttributeError:\n+ continue\n \n- self.enrich_item(value)\n-\n- except SkipEnrichment as e:\n- bugsnag.notify(e, severity=\"info\")\n- log.info('Skipping %s, reason: %s'\n- % (self.__class__.__name__, e.message))\n- except IOError as e:\n- # In the case of an IOError, disk space or some other\n- # serious problem might occur.\n- bugsnag.notify(e, severity=\"error\")\n- log.critical(e)\n- except Exception as e:\n- bugsnag.notify(e, severity=\"warning\")\n- log.warning('Unexpected error: %s, reason: %s' % (self.__class__.__name__, e))\n+ try:\n+ self.enrich_item(model)\n+ except SkipEnrichment as e:\n+ bugsnag.notify(e, severity=\"info\")\n+ log.info('Skipping %s, reason: %s'\n+ % (self.__class__.__name__, e.message))\n+ except IOError as e:\n+ # In the case of an IOError, disk space or 
some other\n+ # serious problem might occur.\n+ bugsnag.notify(e, severity=\"error\")\n+ log.critical(e)\n+ raise\n+ except Exception as e:\n+ bugsnag.notify(e, severity=\"warning\")\n+ log.warning('Unexpected error: %s, reason: %s' % (self.__class__.__name__, e))\n+ raise\n \n return args\n \ndiff --git a/ocd_backend/enrichers/media_enricher/__init__.py b/ocd_backend/enrichers/media_enricher/__init__.py\n--- a/ocd_backend/enrichers/media_enricher/__init__.py\n+++ b/ocd_backend/enrichers/media_enricher/__init__.py\n@@ -1,3 +1,4 @@\n+from ocd_backend import celery_app\n from ocd_backend.enrichers import BaseEnricher\n from ocd_backend.enrichers.media_enricher.tasks import ImageMetadata, \\\n MediaType, FileToText\n@@ -7,7 +8,6 @@\n from ocd_backend.log import get_source_logger\n from ocd_backend.settings import RESOLVER_BASE_URL\n from ocd_backend.utils.http import HttpRequestMixin\n-from ocd_backend.utils.misc import get_sha1_hash\n \n log = get_source_logger('enricher')\n \n@@ -78,6 +78,11 @@ def enrich_item(self, item):\n item.original_url))\n continue\n \n- media_file.close()\n+ media_file.close()\n \n item.save()\n+\n+\n+@celery_app.task(bind=True, base=MediaEnricher, autoretry_for=(Exception,), retry_backoff=True)\n+def media_enricher(self, *args, **kwargs):\n+ return self.start(*args, **kwargs)\ndiff --git a/ocd_backend/enrichers/media_enricher/static.py b/ocd_backend/enrichers/media_enricher/static.py\n--- a/ocd_backend/enrichers/media_enricher/static.py\n+++ b/ocd_backend/enrichers/media_enricher/static.py\n@@ -1,3 +1,4 @@\n+from ocd_backend import celery_app\n from ocd_backend.enrichers.media_enricher import MediaEnricher\n from ocd_backend.log import get_source_logger\n from ocd_backend.utils.http import HttpRequestMixin, LocalCachingMixin, GCSCachingMixin\n@@ -15,3 +16,18 @@ class LocalStaticMediaEnricher(MediaEnricher, LocalCachingMixin):\n \n class GCSStaticMediaEnricher(MediaEnricher, GCSCachingMixin):\n bucket_name = 'ori-static'\n+\n+\n+@celery_app.task(bind=True, base=TemporaryFileMediaEnricher, autoretry_for=(Exception,), retry_backoff=True)\n+def temporary_file_media_enricher(self, *args, **kwargs):\n+ return self.start(*args, **kwargs)\n+\n+\n+@celery_app.task(bind=True, base=LocalStaticMediaEnricher, autoretry_for=(Exception,), retry_backoff=True)\n+def local_static_media_enricher(self, *args, **kwargs):\n+ return self.start(*args, **kwargs)\n+\n+\n+@celery_app.task(bind=True, base=GCSStaticMediaEnricher, autoretry_for=(Exception,), retry_backoff=True)\n+def gcs_static_media_enricher(self, *args, **kwargs):\n+ return self.start(*args, **kwargs)\ndiff --git a/ocd_backend/enrichers/media_enricher/tasks/__init__.py b/ocd_backend/enrichers/media_enricher/tasks/__init__.py\n--- a/ocd_backend/enrichers/media_enricher/tasks/__init__.py\n+++ b/ocd_backend/enrichers/media_enricher/tasks/__init__.py\n@@ -95,6 +95,7 @@ class FileToText(BaseMediaEnrichmentTask, FileToTextMixin):\n \n def enrich_item(self, media_item, content_type, file_object):\n # Make sure file_object is actually on the disk for pdf parsing\n+ temporary_file = None\n if isinstance(file_object, cStringIO.OutputType):\n temporary_file = NamedTemporaryFile(dir=TEMP_DIR_PATH)\n temporary_file.write(file_object.read())\n@@ -113,5 +114,8 @@ def enrich_item(self, media_item, content_type, file_object):\n # media_item.meta = meta\n pass\n \n+ if temporary_file:\n+ temporary_file.close()\n+\n def process_text(self, text, media_item):\n pass\ndiff --git a/ocd_backend/es.py b/ocd_backend/es.py\n--- a/ocd_backend/es.py\n+++ 
b/ocd_backend/es.py\n@@ -23,7 +23,7 @@ def dumps(self, data):\n \n \n def setup_elasticsearch(host=ELASTICSEARCH_HOST, port=ELASTICSEARCH_PORT):\n- return Elasticsearch([{'host': host, 'port': port, 'timeout': 20}],\n+ return Elasticsearch([{'host': host, 'port': port, 'timeout': 600}],\n serializer=JSONSerializerPython2())\n \n \ndiff --git a/ocd_backend/extractors/__init__.py b/ocd_backend/extractors/__init__.py\n--- a/ocd_backend/extractors/__init__.py\n+++ b/ocd_backend/extractors/__init__.py\n@@ -31,6 +31,8 @@ def run(self):\n ``application/json``)\n - the data in it's original format, as retrieved from the source\n (as a string)\n+ - the entity URL\n+ - the representation of the original item used\n \"\"\"\n raise NotImplementedError\n \n@@ -44,7 +46,7 @@ def _interval_delta(self):\n \"\"\"\n months = 1 # Max 1 months intervals by default\n if 'months_interval' in self.source_definition:\n- months = self.source_definition['months_interval']\n+ months = int(self.source_definition['months_interval'])\n \n if (months / 2.0) < 1.0:\n days = (months / 2.0) * 30\ndiff --git a/ocd_backend/extractors/allmanak.py b/ocd_backend/extractors/allmanak.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/extractors/allmanak.py\n@@ -0,0 +1,128 @@\n+import json\n+from urlparse import urljoin\n+\n+from ocd_backend.extractors import BaseExtractor\n+from ocd_backend.log import get_source_logger\n+from ocd_backend.utils.http import HttpRequestMixin\n+\n+\n+log = get_source_logger('extractor')\n+\n+\n+class AllmanakBaseExtractor(BaseExtractor, HttpRequestMixin):\n+ \"\"\"\n+ A base extractor for the OpenState Allmanak API.\n+ \"\"\"\n+\n+ def __init__(self, *args, **kwargs):\n+ super(AllmanakBaseExtractor, self).__init__(*args, **kwargs)\n+ self.base_url = '%s/%s/' % ('https://rest-api.allmanak.nl', self.source_definition['allmanak_api_version'],)\n+\n+ def _request(self, path):\n+ # log.debug('Now retrieving: %s' % (urljoin(self.base_url, path),))\n+ response = self.http_session.get(urljoin(self.base_url, path), verify=False)\n+\n+ if response.status_code == 200:\n+ static_json = json.loads(response.content)\n+ return len(static_json), static_json\n+ else:\n+ log.error('[%s] Failed to extract from Allmanak path %s' % (\n+ self.source_definition['sitename'], urljoin(self.base_url, path))\n+ )\n+ return 0, []\n+\n+\n+class AllmanakMunicipalityExtractor(AllmanakBaseExtractor):\n+ \"\"\"\n+ Extracts a municipality from the OpenState Allmanak. There should always be exactly 1 result.\n+ \"\"\"\n+\n+ def run(self):\n+ path = self.base_url + 'overheidsorganisatie?systemid=eq.%s' % self.source_definition['allmanak_id']\n+\n+ total, static_json = self._request(path)\n+\n+ if total != 1:\n+ log.error('[%s] Number of extracted municipalities for %s is not equal to 1' % (\n+ self.source_definition['sitename'],\n+ self.source_definition['sitename'])\n+ )\n+ else:\n+ yield 'application/json', \\\n+ json.dumps(static_json[0]), \\\n+ path, \\\n+ static_json\n+ log.info(\"[%s] Extracted 1 Allmanak municipality.\" % self.source_definition['sitename'])\n+\n+\n+class AllmanakProvinceExtractor(AllmanakBaseExtractor):\n+ \"\"\"\n+ Extracts a province from the OpenState Allmanak. 
There should always be exactly 1 result.\n+ \"\"\"\n+\n+ def run(self):\n+ path = self.base_url + 'overheidsorganisatie?systemid=eq.%s' % self.source_definition['allmanak_id']\n+\n+ total, static_json = self._request(path)\n+\n+ if total != 1:\n+ log.error('[%s] Number of extracted provinces for %s is not equal to 1' % (\n+ self.source_definition['sitename'],\n+ self.source_definition['sitename'])\n+ )\n+ else:\n+ yield 'application/json', \\\n+ json.dumps(static_json[0]), \\\n+ path, \\\n+ static_json\n+ log.info(\"[%s] Extracted 1 Allmanak province.\" % self.source_definition['sitename'])\n+\n+\n+class AllmanakPartiesExtractor(AllmanakBaseExtractor):\n+ \"\"\"\n+ Extracts parties from the OpenState Allmanak.\n+ \"\"\"\n+\n+ def run(self):\n+ path = self.base_url + 'overheidsorganisatie?systemid=eq.%s&select=zetels' % self.source_definition['allmanak_id']\n+\n+ total, static_json = self._request(path)\n+\n+ if static_json[0]['zetels']:\n+ total_parties = 0\n+ for party in static_json[0]['zetels']:\n+ yield 'application/json', \\\n+ json.dumps(party), \\\n+ path, \\\n+ static_json\n+ total_parties += 1\n+ log.info(\"[%s] Extracted %d Allmanak parties.\" % (self.source_definition['sitename'], total_parties))\n+ else:\n+ log.warning('[%s] Allmanak does not list any parties for this source.' % self.source_definition['sitename'])\n+\n+\n+class AllmanakPersonsExtractor(AllmanakBaseExtractor):\n+ \"\"\"\n+ Extracts persons from the OpenState Allmanak.\n+ \"\"\"\n+\n+ def run(self):\n+ path = self.base_url + 'overheidsorganisatie?systemid=eq.%s&select=naam,functies(functie:functieid' \\\n+ '(naam,medewerkers(persoon:persoonid(systemid,naam,partij))))' \\\n+ '&functies.functie.naam=eq.Raadslid' % self.source_definition['allmanak_id']\n+\n+ total, static_json = self._request(path)\n+\n+ if static_json[0]['functies']:\n+ total_persons = 0\n+ for row in static_json[0]['functies']:\n+ if row['functie']:\n+ for person in row['functie']['medewerkers']:\n+ yield 'application/json', \\\n+ json.dumps(person['persoon']), \\\n+ path, \\\n+ static_json\n+ total_persons += 1\n+ log.info(\"[%s] Extracted %d Allmanak persons.\" % (self.source_definition['sitename'], total_persons))\n+ else:\n+ log.warning('[%s] Allmanak does not list any persons for this source.' 
% self.source_definition['sitename'])\ndiff --git a/ocd_backend/extractors/almanak.py b/ocd_backend/extractors/almanak.py\ndeleted file mode 100644\n--- a/ocd_backend/extractors/almanak.py\n+++ /dev/null\n@@ -1,85 +0,0 @@\n-import json\n-\n-from lxml import etree\n-\n-from ocd_backend.log import get_source_logger\n-from .staticfile import StaticHtmlExtractor\n-\n-log = get_source_logger('extractor')\n-\n-\n-class OrganisationsExtractor(StaticHtmlExtractor):\n- \"\"\"\n- Extract organisations (parties) from the Almanak.\n- \"\"\"\n-\n- def extract_items(self, static_content):\n- \"\"\"\n- Extracts organisations from the Almanak page source HTML.\n- \"\"\"\n-\n- parties = set()\n- html = etree.HTML(static_content)\n-\n- # Parties are listed in TD's with the attribute 'data-before=\"Partij\"'\n- for party in html.xpath('.//td[@data-before=\"Partij\"]/text()'):\n- parties.add(party)\n-\n- for party in parties:\n- yield 'application/json', json.dumps({'name': party, 'classification': u'Party'})\n-\n-\n-class PersonsExtractor(StaticHtmlExtractor):\n- \"\"\"\n- Extract persons from the Almanak.\n- \"\"\"\n-\n- def extract_items(self, static_content):\n- \"\"\"\n- Extracts persons from the Almanak page source HTML.\n- \"\"\"\n-\n- html = etree.HTML(static_content)\n- persons = []\n-\n- for element in html.xpath('//*[@id=\"functies-organisatie\"]/following::table[1]//td//a'):\n- name = element.xpath('text()')[0].strip()\n- if name.lower() == 'vacant':\n- # Exclude vacant positions\n- break\n-\n- try:\n- party = etree.tostring(element).split('(')[1].split(')')[0]\n- except:\n- # If no party can be extracted, the person is a clerk (griffier). Clerks are not\n- # affiliated with a party\n- party = None\n-\n- url = (u''.join(element.xpath('.//@href'))).strip()\n- id = url[1:].split('/')[0]\n- email = element.xpath('string(//a[starts-with(@href,\"mailto:\")]/text())').strip().split(' ')[0]\n- gender = u'male' if name.startswith(u'Dhr. 
') else u'female'\n-\n- if party:\n- # Extraction of role requires a HTTP request to a separate page\n- # TODO: With the new Almanak layout this could be done without an extra request by taking the role\n- # from the previous TD element\n- request_url = u'https://almanak.overheid.nl%s' % (unicode(url),)\n- response = self.http_session.get(request_url, verify=False)\n- response.raise_for_status()\n- html = etree.HTML(response.content)\n- role = html.xpath('//*[@id=\"basis-medewerker\"]/following::table[1]/tr/td/text()')[0].strip()\n- else:\n- role = 'Griffier'\n-\n- persons.append({\n- 'id': id,\n- 'name': name,\n- 'email': email,\n- 'gender': gender,\n- 'party': party,\n- 'role': role,\n- })\n-\n- for person in persons:\n- yield 'application/json', json.dumps(person)\ndiff --git a/ocd_backend/extractors/cwc.py b/ocd_backend/extractors/cwc.py\n--- a/ocd_backend/extractors/cwc.py\n+++ b/ocd_backend/extractors/cwc.py\n@@ -1,4 +1,5 @@\n import json\n+import os\n \n from suds.client import Client # pylint: disable=import-error\n from suds.sudsobject import asdict # pylint: disable=import-error\n@@ -52,6 +53,8 @@ def __init__(self, *args, **kwargs):\n super(CompanyWebcastBaseExtractor, self).__init__(*args, **kwargs)\n \n self.client = Client(settings.CWC_WSDL)\n+ self.username = os.environ.get('%s.cwc_username' % self.source_definition['key'])\n+ self.password = os.environ.get('%s.cwc_password' % self.source_definition['key'])\n # self.client.set_options(port='BasicHttpsBinding_IPublic')\n \n \n@@ -62,45 +65,58 @@ class VideotulenExtractor(CompanyWebcastBaseExtractor):\n \n def run(self):\n current_page = 0\n- pagesize = self.source_definition['cwc_pagesize']\n- result_count = pagesize\n+ consecutive_errors = 0\n+ result_count = 0\n+ pagesize = self.source_definition.get('cwc_pagesize', 10)\n \n- while result_count == pagesize:\n- log.debug(\"Requesting page %s ...\" % (current_page,))\n+ while consecutive_errors < 5:\n+ log.debug(\"[%s] Requesting cwc page %s ...\" % (self.source_definition['key'], current_page,))\n results = self.client.service.WebcastSearch(\n- Username=self.source_definition['cwc_username'],\n- Password=self.source_definition['cwc_password'],\n- PageSize=self.source_definition['cwc_pagesize'],\n+ Username=self.username,\n+ Password=self.password,\n+ PageSize=pagesize,\n PageNumber=current_page,\n- Order='CreatedDesc')\n+ Order='CreatedDesc'\n+ )\n \n- if results.WebcastSearchResult != 0:\n- log.warning(\"Page %s resulted in error code : %s\" % (\n- current_page, results.WebcastSearchResult,))\n+ if results.WebcastSearchResult == 3:\n+ log.warning(\"[%s] CompanyWebcast credentials are incorrect\" % self.source_definition['key'])\n+ break\n+\n+ elif results.WebcastSearchResult != 0:\n+ log.warning(\"[%s] CompanyWebcast page %s resulted in error code : %s\" % (\n+ self.source_definition['key'], current_page, results.WebcastSearchResult)\n+ )\n+\n+ consecutive_errors += 1\n+ current_page += 1\n continue\n \n- result_count = 0\n+ if len(results.WebcastSummaries) < 1:\n+ break\n+\n for result in results.WebcastSummaries[0]:\n full_result = self.client.service.WebcastGet(\n- Username=self.source_definition['cwc_username'],\n- Password=self.source_definition['cwc_password'],\n+ Username=self.username,\n+ Password=self.password,\n Code=result.Code)\n \n result_count += 1\n if full_result.WebcastGetResult != 0:\n- log.warning(\"Webcast %s resulted in error code : %s\" % (\n- result.Code, full_result.WebcastGetResult,))\n+ log.warning(\"[%s] Webcast %s resulted in error code : %s\" % 
(\n+ self.source_definition['key'], result.Code, full_result.WebcastGetResult,))\n+ consecutive_errors += 1\n continue\n \n- if full_result is not None and full_result['Webcast'] is not None:\n- log.debug(\"%s: %s\" % (\n- full_result['Webcast']['Title'],\n- full_result['Webcast']['ScheduledStart'],))\n+ # if full_result is not None and full_result['Webcast'] is not None:\n+ # log.debug(\"%s: %s\" % (\n+ # full_result['Webcast']['Title'],\n+ # full_result['Webcast']['ScheduledStart'],))\n \n serialized_result = suds_to_json(full_result)\n yield 'application/json', serialized_result\n-\n- if not self.source_definition['cwc_paging']:\n- result_count = 0\n+ consecutive_errors = 0\n \n current_page += 1\n+\n+ log.info(\"[%s] Extracted total of %d cwc results\" % (self.source_definition['index_name'], result_count))\ndiff --git a/ocd_backend/extractors/extensions/__init__.py b/ocd_backend/extractors/extensions/__init__.py\ndeleted file mode 100644\n--- a/ocd_backend/extractors/extensions/__init__.py\n+++ /dev/null\n@@ -1,8 +0,0 @@\n-from ocd_backend import celery_app\n-# from ocd_backend import settings\n-from ocd_backend.mixins import OCDBackendTaskFailureMixin\n-\n-\n-class BaseExtension(OCDBackendTaskFailureMixin, celery_app.Task):\n- def run(self, *args, **kwargs):\n- raise NotImplementedError\ndiff --git a/ocd_backend/extractors/ggm.py b/ocd_backend/extractors/ggm.py\ndeleted file mode 100644\n--- a/ocd_backend/extractors/ggm.py\n+++ /dev/null\n@@ -1,214 +0,0 @@\n-import json\n-import os\n-import time\n-from base64 import b64encode\n-from hashlib import sha1\n-from urllib import urlencode\n-from urlparse import urlparse, urlunparse, parse_qs\n-\n-import dateutil.parser\n-import redis\n-\n-from ocd_backend.extractors import BaseExtractor\n-from ocd_backend.log import get_source_logger\n-from ocd_backend.settings import REDIS_HOST, REDIS_PORT, DATA_DIR_PATH\n-from ocd_backend.utils.http import HttpRequestMixin\n-from ocd_backend.utils.misc import get_secret\n-\n-log = get_source_logger('loader')\n-\n-\n-class GegevensmagazijnBaseExtractor(BaseExtractor, HttpRequestMixin):\n- def run(self):\n- pass\n-\n- def __init__(self, source_definition):\n- super(GegevensmagazijnBaseExtractor, self).__init__(source_definition=source_definition)\n- user, password = get_secret(self.source_definition['id'])\n- self.http_session.headers['Authorization'] = 'Basic %s' % b64encode('%s:%s' % (user, password))\n- self.feed_url = self.source_definition['base_url'] + self.source_definition['feed_query']\n- self.redis = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=1)\n-\n-\n-class GegevensmagazijnFeedExtractor(GegevensmagazijnBaseExtractor):\n- def run(self):\n- self.url_hash = sha1(self.feed_url.encode(\"UTF-8\")).hexdigest()[:6]\n- self.path = os.path.join(DATA_DIR_PATH, self.source_definition['xml_directory'])\n- if not os.path.exists(self.path):\n- os.makedirs(self.path)\n-\n- second_last_piket, last_piket = self.get_last_pikets()\n-\n- if self.source_definition.get('resume_last'):\n- log.info(\"Configuration 'resume_last' set, resuming second last url:\\n%s\" %\n- self.piket_to_url(second_last_piket))\n- else:\n- pikets = self.get_pikets()\n- if second_last_piket:\n- log.info(\n- \"Yielding all entities for %i pikets from redis cache for url:\\n%s\" % (len(pikets), self.feed_url))\n-\n- i = 0\n- for piket in pikets:\n- for entity_id, modified_seconds in self.get_piket_entities(piket).items():\n- data = self.fetch_entity(piket, entity_id, modified_seconds)\n- i += 1\n- yield 'application/xml', 
data\n-\n- if i:\n- log.info(\"There were %i cached entity id's yielded\" % i)\n-\n- # Starting at the second last piket if available to check if the last\n- # piket is still the same\n- piket = second_last_piket\n-\n- while True:\n- if piket:\n- log.debug(\"Downloading next piket: %s\" % piket)\n- else:\n- self.redis.set('%s_url' % self.url_hash, self.feed_url)\n- log.debug(\"Downloading 'base_url' in configuration, \"\n- \"since redis cache for this url is empty\")\n-\n- self.http_session.headers['Accept'] = 'application/json; charset=\"utf-8\"'\n- resp = self.http_session.get(self.piket_to_url(piket))\n- resp.raise_for_status()\n-\n- feed = json.loads(resp.content)\n-\n- for entry in feed['entries']:\n- item = entry['content']['src']\n- entity_id = item.rsplit('/', 1)[1]\n-\n- updated = entry.get('updated')\n- if updated:\n- dt = dateutil.parser.parse(updated)\n- modified_seconds = int(time.mktime(dt.timetuple()))\n- data = self.fetch_entity(piket, entity_id, modified_seconds)\n- self.push_entity(piket, entity_id, modified_seconds)\n- if data:\n- yield 'application/xml', data\n- else:\n- log.warning(\"The GGM feed featured an entry that had no \"\n- \"'updated' field. Exiting. %s\" % entry)\n- break\n-\n- try:\n- next_url = [link[\"href\"] for link in feed['links'] if link['rel'] == 'next'][0]\n- except IndexError:\n- if 'resume' in [link['rel'] for link in feed['links']]:\n- log.info(\"Done processing! 'resume' was found.\")\n- break\n- else:\n- log.fatal(\"Neither the next piket or 'resume' was found. \"\n- \"Exiting.\")\n- break\n-\n- next_piket = self.url_to_piket(next_url)\n-\n- # Check if the next piket is the same as the last one in redis\n- if last_piket and next_piket != last_piket:\n- log.fatal('The next piket doesn\\'t match the last piket that '\n- 'was cached in redis! Maybe the data has changed. '\n- 'All keys in redis starting with %s need to be '\n- 'deleted to continue. Exiting.' % self.url_hash)\n- break\n-\n- if not last_piket:\n- self.push_piket(next_piket)\n-\n- # Passed the check so disabling it for next iteration\n- last_piket = None\n-\n- if piket == next_piket:\n- log.fatal('The next piket is the same as the current one! 
'\n- 'Exiting.')\n- break\n-\n- piket = next_piket\n-\n- def download_entity(self, file_path, entity_id):\n- self.http_session.headers[\n- 'Accept'] = 'application/xml; charset=\"utf-8\"'\n- resp = self.http_session.get(\n- '%sEntiteiten/%s' % (\n- self.source_definition['base_url'],\n- entity_id\n- ),\n- timeout=20\n- )\n- resp.raise_for_status()\n- data = resp.content\n- self.write_to_file(file_path, data)\n- return data\n-\n- def fetch_entity(self, piket, entity_id, modified_seconds):\n- file_path = \"%s/%s.xml\" % (self.path, entity_id)\n- try:\n- info = os.lstat(file_path)\n- # Check if the filesize is at least bigger than one byte\n- if info.st_size < 2:\n- raise OSError\n- except OSError:\n- # File does not exist, so let's download\n- return self.download_entity(file_path, entity_id)\n-\n- # File already exists, check if newer\n- if modified_seconds > self.get_entity_last_update(piket, entity_id):\n- return self.download_entity(file_path, entity_id)\n-\n- # Return the local file if force_old_files is enabled\n- elif self.source_definition.get('force_old_files'):\n- f = open(file_path, 'r')\n- data = f.read()\n- f.close()\n- return data\n-\n- @staticmethod\n- def write_to_file(file_path, data):\n- f = open(file_path, \"w\")\n- f.write(data)\n- f.close()\n-\n- @staticmethod\n- def url_to_piket(url):\n- return parse_qs(urlparse(url).query)['piket'][0]\n-\n- def piket_to_url(self, piket):\n- if not piket:\n- return self.feed_url\n-\n- url = urlparse(self.feed_url)\n- qs = parse_qs(url.query)\n- qs['piket'] = [piket]\n-\n- url = list(url)\n- url[4] = urlencode(qs, doseq=True)\n- return urlunparse(url)\n-\n- def get_last_pikets(self):\n- result = self.redis.lrange('%s_piketten' % self.url_hash, -2, -1)\n- try:\n- second_last_piket, last_piket = result\n- return second_last_piket, last_piket\n- except ValueError:\n- try:\n- return '', result[0]\n- except IndexError:\n- return '', ''\n-\n- def get_pikets(self):\n- return self.redis.lrange('%s_piketten' % self.url_hash, 0, -1)\n-\n- def push_piket(self, piket):\n- self.redis.rpush('%s_piketten' % self.url_hash, piket)\n-\n- def push_entity(self, piket, entity_id, modified_seconds):\n- self.redis.hmset('%s:%s' % (self.url_hash, piket),\n- {entity_id: int(modified_seconds)})\n-\n- def get_piket_entities(self, piket):\n- return self.redis.hgetall('%s:%s' % (self.url_hash, piket))\n-\n- def get_entity_last_update(self, piket, entity_id):\n- return self.redis.hget('%s:%s' % (self.url_hash, piket), entity_id)\ndiff --git a/ocd_backend/extractors/go.py b/ocd_backend/extractors/go.py\ndeleted file mode 100644\n--- a/ocd_backend/extractors/go.py\n+++ /dev/null\n@@ -1,197 +0,0 @@\n-import json\n-from time import sleep\n-\n-from lxml import etree\n-\n-from ocd_backend.extractors import BaseExtractor\n-from ocd_backend.log import get_source_logger\n-from ocd_backend.utils.http import HttpRequestMixin\n-\n-log = get_source_logger('extractor')\n-\n-\n-class GemeenteOplossingenBaseExtractor(BaseExtractor, HttpRequestMixin):\n- \"\"\"\n- A base extractor for scraping GemeenteOplossingen websites. 
This\n- base extractor just configures the base url to use for scraping.\n- \"\"\"\n-\n- def run(self):\n- pass\n-\n- def __init__(self, *args, **kwargs):\n- super(GemeenteOplossingenBaseExtractor, self).__init__(*args, **kwargs)\n-\n- self.base_url = self.source_definition['base_url']\n-\n- def _get_committees(self):\n- \"\"\"\n- Gets a list of committees, along with links to upcoming and archived\n- meetings.\n- \"\"\"\n-\n- committees = []\n-\n- resp = self.http_session.get(u'%s/Vergaderingen' % (self.base_url,))\n-\n- if resp.status_code != 200:\n- return committees\n-\n- html = etree.HTML(resp.content)\n- for c in html.xpath('//div[@class=\"orgaan block\"]'):\n- committee = {\n- 'name': u''.join(c.xpath('.//h3/text()')),\n- 'upcoming': u'%s%s' % (\n- self.base_url,\n- u''.join(c.xpath('.//li[@class=\"next\"]/a/@href')),\n- ),\n- 'archive': u'%s%s' % (\n- self.base_url,\n- u''.join(c.xpath('.//li[@class=\"prev\"]/a/@href')),\n- )\n- }\n- committees.append(committee)\n-\n- return committees\n-\n-\n-class GemeenteOplossingenCommitteesExtractor(GemeenteOplossingenBaseExtractor):\n- \"\"\"\n- Extracts the committees from GemeenteOplossingenWebsites. This is done\n- by scraping the meetings page and taking the meeting types which\n- have 'commissies' in the title.\n- \"\"\"\n-\n- def run(self):\n- committees = self._get_committees()\n- # log.debug(committees)\n-\n- for c in self._get_committees():\n- yield 'application/json', json.dumps(c)\n-\n-\n-class GemeenteOplossingenMeetingsExtractor(GemeenteOplossingenBaseExtractor):\n- def _get_upcoming_meetings(self, upcoming_url):\n- \"\"\"\n- Gets a list of upcoming meetings from the URL specified.\n- \"\"\"\n-\n- resp = self.http_session.get(upcoming_url)\n-\n- if resp.status_code != 200:\n- return []\n-\n- html = etree.HTML(resp.content)\n-\n- committee = u''.join(html.xpath('//h1/text()')).replace(\n- u'Komende vergaderingen ', u'').strip()\n-\n- return [{\n- 'committee': committee,\n- 'title': u''.join(a.xpath(\n- './/span[@class=\"komendevergadering_title\"]/text()')),\n- 'time': u''.join(a.xpath(\n- './/span[@class=\"komendevergadering_aanvang\"]/text()')),\n- 'url': u'%s%s' % (\n- self.base_url,\n- u''.join(a.xpath(\n- './/span[@class=\"komendevergadering_agenda\"]/a/@href')),)\n- } for a in html.xpath(\n- '//div[@class=\"komendevergadering overview list_arrow\"]')]\n-\n- def _get_archived_meetings(self, archive_url):\n- \"\"\"\n- Gets a list of archived meetings from the URL specified.\n- \"\"\"\n-\n- resp = self.http_session.get(archive_url)\n-\n- if resp.status_code != 200:\n- return []\n-\n- html = etree.HTML(resp.content)\n-\n- committee = u''.join(html.xpath('//div[@id=\"breadcrumb\"]/strong//text()')).strip()\n-\n- return [{\n- 'committee': committee,\n- 'title': u''.join(a.xpath('.//text()')),\n- 'url': u'%s%s' % (self.base_url, u''.join(a.xpath('.//@href')),)\n- } for a in html.xpath('//ul[@id=\"vergaderingen\"]//li/a')]\n-\n- def _get_pages(self):\n- if self.source_definition.get('upcoming', True):\n- field = 'upcoming'\n- else:\n- field = 'archive'\n- return [c[field] for c in self._get_committees()] # for now ...\n-\n- def filter_meeting(self, meeting, html):\n- \"\"\"\n- Should return true if the meeting is to be yielded.\n- \"\"\"\n-\n- return True\n-\n- def run(self):\n- pages = self._get_pages()\n-\n- for page in pages:\n- if self.source_definition.get('upcoming', True):\n- meetings = self._get_upcoming_meetings(page)\n- else:\n- meetings = self._get_archived_meetings(page)\n-\n- for meeting in meetings:\n- sleep(1)\n-\n- 
resp = self.http_session.get(meeting['url'])\n- if resp.status_code != 200:\n- continue\n-\n- html = etree.HTML(resp.content)\n-\n- if not self.filter_meeting(meeting, html):\n- continue\n-\n- # this is a bit ugly, but saves us from having to scrape\n- # all the meeting pages twice ...\n-\n- meeting_obj = {\n- 'type': 'meeting',\n- 'content': etree.tostring(html),\n- 'full_content': resp.content,\n- }\n-\n- yield 'application/json', json.dumps(meeting_obj)\n-\n- if not self.source_definition.get('extract_meeting_items', False):\n- continue\n-\n- for meeting_item_html in html.xpath(\n- '//li[contains(@class, \"agendaRow\")]'):\n- meeting_item_obj = {\n- 'type': 'meeting-item',\n- 'content': etree.tostring(meeting_item_html),\n- 'full_content': resp.content,\n- }\n-\n- yield 'application/json', json.dumps(meeting_item_obj)\n-\n-\n-class GemeenteOplossingenResolutionsExtractor(GemeenteOplossingenMeetingsExtractor):\n- def filter_meeting(self, meeting, html):\n- for item in html.xpath('//div[@id=\"documenten\"]//li'):\n- anchor = u''.join(item.xpath('.//a//text()'))\n- if u'Besluitenlijst' in anchor:\n- return True\n- return False\n-\n-\n-class GemeenteOplossingenReportsExtractor(GemeenteOplossingenMeetingsExtractor):\n- def filter_meeting(self, meeting, html):\n- # for item in html.xpath('//div[@id=\"downloaden\"]//li'):\n- # anchor = u''.join(item.xpath('.//a//@href'))\n- # if u'/mp3' in anchor:\n- # return True\n- return True\ndiff --git a/ocd_backend/extractors/goapi.py b/ocd_backend/extractors/goapi.py\n--- a/ocd_backend/extractors/goapi.py\n+++ b/ocd_backend/extractors/goapi.py\n@@ -1,7 +1,6 @@\n import json\n from datetime import datetime\n from urlparse import urljoin\n-from pprint import pprint\n \n from ocd_backend.extractors import BaseExtractor\n from ocd_backend.log import get_source_logger\n@@ -27,7 +26,7 @@ def __init__(self, *args, **kwargs):\n self.source_definition['base_url'], self.api_version,)\n \n def _request(self, path):\n- log.info('Now retrieving: %s' % (urljoin(self.base_url, path),))\n+ log.debug('Now retrieving: %s' % (urljoin(self.base_url, path),))\n resp = self.http_session.get(\n urljoin(self.base_url, path), verify=False)\n \n@@ -56,13 +55,16 @@ class GemeenteOplossingenCommitteesExtractor(GemeenteOplossingenBaseExtractor):\n \"\"\"\n \n def run(self):\n- committee_count = 1\n+ committee_count = 0\n total, static_json = self._request('dmus')\n for dmu in static_json:\n- yield 'application/json', json.dumps(dmu)\n+ yield 'application/json', \\\n+ json.dumps(dmu), \\\n+ urljoin(self.base_url, 'dmus'), \\\n+ dmu\n committee_count += 1\n \n- log.info(\"Extracted total of %d committees.\" % committee_count)\n+ log.info(\"[%s] Extracted total of %d GO API committees.\" % (self.source_definition['sitename'], committee_count))\n \n \n class GemeenteOplossingenMeetingsExtractor(GemeenteOplossingenBaseExtractor):\n@@ -73,18 +75,28 @@ class GemeenteOplossingenMeetingsExtractor(GemeenteOplossingenBaseExtractor):\n def run(self):\n meeting_count = 0\n for start_date, end_date in self.interval_generator():\n- url = 'meetings?date_from=%i&date_to=%i' % (\n- (start_date - datetime(1970, 1, 1)).total_seconds(),\n- (end_date - datetime(1970, 1, 1)).total_seconds()\n+ # v2 requires dates in YYYY-MM-DD format, instead of a unix timestamp\n+ if self.source_definition.get('api_version') == 'v2':\n+ url = 'meetings?date_from=%s&date_to=%s' % (\n+ start_date.strftime('%Y-%m-%d'),\n+ end_date.strftime('%Y-%m-%d')\n )\n+ else:\n+ url = 'meetings?date_from=%i&date_to=%i' % (\n+ 
(start_date - datetime(1970, 1, 1)).total_seconds(),\n+ (end_date - datetime(1970, 1, 1)).total_seconds()\n+ )\n total, static_json = self._request(url)\n \n for meeting in static_json:\n- yield 'application/json', json.dumps(meeting)\n+ yield 'application/json', \\\n+ json.dumps(meeting), \\\n+ urljoin(self.base_url, url), \\\n+ meeting\n meeting_count += 1\n \n- log.info(\"Now processing meetings from %s to %s\" % (start_date, end_date,))\n- log.info(\"Extracted total of %d meetings.\" % meeting_count)\n+ log.debug(\"[%s] Now processing meetings from %s to %s\" % (self.source_definition['sitename'], start_date, end_date,))\n+ log.info(\"[%s] Extracted total of %d GO API meetings.\" % (self.source_definition['sitename'], meeting_count))\n \n \n # class GemeenteOplossingenMeetingItemsExtractor(GemeenteOplossingenBaseExtractor):\n@@ -132,8 +144,13 @@ def run(self):\n end_date.isoformat()))\n \n for doc in docs:\n- yield 'application/json', json.dumps(doc)\n+ api_version = self.source_definition.get('api_version', 'v1')\n+ base_url = '%s/%s' % (self.source_definition['base_url'], api_version,)\n+ yield 'application/json', \\\n+ json.dumps(doc), \\\n+ u'%s/documents/%i' % (base_url, doc[u'id'],), \\\n+ doc\n meeting_count += 1\n \n- log.info(\"Now processing documents from %s to %s\" % (start_date, end_date,))\n- log.info(\"Extracted total of %d documents.\" % meeting_count)\n+ log.debug(\"[%s] Now processing documents from %s to %s\" % (self.source_definition['sitename'], start_date, end_date,))\n+ log.info(\"[%s] Extracted total of %d GO API documents.\" % (self.source_definition['sitename'], meeting_count))\ndiff --git a/ocd_backend/extractors/gv.py b/ocd_backend/extractors/gv.py\n--- a/ocd_backend/extractors/gv.py\n+++ b/ocd_backend/extractors/gv.py\n@@ -1,13 +1,10 @@\n import json\n from time import sleep\n \n-from requests.exceptions import HTTPError, RetryError\n-from urllib3.exceptions import MaxRetryError\n-\n-from ocd_backend.exceptions import ItemAlreadyProcessed, ConfigurationError\n+from ocd_backend.exceptions import ConfigurationError\n from ocd_backend.extractors import BaseExtractor\n from ocd_backend.log import get_source_logger\n-from ocd_backend.utils.http import GCSCachingMixin, HttpRequestMixin\n+from ocd_backend.utils.http import HttpRequestMixin\n \n log = get_source_logger('extractor')\n \n@@ -85,13 +82,12 @@ def run(self):\n \n for k, v in result.get(u'SETS', {}).iteritems():\n v[u'parent_objectid'] = result[u'default'][u'objectid']\n- v[u'bis_vergaderdatum'] = result[\n- u'default'][u'bis_vergaderdatum']\n+ v[u'bis_vergaderdatum'] = result[u'default'][u'bis_vergaderdatum']\n \n result2 = {u'default': v}\n- yield 'application/json', json.dumps(result2)\n+ yield 'application/json', json.dumps(result2), result2['default']['objectid'], result2\n \n- yield 'application/json', json.dumps(result)\n+ yield 'application/json', json.dumps(result), result['default']['objectid'], result\n \n params['start'] += len(results['objects'])\n fetch_next_page = (len(results['objects']) > 0)\n@@ -128,7 +124,13 @@ def run(self):\n if self.start_date is None:\n self.start_date = cur_start\n self.end_date = cur_end\n- log.info(\"%s: Now processing meetings from %s to %s\" % (\n- self.source_definition['key'], self.start_date, self.end_date,))\n+ log.debug(\"[%s] Now processing meetings from %s to %s\" % (\n+ self.source_definition['sitename'], self.start_date, self.end_date,))\n+\n+ total_meetings = 0\n for item in super(GreenValleyMeetingsExtractor, self).run():\n- yield item\n+ yield 
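The branch above encodes the same date interval two ways: v1 of the GO API takes unix timestamps (computed by subtracting the epoch by hand, since Python 2's datetime has no .timestamp()), while v2 takes plain YYYY-MM-DD dates. A small self-contained sketch of both encodings for a one-day window:

from datetime import datetime

start = datetime(2019, 1, 1)
end = datetime(2019, 1, 2)
epoch = datetime(1970, 1, 1)

# v1: seconds since the epoch
v1_url = 'meetings?date_from=%i&date_to=%i' % (
    (start - epoch).total_seconds(), (end - epoch).total_seconds())
# v2: plain ISO dates
v2_url = 'meetings?date_from=%s&date_to=%s' % (
    start.strftime('%Y-%m-%d'), end.strftime('%Y-%m-%d'))

print(v1_url)  # meetings?date_from=1546300800&date_to=1546387200
print(v2_url)  # meetings?date_from=2019-01-01&date_to=2019-01-02
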
item[0], item[1], item[2], item[3]\n+ total_meetings += 1\n+\n+ log.info(\"[%s] Extracting total of %d GreenValley meetings\" % (\n+ self.source_definition['sitename'], total_meetings))\ndiff --git a/ocd_backend/extractors/ibabs.py b/ocd_backend/extractors/ibabs.py\n--- a/ocd_backend/extractors/ibabs.py\n+++ b/ocd_backend/extractors/ibabs.py\n@@ -1,7 +1,9 @@\n import json\n import re\n \n-from suds.client import Client # pylint: disable=import-error\n+from zeep.client import Client, Settings\n+from zeep.helpers import serialize_object\n+from zeep.exceptions import Error, TransportError\n \n from ocd_backend import settings\n from ocd_backend.extractors import BaseExtractor\n@@ -9,10 +11,8 @@\n from ocd_backend.utils.api import FrontendAPIMixin\n from ocd_backend.utils.http import HttpRequestMixin\n from ocd_backend.utils.ibabs import (\n- meeting_to_dict, meeting_item_to_dict,\n- meeting_type_to_dict, list_report_response_to_dict,\n- list_entry_response_to_dict, votes_to_dict, person_profile_to_dict)\n-from ocd_backend.utils.misc import full_normalized_motion_id\n+ meeting_to_dict, list_report_response_to_dict,\n+ list_entry_response_to_dict, votes_to_dict)\n \n log = get_source_logger('extractor')\n \n@@ -33,9 +33,15 @@ def __init__(self, *args, **kwargs):\n ibabs_wsdl = self.source_definition['wsdl']\n except Exception as e:\n ibabs_wsdl = settings.IBABS_WSDL\n- # log.debug(ibabs_wsdl)\n- self.client = Client(ibabs_wsdl)\n- self.client.set_options(port='BasicHttpsBinding_IPublic')\n+\n+ soap_settings = Settings(extra_http_headers={'User-Agent': settings.USER_AGENT})\n+\n+ try:\n+ self.client = Client(ibabs_wsdl,\n+ port_name='BasicHttpsBinding_IPublic',\n+ settings=soap_settings)\n+ except Error as e:\n+ log.error('Unable to instantiate iBabs client: ' + str(e))\n \n \n class IBabsCommitteesExtractor(IBabsBaseExtractor):\n@@ -48,18 +54,25 @@ class IBabsCommitteesExtractor(IBabsBaseExtractor):\n def run(self):\n committee_designator = self.source_definition.get(\n 'committee_designator', 'commissie')\n- log.info(\"Getting committees with designator: %s\" % (\n- committee_designator,))\n+ log.debug(\"[%s] Getting ibabs committees with designator: %s\" %\n+ (self.source_definition['index_name'], committee_designator))\n meeting_types = self.client.service.GetMeetingtypes(\n self.source_definition['sitename']\n )\n \n if meeting_types.Meetingtypes:\n- for mt in meeting_types.Meetingtypes[0]:\n+ total_count = 0\n+ for mt in meeting_types.Meetingtypes['iBabsMeetingtype']:\n if committee_designator in mt.Meetingtype.lower():\n- yield 'application/json', json.dumps(meeting_type_to_dict(mt))\n+ committee = serialize_object(mt, dict)\n+ yield 'application/json', \\\n+ json.dumps(committee), \\\n+ committee['Id'], \\\n+ 'used_file_placeholder'\n+ total_count += 1\n+ log.info(\"[%s] Extracted total of %d ibabs committees.\" % (self.source_definition['index_name'], total_count))\n else:\n- log.warning('SOAP service error for %s: %s' % (self.source_definition['index_name'], meeting_types.Message))\n+ log.warning('[%s] SOAP service error: %s' % (self.source_definition['index_name'], meeting_types.Message))\n \n \n class IBabsMeetingsExtractor(IBabsBaseExtractor):\n@@ -72,7 +85,7 @@ def _meetingtypes_as_dict(self):\n meeting_types = self.client.service.GetMeetingtypes(self.source_definition['sitename'])\n \n if meeting_types.Meetingtypes:\n- for o in meeting_types.Meetingtypes[0]:\n+ for o in meeting_types.Meetingtypes['iBabsMeetingtype']:\n include_regex = self.source_definition.get('include', None)\n 
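The suds-to-zeep migration above boils down to constructing the client with a named port and passing extra HTTP headers via zeep's Settings, then flattening responses with serialize_object. A minimal sketch assuming a reachable WSDL; the URL, User-Agent string, and service call are placeholders, not the project's real settings:

from zeep import Client, Settings
from zeep.exceptions import Error
from zeep.helpers import serialize_object

WSDL = 'https://example.org/service.svc?singleWsdl'  # placeholder WSDL

soap_settings = Settings(extra_http_headers={'User-Agent': 'ori-backend/1.0'})
try:
    client = Client(WSDL,
                    port_name='BasicHttpsBinding_IPublic',
                    settings=soap_settings)
except Error as e:
    print('Unable to instantiate client: %s' % e)
else:
    # zeep returns rich objects; serialize_object flattens them to plain dicts
    response = client.service.GetMeetingtypes('SomeSite')  # hypothetical call
    as_dict = serialize_object(response, dict)
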
if include_regex and not re.search(include_regex, o.Description):\n continue\n@@ -84,14 +97,13 @@ def _meetingtypes_as_dict(self):\n meeting_types[o.Id] = o.Description\n return meeting_types\n else:\n- log.warning('SOAP service error for %s: %s' % (self.source_definition['index_name'], meeting_types.Message))\n+ log.warning('[%s] SOAP service error: %s' % (self.source_definition['index_name'], meeting_types.Message))\n \n def run(self):\n meeting_count = 0\n meetings_skipped = 0\n- meeting_item_count = 0\n for start_date, end_date in self.interval_generator():\n- log.info(\"%s: Now processing meetings from %s to %s\" % (\n+ log.debug(\"[%s] Now processing meetings from %s to %s\" % (\n self.source_definition['sitename'], start_date, end_date,))\n \n meetings = self.client.service.GetMeetingsByDateRange(\n@@ -103,33 +115,165 @@ def run(self):\n meeting_types = self._meetingtypes_as_dict()\n \n if meetings.Meetings:\n- for meeting in meetings.Meetings[0]:\n- meeting_dict = meeting_to_dict(meeting)\n+ for meeting in meetings.Meetings['iBabsMeeting']:\n \n- # sometimes a meetingtype is actually a meeting for some\n+ meeting_dict = serialize_object(meeting, dict)\n+ # Convert unserializable keys to text\n+ meeting_dict['PublishDate'] = meeting_dict['PublishDate'].isoformat()\n+ meeting_dict['MeetingDate'] = meeting_dict['MeetingDate'].isoformat()\n+\n+ # sometimes a meeting type is not actually a meeting for some\n # reason. Let's ignore these for now\n if meeting_dict['MeetingtypeId'] not in meeting_types:\n meetings_skipped += 1\n continue\n \n- meeting_dict['Meetingtype'] = meeting_types[\n- meeting_dict['MeetingtypeId']]\n- yield 'application/json', json.dumps(meeting_dict)\n-\n- if meeting.MeetingItems is not None:\n- for meeting_item in meeting.MeetingItems[0]:\n- meeting_item_dict = meeting_item_to_dict(\n- meeting_item)\n- # This is a bit hacky, but we need to know this\n- meeting_item_dict['MeetingId'] = meeting_dict['Id']\n- meeting_item_dict['Meeting'] = meeting_dict\n- yield 'application/json', json.dumps(\n- meeting_item_dict)\n- meeting_item_count += 1\n+ meeting_dict['Meetingtype'] = meeting_types[meeting_dict['MeetingtypeId']]\n+ yield 'application/json', \\\n+ json.dumps(meeting_dict), \\\n+ meeting_dict['Id'], \\\n+ meeting_dict\n+\n meeting_count += 1\n \n- log.info(\"Extracted total of %d meetings and %d meeting items. Also \"\n- \"Skipped %d meetings in total.\" % (meeting_count, meeting_item_count, meetings_skipped,))\n+ log.info(\"[%s] Extracted total of %d ibabs meetings. Also skipped %d meetings in total.\" %\n+ (self.source_definition['sitename'], meeting_count, meetings_skipped,))\n+\n+\n+class IBabsReportsExtractor(IBabsBaseExtractor):\n+ \"\"\"\n+ Extracts reports from the iBabs SOAP Service. 
The source definition should\n+ state which kind of reports should be extracted.\n+ \"\"\"\n+\n+ def run(self):\n+ lists = self.client.service.GetLists(Sitename=self.source_definition['sitename'])\n+\n+ if len(lists) < 1:\n+ log.info(\"[%s] No ibabs reports defined\" % (self.source_definition['sitename'],))\n+ return\n+\n+ selected_lists = []\n+ for l in lists:\n+ include_regex = self.source_definition.get('include', None) or self.source_definition['regex']\n+ if not re.search(include_regex, l.Value.lower()):\n+ continue\n+ exclude_regex = self.source_definition.get('exclude', None) or r'^$'\n+ if re.search(exclude_regex, l.Value.lower()):\n+ continue\n+ selected_lists.append(l)\n+\n+ total_yield_count = 0\n+ for l in selected_lists:\n+ reports = self.client.service.GetListReports(Sitename=self.source_definition['sitename'], ListId=l.Key)\n+ report = reports[0]\n+ if len(reports) > 1:\n+ try:\n+ report = [\n+ r for r in reports if r.Value == l.Value][0]\n+ except IndexError as e:\n+ pass\n+\n+ active_page_nr = 0\n+ max_pages = self.source_definition.get('max_pages', 1)\n+ per_page = self.source_definition.get('per_page', 100)\n+ result_count = per_page\n+ total_count = 0\n+ yield_count = 0\n+ while (active_page_nr < max_pages) and (result_count == per_page):\n+ try:\n+ result = self.client.service.GetListReport(\n+ Sitename=self.source_definition['sitename'],\n+ ListId=l.Key, ReportId=report.Key,\n+ ActivePageNr=active_page_nr, RecordsPerPage=per_page)\n+ except Exception as e: # most likely an XML parse problem\n+ log.warning(\"[%s] Could not parse page %s correctly!: %s\" % (\n+ self.source_definition['sitename'], active_page_nr, e.message))\n+ result = None\n+ result_count = 0\n+ # log.debug(\"* %s: %s/%s - %d/%d\" % (\n+ # self.source_definition['sitename'],\n+ # result.ListName, result.ReportName,\n+ # active_page_nr, max_pages,))\n+\n+ if result is not None:\n+ try:\n+ document_element = result.Data.diffgram[0].DocumentElement[0]\n+ except AttributeError as e:\n+ document_element = None\n+ except IndexError as e:\n+ document_element = None\n+ else:\n+ document_element = None\n+\n+ if document_element is None:\n+ log.debug(\"[%s] No correct document element for this page!\" % self.source_definition['sitename'])\n+ total_count += per_page\n+ continue\n+\n+ for item in document_element.results:\n+ dict_item = list_report_response_to_dict(item)\n+ dict_item['_ListName'] = result.ListName\n+ dict_item['_ReportName'] = result.ReportName\n+ extra_info_item = self.client.service.GetListEntry(\n+ Sitename=self.source_definition['sitename'],\n+ ListId=l.Key, EntryId=dict_item['id'][0])\n+ dict_item['_Extra'] = list_entry_response_to_dict(\n+ extra_info_item)\n+ # if dict_item['kenmerk'][0].startswith('2018 M67'):\n+ # log.debug(dict_item)\n+ # try:\n+ # # this should be the motion's unique identifier\n+ # log.debug(full_normalized_motion_id(\n+ # dict_item['_Extra']['Values'][u'Onderwerp']))\n+ # except (KeyError, AttributeError) as e:\n+ # pass\n+ yield 'application/json', json.dumps(dict_item), dict_item['id'][0], dict_item\n+ yield_count += 1\n+ total_yield_count += 1\n+ result_count += 1\n+ total_count += result_count\n+ active_page_nr += 1\n+ log.debug(\"[%s] Report: %s -- total %s, results %s, yielded %s\" % (\n+ self.source_definition['sitename'], l.Value, total_count, result_count, yield_count))\n+\n+ log.info(\"[%s] Extracted total of %s ibabs reports yielded\" % (\n+ self.source_definition['sitename'], total_yield_count))\n+\n+\n+class IbabsPersonsExtractor(IBabsBaseExtractor):\n+ 
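The report loop above pages until a page comes back short or max_pages is exhausted; the stop condition (result_count == per_page) is the generic "a full page may mean more" pattern. A self-contained sketch of that loop against a fake backend (fetch_page stands in for client.service.GetListReport):

def fetch_all(fetch_page, per_page=100, max_pages=5):
    # Keep paging while pages come back full and the page budget allows
    page_nr, items = 0, []
    result_count = per_page
    while page_nr < max_pages and result_count == per_page:
        page = fetch_page(page_nr, per_page)
        result_count = len(page)
        items.extend(page)
        page_nr += 1
    return items

records = list(range(250))  # fake backend with 250 rows
fetched = fetch_all(lambda n, size: records[n * size:(n + 1) * size])
print(len(fetched))  # 250: pages of 100, 100, 50; the short page stops the loop

One trade-off of this condition: a backend whose last page happens to be exactly full costs one extra (empty) request before the loop stops.
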
\"\"\"\n+ Extracts person profiles from the iBabs SOAP service.\n+ \"\"\"\n+\n+ def run(self):\n+ users = self.client.service.GetUsers(\n+ self.source_definition['sitename']\n+ )\n+\n+ if users.Users:\n+ total_count = 0\n+ for user in users.Users['iBabsUserBasic']:\n+ identifier = user['UniqueId']\n+\n+ user_details = self.client.service.GetUser(\n+ self.source_definition['sitename'],\n+ identifier\n+ )\n+\n+ profile = serialize_object(user_details.User.PublicProfile, dict)\n+ # Picture can't be JSON-encoded so we delete it\n+ del profile['Picture']\n+\n+ yield 'application/json', json.dumps(profile), profile['UserId'], profile\n+ total_count += 1\n+\n+ log.info(\"[%s] Extracted total of %s ibabs persons\" % (self.source_definition['index_name'], total_count))\n+\n+ elif users.Message == 'No users found!':\n+ log.info('[%s] No ibabs persons were found' % self.source_definition['index_name'])\n+ else:\n+ log.warning('[%s] SOAP service error: %s' % (self.source_definition['index_name'], users.Message))\n \n \n class IBabsVotesMeetingsExtractor(IBabsBaseExtractor):\n@@ -167,6 +311,11 @@ def run(self):\n dates = [x for x in self.interval_generator()]\n if self.source_definition.get('reverse_chronological', False):\n dates.reverse()\n+\n+ meeting_count = 0\n+ vote_count = 0\n+ passed_vote_count = 0\n+\n for start_date, end_date in dates:\n meetings = self.client.service.GetMeetingsByDateRange(\n Sitename=self.source_definition['sitename'],\n@@ -175,10 +324,6 @@ def run(self):\n MetaDataOnly=False)\n \n meeting_types = self._meetingtypes_as_dict()\n-\n- meeting_count = 0\n- vote_count = 0\n-\n meeting_sorting_key = self.source_definition.get(\n 'meeting_sorting', 'MeetingDate')\n \n@@ -193,23 +338,23 @@ def run(self):\n for meeting in sorted_meetings:\n meeting_dict = meeting_to_dict(meeting)\n # Getting the meeting type as a string is easier this way ...\n- log.debug(meeting_dict['Id'])\n+ # log.debug(meeting_dict['Id'])\n meeting_dict['Meetingtype'] = meeting_types[\n meeting_dict['MeetingtypeId']]\n \n- kv = self.client.factory.create('ns0:iBabsKeyValue')\n+ kv = self.client.factory.create('ns0:iBabsKeyValue') # pylint: disable=no-member\n kv.Key = 'IncludeMeetingItems'\n kv.Value = True\n \n- kv2 = self.client.factory.create('ns0:iBabsKeyValue')\n+ kv2 = self.client.factory.create('ns0:iBabsKeyValue') # pylint: disable=no-member\n kv2.Key = 'IncludeListEntries'\n kv2.Value = True\n \n- kv3 = self.client.factory.create('ns0:iBabsKeyValue')\n+ kv3 = self.client.factory.create('ns0:iBabsKeyValue') # pylint: disable=no-member\n kv3.Key = 'IncludeMeetingItems'\n kv3.Value = True\n \n- params = self.client.factory.create('ns0:ArrayOfiBabsKeyValue')\n+ params = self.client.factory.create('ns0:ArrayOfiBabsKeyValue') # pylint: disable=no-member\n params.iBabsKeyValue.append(kv)\n params.iBabsKeyValue.append(kv2)\n params.iBabsKeyValue.append(kv3)\n@@ -219,7 +364,7 @@ def run(self):\n MeetingId=meeting_dict['Id'],\n Options=params)\n meeting_dict_short = meeting_to_dict(vote_meeting.Meeting)\n- log.debug(meeting_dict_short['MeetingDate'])\n+ # log.debug(meeting_dict_short['MeetingDate'])\n if meeting_dict_short['MeetingItems'] is None:\n continue\n for mi in meeting_dict_short['MeetingItems']:\n@@ -245,13 +390,13 @@ def run(self):\n meeting_count += 1\n \n # log.debug(processed)\n- passed_vote_count = 0\n for result in processed:\n- yield 'application/json', json.dumps(result)\n+ yield 'application/json', json.dumps(result), 'entity_placeholder', result\n passed_vote_count += 1\n- log.info(\"Now 
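As with the Picture field deleted above and the PublishDate/MeetingDate isoformat() conversions earlier in this diff, the dicts coming out of zeep's serialize_object can still contain values json.dumps refuses. A tiny illustration with made-up profile data:

import json
from datetime import datetime

profile = {
    'UserId': 'abc-123',               # made-up values
    'Picture': b'\x89PNG\r\n',         # raw bytes: not JSON-serializable
    'Modified': datetime(2019, 1, 1),  # datetime: not JSON-serializable
}
del profile['Picture']                 # drop what cannot be encoded
profile['Modified'] = profile['Modified'].isoformat()  # convert the rest
print(json.dumps(profile, sort_keys=True))
# {"Modified": "2019-01-01T00:00:00", "UserId": "abc-123"}
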
processing meetings from %s to %s\" % (start_date, end_date,))\n- log.info(\"Extracted %d meetings and passed %s out of %d voting rounds.\" % (\n- meeting_count, passed_vote_count, vote_count,))\n+ log.debug(\"[%s] Now processing meetings from %s to %s\" % (self.source_definition['index_name'], start_date, end_date,))\n+\n+ log.info(\"[%s] Extracted total of %d ibabs meetings and passed %s out of %d voting rounds.\" % (\n+ self.source_definition['index_name'], meeting_count, passed_vote_count, vote_count,))\n \n \n class IBabsMostRecentCompleteCouncilExtractor(IBabsVotesMeetingsExtractor, HttpRequestMixin, FrontendAPIMixin):\n@@ -277,7 +422,7 @@ def process_meeting(self, meeting):\n result = None\n if (max_meetings <= 0) or (meeting_count < max_meetings):\n setattr(self, 'meeting_count', meeting_count + 1)\n- log.debug(\"Processing meeting %d\" % (meeting_count,))\n+ log.debug(\"[%s] Processing meeting %d\" % (self.source_definition['sitename'], meeting_count,))\n council = self.api_request(\n self.source_definition['index_name'], 'organizations',\n classification=u'Council')\n@@ -376,131 +521,3 @@ def process_meeting(self, meeting):\n return result\n else:\n return []\n-\n-\n-class IBabsReportsExtractor(IBabsBaseExtractor):\n- \"\"\"\n- Extracts reports from the iBabs SOAP Service. The source definition should\n- state which kind of reports should be extracted.\n- \"\"\"\n-\n- def run(self):\n- lists = self.client.service.GetLists(\n- Sitename=self.source_definition['sitename'])\n-\n- try:\n- kv = lists.iBabsKeyValue\n- except AttributeError as e:\n- log.info(\"No reports defined for %s\" % (\n- self.source_definition['sitename'],))\n- return\n-\n- selected_lists = []\n- for l in lists.iBabsKeyValue:\n- include_regex = self.source_definition.get('include', None) or self.source_definition['regex']\n- if not re.search(include_regex, l.Value.lower()):\n- continue\n- exclude_regex = self.source_definition.get('exclude', None) or r'^$'\n- if re.search(exclude_regex, l.Value.lower()):\n- continue\n- selected_lists.append(l)\n-\n- for l in selected_lists:\n- reports = self.client.service.GetListReports(\n- Sitename=self.source_definition['sitename'], ListId=l.Key)\n- report = reports.iBabsKeyValue[0]\n- if len(reports.iBabsKeyValue) > 1:\n- try:\n- report = [\n- r for r in reports.iBabsKeyValue if r.Value == l.Value][0]\n- except IndexError as e:\n- pass\n-\n- active_page_nr = 0\n- max_pages = self.source_definition.get('max_pages', 1)\n- per_page = self.source_definition.get('per_page', 100)\n- result_count = per_page\n- total_count = 0\n- yield_count = 0\n- while (active_page_nr < max_pages) and (result_count == per_page):\n- try:\n- result = self.client.service.GetListReport(\n- Sitename=self.source_definition['sitename'],\n- ListId=l.Key, ReportId=report.Key,\n- ActivePageNr=active_page_nr, RecordsPerPage=per_page)\n- except Exception as e: # most likely an XML parse problem\n- log.warning(\"Could not parse page %s correctly!: %s\" % (\n- active_page_nr, e.message))\n- result = None\n- result_count = 0\n- # log.debug(\"* %s: %s/%s - %d/%d\" % (\n- # self.source_definition['sitename'],\n- # result.ListName, result.ReportName,\n- # active_page_nr, max_pages,))\n-\n- if result is not None:\n- try:\n- document_element = result.Data.diffgram[0].DocumentElement[0]\n- except AttributeError as e:\n- document_element = None\n- except IndexError as e:\n- document_element = None\n- else:\n- document_element = None\n-\n- if document_element is None:\n- log.debug(\"No correct document element for this 
page!\")\n- total_count += per_page\n- continue\n-\n- for item in document_element.results:\n- dict_item = list_report_response_to_dict(item)\n- dict_item['_ListName'] = result.ListName\n- dict_item['_ReportName'] = result.ReportName\n- extra_info_item = self.client.service.GetListEntry(\n- Sitename=self.source_definition['sitename'],\n- ListId=l.Key, EntryId=dict_item['id'][0])\n- dict_item['_Extra'] = list_entry_response_to_dict(\n- extra_info_item)\n- # if dict_item['kenmerk'][0].startswith('2018 M67'):\n- # log.debug(dict_item)\n- try:\n- # this should be the motion's unique identifier\n- log.debug(full_normalized_motion_id(\n- dict_item['_Extra']['Values'][u'Onderwerp']))\n- except (KeyError, AttributeError) as e:\n- pass\n- yield 'application/json', json.dumps(dict_item)\n- yield_count += 1\n- result_count += 1\n- total_count += result_count\n- active_page_nr += 1\n- log.info(\"%s -- total: %s, results %s, yielded %s\" % (l.Value, total_count, result_count, yield_count,))\n-\n-\n-class IbabsPersonsExtractor(IBabsBaseExtractor):\n- \"\"\"\n- Extracts person profiles from the iBabs SOAP service.\n- \"\"\"\n-\n- def run(self):\n- users = self.client.service.GetUsers(\n- self.source_definition['sitename']\n- )\n-\n- if users.Users:\n- for user in users.Users[0]:\n- identifier = user['UniqueId']\n-\n- user_details = self.client.service.GetUser(\n- self.source_definition['sitename'],\n- identifier\n- )\n-\n- profile = person_profile_to_dict(user_details.User.PublicProfile)\n- yield 'application/json', json.dumps(profile)\n-\n- elif users.Message == 'No users found!':\n- log.info('No ibabs users were found for %s' % self.source_definition['index_name'])\n- else:\n- log.warning('SOAP service error for %s: %s' % (self.source_definition['index_name'], users.Message))\ndiff --git a/ocd_backend/extractors/notubiz.py b/ocd_backend/extractors/notubiz.py\n--- a/ocd_backend/extractors/notubiz.py\n+++ b/ocd_backend/extractors/notubiz.py\n@@ -35,7 +35,7 @@ def __init__(self, *args, **kwargs):\n try:\n response.raise_for_status()\n except HTTPError, e:\n- log.warning('%s: %s' % (e, response.request.url))\n+ log.warning('[%s] %s: %s' % (self.source_definition['sitename'], e, response.request.url))\n return\n \n # Create a dictionary of Notubiz organizations. 
Some child classes need information\n@@ -59,16 +59,23 @@ class NotubizCommitteesExtractor(NotubizBaseExtractor):\n def run(self):\n response = self.http_session.get(\n \"%s/organisations/%s/gremia\"\n- \"?format=json&version=1.10.8\" % (self.base_url, self.source_definition['organisation_id'])\n+ \"?format=json&version=1.10.8\" % (self.base_url, self.source_definition['notubiz_organization_id'])\n )\n response.raise_for_status()\n \n committee_count = 0\n for committee in json.loads(response.content)['gremia']:\n- yield 'application/json', json.dumps(committee)\n+ entity = '%s/organisations/%s/gremia/%s?format=json&version=1.10.8' % (\n+ self.base_url,\n+ self.source_definition['notubiz_organization_id'],\n+ committee['id'])\n+ yield 'application/json', \\\n+ json.dumps(committee), \\\n+ entity, \\\n+ committee\n committee_count += 1\n \n- log.info(\"Extracted total of %d committees.\" % committee_count)\n+ log.info(\"[%s] Extracted total of %d notubiz committees.\" % (self.source_definition['sitename'], committee_count))\n \n \n class NotubizMeetingsExtractor(NotubizBaseExtractor):\n@@ -82,7 +89,7 @@ def run(self):\n \n start_date, end_date = self.date_interval()\n \n- log.info(\"Now processing first page meeting(items) from %s to %s\" % (\n+ log.debug(\"Now processing first page meeting(items) from %s to %s\" % (\n start_date, end_date,))\n \n page = 1\n@@ -93,20 +100,20 @@ def run(self):\n \"&format=json&version=1.10.8&page=%i\" %\n (\n self.base_url,\n- self.source_definition['organisation_id'],\n+ self.source_definition['notubiz_organization_id'],\n start_date.strftime(\"%Y-%m-%d %H:%M:%S\"),\n end_date.strftime(\"%Y-%m-%d %H:%M:%S\"),\n page\n )\n )\n except (HTTPError, RetryError), e:\n- log.warning('%s: %s' % (e, response.request.url))\n+ log.warning('[%s] %s: %s' % (self.source_definition['sitename'], e, response.request.url))\n break\n \n try:\n response.raise_for_status()\n except HTTPError, e:\n- log.warning('%s: %s' % (e, response.request.url))\n+ log.warning('[%s] %s: %s' % (self.source_definition['sitename'], e, response.request.url))\n break\n \n event_json = response.json()\n@@ -115,31 +122,31 @@ def run(self):\n break\n \n if page > 1:\n- log.debug(\"Processing page %i\" % page)\n+ log.debug(\"[%s] Processing events page %i\" % (self.source_definition['sitename'], page))\n \n for item in event_json[self.source_definition['doc_type']]:\n+ # Skip meetings that are not public\n+ if item['permission_group'] != 'public':\n+ meetings_skipped += 1\n+ continue\n+\n try:\n- data = self.fetch_data(\n- \"%s/events/meetings/%i?format=json&version=1.10.8\" %\n- (\n- self.base_url,\n- item['id']\n- ),\n- \"events/meetings/%i\" % item['id'],\n- item['last_modified'],\n- )\n+ meeting_url = \"%s/events/meetings/%i?format=json&version=1.10.8\" % (self.base_url, item['id'])\n+ data = self.fetch_data(meeting_url,\n+ \"events/meetings/%i\" % item['id'],\n+ item['last_modified'])\n meeting_json = json.loads(data)['meeting']\n except ItemAlreadyProcessed, e:\n # This should no longer be triggered after the change to GCS caching\n meetings_skipped += 1\n- log.info(e)\n+ log.debug(\"[%s] %s\" % (self.source_definition['sitename'], e))\n continue\n except Exception as e:\n meetings_skipped += 1\n- log.warning('%s: %s' % (e, response.request.url))\n+ log.warning('[%s] %s: %s' % (self.source_definition['sitename'], e, response.request.url))\n continue\n \n- organization = self.organizations[self.source_definition['organisation_id']]\n+ organization = 
self.organizations[self.source_definition['notubiz_organization_id']]\n \n attributes = {}\n for meeting in meeting_json['attributes']:\n@@ -149,17 +156,20 @@ def run(self):\n pass\n meeting_json['attributes'] = attributes\n \n- yield 'application/json', json.dumps(meeting_json)\n+ yield 'application/json', \\\n+ json.dumps(meeting_json), \\\n+ meeting_url, \\\n+ meeting_json\n meeting_count += 1\n \n page += 1\n \n if not event_json['pagination']['has_more_pages']:\n- log.info(\"Done processing all pages!\")\n+ log.debug(\"[%s] Done processing all %d pages!\" % (self.source_definition['sitename'], page))\n break\n \n- log.info(\"Extracted total of %d meeting(items). Also skipped %d \"\n- \"meetings in total.\" % (meeting_count, meetings_skipped,))\n+ log.info(\"[%s] Extracted total of %d notubiz meeting(items). Also skipped %d \"\n+ \"meetings in total.\" % (self.source_definition['sitename'], meeting_count, meetings_skipped,))\n \n \n # class NotubizMeetingItemExtractor(NotubizBaseExtractor):\ndiff --git a/ocd_backend/extractors/odata.py b/ocd_backend/extractors/odata.py\ndeleted file mode 100644\n--- a/ocd_backend/extractors/odata.py\n+++ /dev/null\n@@ -1,38 +0,0 @@\n-import json\n-import os\n-\n-from ocd_backend import settings\n-from ocd_backend.log import get_source_logger\n-from .staticfile import StaticJSONExtractor\n-\n-log = get_source_logger('extractor')\n-\n-\n-class ODataExtractor(StaticJSONExtractor):\n- \"\"\"\n- Extract items from an OData Feed.\n- \"\"\"\n-\n- def extract_items(self, static_content):\n- \"\"\"\n- Extracts items from a JSON file. It is assumed to be an array\n- of items.\n- \"\"\"\n- static_json = {'value': []}\n-\n- gem_path = os.path.abspath(\n- os.path.join(settings.PROJECT_PATH, 'json', 'gemeenten.json'))\n- with open(gem_path) as gem_file:\n- static_json = json.load(gem_file)\n-\n- item_filter = self.source_definition['filter']\n- log.info(\"Searching for: %s\" % (item_filter,))\n-\n- for item in static_json['value']:\n- # log.debug(item)\n- passed_filter = (item_filter is None) or (\n- item[item_filter.keys()[0]] == item_filter.values()[0])\n-\n- if passed_filter:\n- log.debug(item)\n- yield 'application/json', json.dumps(item)\ndiff --git a/ocd_backend/extractors/popit.py b/ocd_backend/extractors/popit.py\ndeleted file mode 100644\n--- a/ocd_backend/extractors/popit.py\n+++ /dev/null\n@@ -1,30 +0,0 @@\n-import json\n-\n-from ocd_backend.utils.http import HttpRequestMixin\n-from .staticfile import StaticJSONExtractor\n-\n-\n-class PopItExtractor(StaticJSONExtractor, HttpRequestMixin):\n- \"\"\"\n- Extract items from an OData Feed.\n- \"\"\"\n-\n- def extract_items(self, static_content):\n- \"\"\"\n- Extracts items from the result of a popit call. 
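A pattern running through this whole diff: extractors now yield four values (content type, serialized payload, a reference to the source entity, and the original object) instead of two. A minimal producer/consumer sketch of that widened contract; the item and URL are illustrative:

import json

def run():
    committee = {'id': 42, 'name': 'Commissie Bestuur'}  # illustrative item
    entity = 'https://api.example.org/gremia/42'         # where it came from
    yield 'application/json', json.dumps(committee), entity, committee

for content_type, payload, entity, original in run():
    print('%s %s -> %s' % (content_type, entity, original['name']))

Carrying the entity reference and the original object alongside the serialized payload spares downstream stages from re-fetching or re-parsing the source.
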
Does paging.\n- \"\"\"\n-\n- static_json = json.loads(static_content)\n- page_count = 1\n-\n- while static_json is not None:\n- for item in static_json['result']:\n- yield 'application/json', json.dumps(item)\n-\n- if static_json.get('next_url'):\n- page_count += 1\n- result = self.http_session.get(\n- static_json['next_url'])\n- static_json = result.json()\n- else:\n- static_json = None # force the end of the loop\ndiff --git a/ocd_backend/extractors/staticfile.py b/ocd_backend/extractors/staticfile.py\n--- a/ocd_backend/extractors/staticfile.py\n+++ b/ocd_backend/extractors/staticfile.py\n@@ -8,6 +8,9 @@\n from ocd_backend.exceptions import ConfigurationError\n from ocd_backend.extractors import BaseExtractor\n from ocd_backend.utils.http import HttpRequestMixin\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('extractor')\n \n \n class StaticFileBaseExtractor(BaseExtractor, HttpRequestMixin):\n@@ -46,9 +49,6 @@ def extract_items(self, static_content):\n \n def run(self):\n # Retrieve the static content from the source\n- # TODO: disable ssl verification fro now since the\n- # almanak implementation (of ssl) is broken.\n-\n try:\n r = self.http_session.get(self.file_url, verify=False)\n static_content = r.content\n@@ -145,8 +145,15 @@ def extract_items(self, static_content):\n except KeyError as e:\n pass\n \n+ item_total = 0\n for item in tree.xpath(self.item_xpath, namespaces=self.namespaces):\n yield 'application/html', etree.tostring(item)\n+ item_total += 1\n+\n+ log.info(\"[%s] Extracted total of %d %s %s items\" % (self.source_definition['sitename'],\n+ item_total,\n+ self.source_definition.get('classification', ''),\n+ self.source_definition['entity']))\n \n \n class StaticJSONExtractor(StaticFileBaseExtractor):\ndiff --git a/ocd_backend/items/__init__.py b/ocd_backend/items/__init__.py\ndeleted file mode 100644\n--- a/ocd_backend/items/__init__.py\n+++ /dev/null\n@@ -1,152 +0,0 @@\n-import json\n-from datetime import datetime\n-\n-from ocd_backend.exceptions import FieldNotAvailable\n-from ocd_backend.models import Metadata\n-from ocd_backend.models.misc import Uri\n-from ocd_backend.models.definitions import Mapping\n-\n-\n-class BaseItem(object):\n- \"\"\"Represents a single extracted and transformed item.\n-\n- :param source_definition: The configuration of a single source in\n- the form of a dictionary (as defined in the settings).\n- :type source_definition: dict\n- :param data_content_type: The content-type of the data retrieved\n- from the source (e.g. ``application/json``).\n- :type data_content_type: str\n- :param data: The data in it's original format, as retrieved\n- from the source.\n- :type data: unicode\n- :param item: the deserialized item retrieved from the source.\n- :param processing_started: The datetime we started processing this\n- item. 
If ``None``, the current datetime is used.\n- :type processing_started: datetime or None\n- \"\"\"\n-\n- def __init__(self, source_definition, data_content_type, data, item, run_node, processing_started=None,\n- final_try=False):\n- self.source_definition = source_definition\n- self.data_content_type = data_content_type\n- self.data = data\n- self.original_item = item\n- self.run_node = run_node\n- self.final_try = final_try\n-\n- # On init, all data should be available to construct self.meta\n- # and self.combined_item\n- self._construct_object_meta(processing_started)\n- self._store_object_data()\n-\n- def _construct_object_meta(self, processing_started=None):\n- source_defaults = {\n- 'source': 'ori/meta',\n- 'source_id_key': 'identifier',\n- 'organization': 'ori',\n- }\n-\n- # meta = Metadata(1)\n- #\n- # if not processing_started:\n- # meta.processing_started = datetime.now()\n- #\n- # meta.source_id = unicode(self.source_definition['id'])\n- # meta.collection = self.get_collection()\n- # meta.rights = self.get_rights()\n- #\n- # self.meta = meta\n-\n- def _store_object_data(self):\n- object_data = self.get_object_model()\n- # object_data.meta = self.meta\n-\n- object_data.save()\n-\n- self.object_data = object_data\n-\n- def get_object_model(self):\n- \"\"\"Construct the document that should be inserted into the index\n- belonging to the item's source.\n- \"\"\"\n- raise NotImplementedError\n-\n- def get_collection(self):\n- \"\"\"Retrieves the name of the collection the item belongs to.\n-\n- This method should be implemented by the class that inherits from\n- :class:`.BaseItem`.\n-\n- :rtype: unicode.\n- \"\"\"\n- raise NotImplementedError\n-\n- def get_rights(self):\n- \"\"\"Retrieves the rights of the item as defined by the source.\n- With 'rights' we mean information about copyright, licenses,\n- instructions for reuse, etcetera. 
\"Creative Commons Zero\" is an\n- example of a possible value of rights.\n-\n- This method should be implemented by the class that inherits from\n- :class:`.BaseItem`.\n-\n- :rtype: unicode.\n- \"\"\"\n- raise NotImplementedError\n-\n-\n-# todo needs revision v1\n-# class LocalDumpItem(BaseItem):\n-# \"\"\"\n-# Represents an Item extracted from a local dump\n-# \"\"\"\n-#\n-# def get_collection(self):\n-# collection = self.original_item['_source'].get('meta', {}) \\\n-# .get('collection')\n-# if not collection:\n-# raise FieldNotAvailable('collection')\n-# return collection\n-#\n-# def get_rights(self):\n-# rights = self.original_item['_source'].get('meta', {}).get('rights')\n-# if not rights:\n-# raise FieldNotAvailable('rights')\n-# return rights\n-#\n-# def get_object_model(self):\n-# combined_index_data = self.original_item['_source'] \\\n-# .get('combined_index_data')\n-# if not combined_index_data:\n-# raise FieldNotAvailable('combined_index_data')\n-#\n-# data = json.loads(combined_index_data)\n-# data.pop('meta')\n-# # Cast datetimes\n-# for key, value in data.iteritems():\n-# if self.combined_index_fields.get(key) == datetime:\n-# data[key] = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')\n-#\n-# return data\n-#\n-# def get_all_text(self):\n-# \"\"\"\n-# Returns the content that is stored in the combined_index_data.all_text\n-# field, and raise a `FieldNotAvailable` exception when it is not\n-# available.\n-#\n-# :rtype: unicode\n-# \"\"\"\n-# combined_index_data = json.loads(self.original_item['_source']\n-# .get('combined_index_data', {}))\n-# all_text = combined_index_data.get('all_text')\n-# if not all_text:\n-# raise FieldNotAvailable('combined_index_data.all_text')\n-# return all_text\n-#\n-# def get_index_data(self):\n-# \"\"\"Restore all fields that are originally indexed.\n-#\n-# :rtype: dict\n-# \"\"\"\n-# return self.original_item.get('_source', {})\ndiff --git a/ocd_backend/items/attendance.py b/ocd_backend/items/attendance.py\ndeleted file mode 100644\n--- a/ocd_backend/items/attendance.py\n+++ /dev/null\n@@ -1,83 +0,0 @@\n-# todo needs revision v1\n-# from ocd_backend.extractors import HttpRequestMixin\n-# from ocd_backend.items import BaseItem\n-# from ocd_backend.log import get_source_logger\n-# from ocd_backend.utils.api import FrontendAPIMixin\n-#\n-# log = get_source_logger('item')\n-#\n-#\n-# class AttendanceForEventItem(HttpRequestMixin, FrontendAPIMixin, BaseItem):\n-# combined_index_fields = {\n-# 'id': unicode,\n-# 'hidden': bool,\n-# 'doc': dict\n-# }\n-#\n-# def get_object_id(self):\n-# return unicode(self.original_item['id'])\n-#\n-# def get_original_object_id(self):\n-# return self.get_object_id()\n-#\n-# def get_original_object_urls(self):\n-# try:\n-# return self.original_item['meta']['original_object_urls']\n-# except KeyError as e:\n-# return {'html': self.original_item['html_url']}\n-#\n-# def get_rights(self):\n-# try:\n-# return self.original_item['meta']['rights']\n-# except KeyError as e:\n-# return u'undefined'\n-#\n-# def get_collection(self):\n-# return unicode(self.source_definition['index_name'])\n-#\n-# def _get_vote_event(self, event_id):\n-# try:\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'vote_events',\n-# legislative_session_id=event_id, size=30) # FIXME: for now, get the first\n-# log.debug(\"vote events found:\")\n-# for r in results:\n-# log.debug(\"* %s (%s)\" % (r['id'], u','.join(r.keys()),))\n-# return [r for r in results if 'votes' in r][0]\n-# except Exception as e:\n-# log.warning(\"Got 
exception:\", e)\n-# pass\n-#\n-# def _get_voters(self, vote_event):\n-# if 'votes' not in vote_event:\n-# log.info(\"No votes found for event id %s (%s)!\" % (self.original_item['id'], vote_event['id'],))\n-# return []\n-#\n-# return [{'id': p['voter_id']} for p in vote_event['votes']]\n-#\n-# def get_object_model(self):\n-# object_model = dict()\n-#\n-# object_model['id'] = self.original_item['id']\n-#\n-# vote_event = self._get_vote_event(self.original_item['id'])\n-# if vote_event is None:\n-# log.debug(\"No vote id found for event id %s!\" % (self.original_item['id'],))\n-# return {}\n-#\n-# object_model['hidden'] = self.source_definition['hidden']\n-# object_model['doc'] = {\n-# 'attendees': self._get_voters(vote_event)\n-# }\n-#\n-# return object_model\n-#\n-# @staticmethod\n-# def get_index_data():\n-# return {}\n-#\n-# @staticmethod\n-# def get_all_text():\n-# text_items = []\n-#\n-# return u' '.join(text_items)\ndiff --git a/ocd_backend/items/cwc.py b/ocd_backend/items/cwc.py\ndeleted file mode 100644\n--- a/ocd_backend/items/cwc.py\n+++ /dev/null\n@@ -1,141 +0,0 @@\n-# todo needs revision v1\n-# import iso8601\n-#\n-# from ocd_backend.extractors import HttpRequestMixin\n-# from ocd_backend.utils.api import FrontendAPIMixin\n-# from ocd_backend.utils.file_parsing import FileToTextMixin\n-#\n-#\n-# class VideotulenItem(EventItem, HttpRequestMixin, FrontendAPIMixin, FileToTextMixin):\n-# def _get_council(self):\n-# \"\"\"\n-# Gets the organisation that represents the council.\n-# \"\"\"\n-#\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'organizations',\n-# classification='Council')\n-# return results[0]\n-#\n-# def _get_committees(self):\n-# \"\"\"\n-# Gets the committees that are active for the council.\n-# \"\"\"\n-#\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'organizations',\n-# classification=['committee', 'subcommittee'])\n-# return {unicode(c['name']): c for c in results}\n-#\n-# def get_original_object_id(self):\n-# return unicode(self.original_item['Webcast']['Id']).strip()\n-#\n-# def get_original_object_urls(self):\n-# return {\"html\": unicode(self.original_item['Webcast']['RegisterUrl']).strip()}\n-#\n-# @staticmethod\n-# def get_rights():\n-# return u'undefined'\n-#\n-# def get_collection(self):\n-# return unicode(self.source_definition['index_name'])\n-#\n-# def get_object_model(self):\n-# object_model = {}\n-# council = self._get_council()\n-# committees = self._get_committees()\n-#\n-# object_model['id'] = unicode(self.get_object_id())\n-#\n-# object_model['hidden'] = self.source_definition['hidden']\n-#\n-# if 'Title' in self.original_item['Webcast']:\n-# object_model['name'] = u'%s' % (\n-# unicode(self.original_item['Webcast']['Title']),)\n-# else:\n-# object_model['name'] = self.get_collection()\n-#\n-# object_model['identifiers'] = [\n-# {\n-# 'identifier': unicode(self.original_item['Webcast']['Id']),\n-# 'scheme': u'CWC'\n-# },\n-# {\n-# 'identifier': self.get_object_id(),\n-# 'scheme': u'ORI'\n-# }\n-# ]\n-#\n-# try:\n-# object_model['organization_id'] = committees[\n-# object_model['name']]['id']\n-# object_model['organization'] = committees[\n-# object_model['name']]\n-# except KeyError as e:\n-# object_model['organization_id'] = council['id']\n-# object_model['organization'] = council\n-#\n-# object_model['classification'] = u'Videotulen %s' % (\n-# object_model['name'],)\n-#\n-# topic_descriptions = []\n-# if self.original_item['Webcast']['Topics'] is not None:\n-# for topic in 
self.original_item['Webcast']['Topics']['Topic']:\n-# topic_description = topic.get('Description')\n-# if topic_description is None:\n-# topic_description = u''\n-# topic_descriptions.append(\n-# u'
<h3>%s</h3>\\n<p>%s</p>
' % (\n-# topic['Title'], topic_description,))\n-#\n-# if len(topic_descriptions) > 0:\n-# object_model['description'] = u'\\n'.join(topic_descriptions)\n-# else:\n-# object_model['description'] = self.original_item['Webcast']['Description']\n-#\n-# if 'ActualStart' in self.original_item['Webcast']:\n-# start_date_field = 'ActualStart'\n-# end_date_field = 'ActualEnd'\n-# else:\n-# start_date_field = 'ScheduledStart'\n-# end_date_field = 'ScheduledStart'\n-#\n-# object_model['start_date'] = iso8601.parse_date(\n-# self.original_item['Webcast'][start_date_field], )\n-# object_model['end_date'] = iso8601.parse_date(\n-# self.original_item['Webcast'][end_date_field], )\n-# object_model['location'] = u'Raadszaal'\n-# object_model['status'] = u'confirmed'\n-#\n-# object_model['sources'] = [\n-# {'url': a['Location'], 'note': a['Description']} for a in\n-# self.original_item['Webcast']['Attachments']['Attachment']]\n-#\n-# documents = []\n-# if self.original_item['Webcast']['Topics'] is not None:\n-# for topic in self.original_item['Webcast']['Topics']['Topic']:\n-# if topic['Attachments'] is None:\n-# continue\n-# for a in topic['Attachments']['Attachment']:\n-# try:\n-# description = self.file_get_contents(\n-# a['Location'],\n-# self.source_definition.get('pdf_max_pages', 20))\n-# except Exception as e:\n-# description = u''\n-# documents.append({\n-# 'url': a['Location'], 'note': a['Description'],\n-# 'description': description})\n-# object_model['sources'] += documents\n-#\n-# return object_model\n-#\n-# @staticmethod\n-# def get_index_data():\n-# return {}\n-#\n-# @staticmethod\n-# def get_all_text():\n-# text_items = []\n-#\n-# return u' '.join(text_items)\ndiff --git a/ocd_backend/items/ggm.py b/ocd_backend/items/ggm.py\ndeleted file mode 100644\n--- a/ocd_backend/items/ggm.py\n+++ /dev/null\n@@ -1,361 +0,0 @@\n-# todo needs revision v1\n-# import re\n-# from hashlib import sha1\n-#\n-# import iso8601\n-# from lxml import etree\n-#\n-# from ocd_backend.extractors import HttpRequestMixin\n-# from ocd_backend.items import BaseItem\n-# from ocd_backend.log import get_source_logger\n-# from ocd_backend.models import *\n-# from ocd_backend.utils.api import FrontendAPIMixin\n-#\n-# log = get_source_logger('item')\n-#\n-#\n-# class GegevensmagazijnBaseItem(BaseItem, HttpRequestMixin, FrontendAPIMixin):\n-# def _xpath(self, path):\n-# if not hasattr(self, 'xpath'):\n-# self.xpath = etree.XPathEvaluator(self.original_item)\n-# return self.xpath(path, smart_strings=False)\n-#\n-# def _get_current_permalink(self):\n-# return u'%sEntiteiten/%s' % (self.source_definition['base_url'],\n-# self.get_original_object_id())\n-#\n-# def _get_resource_permalink(self, item_id):\n-# return u'%sResources/%s' % (self.source_definition['base_url'], item_id)\n-#\n-# def _get_party(self, party_id):\n-# try:\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'organizations',\n-# classification='Party')\n-#\n-# for x in results:\n-# if x.get('meta', {}).get('original_object_id') == party_id:\n-# return x\n-# except:\n-# pass\n-# return\n-#\n-# def get_original_object_id(self):\n-# return unicode(self._xpath(\"string(@id)\"))\n-#\n-# def get_original_object_urls(self):\n-# return {\"xml\": self._get_current_permalink()}\n-#\n-# def get_rights(self):\n-# return u'undefined'\n-#\n-# def get_collection(self):\n-# return unicode(self.source_definition['index_name'])\n-#\n-# @staticmethod\n-# def get_index_data():\n-# return {}\n-#\n-# @staticmethod\n-# def get_all_text():\n-# text_items = []\n-# 
return u' '.join(text_items)\n-#\n-#\n-# class EventItem(GegevensmagazijnBaseItem):\n-# def get_combined_index_data(self):\n-# combined_index_data = {}\n-#\n-# current_permalink = self._get_current_permalink()\n-#\n-# event = Event(self.get_original_object_id())\n-# event.name = self._xpath(\"string(onderwerp)\")\n-# # combined_index_data['start_date'] = iso8601.parse_date(self._xpath(\"string(planning/begin)\"))\n-# # debug.log(iso8601.parse_date(self._xpath(\"string(planning/einde)\")))\n-# # combined_index_data['end_date'] = self._xpath(\"string(planning/einde)\")\n-# event.location = self._xpath(\"string(locatie)\")\n-# event.categories = u'event_item'\n-#\n-# # try:\n-# # combined_index_data[AgendaItem.position] = int(self._xpath(\"number(volgorde)\"))\n-# # except:\n-# # pass\n-#\n-# event.agenda = [\n-# AgendaItem(item) for item in self._xpath(\"agendapunt/@id\")\n-# ]\n-#\n-# event.attendee = [\n-# Person(item.xpath(\"string(@id)\"))\n-# for item in self._xpath(\"deelnemer\")\n-# ]\n-#\n-# event.absentee = [\n-# Person(item.xpath(\"string(@id)\"))\n-# for item in self._xpath(\"afgemeld\")\n-# ]\n-#\n-# event.motion = [\n-# Motion(item.xpath(\"string(@id)\"))\n-# for item in self._xpath(\"besluit\")\n-# ]\n-#\n-# event.organizer = [\n-# Organization(item.xpath(\"string(name())\"))\n-# for item in self._xpath(\"kamers/*\")\n-# ]\n-#\n-# # Status might be with or without a capital\n-# if self._xpath(\"contains(status, 'itgevoerd')\"): # Uitgevoerd\n-# event.eventStatus = EventStatusType.EventCompleted\n-# elif self._xpath(\"contains(status, 'ervplaats')\"): # Verplaatst\n-# event.eventStatus = EventStatusType.EventRescheduled # u'postponed'\n-# elif self._xpath(\"contains(status, 'epland')\"): # Gepland\n-# event.eventStatus = EventStatusType.EventScheduled # u'tentative'\n-# elif self._xpath(\"contains(status, 'eannuleerd')\"): # Geannuleerd\n-# event.eventStatus = EventStatusType.EventCancelled # u'cancelled'\n-#\n-# event.ggmIdentifier = self.get_original_object_id()\n-# event.ggmVrsNummer = self._xpath(\"string(vrsNummer)\")\n-# event.ggmNummer = self._xpath(\"string(nummer)\")\n-#\n-# # try:\n-# # combined_index_data['organization'] = committees[\n-# # self.original_item['dmu']['id']]\n-# # except KeyError:\n-# # pass\n-#\n-# # combined_index_data['organization_id'] = unicode(\n-# # self.original_item['dmu']['id'])\n-#\n-# combined_index_data['sources'] = [\n-# {\n-# 'url': current_permalink,\n-# 'note': u''\n-# }\n-# ]\n-#\n-# # if 'items' in self.original_item:\n-# # combined_index_data['children'] = [\n-# # self.get_meeting_id(mi) for mi in self.original_item['items']]\n-#\n-# combined_index_data['sources'] = []\n-#\n-# return event\n-#\n-#\n-# class MotionItem(GegevensmagazijnBaseItem):\n-# def get_combined_index_data(self):\n-# motion = Motion(self.get_original_object_id())\n-# motion.name = self._xpath(\"string(onderwerp)\")\n-# motion.voteEvent = [\n-# VoteEvent(self._xpath(\"string(@id)\"))\n-# ]\n-#\n-# # log.debug(\"Motion orig:%s id:%s\" % (self._xpath(\"string(@id)\"),\n-# # VoteEvent.get_object_id(\n-# # self._xpath(\"string(@id)\"))))\n-#\n-# return motion\n-#\n-#\n-# class ZaakMotionItem(GegevensmagazijnBaseItem):\n-# def _get_vote_event(self, object_id):\n-# results = self.api_request_object(\n-# self.source_definition['index_name'], \"vote_events\", object_id)\n-# return results\n-#\n-# def get_combined_index_data(self):\n-# motion = None\n-#\n-# soort = self._xpath(\"string(soort)\")\n-# if soort == \"Motie\":\n-# motion = 
Motion(self.get_original_object_id())\n-# elif soort == \"Amendement\":\n-# motion = Amendment(self.get_original_object_id())\n-# elif soort == \"Wetgeving\":\n-# motion = Bill(self.get_original_object_id())\n-# elif soort == \"Initiatiefwetgeving\":\n-# motion = PrivateMembersBill(self.get_original_object_id())\n-# elif soort == \"Verzoekschrift\":\n-# motion = Petition(self.get_original_object_id())\n-#\n-# motion.name = self._xpath(\"string(onderwerp)\")\n-# motion.dateSubmitted = iso8601.parse_date(self._xpath(\"string(gestartOp)\"))\n-# motion.publisher = Organization(self._xpath(\"string(organisatie)\"))\n-# motion.creator = Person(self._xpath(\"string(indiener/persoon/@ref)\"))\n-# motion.voteEvent = [VoteEvent(v) for v in self._xpath(\"besluit/@ref\")]\n-# motion.cocreator = [Person(p) for p in self._xpath(\"string(medeindiener/persoon/@ref)\")]\n-# motion.ggmNummer = self._xpath(\"string(nummer)\")\n-# motion.ggmVolgnummer = self._xpath(\"string(volgnummer)\")\n-# # combined_index_data['concluded'] = self._xpath(\"boolean(afgedaan)\")\n-#\n-# return motion\n-#\n-#\n-# class VoteEventItem(GegevensmagazijnBaseItem):\n-# def get_combined_index_data(self):\n-# vote_event = VoteEvent(self.get_original_object_id())\n-# vote_event.motion = Motion(self._xpath(\"string(@id)\"))\n-#\n-# # combined_index_data[VoteEvent.identifier] = unicode(\n-# # self._xpath(\"string(../volgorde)\"))\n-#\n-# result = re.sub(r'[^ a-z]', '', self._xpath(\"string(slottekst)\").lower())\n-# if result == \"aangenomen\":\n-# vote_event.result = Result.ResultPass\n-# elif result == \"verworpen\":\n-# vote_event.result = Result.ResultFail\n-# elif result == \"aangehouden\":\n-# vote_event.result = Result.ResultKept\n-# elif result == \"uitgesteld\":\n-# vote_event.result = Result.ResultPostponed\n-# elif result == \"ingetrokken\":\n-# vote_event.result = Result.ResultWithdrawn\n-# elif result == \"vervallen\":\n-# vote_event.result = Result.ResultExpired\n-# elif result == \"inbreng geleverd\":\n-# vote_event.result = Result.ResultDiscussed\n-# elif result == \"vrijgegeven\":\n-# vote_event.result = Result.ResultPublished\n-# else:\n-# log.warning(\"Result %s does not exists for: %s\" % (result, self.original_item))\n-#\n-# sub_events = self.source_definition['mapping']['vote_event']['sub_items']\n-# vote_event.count = [Count(c) for c in self._xpath(sub_events['count'] + \"/@id\")]\n-# vote_event.vote = [Vote(v) for v in self._xpath(sub_events['vote'] + \"/@id\")]\n-#\n-# return vote_event\n-#\n-#\n-# class CountItem(GegevensmagazijnBaseItem):\n-# def get_combined_index_data(self):\n-# count = None\n-#\n-# soort = unicode(self._xpath(\"string(soort)\"))\n-# if soort == \"Voor\":\n-# count = YesCount(self.get_original_object_id())\n-# elif soort == \"Tegen\":\n-# count = NoCount(self.get_original_object_id())\n-# elif soort == \"Onthouding\":\n-# count = AbstainCount(self.get_original_object_id())\n-# elif soort == \"Niet deelgenomen\":\n-# count = AbsentCount(self.get_original_object_id())\n-#\n-# # count.group = self._get_party(self._xpath(\"string(fractie/@ref)\"))\n-# count.group = Organization(self._xpath(\"string(fractie/@ref)\"))\n-# count.value = self._xpath(\"number(fractieGrootte)\")\n-#\n-# return count\n-#\n-#\n-# class VoteItem(GegevensmagazijnBaseItem):\n-# def get_combined_index_data(self):\n-# vote = Vote(self.get_original_object_id())\n-#\n-# soort = unicode(self._xpath(\"string(soort)\"))\n-# if soort == \"Voor\":\n-# vote.Vote = VoteOption.VoteOptionYes\n-# elif soort == \"Tegen\":\n-# 
combined_index_data[TYPE] = VoteOption.VoteOptionNo\n-# elif soort == \"Onthouding\":\n-# combined_index_data[TYPE] = VoteOption.VoteOptionAbstain\n-# elif soort == \"Niet deelgenomen\":\n-# combined_index_data[TYPE] = VoteOption.VoteOptionAbsent\n-#\n-# # person = self._get_person(self._xpath(\"string(persoon/@ref)\"))\n-# # if person:\n-# # person.update({\"@context\": PersonItem.ld_context, \"@type\":\n-# # \"http://www.w3.org/ns/person#Person\"})\n-# # combined_index_data['group'] = person\n-#\n-# return combined_index_data\n-#\n-#\n-# class PersonItem(GegevensmagazijnBaseItem):\n-# def get_combined_index_data(self):\n-# combined_index_data = dict()\n-#\n-# combined_index_data[CONTEXT] = context\n-# combined_index_data[ID] = unicode(\n-# self.get_object_id()) # unicode(self.get_object_iri())\n-# combined_index_data[TYPE] = Person.type\n-# combined_index_data[HIDDEN] = self.source_definition['hidden']\n-#\n-# # combined_index_data['honorific_prefix'] = unicode(self._xpath(\"string(/persoon/titels)\"))\n-# combined_index_data[Person.familyName] = unicode(\n-# self._xpath(\"string(/persoon/achternaam)\"))\n-# combined_index_data[Person.givenName] = unicode(\n-# self._xpath(\"string(/persoon/voornamen)\"))\n-# combined_index_data[Person.name] = u' '.join([x for x in [\n-# self._xpath(\"string(/persoon/roepnaam)\"),\n-# self._xpath(\"string(/persoon/tussenvoegsel)\"),\n-# combined_index_data[Person.familyName]] if x != ''])\n-#\n-# gender = self._xpath(\"string(/persoon/geslacht)\")\n-# if gender == 'man':\n-# combined_index_data[Person.gender] = u\"male\"\n-# elif gender == 'vrouw':\n-# combined_index_data[Person.gender] = u\"female\"\n-#\n-# if self._xpath(\"string(/persoon/geboortdatum)\"):\n-# combined_index_data[Person.birthDate] = iso8601.parse_date(\n-# self._xpath(\"string(/persoon/geboortdatum)\"))\n-# if self._xpath(\"string(/persoon/overlijdensdatum)\"):\n-# combined_index_data[Person.deathDate] = iso8601.parse_date(\n-# self._xpath(\"string(/persoon/overlijdensdatum)\"))\n-#\n-# combined_index_data[Person.nationalIdentity] = unicode(\n-# self._xpath(\"string(/persoon/geboorteland)\"))\n-# combined_index_data[Person.email] = unicode(self._xpath(\n-# \"string(/persoon/contactinformatie[soort='E-mail']/waarde)\"))\n-# combined_index_data[Person.seeAlso] = [unicode(self._xpath(\n-# \"string(/persoon/contactinformatie[soort='Website']/waarde)\"))]\n-#\n-# image = self._xpath(\"string(/persoon/afbeelding/@ref)\")\n-# if image:\n-# permalink = self._get_resource_permalink(image)\n-# hashed_url = sha1(permalink).hexdigest()\n-# combined_index_data['media_urls'] = [\n-# {\n-# \"url\": \"/v0/resolve/%s\" % hashed_url,\n-# \"original_url\": permalink\n-# }\n-# ]\n-#\n-# combined_index_data[ggmIdentifier] = self.get_original_object_id()\n-# combined_index_data[oriIdentifier] = self.get_object_id()\n-#\n-# # combined_index_data['memberships'] = [\n-# # {\n-# # 'label': role,\n-# # 'role': role,\n-# # 'person_id': combined_index_data['id'],\n-# # 'organization_id': council_id,\n-# # 'organization': council_obj\n-# # }\n-# # ]\n-# return combined_index_data\n-#\n-#\n-# class DocumentItem(GegevensmagazijnBaseItem):\n-# def get_combined_index_data(self):\n-# combined_index_data = dict()\n-#\n-# combined_index_data[CONTEXT] = context\n-# combined_index_data[ID] = unicode(\n-# self.get_object_id()) # unicode(self.get_object_iri())\n-# combined_index_data[TYPE] = Attachment.type\n-# combined_index_data[HIDDEN] = self.source_definition['hidden']\n-#\n-# document = 
unicode(self._xpath(\"string(bestand/@ref)\"))\n-# if document:\n-# combined_index_data['media_urls'] = [\n-# {\n-# \"url\": \"/v0/resolve/\",\n-# \"note\": \"\",\n-# \"original_url\": self._get_resource_permalink(document)\n-# }\n-# ]\n-#\n-# return combined_index_data\ndiff --git a/ocd_backend/items/go_committee.py b/ocd_backend/items/go_committee.py\ndeleted file mode 100644\n--- a/ocd_backend/items/go_committee.py\n+++ /dev/null\n@@ -1,59 +0,0 @@\n-# todo needs revision v1\n-# from ocd_backend.items.popolo import OrganisationItem\n-#\n-# from ocd_backend.utils.misc import slugify\n-#\n-#\n-# class CommitteeItem(OrganisationItem):\n-# def get_object_id(self):\n-# return slugify(unicode(self.original_item['name']).strip())\n-#\n-# def get_original_object_id(self):\n-# return unicode(self.original_item['name']).strip()\n-#\n-# def get_original_object_urls(self):\n-# return {\"html\": self.original_item['archive']}\n-#\n-# @staticmethod\n-# def get_rights():\n-# return u'undefined'\n-#\n-# def get_collection(self):\n-# return unicode(self.source_definition['index_name'])\n-#\n-# def get_object_model(self):\n-# object_model = dict()\n-#\n-# object_model['id'] = unicode(self.get_object_id())\n-#\n-# object_model['hidden'] = self.source_definition['hidden']\n-# object_model['name'] = unicode(\n-# self.original_item['name'])\n-# object_model['identifiers'] = [\n-# {\n-# 'identifier': self.get_object_id(),\n-# 'scheme': u'ORI'\n-# },\n-# {\n-# 'identifier': unicode(self.original_item['name']),\n-# 'scheme': u'GemeenteOplossingen'\n-# }\n-# ]\n-# if 'sub' in self.original_item['name']:\n-# classification = u'subcommittee'\n-# else:\n-# classification = u'committee'\n-# object_model['classification'] = classification\n-# object_model['description'] = object_model['name']\n-#\n-# return object_model\n-#\n-# @staticmethod\n-# def get_index_data():\n-# return {}\n-#\n-# @staticmethod\n-# def get_all_text():\n-# text_items = []\n-#\n-# return u' '.join(text_items)\ndiff --git a/ocd_backend/items/go_meeting.py b/ocd_backend/items/go_meeting.py\ndeleted file mode 100644\n--- a/ocd_backend/items/go_meeting.py\n+++ /dev/null\n@@ -1,310 +0,0 @@\n-# todo needs revision v1\n-# import re\n-# import urlparse\n-# from hashlib import sha1\n-#\n-# import iso8601\n-# from lxml import etree\n-# from ocd_backend.items import BaseItem\n-# from ocd_backend.extractors import HttpRequestMixin\n-# from ocd_backend.log import get_source_logger\n-# from ocd_backend.utils.api import FrontendAPIMixin\n-# from ocd_backend.utils.file_parsing import FileToTextMixin\n-#\n-# log = get_source_logger('item')\n-#\n-#\n-# class MeetingItem(BaseItem, HttpRequestMixin, FrontendAPIMixin, FileToTextMixin):\n-# @property\n-# def html(self):\n-# _old_html = getattr(self, '_html', None)\n-#\n-# if _old_html is not None:\n-# return _old_html\n-#\n-# self._html = etree.HTML(self.original_item['content'])\n-# return self._html\n-#\n-# @property\n-# def full_html(self):\n-# _old_html = getattr(self, '_full_html', None)\n-#\n-# if _old_html is not None:\n-# return _old_html\n-#\n-# self._full_html = etree.HTML(self.original_item['full_content'])\n-# return self._full_html\n-#\n-# def _get_council(self):\n-# \"\"\"\n-# Gets the organisation that represents the council.\n-# \"\"\"\n-#\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'organizations',\n-# classification='Council')\n-# return results[0]\n-#\n-# @staticmethod\n-# def _find_meeting_type_id(org):\n-# results = [x for x in org['identifiers'] if x['scheme'] == 
u'GemeenteOplossingen']\n-# return results[0]['identifier']\n-#\n-# def _get_committees(self):\n-# \"\"\"\n-# Gets the committees that are active for the council.\n-# \"\"\"\n-#\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'organizations',\n-# classification=['committee', 'subcommittee'])\n-# return {self._find_meeting_type_id(c): c for c in results}\n-#\n-# @staticmethod\n-# def _convert_date(date_str):\n-# month_names2int = {\n-# u'januari': u'01',\n-# u'februari': u'02',\n-# u'maart': u'03',\n-# u'april': u'04',\n-# u'mei': u'05',\n-# u'juni': u'06',\n-# u'juli': u'07',\n-# u'augustus': u'08',\n-# u'september': u'09',\n-# u'oktober': u'10',\n-# u'november': u'11',\n-# u'december': u'12',\n-# }\n-# output = date_str\n-# for k, v in month_names2int.iteritems():\n-# output = output.replace(k, v)\n-# parts = output.split(u' ')\n-# return u'%s-%s-%s' % (parts[2], parts[1], parts[0],)\n-#\n-# def _get_object_id_for(self, object_id, urls=None):\n-# \"\"\"Generates a new object ID which is used within OCD to identify\n-# the item.\n-#\n-# By default, we use a hash containing the id of the source, the\n-# original object id of the item (:meth:`~.get_original_object_id`)\n-# and the original urls (:meth:`~.get_original_object_urls`).\n-#\n-# :raises UnableToGenerateObjectId: when both the original object\n-# id and urls are missing.\n-# :rtype: str\n-# \"\"\"\n-#\n-# if urls is None:\n-# urls = {}\n-# if not object_id and not urls:\n-# raise UnableToGenerateObjectId('Both original id and urls missing')\n-#\n-# hash_content = self.source_definition['id'] + object_id # + u''.join(sorted(urls.values()))\n-#\n-# return sha1(hash_content.decode('utf8')).hexdigest()\n-#\n-# def _get_meeting_item_id(self):\n-# return self.html.xpath('.//@id')[0].replace(u'agendapunt', '').split(u'_')[0]\n-#\n-# def _get_meeting_item_documents_url(self):\n-# return u'%s/modules/risbis/risbis.php?g=get_docs_for_ag&agendapunt_object_id=%s' % (\n-# self.source_definition['base_url'], self._get_meeting_item_id(),)\n-#\n-# def _get_documents_html_for_item(self):\n-# url = self._get_meeting_item_documents_url()\n-#\n-# resp = self.http_session.get(url)\n-# if resp.status_code == 200:\n-# return resp.content\n-#\n-# return u''\n-#\n-# def _get_current_permalink(self, include_hash=True):\n-# \"\"\"\n-# GemeenteOplossing has instable perma links -- The URLs change when\n-# the time of the meeting is changed. This accounts for it and returns\n-# a the current permalink that is used externally.\n-# \"\"\"\n-#\n-# permalink = u''.join(\n-# self.full_html.xpath('//meta[@property=\"og:url\"]/@content')).strip()\n-# if self.original_item['type'] == 'meeting':\n-# return permalink\n-# else:\n-# if include_hash:\n-# return u'%s#%s' % (permalink, self.html.xpath('.//@id')[0],)\n-# else:\n-# return permalink\n-#\n-# def _get_stable_permalink(self, include_hash=True):\n-# \"\"\"\n-# GemeenteOplossing has instable perma links -- The URLs change when\n-# the time of the meeting is changed. 
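As an aside for reviewers: the deleted _convert_date helper above maps Dutch month names to zero-padded numbers and reorders the day/month/year parts. A minimal standalone sketch of the same idea (the function name and test value are illustrative, the month table is taken from the deleted code):

    MONTHS = {
        u'januari': u'01', u'februari': u'02', u'maart': u'03',
        u'april': u'04', u'mei': u'05', u'juni': u'06',
        u'juli': u'07', u'augustus': u'08', u'september': u'09',
        u'oktober': u'10', u'november': u'11', u'december': u'12',
    }

    def convert_dutch_date(date_str):
        # u'12 maart 2015' -> u'2015-03-12'; like the deleted helper,
        # the day is passed through as-is (not zero-padded).
        for name, number in MONTHS.items():
            date_str = date_str.replace(name, number)
        day, month, year = date_str.split(u' ')
        return u'%s-%s-%s' % (year, month, day)

    assert convert_dutch_date(u'12 maart 2015') == u'2015-03-12'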
This accounts for it and returns\n-# a stable permalink that is used internally.\n-# \"\"\"\n-#\n-# permalink = self._get_current_permalink(include_hash)\n-# return re.sub(r'/\\d{2}:\\d{2}/', u'/00:00/', permalink)\n-#\n-# def get_object_id(self):\n-# hash_content = self.source_definition['id'] + self.get_original_object_id()\n-# return sha1(hash_content.decode('utf8')).hexdigest()\n-#\n-# def get_original_object_id(self):\n-# return self._get_stable_permalink()\n-#\n-# def get_original_object_urls(self):\n-# # FIXME: what to do when there is not an original URL?\n-# return {\"html\": self._get_current_permalink()}\n-#\n-# @staticmethod\n-# def get_rights():\n-# return u'undefined'\n-#\n-# def get_collection(self):\n-# return unicode(self.source_definition['index_name'])\n-#\n-# def get_object_model(self):\n-# object_model = {}\n-#\n-# council = self._get_council()\n-# committees = self._get_committees()\n-#\n-# object_model['id'] = unicode(self.get_object_id())\n-#\n-# object_model['hidden'] = self.source_definition['hidden']\n-#\n-# if self.original_item['type'] == 'meeting':\n-# object_model['name'] = u''.join(\n-# self.full_html.xpath('//title/text()')).strip()\n-# object_model['classification'] = u'Agenda'\n-# else:\n-# meeting_item_index = (\n-# u''.join(self.html.xpath('.//div[@class=\"first\"]//text()')).strip())\n-# object_model['name'] = u'%s. %s' % (\n-# meeting_item_index,\n-# u''.join(self.html.xpath('.//div[@class=\"title\"]/h3//text()')).strip(),\n-# )\n-# object_model['classification'] = u'Agendapunt'\n-#\n-# object_model['identifiers'] = [\n-# {\n-# 'identifier': unicode(self._get_current_permalink()),\n-# 'scheme': u'GemeenteOplossingen'\n-# },\n-# {\n-# 'identifier': self.get_object_id(),\n-# 'scheme': u'ORI'\n-# }\n-# ]\n-#\n-# organisation_name = u''.join(\n-# self.full_html.xpath('//h2[@class=\"page_title\"]/span//text()'))\n-# try:\n-# object_model['organization_id'] = committees[\n-# organisation_name]['id']\n-# object_model['organization'] = committees[\n-# organisation_name]\n-# except KeyError as e:\n-# object_model['organization_id'] = council['id']\n-# object_model['organization'] = council\n-#\n-# if self.original_item['type'] != 'meeting':\n-# object_model['description'] = u''.join(self.html.xpath('.//div[@class=\"toelichting\"]//text()'), )\n-# else:\n-# object_model['description'] = u''\n-#\n-# meeting_date = u''.join(\n-# self.full_html.xpath('//span[@class=\"date\"]//text()')).strip()\n-# meeting_time = u''.join(\n-# self.full_html.xpath('//span[@class=\"time\"]//text()')).strip()\n-#\n-# object_model['start_date'] = iso8601.parse_date(u'%sT%s:00Z' % (\n-# self._convert_date(meeting_date), meeting_time,))\n-# object_model['end_date'] = object_model['start_date']\n-#\n-# object_model['location'] = u'Gemeentehuis'\n-# object_model['status'] = u'confirmed'\n-# object_model['sources'] = [\n-# {\n-# 'url': self._get_current_permalink(),\n-# 'note': u''\n-# }\n-# ]\n-#\n-# if self.original_item['type'] != 'meeting':\n-# parent_url = self._get_stable_permalink(False)\n-# log.debug(parent_url)\n-# object_model['parent_id'] = unicode(self._get_object_id_for(\n-# parent_url, {\"html\": parent_url}))\n-#\n-# # FIXME: in order to get the documents for an meeting item you\n-# # need to do a separate request:\n-# # https://gemeenteraad.denhelder.nl/modules/risbis/risbis.php?g=get_docs_for_ag&agendapunt_object_id=19110\n-#\n-# if len(self.html.xpath('.//a[contains(@class, \"bijlage_true\")]')) > 0:\n-# docs_contents = self._get_documents_html_for_item()\n-# if 
docs_contents:\n-# docs_html = etree.HTML(docs_contents)\n-# else:\n-# docs_html = etree.HTML('
    ')\n-#\n-# for doc in docs_html.xpath('//li/a'):\n-# doc_url = u''.join(doc.xpath('.//@href')).strip()\n-# if not doc_url.startswith('http'):\n-# doc_url = u'%s%s' % (self.source_definition['base_url'], doc_url,)\n-# doc_note = u''.join(doc.xpath('.//text()')).strip()\n-# if doc_note != u'notitie':\n-# object_model['sources'].append({\n-# 'url': doc_url,\n-# 'note': doc_note\n-# })\n-# else:\n-# object_model['children'] = [\n-# unicode(self._get_object_id_for(\n-# i, {\"html\": i}\n-# )) for i in self.full_html.xpath(\n-# '//li[contains(@class, \"agendaRow\")]/div[@class=\"first\"]/a/@href'\n-# )]\n-#\n-# base_url = u''.join(\n-# self.full_html.xpath('//meta[@property=\"og:url\"]/@content')).strip()\n-#\n-# for doc in self.full_html.xpath('//div[@id=\"documenten\"]//li/a'):\n-# doc_url = urlparse.urljoin(\n-# base_url, u''.join(doc.xpath('.//@href')).strip())\n-# doc_note = u''.join(doc.xpath('.//text()')).strip()\n-# if doc_note != u'notitie':\n-# object_model['sources'].append({\n-# 'url': doc_url,\n-# 'note': doc_note\n-# })\n-#\n-# for doc in self.full_html.xpath('//div[@id=\"downloaden\"]/ul//li/a'):\n-# doc_url = urlparse.urljoin(\n-# base_url, u''.join(doc.xpath('.//@href')).strip())\n-# doc_note = u''.join(doc.xpath('.//text()')).strip()\n-# if doc_note != u'notitie':\n-# object_model['sources'].append({\n-# 'url': doc_url,\n-# 'note': doc_note\n-# })\n-#\n-# for source in object_model['sources']:\n-# if not source['url'].lower().endswith('.pdf'):\n-# continue\n-# source['description'] = self.file_get_contents(\n-# source['url'], self.source_definition.get('pdf_max_pages', 20))\n-#\n-# return object_model\n-#\n-# @staticmethod\n-# def get_index_data():\n-# return {}\n-#\n-# @staticmethod\n-# def get_all_text():\n-# text_items = []\n-#\n-# return u' '.join(text_items)\ndiff --git a/ocd_backend/items/go_report.py b/ocd_backend/items/go_report.py\ndeleted file mode 100644\n--- a/ocd_backend/items/go_report.py\n+++ /dev/null\n@@ -1,64 +0,0 @@\n-# todo needs revision v1\n-# import urlparse\n-#\n-# from .go_meeting import MeetingItem\n-#\n-#\n-# class ReportItem(MeetingItem):\n-# def _get_report_document(self):\n-# for item in self.full_html.xpath('//div[@id=\"downloaden\"]//li/a[1]'):\n-# anchor = u''.join(item.xpath('./@href')).strip()\n-# if u'mp3' in anchor:\n-# return anchor\n-#\n-# def _get_mp3_link(self):\n-# anchor = self._get_report_document()\n-# if anchor is not None:\n-# og_url = u''.join(\n-# self.full_html.xpath('//meta[@property=\"og:url\"]/@content')).strip()\n-# return urlparse.urljoin(og_url, anchor)\n-#\n-# def _get_meeting_items(self):\n-# items = []\n-# for meeting_item_html in self.full_html.xpath(\n-# '//li[contains(@class, \"agendaRow\")]'):\n-# meeting_item_obj = {\n-# 'index': u''.join(meeting_item_html.xpath('.//div[@class=\"first\"]//text()')).strip(),\n-# 'title': u''.join(meeting_item_html.xpath('.//div[@class=\"title\"]/h3//text()')),\n-# 'description': u''.join(meeting_item_html.xpath('.//div[@class=\"toelichting\"]//text()')),\n-# }\n-# items.append(meeting_item_obj)\n-# return items\n-#\n-# def get_original_object_id(self):\n-# return u'%s#downloaden' % (self._get_stable_permalink(),)\n-#\n-# def get_original_object_urls(self):\n-# urls = {\n-# \"html\": u'%s#downloaden' % (self._get_current_permalink(),)\n-# }\n-# mp3_link = self._get_mp3_link()\n-# if mp3_link is not None:\n-# urls['mp3'] = mp3_link\n-# return urls\n-#\n-# def get_collection(self):\n-# return unicode(self.source_definition['index_name'])\n-#\n-# def get_object_model(self):\n-# 
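The deleted GemeenteOplossingen items repeatedly resolve document hrefs against the page's og:url meta value. A minimal sketch of that resolution step, with made-up example URLs (Python 2, like the surrounding code):

    import urlparse  # urllib.parse in Python 3

    og_url = 'https://gemeenteraad.denhelder.nl/Vergaderingen/2015/12/'
    href = 'download/notulen.mp3'

    # Relative links become absolute against the page URL.
    mp3_url = urlparse.urljoin(og_url, href)
    # -> 'https://gemeenteraad.denhelder.nl/Vergaderingen/2015/12/download/notulen.mp3'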
object_model = super(ReportItem, self).get_object_model()\n-#\n-# object_model['name'] = u'Verslag %s' % (object_model['name'],)\n-#\n-# object_model['classification'] = u'Verslag'\n-#\n-# # TODO: get all the descriptive content?\n-# items = self._get_meeting_items()\n-# object_model['description'] = u'\\n'.join(\n-# [u\"%s. %s\\n\\n%s\" % (i['index'], i['title'], i['description'],) for i in items])\n-#\n-# for identifier in object_model['identifiers']:\n-# if identifier['scheme'] == u'GemeenteOplossingen':\n-# identifier['identifier'] += '#downloaden'\n-#\n-# return object_model\ndiff --git a/ocd_backend/items/go_resolution.py b/ocd_backend/items/go_resolution.py\ndeleted file mode 100644\n--- a/ocd_backend/items/go_resolution.py\n+++ /dev/null\n@@ -1,47 +0,0 @@\n-# todo needs revision v1\n-# import urlparse\n-#\n-# from .go_meeting import MeetingItem\n-#\n-#\n-# class ResolutionItem(MeetingItem):\n-# def _get_resolution_document(self):\n-#\n-# for item in self.full_html.xpath('//div[@id=\"documenten\"]//li/a[1]'):\n-# anchor = u''.join(item.xpath('./@href')).strip()\n-# if u'Besluitenlijst' in anchor:\n-# return anchor\n-#\n-# def _get_pdf_link(self):\n-# og_url = u''.join(\n-# self.full_html.xpath('//meta[@property=\"og:url\"]/@content')).strip()\n-# return urlparse.urljoin(og_url, self._get_resolution_document())\n-#\n-# def get_original_object_id(self):\n-# return u'%s#documenten' % (self._get_stable_permalink(),)\n-#\n-# def get_original_object_urls(self):\n-# return {\n-# \"html\": u'%s#documenten' % (self._get_current_permalink(),),\n-# \"pdf\": self._get_pdf_link()\n-# }\n-#\n-# def get_collection(self):\n-# return unicode(self.source_definition['index_name'])\n-#\n-# def get_object_model(self):\n-# object_model = super(ResolutionItem, self).get_object_model()\n-#\n-# object_model['name'] = u'Besluitenlijst %s' % (object_model['name'],)\n-#\n-# object_model['classification'] = u'Besluitenlijst'\n-#\n-# object_model['description'] = self.file_get_contents(\n-# self.get_original_object_urls()['pdf'],\n-# self.source_definition.get('pdf_max_pages', 20))\n-#\n-# for identifier in object_model['identifiers']:\n-# if identifier['scheme'] == u'GemeenteOplossingen':\n-# identifier['identifier'] += '#documenten'\n-#\n-# return object_model\ndiff --git a/ocd_backend/items/goapi_committee.py b/ocd_backend/items/goapi_committee.py\ndeleted file mode 100644\n--- a/ocd_backend/items/goapi_committee.py\n+++ /dev/null\n@@ -1,30 +0,0 @@\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import *\n-\n-\n-class CommitteeItem(BaseItem):\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'gemeenteoplossingen',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['index_name'],\n- }\n-\n- committee = Organization(self.original_item['id'], **source_defaults)\n- committee.name = self.original_item['name']\n- if self.original_item['name'] == 'Gemeenteraad':\n- committee.classification = 'Council'\n- else:\n- committee.classification = 'Committee'\n-\n- # Attach the committee node to the municipality node\n- committee.subOrganizationOf = Organization(self.source_definition['key'], **source_defaults)\n- committee.subOrganizationOf.merge(collection=self.source_definition['key'])\n-\n- return committee\ndiff --git a/ocd_backend/items/goapi_document.py b/ocd_backend/items/goapi_document.py\ndeleted file mode 100644\n--- 
a/ocd_backend/items/goapi_document.py\n+++ /dev/null\n@@ -1,92 +0,0 @@\n-from datetime import datetime\n-from hashlib import sha1\n-from pprint import pprint\n-\n-import iso8601\n-import pytz\n-\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import *\n-from ocd_backend.log import get_source_logger\n-\n-log = get_source_logger('goapi_meeting')\n-\n-\n-class GemeenteOplossingenDocument(BaseItem):\n- def _get_current_permalink(self):\n- api_version = self.source_definition.get('api_version', 'v1')\n- base_url = '%s/%s' % (\n- self.source_definition['base_url'], api_version,)\n-\n- return u'%s/documents/%i' % (base_url, self.original_item[u'id'],)\n-\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def _get_documents_as_media_urls(self, documents):\n- current_permalink = self._get_current_permalink()\n-\n- output = []\n- for document in documents:\n- # sleep(1)\n- url = current_permalink\n- output.append({\n- 'url': url,\n- 'note': document[u'filename']})\n- return output\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'gemeenteoplossingen',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- event = Meeting(self.original_item[u'id'], **source_defaults)\n-\n- try:\n- date_tz = pytz.timezone(\n- self.original_item['publicationDate']['timezone'])\n- except Exception:\n- date_tz = None\n- start_date = iso8601.parse_date(\n- self.original_item['publicationDate']['date'].replace(' ', 'T'))\n- if date_tz is not None:\n- try:\n- start_date = start_date.astimezone(date_tz)\n- except Exception:\n- pass\n-\n- event.start_date = start_date\n- event.end_date = event.start_date # ?\n-\n- event.name = self.original_item[u'description']\n-\n- event.classification = [self.original_item['documentTypeLabel']]\n- event.description = self.original_item[u'description']\n-\n- # object_model['last_modified'] = iso8601.parse_date(\n- # self.original_item['last_modified'])\n-\n- # if self.original_item['canceled']:\n- # event.status = EventCancelled()\n- # elif self.original_item['inactive']:\n- # event.status = EventUnconfirmed()\n- # else:\n- # event.status = EventConfirmed()\n- event.status = EventConfirmed()\n-\n- event.attachment = []\n- for doc in self._get_documents_as_media_urls(\n- self.original_item.get('documents', [])\n- ):\n- attachment = MediaObject(doc['url'], **source_defaults)\n- attachment.identifier_url = doc['url'] # Trick to use the self url for enrichment\n- attachment.original_url = doc['url']\n- attachment.name = doc['note']\n- event.attachment.append(attachment)\n-\n- return event\ndiff --git a/ocd_backend/items/goapi_meeting.py b/ocd_backend/items/goapi_meeting.py\ndeleted file mode 100644\n--- a/ocd_backend/items/goapi_meeting.py\n+++ /dev/null\n@@ -1,129 +0,0 @@\n-import iso8601\n-\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import *\n-from ocd_backend.log import get_source_logger\n-\n-log = get_source_logger('goapi_meeting')\n-\n-\n-class GemeenteOplossingenMeetingItem(BaseItem):\n- def _get_current_permalink(self):\n- api_version = self.source_definition.get('api_version', 'v1')\n- base_url = '%s/%s' % (\n- self.source_definition['base_url'], api_version,)\n-\n- return u'%s/meetings/%i' % (base_url, self.original_item[u'id'],)\n-\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def 
_get_documents_as_media_urls(self, documents):\n- current_permalink = self._get_current_permalink()\n-\n- output = []\n- for document in documents:\n- # sleep(1)\n- url = u\"%s/documents/%s\" % (current_permalink, document['id'])\n- output.append({\n- 'url': url,\n- 'note': document[u'filename']})\n- return output\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'gemeenteoplossingen',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- print(source_defaults)\n- event = Meeting(self.original_item[u'id'], **source_defaults)\n-\n- # dates in v1 have a time in them and in v2 they don't\n- if ':' in self.original_item['date']:\n- start_date = self.original_item['date']\n- else:\n- start_date = \"%sT%s:00\" % (\n- self.original_item['date'],\n- self.original_item.get('startTime', '00:00',))\n-\n- event.start_date = iso8601.parse_date(start_date)\n- event.end_date = event.start_date # ?\n-\n- # Some meetings are missing a name...\n- event.name = self.original_item[u'description'] or 'None'\n-\n- event.classification = [u'Agenda']\n- event.description = self.original_item[u'description']\n-\n- try:\n- event.location = self.original_item[u'location'].strip()\n- except (AttributeError, KeyError):\n- pass\n-\n- # Attach the meeting to the municipality node\n- event.organization = Organization(self.source_definition['key'], **source_defaults)\n- event.organization.merge(collection=self.source_definition['key'])\n-\n- # Attach the meeting to the committee node. GO always lists either the name of the committee or 'Raad'\n- # if it is a non-committee meeting so we can attach it to a committee node without any extra checks\n- # as opposed to iBabs\n- event.committee = Organization(self.original_item[u'dmu'][u'id'], **source_defaults)\n- # Re-attach the committee node to the municipality node\n- # TODO: Why does the committee node get detached from the municipality node when meetings are attached to it?\n- event.committee.subOrganizationOf = Organization(self.source_definition['key'], **source_defaults)\n- event.committee.subOrganizationOf.merge(collection=self.source_definition['key'])\n-\n- # object_model['last_modified'] = iso8601.parse_date(\n- # self.original_item['last_modified'])\n-\n- # TODO: This is untested so we log any cases that are not the default\n- if 'canceled' in self.original_item and self.original_item['canceled']:\n- log.info('Found a GOAPI event with status EventCancelled: %s' % str(event.values))\n- event.status = EventCancelled()\n- elif 'inactive' in self.original_item and self.original_item['inactive']:\n- log.info('Found a GOAPI event with status EventUnconmfirmed: %s' % str(event.values))\n- event.status = EventUnconfirmed()\n- else:\n- event.status = EventConfirmed()\n-\n- event.agenda = []\n- for item in self.original_item.get('items', []):\n- if not item['sortorder']:\n- continue\n-\n- agendaitem = AgendaItem(item['id'], **source_defaults)\n- agendaitem.__rel_params__ = {'rdf': '_%i' % item['sortorder']}\n- agendaitem.description = item['description']\n- agendaitem.name = '%s: %s' % (item['number'], item['title'],)\n- agendaitem.position = item['sortorder']\n- agendaitem.parent = event\n- agendaitem.start_date = event.start_date\n- agendaitem.attachment = []\n-\n- for doc in self._get_documents_as_media_urls(\n- item.get('documents', [])\n- ):\n- attachment = MediaObject(doc['url'], **source_defaults)\n- attachment.identifier_url = doc['url'] # Trick to use the self url for enrichment\n- attachment.original_url = 
doc['url']\n- attachment.name = doc['note']\n- agendaitem.attachment.append(attachment)\n-\n- event.agenda.append(agendaitem)\n-\n- event.attachment = []\n- for doc in self._get_documents_as_media_urls(\n- self.original_item.get('documents', [])\n- ):\n- attachment = MediaObject(doc['url'], **source_defaults)\n- attachment.identifier_url = doc['url'] # Trick to use the self url for enrichment\n- attachment.original_url = doc['url']\n- attachment.name = doc['note']\n- event.attachment.append(attachment)\n-\n- return event\ndiff --git a/ocd_backend/items/gv.py b/ocd_backend/items/gv.py\ndeleted file mode 100644\n--- a/ocd_backend/items/gv.py\n+++ /dev/null\n@@ -1,160 +0,0 @@\n-from datetime import datetime\n-from hashlib import sha1\n-# import iso8601\n-\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import *\n-\n-\n-class GreenValleyItem(BaseItem):\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def _get_documents_as_media_urls(self):\n- media_urls = {}\n- if u'attachmentlist' in self.original_item:\n- for att_key, att in self.original_item.get(u'attachmentlist', {}).iteritems():\n- if att[u'objecttype'] == 'AGENDAPAGE':\n- continue\n-\n- url = \"https://staten.zuid-holland.nl/dsresource?objectid=%s\" % (\n- att[u'objectid'].encode('utf8'),)\n-\n- doc_hash = unicode(\n- sha1(url + ':' + att[u'objectname'].encode('utf8')).hexdigest())\n- media_urls[doc_hash] = {\n- \"note\": att[u'objectname'],\n- \"original_url\": url\n- }\n- else:\n- default = self.original_item['default']\n- if default[u'objecttype'] != 'AGENDAPAGE':\n- url = \"https://staten.zuid-holland.nl/dsresource?objectid=%s\" % (\n- default[u'objectid'].encode('utf8'),)\n-\n- doc_hash = unicode(\n- sha1(url + ':' + default[u'objectname'].encode('utf8')).hexdigest()\n- )\n- media_urls[doc_hash] = {\n- \"note\": default[u'objectname'],\n- \"original_url\": url\n- }\n-\n- if media_urls:\n- return media_urls.values()\n- else:\n- return None\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'greenvalley',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- meeting = self.original_item[u'default']\n-\n- event = Meeting(meeting[u'objectid'], **source_defaults)\n-\n- if meeting.get(u'bis_vergaderdatum', u'').strip() != u'':\n- event.start_date = datetime.fromtimestamp(\n- float(meeting[u'bis_vergaderdatum']) +\n- (float(meeting.get(u'bis_starttijduren', '0') or '0') * 3600) +\n- (float(meeting.get(u'bis_starttijdminuten', '0') or '0') * 60))\n- event.end_date = datetime.fromtimestamp(\n- float(meeting[u'bis_vergaderdatum']) +\n- (float(meeting.get(u'bis_eindtijduren', '0') or '0') * 3600) +\n- (float(meeting.get(u'bis_eindtijdminuten', '0') or '0') * 60))\n- elif u'publishdate' in meeting:\n- event.start_date = datetime.fromtimestamp(\n- float(meeting[u'publishdate']))\n- event.end_date = datetime.fromtimestamp(\n- float(meeting[u'publishdate']))\n-\n- event.name = meeting[u'objectname']\n-\n- objecttype2classification = {\n- 'agenda': 'Agenda',\n- 'agendapage': 'Agendapunt',\n- 'bestuurlijkstuk': 'Bestuurlijk stuk',\n- 'notule': 'Verslag',\n- 'ingekomenstuk': 'Ingekomen stuk',\n- 'antwoordstuk': 'Antwoord' # ?\n- }\n- event.classification = [u'Agenda']\n- try:\n- event.classification = [unicode(\n- objecttype2classification[meeting[u'objecttype'].lower()])]\n- except LookupError:\n- event.classification = [unicode(\n- meeting[u'objecttype'].capitalize())]\n- 
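The GreenValley timestamps above are assembled from an epoch date plus separate hour and minute fields. A standalone sketch of that arithmetic, with made-up field values:

    from datetime import datetime

    meeting = {
        u'bis_vergaderdatum': u'1430258400',  # meeting day as epoch seconds
        u'bis_starttijduren': u'9',
        u'bis_starttijdminuten': u'30',
    }

    # The `or '0'` fallback mirrors the deleted code, which guards
    # against empty-string values in the source data.
    start = datetime.fromtimestamp(
        float(meeting[u'bis_vergaderdatum']) +
        float(meeting.get(u'bis_starttijduren', '0') or '0') * 3600 +
        float(meeting.get(u'bis_starttijdminuten', '0') or '0') * 60)
    # start: 09:30 on the meeting day, in the machine's local timezone
    # (datetime.fromtimestamp returns a naive local datetime here)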
event.description = meeting[u'objectname']\n-\n- try:\n- event.location = meeting[u'bis_locatie'].strip()\n- except (AttributeError, KeyError):\n- pass\n-\n- try:\n- event.organization = Organization(\n- meeting[u'bis_orgaan'], **source_defaults)\n- event.committee = Organization(\n- meeting[u'bis_orgaan'], **source_defaults)\n- except LookupError as e:\n- pass\n-\n- # object_model['last_modified'] = iso8601.parse_date(\n- # self.original_item['last_modified'])\n-\n- # if self.original_item['canceled']:\n- # event.status = EventCancelled()\n- # elif self.original_item['inactive']:\n- # event.status = EventUnconfirmed()\n- # else:\n- # event.status = EventConfirmed()\n- event.status = EventConfirmed()\n-\n- event.attachment = []\n- for doc in self._get_documents_as_media_urls():\n- attachment = MediaObject(doc['original_url'], **source_defaults)\n- attachment.identifier_url = doc['original_url'] # Trick to use the self url for enrichment\n- attachment.original_url = doc['original_url']\n- attachment.name = doc['note']\n- event.attachment.append(attachment)\n-\n- return event\n-\n-\n-class GreenValleyMeeting(GreenValleyItem):\n- def get_object_model(self):\n- event = super(GreenValleyMeeting, self).get_object_model()\n-\n- source_defaults = {\n- 'source': 'greenvalley',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- event.agenda = []\n-\n- children = []\n- for a, v in self.original_item.get(u'SETS', {}).iteritems():\n- if v[u'objecttype'].lower() == u'agendapage':\n- result = {u'default': v}\n- children.append(result)\n-\n- for item in children:\n- meeting = item[u'default']\n- agendaitem = AgendaItem(meeting['objectid'], **source_defaults)\n- agendaitem.__rel_params__ = {\n- 'rdf': '_%i' % int(meeting['agendapagenumber'])}\n- agendaitem.description = meeting[u'objectname']\n- agendaitem.name = meeting[u'objectname']\n- agendaitem.position = int(meeting['agendapagenumber'])\n-\n- event.agenda.append(agendaitem)\n-\n- return event\ndiff --git a/ocd_backend/items/ibabs_committee.py b/ocd_backend/items/ibabs_committee.py\ndeleted file mode 100644\n--- a/ocd_backend/items/ibabs_committee.py\n+++ /dev/null\n@@ -1,31 +0,0 @@\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import *\n-\n-\n-class CommitteeItem(BaseItem):\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'ibabs',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['index_name'],\n- }\n-\n- committee = Organization(self.original_item['Id'], **source_defaults)\n- committee.name = self.original_item['Meetingtype']\n- committee.description = self.original_item['Abbreviation']\n- if 'sub' in self.original_item['Meetingtype']:\n- committee.classification = u'Subcommittee'\n- else:\n- committee.classification = u'Committee'\n-\n- # Attach the committee node to the municipality node\n- committee.subOrganizationOf = Organization(self.source_definition['key'], **source_defaults)\n- committee.subOrganizationOf.merge(collection=self.source_definition['key'])\n-\n- return committee\ndiff --git a/ocd_backend/items/ibabs_meeting.py b/ocd_backend/items/ibabs_meeting.py\ndeleted file mode 100644\n--- a/ocd_backend/items/ibabs_meeting.py\n+++ /dev/null\n@@ -1,203 +0,0 @@\n-import re\n-\n-import iso8601\n-\n-from ocd_backend.items import BaseItem\n-from ocd_backend.log import get_source_logger\n-from 
ocd_backend.models import *\n-\n-log = get_source_logger('ibabs_meeting')\n-\n-\n-class IBabsMeetingItem(BaseItem):\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'ibabs',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['index_name'],\n- }\n-\n- # Sometimes the meeting is contained in a sub-dictionary called 'Meeting'\n- if 'Meeting' in self.original_item:\n- meeting = self.original_item['Meeting']\n- else:\n- meeting = self.original_item\n-\n- item = Meeting(meeting['Id'], **source_defaults)\n- item.name = meeting['Meetingtype']\n- item.chair = meeting['Chairman']\n- item.location = meeting['Location']\n- item.start_date = iso8601.parse_date(meeting['MeetingDate'], ).strftime(\"%s\")\n-\n- # TODO: This is untested so we log any cases that are not the default\n- if 'canceled' in meeting and meeting['canceled']:\n- log.info('Found an iBabs event with status EventCancelled: %s' % str(item.values))\n- item.status = EventCancelled()\n- elif 'inactive' in meeting and meeting['inactive']:\n- log.info('Found an iBabs event with status EventUnconfirmed: %s' % str(item.values))\n- item.status = EventUnconfirmed()\n- else:\n- item.status = EventConfirmed()\n-\n- # Attach the meeting to the municipality node\n- item.organization = Organization(self.source_definition['key'], **source_defaults)\n- item.organization.merge(collection=self.source_definition['key'])\n-\n- # Check if this is a committee meeting and if so connect it to the committee node. If it is\n- # not a committee meeting we attach it to the 'Gemeenteraad' committee node\n- committee_designator = self.source_definition.get('committee_designator', 'commissie')\n- if committee_designator in meeting['Meetingtype'].lower():\n- # Attach the meeting to the committee node\n- item.committee = Organization(meeting['MeetingtypeId'], **source_defaults)\n- item.committee.name = meeting['Meetingtype']\n- if 'sub' in meeting['MeetingtypeId']:\n- item.committee.classification = u'Subcommittee'\n- else:\n- item.committee.classification = u'Committee'\n- # Re-attach the committee node to the municipality node\n- # TODO: Why does the committee node get detached from the municipality node when meetings are attached to it?\n- item.committee.subOrganizationOf = Organization(self.source_definition['key'], **source_defaults)\n- item.committee.subOrganizationOf.merge(collection=self.source_definition['key'])\n- else:\n- # This is not a committee meeting, so attach it to the 'Gemeenteraad' committee node\n- item.committee = Organization('gemeenteraad', **source_defaults)\n- item.committee.name = 'Gemeenteraad'\n- item.committee.classification = 'Council'\n- item.committee.collection = self.source_definition['key'] + '-gemeenteraad'\n- item.committee.merge(collection=self.source_definition['key'] + '-gemeenteraad')\n- # Re-attach the 'Gemeenteraad' committee node to the municipality node\n- # TODO: Same problem as above\n- item.committee.subOrganizationOf = Organization(self.source_definition['key'], **source_defaults)\n- item.committee.subOrganizationOf.merge(collection=self.source_definition['key'])\n-\n- if 'MeetingItems' in meeting:\n- item.agenda = list()\n- for i, mi in enumerate(meeting['MeetingItems'] or [], start=1):\n- agenda_item = AgendaItem(mi['Id'], **source_defaults)\n- agenda_item.parent = item\n- agenda_item.name = mi['Title']\n- agenda_item.start_date = 
item.start_date\n- agenda_item.__rel_params__ = {'rdf': '_%i' % i}\n-\n- agenda_item.attachment = list()\n- for document in meeting['Documents'] or []:\n- attachment = MediaObject(document['Id'], **source_defaults)\n- attachment.identifier_url = 'ibabs/agenda_item/%s' % document['Id']\n- attachment.original_url = document['PublicDownloadURL']\n- attachment.size_in_bytes = document['FileSize']\n- attachment.name = document['DisplayName']\n- agenda_item.attachment.append(attachment)\n-\n- item.agenda.append(agenda_item)\n-\n- item.invitee = list()\n- for invitee in meeting['Invitees'] or []:\n- item.invitee.append(Person(invitee['UniqueId'],\n- **source_defaults))\n-\n- # Double check because sometimes 'EndTime' is in meeting but it is set to None\n- if 'EndTime' in meeting and meeting['EndTime']:\n- meeting_date, _, _ = meeting['MeetingDate'].partition('T')\n- meeting_datetime = '%sT%s:00' % (meeting_date, meeting['EndTime'])\n- item.end_date = iso8601.parse_date(meeting_datetime).strftime(\"%s\")\n- else:\n- item.end_date = iso8601.parse_date(meeting['MeetingDate'], ).strftime(\"%s\")\n-\n- item.attachment = list()\n- for document in meeting['Documents'] or []:\n- attachment = MediaObject(document['Id'], **source_defaults)\n- attachment.identifier_url = 'ibabs/meeting/%s' % document['Id']\n- attachment.original_url = document['PublicDownloadURL']\n- attachment.size_in_bytes = document['FileSize']\n- attachment.name = document['DisplayName']\n- item.attachment.append(attachment)\n-\n- return item\n-\n-\n-class IBabsReportItem(BaseItem):\n-\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'ibabs',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['index_name'],\n- }\n-\n- report = CreativeWork(self.original_item['id'][0], **source_defaults) # todo\n-\n- report_name = self.original_item['_ReportName'].split(r'\\s+')[0]\n- report.classification = u'Report'\n-\n- name_field = None\n- try:\n- name_field = self.source_definition['fields'][report_name]['name']\n- except KeyError:\n- for field in self.original_item.keys():\n- # Search for things that look like title\n- if field.lower()[0:3] == 'tit':\n- name_field = field\n- break\n-\n- id_for_field = '%sIds' % (field,)\n- if id_for_field in self.original_item and name_field is None:\n- name_field = field\n- break\n-\n- report.name = self.original_item[name_field][0]\n-\n- # Temporary binding reports to municipality as long as events and agendaitems are not\n- # referenced in the iBabs API\n- report.creator = Organization(self.source_definition['key'], **source_defaults)\n- report.creator.merge(collection=self.source_definition['key'])\n-\n- try:\n- name_field = self.source_definition['fields'][report_name]['description']\n- report.description = self.original_item[name_field][0]\n- except KeyError:\n- try:\n- report.description = self.original_item['_Extra']['Values']['Toelichting']\n- except KeyError:\n- pass\n-\n- try:\n- datum_field = self.source_definition['fields'][report_name]['start_date']\n- except KeyError:\n- datum_field = 'datum'\n-\n- datum = None\n- if datum_field in self.original_item:\n- if isinstance(self.original_item[datum_field], list):\n- datum = self.original_item[datum_field][0]\n- else:\n- datum = self.original_item[datum_field]\n-\n- if datum is not None:\n- # msgpack does not like microseconds for some reason.\n- # no biggie if we disregard it, 
though\n- report.start_date = iso8601.parse_date(re.sub(r'\\.\\d+\\+', '+', datum))\n- report.end_date = iso8601.parse_date(re.sub(r'\\.\\d+\\+', '+', datum))\n-\n- report.status = EventConfirmed()\n-\n- report.attachment = list()\n- for document in self.original_item['_Extra']['Documents'] or []:\n- attachment_file = MediaObject(document['Id'], **source_defaults)\n- attachment_file.original_url = document['PublicDownloadURL']\n- attachment_file.size_in_bytes = document['FileSize']\n- attachment_file.name = document['DisplayName']\n- report.attachment.append(attachment_file)\n-\n- return report\ndiff --git a/ocd_backend/items/ibabs_motion.py b/ocd_backend/items/ibabs_motion.py\ndeleted file mode 100644\n--- a/ocd_backend/items/ibabs_motion.py\n+++ /dev/null\n@@ -1,332 +0,0 @@\n-# todo needs revision v1\n-# import re\n-# from time import sleep\n-#\n-# import iso8601\n-# from ocd_backend.items.popolo import MotionItem, VotingEventItem\n-#\n-# from ocd_backend import settings\n-# from ocd_backend.extractors import HttpRequestMixin\n-# from ocd_backend.log import get_source_logger\n-# from ocd_backend.utils.api import FrontendAPIMixin\n-# from ocd_backend.utils.file_parsing import FileToTextMixin\n-# from ocd_backend.utils.misc import full_normalized_motion_id\n-#\n-# log = get_source_logger('item')\n-#\n-#\n-# class IBabsMotionVotingMixin(HttpRequestMixin, FrontendAPIMixin, FileToTextMixin):\n-# def _get_council(self):\n-# \"\"\"\n-# Gets the organisation that represents the council.\n-# \"\"\"\n-#\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'organizations',\n-# classification='Council')\n-# return results[0]\n-#\n-# def _get_council_members(self):\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'persons', size=100) # 100\n-# return results\n-#\n-# def _get_council_parties(self):\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'organizations',\n-# classification='Party', size=100) # 100 for now ...\n-# return results\n-#\n-# @staticmethod\n-# def _get_classification():\n-# return u'Moties'\n-#\n-# def _value(self, key):\n-# # log.debug(self.source_definition['fields']['Moties'])\n-# try:\n-# actual_key = self.source_definition[\n-# 'fields']['Moties']['Extra'][key]\n-# except KeyError:\n-# actual_key = key\n-# try:\n-# return self.original_item['_Extra']['Values'][actual_key]\n-# except KeyError:\n-# return None\n-#\n-# @staticmethod\n-# def _get_creator(creators_str, members, parties):\n-# # FIXME: currently only does the first. 
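The microsecond-stripping noted above ("msgpack does not like microseconds") is a single regex substitution before parsing. A minimal sketch with a made-up timestamp:

    import re
    import iso8601  # third-party parser used throughout these items

    datum = '2015-03-12T14:30:00.123456+01:00'
    # Drop the fractional seconds; like the deleted regex, this only
    # handles positive UTC offsets ('+').
    start_date = iso8601.parse_date(re.sub(r'\.\d+\+', '+', datum))
    # start_date == datetime(2015, 3, 12, 14, 30, tzinfo=<UTC+01:00>)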
what do we do with the others?\n-# log.debug(\"Creators: %s\" % creators_str)\n-#\n-# if creators_str is None:\n-# return\n-#\n-# creator_str = re.split(r'\\)[,;]\\s*', creators_str)[0]\n-# log.debug(\"Looking for : %s\" % (creator_str,))\n-#\n-# party_match = re.search(r' \\(([^)]*?)\\)?$', creator_str)\n-# if not party_match:\n-# return\n-#\n-# log.debug(\"Party match: %s, parties: %s\" % (\n-# party_match.group(1),\n-# u','.join([p.get('name', u'') for p in parties]),))\n-# try:\n-# party = \\\n-# [p for p in parties if unicode(p.get('name', u'')).lower() == unicode(party_match.group(1)).lower()][0]\n-# except Exception as e:\n-# party = None\n-#\n-# if not party:\n-# return\n-#\n-# log.debug(\"Found party: %s\" % (party['name']))\n-#\n-# last_name_match = re.match(r'^([^,]*), ', creator_str)\n-# if not last_name_match:\n-# return\n-#\n-# last_name_members = [m for m in members if last_name_match.group(1) in m['name']]\n-# if len(last_name_members) <= 0:\n-# return\n-#\n-# log.debug(\"Found last name candidates: %s\" % (u','.join([m['name'] for m in last_name_members]),))\n-#\n-# if len(last_name_members) == 1:\n-# log.debug(\"Found final candidate base on last name: %s\" % (last_name_members[0]['name'],))\n-# return last_name_members[0]\n-#\n-# for m in last_name_members:\n-# correct_party_affiliations = [ms for ms in m['memberships'] if ms['organization_id'] == party['id']]\n-# if len(correct_party_affiliations) > 0:\n-# log.debug(\"Found final candidate base on last name and party: %s\" % (m['name'],))\n-# return m\n-#\n-# return None\n-#\n-# def _find_legislative_session(self, motion_date, council, members, parties):\n-# # FIXME: match motions and ev ents when they're closest, not the first you run into\n-# motion_day_start = re.sub(r'T\\d{2}:\\d{2}:\\d{2}', 'T00:00:00', motion_date.isoformat())\n-# motion_day_end = re.sub(r'T\\d{2}:\\d{2}:\\d{2}', 'T23:59:59', motion_date.isoformat())\n-# # log.debug((motion_date.isoformat(), motion_day_start, motion_day_end))\n-# try:\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'events',\n-# classification=u'Agenda',\n-# start_date={\n-# 'from': motion_day_start, 'to': motion_day_end})\n-# # log.debug(len(results))\n-# # filtered_results = [r for r in results if r['organization_id'] == council['id']]\n-# # return filtered_results[0]\n-# if results is not None:\n-# return results[0]\n-# except (KeyError, IndexError) as e:\n-# log.error(\"Error blaat\")\n-# return None\n-#\n-# def _get_motion_id_encoded(self):\n-# return unicode(\n-# full_normalized_motion_id(self._value('Onderwerp')))\n-#\n-# def get_object_id(self):\n-# return self._get_motion_id_encoded()\n-#\n-# def get_original_object_id(self):\n-# return self._get_motion_id_encoded()\n-#\n-# @staticmethod\n-# def get_original_object_urls():\n-# # FIXME: what to do when there is not an original URL?\n-# return {\"html\": settings.IBABS_WSDL}\n-#\n-# @staticmethod\n-# def get_rights():\n-# return u'undefined'\n-#\n-# def get_collection(self):\n-# return unicode(self.source_definition['index_name'])\n-#\n-# def _get_motion_data(self, council, members, parties):\n-# object_model = dict()\n-#\n-# object_model['id'] = unicode(self.get_original_object_id())\n-#\n-# object_model['hidden'] = self.source_definition['hidden']\n-#\n-# object_model['name'] = unicode(self._value('Onderwerp'))\n-#\n-# object_model['identifier'] = object_model['id']\n-#\n-# object_model['organization_id'] = council['id']\n-# object_model['organization'] = council\n-#\n-# # TODO: this gets only 
the first creator listed. We should fix it to\n-# # get all of them\n-# creator = self._get_creator(self._value('Indiener(s)'), members, parties)\n-# if creator is not None:\n-# object_model['creator_id'] = creator['id']\n-# object_model['creator'] = creator\n-#\n-# object_model['classification'] = u'Moties'\n-#\n-# object_model['date'] = iso8601.parse_date(self.original_item['datum'][0], )\n-# # TODO: this is only for searching compatability ...\n-# object_model['start_date'] = object_model['date']\n-# object_model['end_date'] = object_model['date']\n-#\n-# # finding the event where this motion was put to a voting round\n-# legislative_session = self._find_legislative_session(\n-# object_model['date'], council, members, parties)\n-# if legislative_session is not None:\n-# object_model['legislative_session_id'] = legislative_session['id']\n-# object_model['legislative_session'] = legislative_session\n-#\n-# object_model['result'] = self._value('Status')\n-# object_model['requirement'] = u'majority'\n-# object_model['sources'] = []\n-#\n-# object_model['vote_events'] = [self.get_original_object_id()]\n-#\n-# try:\n-# documents = self.original_item['_Extra']['Documents']\n-# except KeyError as e:\n-# documents = []\n-# if documents is None:\n-# documents = []\n-#\n-# # Default the text to \"-\". If a document contains actual text\n-# # then that text will be used.\n-# object_model['text'] = u\"-\"\n-# for document in documents:\n-# sleep(1)\n-# log.debug(u\"%s: %s\" % (\n-# object_model['name'], document['DisplayName'],))\n-# description = self.file_get_contents(\n-# public_download_url,\n-# self.source_definition.get('pdf_max_pages', 20)).strip()\n-# object_model['sources'].append({\n-# 'url': document['PublicDownloadURL'],\n-# 'note': document['DisplayName'],\n-# 'description': description\n-# })\n-# # FIXME: assumes that there is only one document from which\n-# # we can extract text; is that a valid assumption?\n-# if len(description) > 0:\n-# object_model['text'] = description\n-#\n-# return object_model\n-#\n-# def get_object_model(self):\n-# council = self._get_council()\n-# members = self._get_council_members()\n-# parties = self._get_council_parties()\n-#\n-# return self._get_motion_data(council, members, parties)\n-#\n-# @staticmethod\n-# def get_index_data():\n-# return {}\n-#\n-# @staticmethod\n-# def get_all_text():\n-# text_items = []\n-#\n-# return u' '.join(text_items)\n-#\n-#\n-# class IBabsMotionItem(IBabsMotionVotingMixin, MotionItem):\n-# pass\n-#\n-#\n-# class IBabsVoteEventItem(IBabsMotionVotingMixin, VotingEventItem):\n-# def get_object_model(self):\n-# object_model = {}\n-# council = self._get_council()\n-# members = self._get_council_members()\n-# parties = self._get_council_parties()\n-# # log.debug(parties)\n-# object_model['motion'] = self._get_motion_data(\n-# council, members, parties)\n-#\n-# object_model['classification'] = u'Stemmingen'\n-# object_model['hidden'] = self.source_definition['hidden']\n-# object_model['start_date'] = object_model['motion']['date']\n-# object_model['end_date'] = object_model['motion']['date']\n-#\n-# # we can copy some fields from the motion\n-# for field in [\n-# 'id', 'organization_id', 'organization', 'identifier', 'result',\n-# 'sources', 'legislative_session_id'\n-# ]:\n-# try:\n-# object_model[field] = object_model['motion'][field]\n-# except KeyError as e:\n-# pass\n-#\n-# # Not all motions are actually voted on\n-# # FIXME: are there more. 
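The legislative-session lookup above matches a motion to any Agenda event on the same calendar day. A sketch of how the deleted _find_legislative_session widens one timestamp into that day window:

    import re
    from datetime import datetime

    motion_date = datetime(2015, 3, 12, 14, 30, 0)

    # Rewrite the time portion of the ISO string to cover the whole day.
    day_start = re.sub(r'T\d{2}:\d{2}:\d{2}', 'T00:00:00', motion_date.isoformat())
    day_end = re.sub(r'T\d{2}:\d{2}:\d{2}', 'T23:59:59', motion_date.isoformat())
    # day_start == '2015-03-12T00:00:00', day_end == '2015-03-12T23:59:59'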
is every municipality specifying the same?\n-# # allowed_results = [\n-# # 'Motie aangenomen',\n-# # 'Motie verworpen',\n-# # ]\n-#\n-# object_model['counts'] = []\n-# object_model['votes'] = []\n-#\n-# # if object_model['result'] not in allowed_results:\n-# # return object_model\n-# #\n-# # party_ids = [p['id'] for p in parties if p.has_key('id')]\n-# #\n-# # # make the vote a bit random, but retain te result by majority vote\n-# # majority_count = (len(members) // 2) + 1\n-# # vote_count_to_result = len(members)\n-# # new_vote_count_to_result = vote_count_to_result\n-# # current_votes = {p['id']: object_model['result'] for p in parties if p.has_key('name')}\n-# # party_sizes = {p['id']: len(list(set([m['person_id'] for m in p['memberships']]))) for p in parties if p.has_key('name')}\n-# # parties_voted = []\n-# #\n-# # while new_vote_count_to_result >= majority_count:\n-# # if new_vote_count_to_result != vote_count_to_result:\n-# # vote_count_to_result = new_vote_count_to_result\n-# # current_votes[party_id] = random.choice([r for r in allowed_results if r != object_model['result']])\n-# # parties_voted.append(party_id)\n-# #\n-# # # pick a random party\n-# # party_id = random.choice([p for p in party_ids if p not in parties_voted])\n-# #\n-# # new_vote_count_to_result = new_vote_count_to_result - party_sizes[party_id]\n-# #\n-# # # now record the votes\n-# # for party in parties:\n-# # if not party.has_key('name'):\n-# # continue\n-# # try:\n-# # num_members = len(list(set([m['person_id'] for m in party['memberships']])))\n-# # except KeyError as e:\n-# # num_members = 0\n-# # object_model['counts'].append({\n-# # 'option': current_votes[party['id']], # object_model['result'],\n-# # 'value': num_members,\n-# # 'group': {\n-# # 'name': party.get('name', '')\n-# # }\n-# # })\n-# #\n-# # # FIXME: get the actual individual votes, depends on the voting kind\n-# # for m in members:\n-# # try:\n-# # member_party = [ms['organization_id'] for ms in m['memberships'] if ms['organization_id'] in party_ids][0]\n-# # member_vote = current_votes[member_party]\n-# # except (KeyError, IndexError) as e:\n-# # member_party = None\n-# # member_vote = object_model['result']\n-# #\n-# # object_model['votes'].append({\n-# # 'voter_id' : m['id'],\n-# # 'voter': m,\n-# # 'option': member_vote, # FIXME: actual vote\n-# # 'group_id': member_party\n-# # })\n-#\n-# return object_model\ndiff --git a/ocd_backend/items/ibabs_person.py b/ocd_backend/items/ibabs_person.py\ndeleted file mode 100644\n--- a/ocd_backend/items/ibabs_person.py\n+++ /dev/null\n@@ -1,59 +0,0 @@\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import *\n-\n-\n-class IbabsPersonItem(BaseItem):\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'ibabs',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- person = Person(self.original_item['UserId'], **source_defaults)\n- person.name = self.original_item['Name']\n- person.family_name = self.original_item['LastName']\n- person.biography = self.original_item['AboutMe']\n- person.email = self.original_item['Email']\n- person.phone = self.original_item['Phone']\n-\n- municipality = Organization(self.source_definition['almanak_id'], **source_defaults)\n- municipality.name = self.source_definition['sitename']\n- municipality.merge(name=self.source_definition['sitename'])\n-\n- 
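The membership wiring in the deleted person items follows one pattern: hang Membership objects off person.member_of rather than setting member = person, which the TODOs note causes infinite recursion. A condensed sketch of that pattern (identifiers and names are made up; the models come from ocd_backend.models, as in the deleted files):

    from ocd_backend.models import Person, Organization, Membership

    source_defaults = {
        'source': 'ibabs',
        'source_id_key': 'identifier',
        'organization': 'example',
    }

    person = Person('user-123', **source_defaults)
    person.name = 'J. Jansen'

    party = Organization('party-456', **source_defaults)
    party.name = 'Voorbeeldpartij'

    party_member = Membership(**source_defaults)
    party_member.organization = party
    party_member.role = 'Member'
    # Intentionally not setting party_member.member = person; see the
    # TODO about infinite recursion in the deleted code.

    person.member_of = [party_member]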
municipality_member = Membership(**source_defaults)\n- municipality_member.organization = municipality\n- # TODO: Setting member = person causes infinite recursion\n- # municipality_member.member = person\n- # FunctionName is often set to 'None' in the source, in that case we fall back to 'Member'\n- if self.original_item['FunctionName'] == 'None':\n- municipality_member.role = 'Member'\n- else:\n- municipality_member.role = self.original_item['FunctionName']\n-\n- person.member_of = [municipality_member]\n-\n- if self.original_item['PoliticalPartyId']:\n- # Currently there is no way to merge parties from the Almanak with parties from ibabs because\n- # they do not share any consistent identifiers, so new nodes will be created for parties that ibabs\n- # persons are linked to. This causes ibabs sources that have persons to have duplicate party nodes.\n- # These duplicate nodes are necessary to cover ibabs sources that have no persons, otherwise those\n- # sources would not have any parties.\n- party = Organization(self.original_item['PoliticalPartyId'], **source_defaults)\n- party.name = self.original_item['PoliticalPartyName']\n-\n- party_member = Membership(**source_defaults)\n- party_member.organization = party\n- # TODO: Setting member = person causes infinite recursion\n- # party_member.member = person\n- party_member.role = 'Member'\n-\n- person.member_of.append(party_member)\n-\n- return person\ndiff --git a/ocd_backend/items/notubiz_committee.py b/ocd_backend/items/notubiz_committee.py\ndeleted file mode 100644\n--- a/ocd_backend/items/notubiz_committee.py\n+++ /dev/null\n@@ -1,30 +0,0 @@\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import *\n-\n-\n-class CommitteeItem(BaseItem):\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'notubiz',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- committee = Organization(self.original_item['id'], **source_defaults)\n- committee.name = self.original_item['title']\n- if self.original_item['title'] == 'Gemeenteraad':\n- committee.classification = 'Council'\n- else:\n- committee.classification = 'Committee'\n-\n- # Attach the committee node to the municipality node\n- committee.subOrganizationOf = Organization(self.source_definition['key'], **source_defaults)\n- committee.subOrganizationOf.merge(collection=self.source_definition['index_name'])\n-\n- return committee\ndiff --git a/ocd_backend/items/notubiz_meeting.py b/ocd_backend/items/notubiz_meeting.py\ndeleted file mode 100644\n--- a/ocd_backend/items/notubiz_meeting.py\n+++ /dev/null\n@@ -1,89 +0,0 @@\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import *\n-from ocd_backend.log import get_source_logger\n-\n-log = get_source_logger('notubiz_meeting')\n-\n-\n-class NotubizMeetingItem(BaseItem):\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'notubiz',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- event = Meeting(self.original_item['id'], **source_defaults)\n- event.start_date = self.original_item['plannings'][0]['start_date']\n- event.end_date = self.original_item['plannings'][0]['end_date']\n- event.name = 
self.original_item['attributes'].get('Titel', 'Vergadering %s' % event.start_date)\n- event.classification = [u'Agenda']\n- event.location = self.original_item['attributes'].get('Locatie')\n-\n- # Attach the meeting to the municipality node\n- event.organization = Organization(self.original_item['organisation']['id'], **source_defaults)\n- event.organization.merge(collection=self.source_definition['index_name'])\n-\n- # Attach the meeting to the committee node\n- event.committee = Organization(self.original_item['gremium']['id'], **source_defaults)\n- # Re-attach the committee node to the municipality node\n- # TODO: Why does the committee node get detached from the municipality node when meetings are attached to it?\n- event.committee.subOrganizationOf = Organization(self.source_definition['key'], **source_defaults)\n- event.committee.subOrganizationOf.merge(collection=self.source_definition['index_name'])\n-\n- event.agenda = []\n- for item in self.original_item.get('agenda_items', []):\n- if not item['order']:\n- continue\n-\n- # If it's a 'label' type skip the item for now, since it only gives little information about what is to come\n- if item['type'] == 'label':\n- continue\n-\n- agendaitem = AgendaItem(item['id'], **source_defaults)\n- agendaitem.__rel_params__ = {'rdf': '_%i' % item['order']}\n- agendaitem.description = item['type_data']['attributes'][0]['value']\n- agendaitem.name = self.original_item['attributes']['Titel']\n- agendaitem.position = self.original_item['order']\n- agendaitem.parent = event\n- agendaitem.start_date = event.start_date\n-\n- agendaitem.attachment = []\n- for doc in item.get('documents', []):\n- attachment = MediaObject(doc['id'], **source_defaults)\n- attachment.identifier_url = doc['self'] # Trick to use the self url for enrichment\n- attachment.original_url = doc['url']\n- attachment.name = doc['title']\n- attachment.date_modified = doc['last_modified']\n- agendaitem.attachment.append(attachment)\n-\n- event.agenda.append(agendaitem)\n-\n- # object_model['last_modified'] = iso8601.parse_date(\n- # self.original_item['last_modified'])\n-\n- if 'canceled' in self.original_item and self.original_item['canceled']:\n- log.info('Found a Notubiz event with status EventCancelled: %s' % str(event.values))\n- event.status = EventCancelled()\n- elif 'inactive' in self.original_item and self.original_item['inactive']:\n- log.info('Found a Notubiz event with status EventUncomfirmed: %s' % str(event.values))\n- event.status = EventUnconfirmed()\n- else:\n- event.status = EventConfirmed()\n-\n- event.attachment = []\n- for doc in self.original_item.get('documents', []):\n- attachment = MediaObject(doc['id'], **source_defaults)\n- attachment.identifier_url = doc['self'] # Trick to use the self url for enrichment\n- attachment.original_url = doc['url']\n- attachment.name = doc['title']\n- attachment.date_modified = doc['last_modified']\n- event.attachment.append(attachment)\n-\n- return event\ndiff --git a/ocd_backend/items/organisations.py b/ocd_backend/items/organisations.py\ndeleted file mode 100644\n--- a/ocd_backend/items/organisations.py\n+++ /dev/null\n@@ -1,88 +0,0 @@\n-import re\n-\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import Organization\n-\n-\n-class MunicipalityOrganisationItem(BaseItem):\n- \"\"\"\n- Extracts municipality information from the Almanak.\n- \"\"\"\n-\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def 
get_object_model(self):\n- source_defaults = {\n- 'source': 'cbs',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- object_model = Organization(self.original_item['Key'], **source_defaults)\n- object_model.name = unicode(self.original_item['Title'])\n- object_model.classification = u'Municipality'\n- object_model.description = self.original_item['Description']\n- object_model.collection = self.get_collection()\n-\n- return object_model\n-\n-\n-class AlmanakOrganisationItem(BaseItem):\n- \"\"\"\n- Extracts organizations (parties) from the Almanak.\n- \"\"\"\n-\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'almanak',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- object_model = Organization(self.original_item['name'], **source_defaults)\n- object_model.name = self.original_item['name'] # todo dubbel?\n- object_model.classification = self.original_item['classification']\n- object_model.subOrganizationOf = Organization(self.source_definition['almanak_id'], **source_defaults)\n- object_model.subOrganizationOf.merge(collection=self.source_definition['index_name'])\n-\n- return object_model\n-\n-\n-class HTMLOrganisationItem(BaseItem):\n-\n- def _get_name(self):\n- name = unicode(u''.join(self.original_item.xpath('.//text()'))).strip()\n- name = re.sub(r'\\s*\\(\\d+ zetels?\\)\\s*', '', name)\n- return unicode(name)\n-\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'almanak',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- object_model = Organization(\n- self._get_name(), **source_defaults)\n- object_model.name = self._get_name() # todo dubbel?\n- object_model.classification = unicode(\n- self.source_definition.get('classification', 'Party'))\n- object_model.collection = self.get_collection()\n-\n- return object_model\ndiff --git a/ocd_backend/items/persons.py b/ocd_backend/items/persons.py\ndeleted file mode 100644\n--- a/ocd_backend/items/persons.py\n+++ /dev/null\n@@ -1,153 +0,0 @@\n-from urlparse import urljoin\n-\n-from lxml import etree\n-\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import *\n-from ocd_backend.models.model import Relationship\n-from ocd_backend.utils.http import HttpRequestMixin\n-from ocd_backend.log import get_source_logger\n-\n-log = get_source_logger('persons')\n-\n-\n-class AlmanakPersonItem(BaseItem):\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'almanak',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- person = Person(self.original_item['id'], **source_defaults)\n- person.name = self.original_item['name']\n- person.email = self.original_item['email']\n- person.gender = self.original_item['gender']\n-\n- municipality = Organization(self.source_definition['almanak_id'], **source_defaults)\n- municipality.name = self.source_definition['sitename']\n-\n- municipality_member = Membership(**source_defaults)\n- municipality_member.organization = municipality\n- # TODO: Setting member = person 
causes infinite recursion\n- # municipality_member.member = person\n- municipality_member.role = self.original_item['role']\n-\n- person.member_of = [municipality_member]\n-\n- if self.original_item['party']:\n- party = Organization(self.original_item['party'], **source_defaults)\n- party.name = self.original_item['party']\n-\n- party_member = Membership(**source_defaults)\n- party_member.organization = party\n- # TODO: Setting member = person causes infinite recursion\n- # party_member.member = person\n- party_member.role = self.original_item['role']\n-\n- person.member_of.append(party_member)\n-\n- return person\n-\n-\n-class HTMLPersonItem(HttpRequestMixin, BaseItem):\n- def _get_name(self):\n- return u''.join(\n- self.original_item.xpath(\n- self.source_definition.get(\n- 'persons_name_xpath', './/h2//text()'))).strip()\n-\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n- source_defaults = {\n- 'source': 'almanak',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- pname = self._get_name()\n- print pname\n- person = Person(pname, **source_defaults)\n- person.name = self._get_name()\n-\n- return person\n-\n-\n-class HTMLPersonFromLinkItem(HTMLPersonItem):\n- def get_object_model(self):\n- person = super(HTMLPersonFromLinkItem, self).get_object_model()\n-\n- source_defaults = {\n- 'source': 'almanak',\n- 'source_id_key': 'identifier',\n- 'organization': self.source_definition['key'],\n- }\n-\n- # log.info(etree.tostring(self.original_item))\n- # log.info('Persons URL path: %s' % (self.source_definition['persons_link_path'],))\n- try:\n- request_url = urljoin(\n- self.source_definition['file_url'],\n- self.original_item.xpath(\n- self.source_definition['persons_link_path'])[0])\n- except LookupError as e:\n- log.error(e)\n- return person\n-\n- log.info('Now downloading URL: %s' % (request_url,))\n- r = self.http_session.get(request_url, verify=False)\n- r.raise_for_status()\n- html = etree.HTML(r.content)\n-\n- try:\n- person.email = html.xpath(\n- 'string(//a[starts-with(@href,\"mailto:\")]/text())').strip().split(' ')[0]\n- except LookupError:\n- pass\n-\n- # TODO: not sure how to determine gender\n- # person.gender = u'male' if person.name.startswith(u'Dhr. 
') else u'female'\n-\n- municipality = Organization(\n- self.source_definition['almanak_id'], **source_defaults)\n- if municipality is None:\n- log.debug('Could not find almanak organization')\n- return person\n-\n- party_name = u''.join(html.xpath(\n- self.source_definition['organization_xpath']))\n- log.info('Found party %s on personal page based on name' % (party_name,))\n- party = Organization(party_name, **source_defaults)\n- log.info('Found party %s on NEO4j' % (party,))\n- #\n- municipality_member = Membership()\n- municipality_member.organization = municipality\n- # TODO: Setting member = person causes infinite recursion\n- # municipality_member.member = person\n- municipality_member.role = 'Fractielid'\n- # municipality_member.role = html.xpath('string(//div[@id=\"content\"]//h3/text())').strip()\n- #\n- party_member = Membership()\n- party_member.organization = party\n- # TODO: Setting member = person causes infinite recursion\n- # party_member.member = person\n- party_member.role = 'Member'\n-\n- person.member_of = [municipality_member, party_member]\n-\n- # person.member_of(municipality_member, party_member, rel=party)\n-\n- # person.member_of = Relationship(municipality, rel=party)\n-\n- return person\ndiff --git a/ocd_backend/items/popit.py b/ocd_backend/items/popit.py\ndeleted file mode 100644\n--- a/ocd_backend/items/popit.py\n+++ /dev/null\n@@ -1,118 +0,0 @@\n-# todo needs revision v1\n-# from datetime import datetime\n-#\n-# import iso8601\n-# from ocd_backend.items.popolo import (\n-# PersonItem, OrganisationItem, MembershipItem)\n-#\n-#\n-# class PopitBaseItem(object):\n-# \"\"\"\n-# Base class for importing things from a Popit instance.\n-# \"\"\"\n-#\n-# ignored_list_fields = {\n-# 'memberships': [\n-# # FIXME: start and end dates for memberships borked due to ES configuration (?)\n-# 'start_date', 'end_date',\n-# 'url', 'html_url', 'contact_details', 'images', 'links'\n-# ],\n-# # FIXME: start and end dates for memberships borked due to ES configuration (?)\n-# # 'start_date', 'end_date'\n-# 'area': ['id', 'name']\n-# }\n-#\n-# def get_object_id(self):\n-# return unicode(self.original_item['id'])\n-#\n-# def get_original_object_id(self):\n-# return self.get_object_id()\n-#\n-# def get_original_object_urls(self):\n-# try:\n-# return self.original_item['meta']['original_object_urls']\n-# except KeyError as e:\n-# pass\n-# try:\n-# return {'html': self.original_item['html_url']}\n-# except KeyError as e:\n-# pass\n-# return {}\n-#\n-# def get_rights(self):\n-# try:\n-# return self.original_item['meta']['rights']\n-# except (TypeError, KeyError) as e:\n-# return u'undefined'\n-#\n-# def get_collection(self):\n-# return unicode(self.source_definition['index_name'])\n-#\n-# def get_object_model(self):\n-# object_model = {\n-# 'hidden': self.source_definition['hidden']\n-# }\n-#\n-# for field in self.combined_index_fields:\n-# if field not in self.original_item:\n-# continue\n-#\n-# if self.combined_index_fields[field] == unicode:\n-# object_model[field] = unicode(\n-# self.original_item[field])\n-# elif self.combined_index_fields[field] == datetime:\n-# if self.original_item[field] is not None:\n-# try:\n-# object_model[field] = iso8601.parse_date(\n-# self.original_item[field])\n-# except iso8601.ParseError as e:\n-# object_model[field] = None\n-# elif self.combined_index_fields[field] == list:\n-# if field in self.ignored_list_fields:\n-# object_model[field] = [\n-# {k: v for k, v in l.iteritems() if k not in self.ignored_list_fields[field]} for l in\n-# 
self.original_item[field]]\n-# else:\n-# object_model[field] = self.original_item[field]\n-# elif self.combined_index_fields[field] == dict:\n-# if field in self.ignored_list_fields:\n-# object_model[field] = {\n-# k: v for k, v in self.original_item[field].iteritems() if\n-# k not in self.ignored_list_fields[field]}\n-# else:\n-# object_model[field] = self.original_item[field]\n-# else:\n-# object_model[field] = self.original_item[field]\n-#\n-# return object_model\n-#\n-# @staticmethod\n-# def get_index_data():\n-# return {}\n-#\n-# @staticmethod\n-# def get_all_text():\n-# text_items = []\n-#\n-# return u' '.join(text_items)\n-#\n-#\n-# class PopitPersonItem(PopitBaseItem, PersonItem):\n-# \"\"\"\n-# Imports persons from a popit instance.\n-# \"\"\"\n-# pass\n-#\n-#\n-# class PopitOrganisationItem(PopitBaseItem, OrganisationItem):\n-# \"\"\"\n-# Imports organizations from a popit instance.\n-# \"\"\"\n-# pass\n-#\n-#\n-# class PopitMembershipItem(PopitBaseItem, MembershipItem):\n-# \"\"\"\n-# Imports a membership from a popit instance.\n-# \"\"\"\n-# pass\ndiff --git a/ocd_backend/items/voting_round.py b/ocd_backend/items/voting_round.py\ndeleted file mode 100644\n--- a/ocd_backend/items/voting_round.py\n+++ /dev/null\n@@ -1,143 +0,0 @@\n-# todo needs revision v1\n-# from ocd_backend import settings\n-# from ocd_backend.extractors import HttpRequestMixin\n-# from ocd_backend.items import BaseItem\n-# from ocd_backend.utils.api import FrontendAPIMixin\n-# from ocd_backend.utils.misc import full_normalized_motion_id\n-#\n-#\n-# class IBabsVotingRoundItem(HttpRequestMixin, FrontendAPIMixin, BaseItem):\n-# combined_index_fields = {\n-# 'id': unicode,\n-# 'hidden': bool,\n-# 'doc': dict\n-# }\n-#\n-# def _get_council(self):\n-# \"\"\"\n-# Gets the organisation that represents the council.\n-# \"\"\"\n-#\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'organizations',\n-# classification='Council')\n-# return results[0]\n-#\n-# def _get_council_members(self):\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'persons',\n-# size=100) # 100 for now ...\n-# return results\n-#\n-# def _get_council_parties(self):\n-# results = self.api_request(\n-# self.source_definition['index_name'], 'organizations',\n-# classification='Party', size=100) # 100 for now\n-# return results\n-#\n-# def get_object_id(self):\n-# return unicode(full_normalized_motion_id(\n-# self.original_item['entry']['EntryTitle']))\n-#\n-# def get_original_object_id(self):\n-# return self.get_object_id()\n-#\n-# @staticmethod\n-# def get_original_object_urls():\n-# return {\"html\": settings.IBABS_WSDL}\n-#\n-# def get_rights(self):\n-# return u'undefined'\n-#\n-# def get_collection(self):\n-# return unicode(self.source_definition['index_name'])\n-#\n-# def _get_result(self):\n-# if not self.original_item['entry']['ListCanVote']:\n-# return \"Motie aangehouden\"\n-# if self.original_item['entry']['VoteResult']:\n-# return \"Motie aangenomen\"\n-# else:\n-# return \"Motie verworpen\"\n-#\n-# def _get_group_results(self, parties):\n-# if not self.original_item['entry']['ListCanVote']:\n-# return []\n-# id2names = dict(list(set(\n-# [(v['GroupId'], v['GroupName']) for v in self.original_item['votes']])))\n-# counts = {}\n-# for v in self.original_item['votes']:\n-# vote_as_str = \"yes\" if v['Vote'] else \"no\"\n-# try:\n-# counts[(v['GroupId'], vote_as_str)] += 1\n-# except KeyError as e:\n-# counts[(v['GroupId'], vote_as_str)] = 1\n-# return [{\n-# \"option\": group_info[1],\n-# 
\"value\": num_votes,\n-# \"group_id\": group_info[0],\n-# \"group\": {\n-# \"name\": id2names[group_info[0]]\n-# }\n-# } for group_info, num_votes in counts.iteritems()]\n-#\n-# def _get_counts(self, council, parties, members):\n-# if not self.original_item['entry']['ListCanVote']:\n-# return []\n-# return [\n-# {\n-# \"option\": \"yes\",\n-# \"value\": self.original_item['entry']['VotesInFavour'],\n-# \"group\": {\n-# \"name\": \"Gemeenteraad\"\n-# }\n-# },\n-# {\n-# \"option\": \"no\",\n-# \"value\": self.original_item['entry']['VotesAgainst'],\n-# \"group\": {\n-# \"name\": \"Gemeenteraad\"\n-# }\n-# }\n-# ]\n-#\n-# def _get_votes(self, council, parties, members):\n-# if not self.original_item['entry']['ListCanVote']:\n-# return []\n-#\n-# members_by_id = {m['id']: {'name': m['name']} for m in members}\n-# return [{\n-# 'voter_id': v['UserId'],\n-# 'voter': members_by_id.get(v['UserId'], None),\n-# 'group_id': v['GroupId'],\n-# 'option': \"yes\" if v['Vote'] else \"no\"\n-# } for v in self.original_item['votes']]\n-#\n-# def get_object_model(self):\n-# object_model = dict()\n-#\n-# object_model['id'] = self.original_item['motion_id']\n-# object_model['hidden'] = self.source_definition['hidden']\n-#\n-# council = self._get_council()\n-# members = self._get_council_members()\n-# parties = self._get_council_parties()\n-#\n-# object_model['doc'] = {\n-# \"result\": self._get_result(),\n-# \"group_results\": self._get_group_results(parties),\n-# \"counts\": self._get_counts(council, parties, members),\n-# \"votes\": self._get_votes(council, parties, members)\n-# }\n-#\n-# return object_model\n-#\n-# @staticmethod\n-# def get_index_data():\n-# return {}\n-#\n-# @staticmethod\n-# def get_all_text():\n-# text_items = []\n-#\n-# return u' '.join(text_items)\ndiff --git a/ocd_backend/loaders/__init__.py b/ocd_backend/loaders/__init__.py\n--- a/ocd_backend/loaders/__init__.py\n+++ b/ocd_backend/loaders/__init__.py\n@@ -1,18 +1,9 @@\n-import json\n from datetime import datetime\n \n-import requests\n-\n from ocd_backend import celery_app\n-from ocd_backend import settings\n-from ocd_backend.es import elasticsearch\n-from ocd_backend.exceptions import ConfigurationError\n from ocd_backend.log import get_source_logger\n-from ocd_backend.mixins import (OCDBackendTaskSuccessMixin,\n- OCDBackendTaskFailureMixin)\n-from ocd_backend.utils import json_encoder\n-from ocd_backend.utils.misc import iterate, get_sha1_hash, doc_type\n-from ocd_backend.models.serializers import JsonLDSerializer, JsonSerializer\n+from ocd_backend.mixins import OCDBackendTaskSuccessMixin, OCDBackendTaskFailureMixin\n+from ocd_backend.utils.misc import iterate\n \n log = get_source_logger('loader')\n \n@@ -21,7 +12,7 @@ class BaseLoader(OCDBackendTaskSuccessMixin, OCDBackendTaskFailureMixin,\n celery_app.Task):\n \"\"\"The base class that other loaders should inherit.\"\"\"\n \n- def run(self, *args, **kwargs):\n+ def start(self, *args, **kwargs):\n \"\"\"Start loading of a single item.\n \n This method is called by the transformer and expects args to\n@@ -44,164 +35,3 @@ def post_processing(doc):\n # Add the 'processing.finished' datetime to the documents\n finished = datetime.now()\n # doc.Meta.processing_finished = finished\n-\n-\n-class ElasticsearchLoader(BaseLoader):\n- \"\"\"Indexes items into Elasticsearch.\n-\n- Each item is added to two indexes: a 'combined' index that contains\n- items from different sources, and an index that only contains items\n- of the same source as the item.\n-\n- Each URL found in ``media_urls`` 
is added as a document to the\n- ``RESOLVER_URL_INDEX`` (if it doesn't already exist).\n- \"\"\"\n-\n- def run(self, *args, **kwargs):\n- self.index_name = kwargs.get('new_index_name')\n-\n- if not self.index_name:\n- raise ConfigurationError('The name of the index is not provided')\n-\n- return super(ElasticsearchLoader, self).run(*args, **kwargs)\n-\n- def load_item(self, doc):\n- body = json_encoder.encode(JsonLDSerializer().serialize(doc))\n-\n- log.info('ElasticsearchLoader indexing document id: %s' % doc.get_ori_identifier())\n-\n- # Index documents into new index\n- elasticsearch.index(index=self.index_name, body=body, id=doc.get_short_identifier())\n-\n- # Recursively index associated models like attachments\n- for _, value in doc.properties(rels=True, props=False):\n- self.load_item(value)\n-\n- if 'enricher_task' in value:\n- # The value seems to be enriched so add to resolver\n- url_doc = {\n- 'ori_identifier': value.get_short_identifier(),\n- 'original_url': value.original_url,\n- 'file_name': value.name,\n- }\n-\n- if 'content_type' in value:\n- url_doc['content_type'] = value.content_type\n-\n- # Update if already exists\n- elasticsearch.index(index=settings.RESOLVER_URL_INDEX,\n- id=get_sha1_hash(value.original_url), body=url_doc)\n-\n-\n-class ElasticsearchUpdateOnlyLoader(ElasticsearchLoader):\n- \"\"\"\n- Updates elasticsearch items using the update method. Use with caution.\n- \"\"\"\n-\n- def load_item(self, doc):\n- body = json_encoder.encode(JsonLDSerializer().serialize(doc))\n-\n- if doc == {}:\n- log.info('Empty document ....')\n- return\n-\n- log.info('ElasticsearchUpdateOnlyLoader indexing document id: %s' % doc.get_ori_identifier())\n-\n- # Index documents into new index\n- elasticsearch.update(\n- id=doc.get_short_identifier(),\n- index=self.index_name,\n- body={'doc': body},\n- )\n- # remember, resolver URLs are not update here to prevent too complex\n- # things\n-\n-\n-class ElasticsearchUpsertLoader(ElasticsearchLoader):\n- \"\"\"\n- Updates elasticsearch items using the update method. Use with caution.\n- \"\"\"\n-\n- def load_item(self, doc):\n- body = json_encoder.encode(JsonLDSerializer().serialize(doc))\n-\n- if doc == {}:\n- log.info('Empty document ....')\n- return\n-\n- log.info('ElasticsearchUpsertLoader indexing document id: %s' % doc.get_ori_identifier())\n-\n- # Index documents into new index\n- elasticsearch.update(\n- id=doc.get_short_identifier(),\n- index=self.index_name,\n- body={\n- 'doc': body,\n- 'doc_as_upsert': True,\n- },\n- )\n-\n-\n-class DummyLoader(BaseLoader):\n- \"\"\"\n- Prints the item to the console, for debugging purposes.\n- \"\"\"\n-\n- def load_item(self, doc):\n- log.debug('=' * 50)\n- log.debug('%s %s %s' % ('=' * 4, doc.get_ori_identifier(), '=' * 4))\n- log.debug('%s %s %s' % ('-' * 20, 'doc', '-' * 25))\n- log.debug(JsonSerializer().serialize(doc))\n- log.debug('=' * 50)\n-\n- @staticmethod\n- def run_finished(run_identifier):\n- log.debug('*' * 50)\n- log.debug('Finished run {}'.format(run_identifier))\n- log.debug('*' * 50)\n-\n-\n-def json_serial(obj):\n- \"\"\"JSON serializer for objects not serializable by default json code\"\"\"\n-\n- if isinstance(obj, datetime):\n- serial = obj.isoformat()\n- return serial\n- raise TypeError(\"Type not serializable\")\n-\n-\n-class PopitLoader(BaseLoader):\n- \"\"\"\n- Loads data to a Popit instance.\n- \"\"\"\n-\n- def _create_or_update_item(self, item, item_id):\n- \"\"\"\n- First tries to post (aka create) a new item. 
If that does not work,\n- do an update (aka put).\n- \"\"\"\n-\n- headers = {\n- \"Apikey\": self.source_definition['popit_api_key'],\n- \"Accept\": \"application/json\",\n- \"Content-Type\": \"application/json\"\n- }\n-\n- popit_url = \"%s/%s\" % (\n- self.source_definition['popit_base_url'],\n- self.source_definition['doc_type'],)\n- resp = requests.post(\n- popit_url,\n- headers=headers, data=json.dumps(item, default=json_serial))\n-\n- # popit update controls where we should update the data from ibabs (overwriting our own data)\n- # or whether we should only add things when there's new information.\n- if (not self.source_definition.get('popit_update', False)) or (resp.status_code != 500):\n- return resp\n-\n- return requests.put(\n- \"%s/%s\" % (popit_url, item_id,),\n- headers=headers, data=json.dumps(item, default=json_serial))\n-\n- def load_item(self, doc):\n- resp = self._create_or_update_item(doc, doc.get_short_identifier())\ndiff --git a/ocd_backend/loaders/delta.py b/ocd_backend/loaders/delta.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/loaders/delta.py\n@@ -0,0 +1,66 @@\n+from confluent_kafka import Producer\n+from pyld import jsonld\n+\n+from ocd_backend import celery_app\n+from ocd_backend import settings\n+from ocd_backend.loaders import BaseLoader\n+from ocd_backend.log import get_source_logger\n+from ocd_backend.models.serializers import JsonLDSerializer\n+\n+log = get_source_logger('delta_loader')\n+\n+\n+class DeltaLoader(BaseLoader):\n+ \"\"\"Serializes a model to N-Quads and then sends it to a Kafka bus.\"\"\"\n+\n+ config = {\n+ 'bootstrap.servers': settings.KAFKA_HOST,\n+ 'session.timeout.ms': settings.KAFKA_SESSION_TIMEOUT,\n+ }\n+\n+ if settings.KAFKA_USERNAME:\n+ config['sasl.mechanisms'] = 'PLAIN'\n+ config['security.protocol'] = 'SASL_SSL'\n+ # config['ssl.ca.location'] = '/usr/local/etc/openssl/cert.pem'\n+ config['sasl.username'] = settings.KAFKA_USERNAME\n+ config['sasl.password'] = settings.KAFKA_PASSWORD\n+\n+ def load_item(self, doc):\n+ kafka_producer = Producer(self.config)\n+\n+ # Recursively index associated models like attachments\n+ for model in doc.traverse():\n+ # Serialize the body to JSON-LD\n+ jsonld_body = JsonLDSerializer().serialize(model)\n+\n+ # Serialize the jsonld_body to N-Triples\n+ ntriples = jsonld.normalize(jsonld_body, {'algorithm': 'URDNA2015', 'format': 'application/n-quads'})\n+\n+ # Add the graph name to the body. This is done the low-tech way, but could be improved by updating the\n+ # JSON-LD so that the graph information is included when serializing to N-Quads.\n+ ntriples_split = ntriples.split(' .\\n')\n+ nquads = ' .\\n'.join(ntriples_split)\n+\n+ # Send document to the Kafka bus\n+ log.debug('DeltaLoader sending document id %s to Kafka' % model.get_ori_identifier())\n+ message_key_id = '%s_%s' % (settings.KAFKA_MESSAGE_KEY, model.get_short_identifier())\n+ kafka_producer.produce(settings.KAFKA_TOPIC, nquads.encode('utf-8'), message_key_id, callback=delivery_report)\n+\n+ # See https://github.com/confluentinc/confluent-kafka-python#usage for a complete example of how to use\n+ # the kafka producer with status callbacks.\n+\n+ kafka_producer.flush()\n+\n+\n+def delivery_report(err, msg):\n+ \"\"\" Called once for each message produced to indicate delivery result.\n+ Triggered by poll() or flush(). 
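As a rough illustration of the normalization step in DeltaLoader above, pyld can canonicalize a small JSON-LD document to N-Quads with the same call; the document, @id and predicate below are made up for the example and are not taken from ORI data:

from pyld import jsonld

# A made-up JSON-LD document; the IRIs here are illustrative only.
doc = {
    '@id': 'https://example.org/meeting/1',
    'http://schema.org/name': 'Vergadering',
}

# URDNA2015 canonicalization serialized as N-Quads, mirroring what
# DeltaLoader does before producing each message to Kafka.
nquads = jsonld.normalize(
    doc, {'algorithm': 'URDNA2015', 'format': 'application/n-quads'})
print(nquads)
# <https://example.org/meeting/1> <http://schema.org/name> "Vergadering" .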
\"\"\"\n+ if err is not None:\n+ log.warning('Message delivery failed: {}'.format(err))\n+ else:\n+ log.debug('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))\n+\n+\n+@celery_app.task(bind=True, base=DeltaLoader, autoretry_for=(Exception,), retry_backoff=True)\n+def delta_loader(self, *args, **kwargs):\n+ return self.start(*args, **kwargs)\ndiff --git a/ocd_backend/loaders/elasticsearch.py b/ocd_backend/loaders/elasticsearch.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/loaders/elasticsearch.py\n@@ -0,0 +1,136 @@\n+import json\n+\n+from ocd_backend import celery_app\n+from ocd_backend import settings\n+from ocd_backend.es import elasticsearch\n+from ocd_backend.exceptions import ConfigurationError\n+from ocd_backend.loaders import BaseLoader\n+from ocd_backend.log import get_source_logger\n+from ocd_backend.models.serializers import JsonLDSerializer\n+from ocd_backend.utils import json_encoder\n+from ocd_backend.utils.misc import get_sha1_hash\n+\n+log = get_source_logger('elasticsearch_loader')\n+\n+\n+class ElasticsearchLoader(BaseLoader):\n+ \"\"\"Indexes items into Elasticsearch.\n+\n+ Each URL found in ``media_urls`` is added as a document to the\n+ ``RESOLVER_URL_INDEX`` (if it doesn't already exist).\n+ \"\"\"\n+\n+ def start(self, *args, **kwargs):\n+ self.index_name = kwargs.get('new_index_name')\n+\n+ if not self.index_name:\n+ raise ConfigurationError('The name of the index is not provided')\n+\n+ return super(ElasticsearchLoader, self).start(*args, **kwargs)\n+\n+ def load_item(self, doc):\n+ # Recursively index associated models like attachments\n+ for model in doc.traverse():\n+ model_body = json_encoder.encode(JsonLDSerializer().serialize(model))\n+\n+ log.debug('ElasticsearchLoader indexing document id: %s' % model.get_ori_identifier())\n+\n+ # Index document into new index\n+ elasticsearch.index(index=self.index_name,\n+ body=model_body,\n+ id=model.get_short_identifier())\n+\n+ if 'enricher_task' in model:\n+ # The value seems to be enriched so add to resolver\n+ url_doc = {\n+ 'ori_identifier': model.get_short_identifier(),\n+ 'original_url': model.original_url,\n+ 'file_name': model.name,\n+ }\n+\n+ if 'content_type' in model:\n+ url_doc['content_type'] = model.content_type\n+\n+ # Update if already exists\n+ elasticsearch.index(index=settings.RESOLVER_URL_INDEX,\n+ id=get_sha1_hash(model.original_url),\n+ body=url_doc)\n+\n+\n+class ElasticsearchUpdateOnlyLoader(ElasticsearchLoader):\n+ \"\"\"\n+ Updates elasticsearch items using the update method. 
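The registration pattern used for delta_loader above and for the Elasticsearch loader tasks below (a celery Task subclass carrying the behaviour, with a thin module-level task that delegates to start()) reduces to a minimal sketch, assuming a recent Celery 4.x; celery_app and all other names here are placeholders, not the project's own objects:

from celery import Celery

celery_app = Celery('sketch')  # stand-in for ocd_backend.celery_app


class SketchLoader(celery_app.Task):
    def start(self, *args, **kwargs):
        # Real loaders do their work here; this stub only echoes a kwarg.
        return kwargs.get('new_index_name')


@celery_app.task(bind=True, base=SketchLoader,
                 autoretry_for=(Exception,), retry_backoff=True)
def sketch_loader(self, *args, **kwargs):
    # bind=True makes `self` the task instance (a SketchLoader),
    # so the shared start() implementation is reachable here.
    return self.start(*args, **kwargs)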
Use with caution.\n+ \"\"\"\n+\n+ def load_item(self, doc):\n+ # Recursively index associated models like attachments\n+ for model in doc.traverse():\n+ model_body = json_encoder.encode(JsonLDSerializer().serialize(model))\n+\n+ if doc == {}:\n+ log.info('Empty document ....')\n+ return\n+\n+ log.debug('ElasticsearchUpdateOnlyLoader indexing document id: %s' % model.get_ori_identifier())\n+\n+ # Index document into new index\n+ elasticsearch.update(\n+ id=model.get_short_identifier(),\n+ index=self.index_name,\n+ body={'doc': json.loads(model_body)},\n+ )\n+\n+ # Resolver URLs are not updated here to prevent too complex things\n+\n+\n+class ElasticsearchUpsertLoader(ElasticsearchLoader):\n+ \"\"\"\n+ Updates elasticsearch items using the update method.\n+ \"\"\"\n+\n+ def load_item(self, doc):\n+ # Recursively index associated models like attachments\n+ for model in doc.traverse():\n+ model_body = json_encoder.encode(JsonLDSerializer().serialize(model))\n+\n+ log.debug('ElasticsearchUpsertLoader indexing document id: %s' % model.get_ori_identifier())\n+\n+ # Update document\n+ elasticsearch.update(\n+ id=model.get_short_identifier(),\n+ index=self.index_name,\n+ body={'doc': json.loads(model_body),\n+ 'doc_as_upsert': True,\n+ },\n+ )\n+\n+ if 'enricher_task' in model:\n+ # The value seems to be enriched so add to resolver\n+ url_doc = {\n+ 'ori_identifier': model.get_short_identifier(),\n+ 'original_url': model.original_url,\n+ 'file_name': model.name,\n+ }\n+\n+ if 'content_type' in model:\n+ url_doc['content_type'] = model.content_type\n+\n+ # Update if already exists\n+ elasticsearch.index(index=settings.RESOLVER_URL_INDEX,\n+ id=get_sha1_hash(model.original_url),\n+ body=url_doc)\n+\n+\n+@celery_app.task(bind=True, base=ElasticsearchLoader, autoretry_for=(Exception,), retry_backoff=True)\n+def elasticsearch_loader(self, *args, **kwargs):\n+ return self.start(*args, **kwargs)\n+\n+\n+@celery_app.task(bind=True, base=ElasticsearchUpdateOnlyLoader, autoretry_for=(Exception,), retry_backoff=True)\n+def elasticsearch_update_only_loader(self, *args, **kwargs):\n+ return self.start(*args, **kwargs)\n+\n+\n+@celery_app.task(bind=True, base=ElasticsearchUpsertLoader, autoretry_for=(Exception,), retry_backoff=True)\n+def elasticsearch_upsert_loader(self, *args, **kwargs):\n+ return self.start(*args, **kwargs)\ndiff --git a/ocd_backend/mixins.py b/ocd_backend/mixins.py\n--- a/ocd_backend/mixins.py\n+++ b/ocd_backend/mixins.py\n@@ -13,7 +13,7 @@ class OCDBackendTaskMixin(object):\n \"\"\"\n \n def cleanup(self, **kwargs):\n- cleanup_task = load_object(self.source_definition.get('cleanup'))()\n+ cleanup_task = load_object(self.source_definition.get('cleanup'))\n cleanup_task.delay(**kwargs)\n \n \ndiff --git a/ocd_backend/models/__init__.py b/ocd_backend/models/__init__.py\n--- a/ocd_backend/models/__init__.py\n+++ b/ocd_backend/models/__init__.py\n@@ -4,9 +4,7 @@\n By mapping these names definitions can be remapped if needed later on.\n \"\"\"\n import definitions.foaf\n-import definitions.mapping\n import definitions.meeting\n-import definitions.meta\n import definitions.ncal\n import definitions.opengov\n import definitions.org\n@@ -18,8 +16,6 @@\n Meeting = definitions.meeting.Meeting\n AgendaItem = definitions.meeting.AgendaItem\n Amendment = definitions.meeting.Amendment\n-EventUnconfirmed = definitions.meeting.EventUnconfirmed\n-EventConfirmed = definitions.meeting.EventConfirmed\n \n # https://argu.co/voc/mapping/\n # OriIdentifier = definitions.mapping.OriIdentifier\n@@ -41,26 +37,45 @@\n 
AbstainCount = definitions.opengov.AbstainCount\n AbsentCount = definitions.opengov.AbsentCount\n Vote = definitions.opengov.Vote\n-ResultFail = definitions.opengov.ResultFail\n-ResultPass = definitions.opengov.ResultPass\n-VoteOptionYes = definitions.opengov.VoteOptionYes\n-VoteOptionNo = definitions.opengov.VoteOptionNo\n-VoteOptionAbsent = definitions.opengov.VoteOptionAbsent\n \n # http://schema.org/\n MediaObject = definitions.schema.MediaObject\n ImageObject = definitions.schema.ImageObject\n CreativeWork = definitions.schema.CreativeWork\n PropertyValue = definitions.schema.PropertyValue\n-EventCancelled = definitions.schema.EventCancelled\n \n # http://www.w3.org/ns/person#\n Person = definitions.person.Person\n \n # http://www.w3.org/ns/org#\n Organization = definitions.org.Organization\n+# TopLevelOrganization is an alias for Organization\n+TopLevelOrganization = definitions.org.Organization\n Membership = definitions.org.Membership\n \n-# https://argu.co/ns/meta#\n-Metadata = definitions.meta.Metadata\n-Run = definitions.meta.Run\n+# Constants\n+\n+ResultKept = definitions.meeting.ResultKept\n+ResultPostponed = definitions.meeting.ResultPostponed\n+ResultWithdrawn = definitions.meeting.ResultWithdrawn\n+ResultExpired = definitions.meeting.ResultExpired\n+ResultDiscussed = definitions.meeting.ResultDiscussed\n+ResultPublished = definitions.meeting.ResultPublished\n+\n+ResultFailed = definitions.opengov.ResultFailed\n+ResultPassed = definitions.opengov.ResultPassed\n+\n+EventCompleted = definitions.meeting.EventCompleted\n+EventConfirmed = definitions.meeting.EventConfirmed\n+EventUnconfirmed = definitions.meeting.EventUnconfirmed\n+EventScheduled = definitions.schema.EventScheduled\n+EventRescheduled = definitions.schema.EventRescheduled\n+EventCancelled = definitions.schema.EventCancelled\n+EventPostponed = definitions.schema.EventPostponed\n+\n+VoteOptionYes = definitions.opengov.VoteOptionYes\n+VoteOptionNo = definitions.opengov.VoteOptionNo\n+VoteOptionAbstain = definitions.opengov.VoteOptionAbstain\n+VoteOptionAbsent = definitions.opengov.VoteOptionAbsent\n+VoteOptionNotVoting = definitions.opengov.VoteOptionNotVoting\n+VoteOptionPaired = definitions.opengov.VoteOptionPaired\ndiff --git a/ocd_backend/models/database.py b/ocd_backend/models/database.py\ndeleted file mode 100644\n--- a/ocd_backend/models/database.py\n+++ /dev/null\n@@ -1,367 +0,0 @@\n-# -*- coding: utf-8 -*-\n-import re\n-from string import Formatter\n-\n-from neo4j.v1 import GraphDatabase\n-from py2neo import cypher_escape\n-from copy import copy\n-from lock import Lock\n-\n-from ocd_backend.models.definitions import Prov, Pav, Mapping\n-from ocd_backend.models.exceptions import QueryResultError, QueryEmptyResult, MissingProperty\n-from ocd_backend.models.misc import Uri\n-from ocd_backend.settings import NEO4J_URL, NEO4J_USER, NEO4J_PASSWORD\n-\n-\n-class AQuoteFormatter(Formatter):\n- \"\"\"Angled quotation marks are used for delimiting parameter keys\"\"\"\n- parse_regex = re.compile(u'(.[^\u00ab]*)\u00ab?([^!\u00bb]*)?!?([^\u00bb]*)?\u00bb?', re.DOTALL)\n-\n- def parse(self, format_string):\n- if format_string:\n- for result in self.parse_regex.finditer(format_string):\n- yield result.group(1), result.group(2) or None, None, result.group(3) or None\n-\n-\n-fmt = AQuoteFormatter()\n-\n-\n-class Neo4jDatabase(object):\n- \"\"\"Database implementation for Neo4j graph database.\n-\n- Provides methods for model operations to process ETL data for new and\n- existing nodes. 
When the class is initialized, it reuses the driver if it\n- has been used before.\n- \"\"\"\n- default_params = {\n- 'was_revision_of': cypher_escape(Uri(Prov, 'wasRevisionOf')),\n- 'was_derived_from': cypher_escape(Uri(Prov, 'wasDerivedFrom')),\n- 'had_primary_source': cypher_escape(Uri(Prov, 'hadPrimarySource')),\n- 'provided_by': cypher_escape(Uri(Pav, 'providedBy')),\n- 'ori_identifier': cypher_escape(Uri(Mapping, 'ori/identifier')),\n- }\n-\n- # Set driver on the class so all instances use the same driver\n- driver = GraphDatabase.driver(\n- NEO4J_URL, auth=(NEO4J_USER, NEO4J_PASSWORD,), encrypted=False,\n- )\n-\n- def __init__(self, serializer):\n- self.serializer = serializer\n- self.tx = None\n-\n- def query(self, query, **params):\n- \"\"\"Executes a query and returns the result\"\"\"\n- with self.driver.session() as session:\n- cursor = session.run(query, **params)\n- result = cursor.data()\n- return result\n-\n- @property\n- def session(self):\n- return self.driver.session()\n-\n- def transaction_query(self, query, **params):\n- \"\"\"Adds a query to be executed as a transaction. All queries called with\n- this method will be in the same transaction until `transaction_commit`\n- is called.\n- \"\"\"\n- if not self.tx:\n- self.tx = self.session.begin_transaction()\n-\n- self.tx.run(query, **params)\n-\n- def transaction_commit(self):\n- \"\"\"Commits all queries that are added by `transaction_query`.\"\"\"\n- if self.tx:\n- result = self.tx.commit()\n- self.tx = None # Make sure the tx is reset\n- return result\n-\n- def create_constraints(self):\n- \"\"\"Creates constraints on identifiers in Neo4j\"\"\"\n- self.session.run(\n- 'CREATE CONSTRAINT ON (x:Hot)'\n- 'ASSERT x.`{}` IS UNIQUE'.format(Uri(Mapping, 'ori/identifier'))\n- )\n-\n- self.session.run(\n- 'CREATE CONSTRAINT ON (x:Live)'\n- 'ASSERT x.`{}` IS UNIQUE'.format(Uri(Mapping, 'ori/sourceLocator'))\n- )\n-\n- def get_identifier(self, model_object, **kwargs):\n- \"\"\"Returns the ori identifier based on the specified keyword-argument.\n-\n- The ori identifier on a `Hot` node is queried by looking for the source\n- identifier on `Cold` nodes. Should return exactly one int or a\n- QueryResultError exception.\"\"\"\n- if len(kwargs) != 1:\n- raise TypeError('connect takes exactly 1 keyword-argument')\n-\n- filter_key, filter_value = kwargs.items()[0]\n-\n- label = self.serializer.label(model_object)\n- definition = model_object.definition(filter_key)\n-\n- params = {\n- 'labels': cypher_escape(label),\n- 'filter_key': cypher_escape(definition.absolute_uri())\n- }\n- params.update(self.default_params)\n-\n- clauses = [\n- u'MATCH (n :\u00ablabels\u00bb {\u00abfilter_key\u00bb: $filter_value})',\n- u'RETURN n.\u00abori_identifier\u00bb AS ori_identifier',\n- ]\n-\n- result = self.query(\n- fmt.format(u'\\n'.join(clauses), **params),\n- filter_value=filter_value\n- )\n-\n- if not result:\n- raise MissingProperty(\n- 'Does not exist: %s with %s=%s' % (model_object.verbose_name(),\n- filter_key, filter_value)\n- )\n-\n- if len(result) > 1:\n- raise QueryResultError(\n- 'The number of %s results is greater than one with %s=%s'\n- % (model_object.verbose_name(), filter_key, filter_value)\n- )\n-\n- return result[0]['ori_identifier']\n-\n- def replace(self, model_object):\n- \"\"\"Replaces or creates nodes based on the model object.\n-\n- Existing nodes are replaced by the deflated model object and new ones\n- are created when they do not exist. 
Three queries are run sequentially\n- until one of them yields a result.\n-\n- The first will add a new version if an older version exists on a node,\n- the second will add a new version when no older version exists, the\n- third will create new nodes if the nodes do not yet exist. If the third\n- query fails, an QueryResultError is raised.\n-\n- The first and second query will match the `Cold` node based on the\n- source_id.\n- \"\"\"\n- labels = self.serializer.label(model_object)\n-\n- params = {\n- 'labels': cypher_escape(labels),\n- 'had_primary_source': cypher_escape(Uri(Prov, 'hadPrimarySource')),\n- }\n- params.update(self.default_params)\n-\n- if not model_object.values.get('had_primary_source'):\n-\n- from ocd_backend.models.model import Individual\n- if isinstance(model_object, Individual):\n-\n- with Lock(labels):\n- clauses = [\n- u'MATCH (n :\u00ablabels\u00bb)',\n- u'RETURN n.\u00abori_identifier\u00bb AS ori_identifier',\n- ]\n-\n- cursor = self.session.run(\n- fmt.format(u'\\n'.join(clauses), **params),\n- )\n- result = cursor.data()\n-\n- if len(result) > 1:\n- raise QueryResultError(\n- 'The number of %s results is greater than one'\n- % model_object.verbose_name()\n- )\n-\n- elif len(result) < 1:\n- model_object.generate_ori_identifier()\n- props = self.serializer.deflate(model_object, props=True, rels=False)\n-\n- clauses = [\n- u'MERGE (n :\u00ablabels\u00bb)',\n- u'SET n += $props',\n- u'RETURN n.\u00abori_identifier\u00bb AS ori_identifier',\n- ]\n-\n- cursor = self.session.run(\n- fmt.format(u'\\n'.join(clauses), **params),\n- props=props,\n- )\n- cursor.summary()\n-\n- else:\n- try:\n- model_object.ori_identifier = result[0]['ori_identifier']\n- except Exception:\n- raise QueryResultError('No ori_identifier was returned')\n- else:\n- self._create_blank_node(model_object)\n- else:\n- with Lock(model_object.values.get('had_primary_source')):\n- # if ori_identifier is already known use that to identify instead\n- if model_object.values.get('ori_identifier'):\n- self._merge(model_object)\n- else:\n- clauses = [\n- u'MATCH (n :\u00ablabels\u00bb)',\n- u'WHERE $had_primary_source IN n.\u00abhad_primary_source\u00bb',\n- u'RETURN n.\u00abori_identifier\u00bb AS ori_identifier',\n- ]\n-\n- cursor = self.session.run(\n- fmt.format(u'\\n'.join(clauses), **params),\n- had_primary_source=model_object.had_primary_source,\n- )\n- result = cursor.data()\n-\n- if len(result) > 1:\n- # Todo don't fail yet until unique constraints are solved\n- # raise QueryResultError('The number of results is greater than one!')\n- pass\n-\n- try:\n- ori_identifier = result[0]['ori_identifier']\n- except Exception:\n- ori_identifier = None\n-\n- if ori_identifier:\n- model_object.ori_identifier = ori_identifier\n- self._merge(model_object)\n- else:\n- # if ori_identifier do merge otherwise create\n- self._create_node(model_object)\n-\n- # raise QueryEmptyResult('No ori_identifier was returned')\n-\n- def _create_node(self, model_object):\n- if not model_object.values.get('ori_identifier'):\n- model_object.generate_ori_identifier()\n-\n- labels = self.serializer.label(model_object)\n- props = self.serializer.deflate(model_object, props=True, rels=False)\n-\n- params = {\n- 'labels': cypher_escape(labels),\n- }\n- params.update(self.default_params)\n-\n- clauses = [\n- u'CREATE (n :\u00ablabels\u00bb {\u00abhad_primary_source\u00bb: [$had_primary_source]})',\n- u'SET n += $props',\n- u'RETURN n',\n- ]\n-\n- cursor = self.session.run(\n- fmt.format(u'\\n'.join(clauses), **params),\n- props=props,\n- 
had_primary_source=model_object.had_primary_source,\n- )\n- cursor.summary()\n-\n- def _create_blank_node(self, model_object):\n- if not model_object.values.get('ori_identifier'):\n- model_object.generate_ori_identifier()\n-\n- labels = self.serializer.label(model_object)\n- props = self.serializer.deflate(model_object, props=True, rels=False)\n-\n- params = {\n- 'labels': cypher_escape(labels),\n- }\n- params.update(self.default_params)\n-\n- clauses = [\n- u'CREATE (n :\u00ablabels\u00bb)',\n- u'SET n += $props',\n- u'RETURN n',\n- ]\n-\n- cursor = self.session.run(\n- fmt.format(u'\\n'.join(clauses), **params),\n- props=props,\n- )\n- cursor.summary()\n-\n- def _merge(self, model_object):\n- labels = self.serializer.label(model_object)\n- props = self.serializer.deflate(model_object, props=True, rels=False)\n-\n- # todo this quickfix needs to be refactored\n- del props[Uri(Prov, 'hadPrimarySource')]\n-\n- params = {\n- 'labels': cypher_escape(labels),\n- }\n- params.update(self.default_params)\n-\n- clauses = [\n- u'MERGE (n :\u00ablabels\u00bb {\u00abori_identifier\u00bb: $ori_identifier})',\n- u'SET n += $props',\n- u'SET(', # Only add had_primary_source to array if doesn't exist\n- u' CASE WHEN NOT $had_primary_source IN n.\u00abhad_primary_source\u00bb THEN n END',\n- u').\u00abhad_primary_source\u00bb = n.\u00abhad_primary_source\u00bb + [$had_primary_source]',\n- u'WITH n',\n- u'OPTIONAL MATCH (n)-->(m)', # Remove all directly related blank nodes\n- u'WHERE NOT EXISTS(m.\u00abhad_primary_source\u00bb)',\n- u'DETACH DELETE m',\n- u'WITH n',\n- u'OPTIONAL MATCH (n)-[r]->()', # Remove all outgoing relationships\n- u'DELETE r',\n- u'WITH n',\n- u'RETURN n',\n- ]\n-\n- cursor = self.session.run(\n- fmt.format(u'\\n'.join(clauses), **params),\n- had_primary_source=model_object.had_primary_source,\n- ori_identifier=model_object.ori_identifier,\n- props=props,\n- )\n- cursor.summary()\n-\n- def attach(self, this_object, that_object, rel_type):\n- \"\"\"Attaches this_object to that_object model.\n-\n- The query will match the `Cold` node based on the source_id of the\n- models. 
If available it will set `r1_props` on the relation between the\n- nodes.\n- \"\"\"\n- from .model import Model, Relationship\n-\n- r1_props = dict()\n- if isinstance(that_object, Relationship):\n- r1_props = that_object.rel\n- that_object = that_object.model\n-\n- if isinstance(r1_props, Model):\n- r1_props = r1_props.serializer.deflate(props=True, rels=True)\n-\n- this_label = self.serializer.label(this_object)\n- that_label = self.serializer.label(that_object)\n-\n- params = {\n- 'n2_labels': cypher_escape(this_label),\n- 'n3_labels': cypher_escape(that_label),\n- 'r1_labels': cypher_escape(rel_type),\n- }\n- params.update(self.default_params)\n-\n- clauses = [\n- u'MATCH (n2 :\u00abn2_labels\u00bb {\u00abori_identifier\u00bb: $ori_identifier1})',\n- u'MATCH (n3 :\u00abn3_labels\u00bb {\u00abori_identifier\u00bb: $ori_identifier2})',\n- u'MERGE (n2)-[r1 :\u00abr1_labels\u00bb]->(n3)',\n- u'SET r1 = $r1_props',\n- ]\n-\n- self.query(\n- fmt.format(u'\\n'.join(clauses), **params),\n- ori_identifier1=this_object.ori_identifier,\n- ori_identifier2=that_object.ori_identifier,\n- r1_props=r1_props\n- )\ndiff --git a/ocd_backend/models/definitions/__init__.py b/ocd_backend/models/definitions/__init__.py\n--- a/ocd_backend/models/definitions/__init__.py\n+++ b/ocd_backend/models/definitions/__init__.py\n@@ -30,7 +30,7 @@ class Meeting(Namespace):\n \n \n class Mapping(Namespace):\n- uri = 'https://argu.co/voc/mapping/'\n+ uri = 'https://openbesluitvorming.nl/voc/mapping/'\n prefix = 'mapping'\n \n \n@@ -104,6 +104,11 @@ class Dbo(Namespace):\n prefix = 'dbo'\n \n \n+class Vcard(Namespace):\n+ uri = 'http://www.w3.org/2006/vcard/ns#'\n+ prefix = 'vcard'\n+\n+\n # class Opaque(Namespace):\n # uri = 'https://argu.co/ns/opaque-model/'\n # prefix = 'opaque'\n@@ -111,5 +116,5 @@ class Dbo(Namespace):\n \n ALL = [\n Foaf, Ncal, Opengov, Org, Meeting, Mapping, Meta, Owl, Person,\n- Schema, Rdf, Rdfs, Dcterms, Skos, Bio, Bibframe, Prov, Ori\n+ Schema, Rdf, Rdfs, Dcterms, Skos, Bio, Bibframe, Prov, Ori, Vcard\n ]\ndiff --git a/ocd_backend/models/definitions/mapping.py b/ocd_backend/models/definitions/mapping.py\ndeleted file mode 100644\n--- a/ocd_backend/models/definitions/mapping.py\n+++ /dev/null\n@@ -1,66 +0,0 @@\n-\"\"\"The classes in this ontology are defined by Argu BV. More details, current\n-definitions and information can be found here:\n-https://argu.co/voc/mapping/\n-\n-The purpose of this ontology is to define identifiers that are used by data\n-suppliers. 
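For a concrete sense of such identifiers: the reworked Model.__init__ elsewhere in this change composes hadPrimarySource IRIs from a source, supplier, collection and slugified source id. A simplified sketch, reusing the example values from the inline comment in model.py and the renamed mapping namespace; the helper name is hypothetical and .lower() merely stands in for the real slugify():

# Hypothetical helper mirroring the '{}/{}/{}/{}'.format(...) call
# in Model.__init__; not part of the codebase.
MAPPING_NS = 'https://openbesluitvorming.nl/voc/mapping/'


def primary_source_iri(source, supplier, collection, source_id):
    # The real code slugifies source_id; .lower() stands in here.
    return '%s%s/%s/%s/%s' % (
        MAPPING_NS, source, supplier, collection, source_id.lower())


print(primary_source_iri('nl', 'ggm', 'vrsnummer', '6655476'))
# -> https://openbesluitvorming.nl/voc/mapping/nl/ggm/vrsnummer/6655476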
This way, our data contains references to the original data in the\n-spirit of linked open data.\n-\"\"\"\n-from ocd_backend.models.definitions import Mapping\n-from ocd_backend.models.model import Individual\n-\n-# currently not used\n-\n-# class OriIdentifier(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'ori/identifier'\n-#\n-#\n-# class RunIdentifier(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'ori/meta/runIdentifier'\n-#\n-#\n-# class MetadataIdentifier(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'ori/meta/metadataIdentifier'\n-#\n-#\n-# class IbabsIdentifier(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'ibabs/identifier'\n-#\n-#\n-# class NotubizIdentifier(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'notubiz/identifier'\n-#\n-#\n-# class CbsIdentifier(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'cbs/identifier'\n-#\n-#\n-# class AlmanakOrganizationName(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'almanak/organizationName'\n-#\n-#\n-# class GGMIdentifier(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'ggm/identifier'\n-#\n-#\n-# class GGMVrsNummer(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'ggm/vrsnummer'\n-#\n-#\n-# class GGMNummer(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'ggm/nummer'\n-#\n-#\n-# class GGMVolgnummer(Mapping, Individual):\n-# class Meta:\n-# verbose_name = 'ggm/volgnummer'\ndiff --git a/ocd_backend/models/definitions/meeting.py b/ocd_backend/models/definitions/meeting.py\n--- a/ocd_backend/models/definitions/meeting.py\n+++ b/ocd_backend/models/definitions/meeting.py\n@@ -10,9 +10,9 @@\n import opengov\n import schema\n from ocd_backend.models.definitions import Opengov, Schema, Meeting as MeetingNS\n-from ocd_backend.models.model import Individual\n-from ocd_backend.models.properties import StringProperty, IntegerProperty, \\\n+from ocd_backend.models.properties import StringProperty, URLProperty, IntegerProperty, \\\n Relation, OrderedRelation\n+from ocd_backend.models.misc import Uri\n \n \n class Meeting(MeetingNS, schema.Event):\n@@ -25,7 +25,7 @@ class Meeting(MeetingNS, schema.Event):\n attendee = Relation(Schema, 'attendee')\n audio = Relation(Schema, 'audio')\n description = StringProperty(Schema, 'description')\n- status = Relation(Schema, 'eventStatus')\n+ status = URLProperty(Schema, 'eventStatus')\n location = StringProperty(Schema, 'location')\n name = StringProperty(Schema, 'name', required=True)\n organization = Relation(Schema, 'organizer', required=True)\n@@ -59,50 +59,14 @@ class AgendaItem(MeetingNS, schema.Event):\n agenda = Relation(MeetingNS, 'agenda')\n \n \n-# Result Individuals\n-class ResultKept(MeetingNS, Individual):\n- \"\"\"When a proposal is kept for later processing\"\"\"\n- pass\n+ResultKept = Uri(MeetingNS, \"ResultKept\")\n+ResultPostponed = Uri(MeetingNS, \"ResultPostponed\")\n+ResultWithdrawn = Uri(MeetingNS, \"ResultWithdrawn\")\n+ResultExpired = Uri(MeetingNS, \"ResultExpired\")\n+ResultDiscussed = Uri(MeetingNS, \"ResultDiscussed\")\n+ResultPublished = Uri(MeetingNS, \"ResultPublished\")\n \n \n-class ResultPostponed(MeetingNS, Individual):\n- \"\"\"When a proposal is postponed to a later (unspecified) moment\"\"\"\n- pass\n-\n-\n-class ResultWithdrawn(MeetingNS, Individual):\n- \"\"\"When a proposal is withdrawn by its author\"\"\"\n- pass\n-\n-\n-class ResultExpired(MeetingNS, Individual):\n- \"\"\"When a proposal has been expired\"\"\"\n- pass\n-\n-\n-class ResultDiscussed(MeetingNS, Individual):\n- 
\"\"\"When a proposal has been discussed\"\"\"\n- pass\n-\n-\n-class ResultPublished(MeetingNS, Individual):\n- \"\"\"When a proposal has been published\"\"\"\n- pass\n-\n-\n-# EventStatusType Individuals\n-class EventCompleted(MeetingNS, Individual):\n- \"\"\"The event has taken place and has been completed\"\"\"\n- pass\n-\n-\n-class EventConfirmed(MeetingNS, Individual):\n- \"\"\"The event will take place but has not been\n- :class:`.schema.EventScheduled` yet\n- \"\"\"\n- pass\n-\n-\n-class EventUnconfirmed(MeetingNS, Individual):\n- \"\"\"The event is not :class:`EventConfirmed` or is inactive\"\"\"\n- pass\n+EventCompleted = Uri(MeetingNS, \"EventCompleted\")\n+EventConfirmed = Uri(MeetingNS, \"EventConfirmed\")\n+EventUnconfirmed = Uri(MeetingNS, \"EventUnconfirmed\")\ndiff --git a/ocd_backend/models/definitions/meta.py b/ocd_backend/models/definitions/meta.py\ndeleted file mode 100644\n--- a/ocd_backend/models/definitions/meta.py\n+++ /dev/null\n@@ -1,29 +0,0 @@\n-\"\"\"The classes in this ontology are defined by Argu BV. More details, current\n-definitions and information can be found here:\n-https://argu.co/ns/meta#\n-\n-The purpose of this ontology is to define metadata information that describes\n-ie. when the data was processed, what collection it belongs to rights apply to\n-the data.\n-\"\"\"\n-\n-import owl\n-from ocd_backend.models.definitions import Meta, Meeting\n-from ocd_backend.models.properties import StringProperty, DateTimeProperty\n-\n-\n-class Metadata(Meta, owl.Thing):\n- # todo needs to be formalized in a ontology\n- status = StringProperty(Meta, 'status')\n- processing_started = DateTimeProperty(Meta, 'processingStarted')\n- source_id = StringProperty(Meta, 'sourceId')\n- collection = StringProperty(Meta, 'collection')\n- rights = StringProperty(Meta, 'rights')\n-\n- skip_validation = True\n-\n-\n-class Run(Meta, owl.Thing):\n- run_identifier = StringProperty(Meeting, 'runIdentifier')\n-\n- skip_validation = True\ndiff --git a/ocd_backend/models/definitions/opengov.py b/ocd_backend/models/definitions/opengov.py\n--- a/ocd_backend/models/definitions/opengov.py\n+++ b/ocd_backend/models/definitions/opengov.py\n@@ -6,9 +6,9 @@\n import schema\n from ocd_backend.models.definitions import Opengov, Schema, Meeting, Dcterms, \\\n Ncal, Rdf, Rdfs, Skos, Bibframe\n-from ocd_backend.models.model import Individual\n from ocd_backend.models.properties import StringProperty, IntegerProperty, \\\n DateProperty, DateTimeProperty, ArrayProperty, Relation\n+from ocd_backend.models.misc import Uri\n \n \n class Motion(Opengov, owl.Thing):\n@@ -123,46 +123,13 @@ class Result(Opengov, owl.Thing):\n vote_event = Relation(Opengov, 'voteEvent')\n \n \n-# Result Individuals\n-class ResultFail(Opengov, Individual):\n- \"\"\"When a decision is made against a proposal\"\"\"\n- pass\n-\n-\n-class ResultPass(Opengov, Individual):\n- \"\"\"When a decision is made in favor of a proposal\"\"\"\n- pass\n-\n-\n-# VoteOption Individuals\n-class VoteOptionYes(Opengov, Individual):\n- \"\"\"When an individual votes in favor of a proposal\"\"\"\n- pass\n-\n-\n-class VoteOptionNo(Opengov, Individual):\n- \"\"\"When an individual votes against a proposal\"\"\"\n- pass\n+ResultFailed = Uri(Opengov, \"ResultFailed\")\n+ResultPassed = Uri(Opengov, \"ResultPassed\")\n \n \n-class VoteOptionAbstain(Opengov, Individual):\n- \"\"\"When an individual abstained from voting\"\"\"\n- pass\n-\n-\n-class VoteOptionAbsent(Opengov, Individual):\n- \"\"\"When an individual did not vote due to being absent\"\"\"\n- 
pass\n-\n-\n-class VoteOptionNotVoting(Opengov, Individual):\n- \"\"\"When an individual is not voting\"\"\"\n- pass\n-\n-\n-class VoteOptionPaired(Opengov, Individual):\n- \"\"\"When an individual entered a reciprocal agreement with another voter by\n- which the voter abstains if the other is unable to vote. It may not be\n- known which two members form a pair.\n- \"\"\"\n- pass\n+VoteOptionYes = Uri(Opengov, \"VoteOptionYes\")\n+VoteOptionNo = Uri(Opengov, \"VoteOptionNo\")\n+VoteOptionAbstain = Uri(Opengov, \"VoteOptionAbstain\")\n+VoteOptionAbsent = Uri(Opengov, \"VoteOptionAbsent\")\n+VoteOptionNotVoting = Uri(Opengov, \"VoteOptionNotVoting\")\n+VoteOptionPaired = Uri(Opengov, \"VoteOptionPaired\")\ndiff --git a/ocd_backend/models/definitions/org.py b/ocd_backend/models/definitions/org.py\n--- a/ocd_backend/models/definitions/org.py\n+++ b/ocd_backend/models/definitions/org.py\n@@ -6,7 +6,7 @@\n import owl\n from ocd_backend.models.definitions import Org, Skos, Opengov, Dcterms, \\\n Schema, Rdf, Meta\n-from ocd_backend.models.properties import StringProperty, DateTimeProperty, \\\n+from ocd_backend.models.properties import StringProperty, URLProperty, DateTimeProperty, \\\n Relation, OrderedRelation\n \n \n@@ -30,7 +30,7 @@ class Organization(Org, foaf.Agent):\n classification = StringProperty(Org, 'classification')\n subOrganizationOf = OrderedRelation(Org, 'subOrganizationOf')\n other_names = StringProperty(Opengov, 'otherName')\n- links = StringProperty(Rdf, 'seeAlso')\n+ links = URLProperty(Rdf, 'seeAlso')\n dissolution_date = StringProperty(Schema, 'dissolutionDate')\n founding_date = StringProperty(Schema, 'foundingDate')\n image = StringProperty(Schema, 'image')\ndiff --git a/ocd_backend/models/definitions/owl.py b/ocd_backend/models/definitions/owl.py\n--- a/ocd_backend/models/definitions/owl.py\n+++ b/ocd_backend/models/definitions/owl.py\n@@ -2,7 +2,7 @@\n http://www.w3.org/2002/07/owl#\n \"\"\"\n \n-from ocd_backend.models.definitions import Meta, Dcterms, Meeting, Ncal, Owl\n+from ocd_backend.models.definitions import Meta, Dcterms, Meeting, Ncal, Owl, Vcard\n from ocd_backend.models.model import Model\n from ocd_backend.models.properties import Relation, StringProperty, ArrayProperty\n \n@@ -10,6 +10,10 @@\n class Thing(Owl, Model):\n classification = ArrayProperty(Ncal, 'categories') # todo fix with popolo\n meta = Relation(Meta, 'meta')\n+ canonical_iri = StringProperty(Meta, 'canonical_iri')\n+ canonical_id = StringProperty(Meta, 'canonical_id')\n+ # has_organization_name is used to set the municipality or province ID on every item (see issue #141)\n+ has_organization_name = Relation(Vcard, 'hasOrganizationName')\n \n \n class Identifier(Thing):\ndiff --git a/ocd_backend/models/definitions/person.py b/ocd_backend/models/definitions/person.py\n--- a/ocd_backend/models/definitions/person.py\n+++ b/ocd_backend/models/definitions/person.py\n@@ -5,7 +5,7 @@\n import foaf\n from ocd_backend.models.definitions import Opengov, Schema, Foaf, Rdfs, \\\n Dcterms, Bio, Person as PersonNS\n-from ocd_backend.models.properties import StringProperty, DateTimeProperty, \\\n+from ocd_backend.models.properties import StringProperty, URLProperty, DateTimeProperty, \\\n Relation\n \n \n@@ -20,12 +20,9 @@ class Person(PersonNS, foaf.Agent):\n national_identity = StringProperty(Opengov, 'nationalIdentity')\n summary = StringProperty(Bio, 'olb')\n other_names = StringProperty(Opengov, 'otherName')\n- links = StringProperty(Rdfs, 'seeAlso')\n+ links = URLProperty(Rdfs, 'seeAlso')\n birth_date 
= DateTimeProperty(Schema, 'birthDate')\n death_date = DateTimeProperty(Schema, 'deathDate')\n email = StringProperty(Schema, 'email')\n image = Relation(Schema, 'image')\n phone = StringProperty(Schema, 'telephone')\n-\n- # def verbose_name(self):\n- # self.name = '%s %s' % (self.given_name, self.family_name)\ndiff --git a/ocd_backend/models/definitions/schema.py b/ocd_backend/models/definitions/schema.py\n--- a/ocd_backend/models/definitions/schema.py\n+++ b/ocd_backend/models/definitions/schema.py\n@@ -3,15 +3,15 @@\n \"\"\"\n \n import owl\n-from ocd_backend.models.definitions import Schema, Opengov, Dbo\n-from ocd_backend.models.model import Individual\n-from ocd_backend.models.properties import StringProperty, IntegerProperty, \\\n+from ocd_backend.models.definitions import Schema, Opengov, Dbo, Dcterms\n+from ocd_backend.models.properties import StringProperty, URLProperty, IntegerProperty, \\\n DateTimeProperty, DateProperty, ArrayProperty, Relation\n+from ocd_backend.models.misc import Uri\n \n \n class MediaObject(Schema, owl.Thing):\n name = StringProperty(Schema, 'name')\n- url = StringProperty(Schema, 'contentUrl')\n+ url = URLProperty(Schema, 'contentUrl')\n size_in_bytes = IntegerProperty(Schema, 'fileSize')\n file_type = StringProperty(Schema, 'fileType')\n additional_type = StringProperty(Schema, 'additionalType')\n@@ -22,14 +22,15 @@ class MediaObject(Schema, owl.Thing):\n embed_url = StringProperty(Schema, 'embedUrl')\n file_name = StringProperty(Dbo, 'filename')\n date_modified = DateTimeProperty(Schema, 'dateModified')\n- original_url = StringProperty(Schema, 'isBasedOn')\n+ original_url = URLProperty(Schema, 'isBasedOn')\n text = ArrayProperty(Schema, 'text')\n+ isReferencedBy = Relation(Dcterms, 'isReferencedBy')\n \n enricher_task = 'file_to_text'\n \n \n class AudioObject(Schema, owl.Thing):\n- contentUrl = StringProperty(Schema, 'contentUrl')\n+ contentUrl = URLProperty(Schema, 'contentUrl')\n \n \n class CreativeWork(Schema, owl.Thing):\n@@ -45,9 +46,10 @@ class Event(Schema, owl.Thing):\n end_date = DateTimeProperty(Schema, 'endDate')\n start_date = DateTimeProperty(Schema, 'startDate', required=True)\n \n+\n class ImageObject(Schema, owl.Thing):\n- content_url = StringProperty(Schema, 'contentUrl')\n- is_based_on = StringProperty(Schema, 'isBasedOn')\n+ content_url = URLProperty(Schema, 'contentUrl')\n+ is_based_on = URLProperty(Schema, 'isBasedOn')\n file_format = StringProperty(Schema, 'fileFormat')\n content_size = StringProperty(Schema, 'contentSize')\n encoding_format = StringProperty(Schema, 'encodingFormat')\n@@ -68,21 +70,10 @@ class Place(Schema, owl.Thing):\n \n \n class VideoObject(Schema, owl.Thing):\n- content_url = StringProperty(Schema, 'contentUrl')\n-\n-\n-# EventStatusType Individuals\n-class EventCancelled(Schema, Individual):\n- pass\n-\n+ content_url = URLProperty(Schema, 'contentUrl')\n \n-class EventPostponed(Schema, Individual):\n- pass\n \n-\n-class EventRescheduled(Schema, Individual):\n- pass\n-\n-\n-class EventScheduled(Schema, Individual):\n- pass\n+EventScheduled = Uri(Schema, \"EventScheduled\")\n+EventRescheduled = Uri(Schema, \"EventRescheduled\")\n+EventCancelled = Uri(Schema, \"EventCancelled\")\n+EventPostponed = Uri(Schema, \"EventPostponed\")\ndiff --git a/ocd_backend/models/lock.py b/ocd_backend/models/lock.py\n--- a/ocd_backend/models/lock.py\n+++ b/ocd_backend/models/lock.py\n@@ -7,6 +7,7 @@\n redis = celery_app.backend.client\n lock_prefix = 'lock_'\n lock_expiry_seconds = 10\n+lock_max_seconds = 6000\n \n \n class 
AcquireTimeoutException(Exception):\n@@ -27,15 +28,22 @@ def __exit__(self, exc_type, exc_val, exc_tb):\n \n def acquire(self, timeout=lock_expiry_seconds):\n self.random_value = self.random_generator()\n+ before_lock_value = redis.get(self.lock_identifier)\n \n- end = time.time() + timeout\n- while time.time() < end:\n+ wait_delta = timeout\n+ while time.time() < time.time() + wait_delta:\n if redis.set(self.lock_identifier, self.random_value, ex=lock_expiry_seconds, nx=True):\n return True\n else:\n+ current_lock_value = redis.get(self.lock_identifier)\n+ if before_lock_value and before_lock_value != current_lock_value:\n+ if wait_delta > lock_max_seconds:\n+ break\n+ wait_delta += timeout\n+ before_lock_value = current_lock_value\n time.sleep(random.uniform(0.01, 0.1))\n \n- raise AcquireTimeoutException('Lock acquire failed, waited for %s seconds', lock_expiry_seconds)\n+ raise AcquireTimeoutException('Lock acquire failed, waited for %s seconds' % wait_delta)\n \n def release(self):\n lock_value = redis.get(self.lock_identifier)\ndiff --git a/ocd_backend/models/model.py b/ocd_backend/models/model.py\n--- a/ocd_backend/models/model.py\n+++ b/ocd_backend/models/model.py\n@@ -1,26 +1,23 @@\n # -*- coding: utf-8 -*-\n \n-import re\n-\n-from ocd_backend import celery_app\n-from ocd_backend.models.database import Neo4jDatabase\n-from ocd_backend.models.definitions import Mapping, Prov, Ori\n-from ocd_backend.models.exceptions import MissingProperty, ValidationError, \\\n- QueryResultError\n-from ocd_backend.models.properties import PropertyBase, Property, \\\n- StringProperty, IntegerProperty, Relation\n-from ocd_backend.models.serializers import Neo4jSerializer\n+from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n+\n+from ocd_backend.models.definitions import Mapping, Prov, Ori, Meta\n+from ocd_backend.models.exceptions import MissingProperty\n+from ocd_backend.models.properties import PropertyBase, Property, StringProperty, Relation\n+from ocd_backend.models.serializers import PostgresSerializer\n from ocd_backend.models.misc import Namespace, Uri\n-from ocd_backend.utils.misc import iterate, doc_type\n+from ocd_backend.utils.misc import iterate\n from ocd_backend.log import get_source_logger\n from ocd_backend.utils.misc import slugify\n+from ocd_backend.models.postgres_database import PostgresDatabase\n \n logger = get_source_logger('model')\n \n \n class ModelMetaclass(type):\n- database_class = Neo4jDatabase\n- serializer_class = Neo4jSerializer\n+ database_class = PostgresDatabase\n+ serializer_class = PostgresSerializer\n \n def __new__(mcs, name, bases, attrs):\n # Collect fields from current class.\n@@ -52,10 +49,6 @@ def __new__(mcs, name, bases, attrs):\n class Model(object):\n __metaclass__ = ModelMetaclass\n \n- # Top-level definitions\n- ori_identifier = StringProperty(Mapping, 'ori/identifier')\n- had_primary_source = StringProperty(Prov, 'hadPrimarySource')\n-\n def absolute_uri(self):\n return '%s%s' % (self.uri, self.verbose_name())\n \n@@ -98,31 +91,33 @@ def inflate(cls, **deflated_props):\n \n return instance\n \n- def __init__(self, source_id=False, organization=None, source=None, source_id_key=None):\n+ def __init__(self, source_id=None, source=None, supplier=None, collection=None, merge_into=None):\n # Set defaults\n- #self.uri = None\n- #self.prefix = None\n self.skip_validation = None\n- # self.verbose_name = None\n self.values = dict()\n \n+ if merge_into:\n+ if not isinstance(merge_into, tuple) or len(merge_into) != 3:\n+ raise 
diff --git a/ocd_backend/models/model.py b/ocd_backend/models/model.py\n--- a/ocd_backend/models/model.py\n+++ b/ocd_backend/models/model.py\n@@ -1,26 +1,23 @@\n # -*- coding: utf-8 -*-\n \n-import re\n-\n-from ocd_backend import celery_app\n-from ocd_backend.models.database import Neo4jDatabase\n-from ocd_backend.models.definitions import Mapping, Prov, Ori\n-from ocd_backend.models.exceptions import MissingProperty, ValidationError, \\\n-    QueryResultError\n-from ocd_backend.models.properties import PropertyBase, Property, \\\n-    StringProperty, IntegerProperty, Relation\n-from ocd_backend.models.serializers import Neo4jSerializer\n+from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n+\n+from ocd_backend.models.definitions import Mapping, Prov, Ori, Meta\n+from ocd_backend.models.exceptions import MissingProperty\n+from ocd_backend.models.properties import PropertyBase, Property, StringProperty, Relation\n+from ocd_backend.models.serializers import PostgresSerializer\n from ocd_backend.models.misc import Namespace, Uri\n-from ocd_backend.utils.misc import iterate, doc_type\n+from ocd_backend.utils.misc import iterate\n from ocd_backend.log import get_source_logger\n from ocd_backend.utils.misc import slugify\n+from ocd_backend.models.postgres_database import PostgresDatabase\n \n logger = get_source_logger('model')\n \n \n class ModelMetaclass(type):\n-    database_class = Neo4jDatabase\n-    serializer_class = Neo4jSerializer\n+    database_class = PostgresDatabase\n+    serializer_class = PostgresSerializer\n \n     def __new__(mcs, name, bases, attrs):\n         # Collect fields from current class.\n@@ -52,10 +49,6 @@ def __new__(mcs, name, bases, attrs):\n class Model(object):\n     __metaclass__ = ModelMetaclass\n \n-    # Top-level definitions\n-    ori_identifier = StringProperty(Mapping, 'ori/identifier')\n-    had_primary_source = StringProperty(Prov, 'hadPrimarySource')\n-\n     def absolute_uri(self):\n         return '%s%s' % (self.uri, self.verbose_name())\n \n@@ -98,31 +91,33 @@ def inflate(cls, **deflated_props):\n \n         return instance\n \n-    def __init__(self, source_id=False, organization=None, source=None, source_id_key=None):\n+    def __init__(self, source_id=None, source=None, supplier=None, collection=None, merge_into=None):\n         # Set defaults\n-        #self.uri = None\n-        #self.prefix = None\n         self.skip_validation = None\n-        # self.verbose_name = None\n         self.values = dict()\n \n+        if merge_into:\n+            if not isinstance(merge_into, tuple) or len(merge_into) != 3:\n+                raise ValueError('merge_into requires a tuple with 3 elements: (predicate, column, value)')\n+            self.merge_into = merge_into\n+        else:\n+            self.merge_into = None\n+\n         # https://argu.co/voc/mapping////\n         # i.e. https://argu.co/voc/mapping/nl/ggm/vrsnummer/6655476\n-        if source_id is not False:\n-            assert source_id\n-            assert organization\n+        if source_id:\n             assert source\n-            assert source_id_key\n+            assert supplier\n+            assert collection\n             self.had_primary_source = Uri(\n                 Mapping,\n                 '{}/{}/{}/{}'.format(\n-                    organization,\n                     source,\n-                    source_id_key,\n+                    supplier,\n+                    collection,\n                     slugify(source_id)\n                 )\n             )\n-        self._source = source\n \n     def __getattr__(self, item):\n         try:\n@@ -163,16 +158,12 @@ def __repr__(self):\n     def get_ori_identifier(self):\n         if not self.values.get('ori_identifier'):\n             try:\n-                self.ori_identifier = self.db.get_identifier(\n-                    self,\n-                    had_primary_source=self.had_primary_source,\n-                )\n-            except AttributeError:\n-                raise\n-            except MissingProperty:\n-                raise MissingProperty('OriIdentifier is not present, has the '\n-                                      'model been saved?')\n-        return self.ori_identifier\n+                self.ori_identifier = self.db.get_ori_identifier(iri=self.had_primary_source)\n+                return self.ori_identifier\n+            except:\n+                raise AttributeError('Ori Identifier is not present, has the model been saved?')\n+        else:\n+            return self.ori_identifier\n \n     def get_short_identifier(self):\n         ori_identifier = self.get_ori_identifier()\n@@ -180,17 +171,10 @@ def get_short_identifier(self):\n         assert len(identifier) > 0\n         return identifier\n \n-    def generate_ori_identifier(self):\n-        self.ori_identifier = Uri(Ori, celery_app.backend.increment(\"ori_identifier_autoincrement\"))\n-        return self.ori_identifier\n-\n-    def properties(self, props=True, rels=True, parent=False):\n+    def properties(self, props=True, rels=True):\n         \"\"\" Returns namespaced properties with their inflated values \"\"\"\n         props_list = list()\n         for name, prop in iterate({k: v for k, v in self.values.items() if k[0:1] != '_'}):\n-            if not parent and name == 'parent':\n-                continue\n-\n             definition = self.definition(name)\n             if not definition:\n                 continue\n@@ -202,49 +186,51 @@ def properties(self, props=True, rels=True):\n \n     saving_flag = False\n \n+    def traverse(self):\n+        \"\"\"Returns all associated models that have been attached to this model as properties\"\"\"\n+        rels_list = []\n+\n+        def inner(model):\n+            # Prevent circular recursion\n+            if model in rels_list:\n+                return\n+\n+            rels_list.append(model)\n+\n+            for _, prop in iterate(model.values.items()):\n+                if isinstance(prop, Model) or isinstance(prop, Relationship):\n+                    inner(prop)\n+\n+        inner(self)\n+        return rels_list\n+\n     def save(self):\n         if self.saving_flag:\n             return\n         self.saving_flag = True\n \n-        try:\n-            self.db.replace(self)  # pylint: disable=no-member\n+        if self.merge_into:\n+            self._merge(*self.merge_into)\n \n-            # Recursive save\n-            for rel_type, value in self.properties(rels=True, props=False, parent=True):\n+        try:\n+            self.db.save(self)  # pylint: disable=no-member\n+            # Recursive saving of related models\n+            for rel_type, value in self.properties(rels=True, props=False):\n                 if isinstance(value, Model):\n-                    # Todo don't do parent setting for now, until first needed\n-                    # Self-reference via parent attribute if not done explicitly\n-                    # if 'parent' not in value:\n-                    #     value.parent = self\n                     value.save()\n-\n-                    # End the recursive loop when self-referencing\n-                    if self != value:\n-                        self.db.attach(self, value, rel_type)\n         except:\n             # Re-raise everything\n             raise\n         finally:\n             self.saving_flag = False\n \n-    def merge(self, 
**kwargs):\n-        \"\"\"Takes one keyword-argument to filter, and set an ori_identifier.\n-\n-        Use this method to try and merge the current node with an existing node\n-        by ori_identifier.\n-\n-        For example:\n-        `organization.connect(name='some_name')`\n-        \"\"\"\n+    def _merge(self, predicate, column, value):\n+        \"\"\"Tries to set the ORI identifier of an existing Resource on the model by looking up a Property\n+        with the given predicate and value in the specified column.\"\"\"\n         try:\n-            self.ori_identifier = self.db.get_identifier(self, **kwargs)\n-        except MissingProperty, e:\n-            logger.warning(\"Could not connect nodes. %s\", e)\n-\n-\n-class Individual(Model):\n-    __metaclass__ = ModelMetaclass\n+            self.ori_identifier = self.db.get_mergeable_resource_identifier(self, predicate, column, value)\n+        except (NoResultFound, MultipleResultsFound, ValueError), e:\n+            logger.warning(\"Unable to merge: %s\", e)\n \n \n class Relationship(object):
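The `merge_into` contract above is easy to misread, so a hedged usage sketch follows. The model, source keys and values are invented; only the tuple shape, the constructor keywords and the `save()`-time `_merge()` call come from this patch:

from ocd_backend.models import Organization

# merge_into=(predicate, column, value): before saving, _merge() looks up an
# existing Resource whose Property with this predicate holds this value in
# the given column, and adopts its ORI identifier instead of minting one.
committee = Organization(
    '42',                               # source_id (invented)
    source='example',                   # invented source key
    supplier='ibabs',
    collection='committee',
    merge_into=('name', 'prop_string', 'Gemeenteraad'),
)
committee.name = 'Gemeenteraad'
committee.save()  # on NoResultFound/MultipleResultsFound a warning is logged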
diff --git a/ocd_backend/models/postgres_database.py b/ocd_backend/models/postgres_database.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/models/postgres_database.py\n@@ -0,0 +1,216 @@\n+import uuid\n+\n+from sqlalchemy import create_engine, Sequence\n+from sqlalchemy.orm import sessionmaker\n+from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n+\n+from ocd_backend import settings\n+from ocd_backend.models.postgres_models import Source, Resource, Property\n+from ocd_backend.models.definitions import Ori\n+from ocd_backend.models.properties import StringProperty, URLProperty, IntegerProperty, DateProperty, \\\n+    DateTimeProperty, ArrayProperty, Relation, OrderedRelation\n+from ocd_backend.models.misc import Uri\n+\n+\n+class PostgresDatabase(object):\n+\n+    def __init__(self, serializer):\n+        self.serializer = serializer\n+        self.connection_string = 'postgresql://%s:%s@%s/%s' % (\n+            settings.POSTGRES_USERNAME,\n+            settings.POSTGRES_PASSWORD,\n+            settings.POSTGRES_HOST,\n+            settings.POSTGRES_DATABASE)\n+        self.engine = create_engine(self.connection_string)\n+        self.Session = sessionmaker(bind=self.engine)\n+\n+    def get_ori_identifier(self, iri):\n+        \"\"\"\n+        Retrieves a Resource-based ORI identifier from the database. If no corresponding Resource exists,\n+        a new one is created.\n+        \"\"\"\n+\n+        session = self.Session()\n+        try:\n+            resource = session.query(Resource).join(Source).filter(Source.iri == iri).one()\n+            return Uri(Ori, resource.ori_id)\n+        except MultipleResultsFound:\n+            raise MultipleResultsFound('Multiple resources found for IRI %s' % iri)\n+        except NoResultFound:\n+            return self.generate_ori_identifier(iri=iri)\n+        finally:\n+            session.close()\n+\n+    def generate_ori_identifier(self, iri):\n+        \"\"\"\n+        Generates a Resource with an ORI identifier and adds the IRI as a Source if it does not already exist.\n+        \"\"\"\n+\n+        session = self.Session()\n+        new_id = self.engine.execute(Sequence('ori_id_seq'))\n+        new_identifier = Uri(Ori, new_id)\n+\n+        try:\n+            # If the resource already exists, create the source as a child of the resource\n+            resource = session.query(Source).filter(Source.iri == iri).one().resource\n+            resource.sources.append(Source(iri=iri))\n+            session.commit()\n+        except NoResultFound:\n+            # If the resource does not exist, create resource and source together\n+            resource = Resource(ori_id=new_id, iri=new_identifier, sources=[Source(iri=iri)])\n+            session.add(resource)\n+            session.commit()\n+        finally:\n+            session.close()\n+\n+        return new_identifier\n+\n+    def get_mergeable_resource_identifier(self, model_object, predicate, column, value):\n+        \"\"\"\n+        Queries the database to find the ORI identifier of the Resource linked to the Property with the given\n+        predicate and value in the specified column.\n+        \"\"\"\n+\n+        definition = model_object.definition(predicate)\n+\n+        session = self.Session()\n+        try:\n+            query_result = session.query(Property).filter(Property.predicate == definition.absolute_uri())\n+            if column == 'prop_resource':\n+                query_result = query_result.filter(Property.prop_resource == value)\n+            elif column == 'prop_string':\n+                query_result = query_result.filter(Property.prop_string == value)\n+            elif column == 'prop_datetime':\n+                query_result = query_result.filter(Property.prop_datetime == value)\n+            elif column == 'prop_integer':\n+                query_result = query_result.filter(Property.prop_integer == value)\n+            elif column == 'prop_url':\n+                query_result = query_result.filter(Property.prop_url == value)\n+            else:\n+                raise ValueError('Invalid column type \"%s\" specified for merge_into' % column)\n+            resource_property = query_result.one()\n+            return resource_property.resource.iri\n+        except MultipleResultsFound:\n+            raise MultipleResultsFound('Multiple resources found for predicate \"%s\" with value \"%s\" in column \"%s\"' %\n+                                       (predicate, value, column))\n+        except NoResultFound:\n+            raise NoResultFound('No resource found for predicate \"%s\" with value \"%s\" in column \"%s\"' %\n+                                (predicate, value, column))\n+        finally:\n+            session.close()\n+\n+    def save(self, model_object):\n+        if not model_object.had_primary_source:\n+            # If the item is an Individual, like EventConfirmed, we \"save\" it by setting an ORI identifier\n+            iri = self.serializer.label(model_object)\n+            if not model_object.values.get('ori_identifier'):\n+                model_object.ori_identifier = self.get_ori_identifier(iri=iri)\n+        else:\n+            if not model_object.values.get('ori_identifier'):\n+                model_object.ori_identifier = self.get_ori_identifier(iri=model_object.had_primary_source)\n+\n+        # Handle canonical IRI or ID\n+        if model_object.values.get('canonical_iri'):\n+            self.update_source(model_object, iri=True)\n+            del model_object.values['canonical_iri']\n+        elif model_object.values.get('canonical_id'):\n+            self.update_source(model_object, id=True)\n+            del 
model_object.values['canonical_id']\n+\n+ serialized_properties = self.serializer.deflate(model_object, props=True, rels=True)\n+\n+ session = self.Session()\n+ resource = session.query(Resource).filter(Resource.ori_id == model_object.ori_identifier.partition(Ori.uri)[2]).one()\n+\n+ # Delete properties that are about to be updated\n+ predicates = [predicate for predicate, _ in serialized_properties.iteritems()]\n+ session.query(Property).filter(Property.resource_id == resource.ori_id,\n+ Property.predicate.in_(predicates)\n+ ).delete(synchronize_session='fetch')\n+ session.commit()\n+\n+ # Save new properties\n+ for predicate, value_and_property_type in serialized_properties.iteritems():\n+ if isinstance(value_and_property_type[0], list):\n+ # Create each item as a separate Property with the same predicate, and save the order to\n+ # the `order` column\n+ for order, item in enumerate(value_and_property_type[0], start=1):\n+ new_property = (Property(id=uuid.uuid4(), predicate=predicate, order=order))\n+ setattr(new_property, self.map_column_type((item, value_and_property_type[1])), item)\n+ resource.properties.append(new_property)\n+ else:\n+ new_property = (Property(id=uuid.uuid4(), predicate=predicate))\n+ setattr(new_property, self.map_column_type(value_and_property_type), value_and_property_type[0])\n+ resource.properties.append(new_property)\n+\n+ session.commit()\n+ session.close()\n+\n+ @staticmethod\n+ def map_column_type(value_and_property_type):\n+ \"\"\"Maps the property type to a column.\"\"\"\n+ value = value_and_property_type[0]\n+ property_type = value_and_property_type[1]\n+\n+ if property_type == StringProperty:\n+ return 'prop_string'\n+ if property_type is URLProperty:\n+ return 'prop_url'\n+ elif property_type is IntegerProperty:\n+ return 'prop_integer'\n+ elif property_type in (DateProperty, DateTimeProperty):\n+ return 'prop_datetime'\n+ elif property_type in (ArrayProperty, Relation, OrderedRelation):\n+ try:\n+ int(value)\n+ return 'prop_resource'\n+ except (ValueError, TypeError):\n+ return 'prop_string'\n+ else:\n+ raise ValueError('Unable to map property of type \"%s\" to a column.' % property_type)\n+\n+ def update_source(self, model_object, iri=False, id=False):\n+ \"\"\"Updates the canonical IRI or ID field of the Source of the corresponding model object. 
One Source can have\n+        multiple different canonical IRI/ID entries.\"\"\"\n+\n+        if iri:\n+            canonical_field = 'canonical_iri'\n+        elif id:\n+            canonical_field = 'canonical_id'\n+        else:\n+            raise ValueError('update_source must be called with either iri or id as True')\n+\n+        session = self.Session()\n+        resource = session.query(Resource).filter(Resource.ori_id == model_object.get_short_identifier()).one()\n+\n+        try:\n+            # First check if there is a Source record with an empty canonical IRI/ID field, and if so fill that record\n+            source = session.query(Source).filter(Source.resource_ori_id == resource.ori_id,\n+                                                  Source.canonical_iri == None,\n+                                                  Source.canonical_id == None).one()\n+            setattr(source, canonical_field, getattr(model_object, canonical_field))\n+        except NoResultFound:\n+            try:\n+                # If no empty record exists, check if one already exists with the given source IRI and canonical IRI/ID\n+                if iri:\n+                    source = session.query(Source).filter(Source.resource == resource,\n+                                                          Source.canonical_iri == model_object.canonical_iri).one()\n+                elif id:\n+                    source = session.query(Source).filter(Source.resource == resource,\n+                                                          Source.canonical_id == model_object.canonical_id).one()\n+                # At this point it's not really necessary to update the field again, but it's here in case\n+                # more fields are added later\n+                setattr(source, canonical_field, getattr(model_object, canonical_field))\n+            except NoResultFound:\n+                # If no Source and canonical IRI/ID combination exists for the given source IRI, create it\n+                source = Source(resource=resource,\n+                                iri=model_object.had_primary_source)\n+                setattr(source, canonical_field, getattr(model_object, canonical_field))\n+                session.add(source)\n+            except Exception:\n+                raise\n+        except Exception:\n+            raise\n+\n+        session.commit()\n+        session.close()\ndiff --git a/ocd_backend/models/postgres_models.py b/ocd_backend/models/postgres_models.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/models/postgres_models.py\n@@ -0,0 +1,56 @@\n+from sqlalchemy import Column, Sequence, String, ForeignKey, DateTime, SmallInteger, BigInteger, func, CheckConstraint\n+from sqlalchemy.ext.declarative import declarative_base\n+from sqlalchemy.orm import relationship\n+\n+from sqlalchemy_utils.types import UUIDType\n+\n+\n+Base = declarative_base()\n+\n+\n+class Source(Base):\n+    __tablename__ = 'source'\n+\n+    id = Column(BigInteger, Sequence('source_id_seq'), primary_key=True)\n+    iri = Column(String)\n+    resource_ori_id = Column(BigInteger, ForeignKey(\"resource.ori_id\"), nullable=False)\n+    canonical_iri = Column(String)\n+    canonical_id = Column(String)\n+    used_file = Column(String)\n+    created_at = Column(DateTime, default=func.now())\n+    updated_at = Column(DateTime, default=func.now())\n+\n+    resource = relationship(\"Resource\", back_populates=\"sources\")\n+\n+\n+class Resource(Base):\n+    __tablename__ = 'resource'\n+\n+    ori_id = Column(BigInteger, Sequence('ori_id_seq'), primary_key=True)\n+    iri = Column(String)\n+\n+    sources = relationship(\"Source\", back_populates=\"resource\")\n+    properties = relationship(\"Property\", back_populates=\"resource\", foreign_keys=\"Property.resource_id\")\n+\n+\n+class Property(Base):\n+    __tablename__ = 'property'\n+    __table_args__ = (\n+        CheckConstraint('NOT(prop_resource IS NULL AND '\n+                        'prop_string IS NULL AND '\n+                        'prop_datetime IS NULL AND '\n+                        'prop_integer IS NULL AND '\n+                        'prop_url IS NULL)'),\n+    )\n+\n+    id = Column(UUIDType(), primary_key=True)\n+    resource_id = Column(BigInteger, ForeignKey(\"resource.ori_id\"), nullable=False)\n+    predicate = Column(String, nullable=False)\n+    order = Column(SmallInteger, nullable=True)\n+    prop_resource = Column(BigInteger, ForeignKey(\"resource.ori_id\"), nullable=True)\n+    prop_string = Column(String, nullable=True)\n+    prop_datetime = Column(DateTime, nullable=True)\n+    prop_integer = Column(BigInteger, nullable=True)\n+    prop_url = Column(String, nullable=True)\n+\n+    resource = relationship(\"Resource\", back_populates=\"properties\", foreign_keys=resource_id)
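To make the three tables above concrete, here is a small standalone sketch that creates the schema and stores one Resource the way `PostgresDatabase.save()` does. The connection URL, IRIs and values are placeholders:

import uuid

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from ocd_backend.models.postgres_models import Base, Source, Resource, Property

engine = create_engine('postgresql://user:password@localhost/ori_test')
Base.metadata.create_all(engine)  # issues the CREATE TABLE statements

session = sessionmaker(bind=engine)()
resource = Resource(ori_id=1,
                    iri='https://id.example.org/1',  # placeholder ORI IRI
                    sources=[Source(iri='https://argu.co/voc/mapping/example')])
resource.properties.append(
    Property(id=uuid.uuid4(), predicate='http://schema.org/name',
             prop_string='Gemeenteraad'))
session.add(resource)
session.commit()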
diff --git a/ocd_backend/models/properties.py b/ocd_backend/models/properties.py\n--- a/ocd_backend/models/properties.py\n+++ b/ocd_backend/models/properties.py\n@@ -1,8 +1,9 @@\n from .misc import Namespace\n from ocd_backend.utils.misc import str_to_datetime\n \n+\n class PropertyBase(object):\n-    \"\"\"The base propery all properties and relations should inherit from.\"\"\"\n+    \"\"\"The base property all properties and relations should inherit from.\"\"\"\n \n     def __init__(self, ns, name, required=False):\n         assert issubclass(ns, Namespace)\n@@ -51,6 +52,11 @@ def sanitize(value):\n         return unicode(value).strip()\n \n \n+class URLProperty(StringProperty):\n+    \"\"\"A property which defines a URL.\"\"\"\n+    pass\n+\n+\n class IntegerProperty(Property):\n     \"\"\"A property which defines an int type.\"\"\"\n \ndiff --git a/ocd_backend/models/serializers.py b/ocd_backend/models/serializers.py\n--- a/ocd_backend/models/serializers.py\n+++ b/ocd_backend/models/serializers.py\n@@ -5,9 +5,9 @@\n from ocd_backend.models.definitions import ALL, Rdf, Ori\n from ocd_backend.models.exceptions import SerializerError, SerializerNotFound, \\\n     RequiredProperty, MissingProperty\n-from ocd_backend.models.properties import StringProperty, IntegerProperty, \\\n+from ocd_backend.models.properties import StringProperty, URLProperty, IntegerProperty, \\\n     DateProperty, DateTimeProperty, ArrayProperty, Relation, OrderedRelation\n-from ocd_backend.utils.misc import iterate, str_to_datetime, datetime_to_unixstamp\n+from ocd_backend.utils.misc import iterate\n \n \n def get_serializer_class(format=None):\n@@ -31,9 +31,9 @@ def __init__(self, uri_format_type='term'):\n         \"\"\"Initialize the serializer with a specified format.\n \n         Options for uri_format_type are:\n-          - 'full': Fully quantified URI (ie. http://schema.org/example)\n-          - 'prefix': A property that is prefixed (ie. schema:example)\n-          - 'name': Just the name of the property\n+          - 'absolute': Fully qualified URI (ie. http://schema.org/example)\n+          - 'compact': A property that is prefixed (ie. 
schema:example)\n+          - 'term': Just the name of the property\n         \"\"\"\n         if uri_format_type not in ['absolute', 'compact', 'term']:\n             raise ValueError(\n@@ -55,7 +55,7 @@ def label(self, model_object):\n         return self.uri_format(model_object)\n \n     def deflate(self, model_object, props, rels):\n-        \"\"\"Returns a recurive serialized value for each model definition.\"\"\"\n+        \"\"\"Returns a recursive serialized value for each model definition.\"\"\"\n         props_list = dict()\n         for name, definition in model_object.definitions(props=props, rels=rels):\n             value = model_object.values.get(name, None)\n@@ -88,6 +88,9 @@ def serialize_prop(self, prop, value):\n         if type(prop) == StringProperty:\n             return value\n \n+        elif type(prop) == URLProperty:\n+            return value\n+\n         elif type(prop) == IntegerProperty:\n             return value\n \n@@ -106,30 +109,70 @@ def serialize_prop(self, prop, value):\n             )\n \n \n-class Neo4jSerializer(BaseSerializer):\n-    \"\"\"The `Neo4jSerializer` is just a basic subclass of the `BaseSerializer`.\n-\n-    This serializer is used to turn the models in full URI properties that can\n-    be inserted in Neo4j.\n-    \"\"\"\n+class PostgresSerializer(BaseSerializer):\n+    \"\"\"This serializer is used to turn models into full URI properties that can be inserted into Postgres.\"\"\"\n \n     def __init__(self):\n-        \"\"\"Currently all properties in the Neo4j are fully qualified.\"\"\"\n-        super(Neo4jSerializer, self).__init__('absolute')\n+        super(PostgresSerializer, self).__init__('absolute')\n \n     def serialize(self, model_object=None):\n         \"\"\"No high-level serialize method available, use `deflate` instead.\"\"\"\n         pass\n \n+    def deflate(self, model_object, props, rels):\n+        \"\"\"Returns a recursive serialized value for each model definition.\n+\n+        This serializer also returns the property type in the form of {uri: (value, property_type)}. 
The property\n+        type information is needed by the Postgres database to map the property value to the appropriate column.\n+        \"\"\"\n+        props_list = dict()\n+        for name, definition in model_object.definitions(props=props, rels=rels):\n+            value = model_object.values.get(name, None)\n+            if value:\n+                uri = self.uri_format(definition) or name\n+                try:\n+                    props_list[uri] = (self.serialize_prop(definition, value), definition.__class__)\n+                except MissingProperty:\n+                    raise\n+            elif definition.required and not model_object.skip_validation:\n+                raise RequiredProperty(\"Property '{}' is required for {}\".format(\n+                    name, model_object.compact_uri()))\n+        return props_list\n+\n+    def serialize_prop(self, prop, value):\n+        \"\"\"Serializes `Relation` and `OrderedRelation` as a short identifier when they are Relationships.\n+        For Individuals, the compact or absolute IRI of the label is returned.\n+\n+        For all other properties the super method is called as a fallback.\n+        \"\"\"\n+        if type(prop) == Relation or type(prop) == OrderedRelation:\n+            props = list()\n+            for _, item in iterate(value):\n+                from .model import Relationship\n+                if isinstance(item, Relationship):\n+                    item = item.model\n+\n+                props.append(self.ori_uri(item))\n+\n+            if len(props) == 1:\n+                return props[0]\n+            return props\n+\n+        return super(PostgresSerializer, self).serialize_prop(prop, value)\n+\n+    def ori_uri(self, item):\n+        \"\"\"Gets the short identifier of a resource\"\"\"\n+        return item.get_short_identifier()\n+\n \n class RdfSerializer(BaseSerializer):\n-    \"\"\"The `Rdfserializer` create a graph and add the properties as Rdf triples.\n+    \"\"\"The `RdfSerializer` creates a graph and adds the properties as Rdf triples.\n \n     This uses rdflib to create a graph which can be serialized to the various\n     formats that rdflib supports.\"\"\"\n \n     def __init__(self):\n-        \"\"\"Set all properties in the Neo4j to be fully qualified.\"\"\"\n+        \"\"\"Set all properties to be fully qualified.\"\"\"\n         self.g = Graph()\n         super(RdfSerializer, self).__init__('absolute')\n \n@@ -147,8 +190,7 @@ def deflate(self, model_object, props, rels):\n                 override=False\n             )\n \n-        s = URIRef('{}{}'.format(Ori.uri,\n-                                 model_object.get_ori_identifier()))\n+        s = URIRef('{}{}'.format(Ori.uri, model_object.get_ori_identifier()))\n         p = URIRef('{}type'.format(Rdf.uri))\n         o = URIRef(self.uri_format(model_object))\n         self.g.add((s, p, o,))\n@@ -186,7 +228,7 @@ def serialize(self, model_object, format='turtle'):\n     def serialize_prop(self, prop, value):\n         \"\"\"Calls the super method and applies rdflib specific logic on it.\n \n-        Most properties will returned as a rdflib `Literal`. Relations will be\n+        Most properties will be returned as a rdflib `Literal`. 
Relations will be\n iterated and returned as `URIRef`.\n \"\"\"\n serialized = super(RdfSerializer, self).serialize_prop(prop, value)\n@@ -241,17 +283,11 @@ def serialize_prop(self, prop, value):\n if type(prop) == Relation or type(prop) == OrderedRelation:\n props = list()\n for _, item in iterate(value):\n- from .model import Relationship, Individual\n+ from .model import Relationship\n if isinstance(item, Relationship):\n item = item.model\n \n- if isinstance(item, Individual):\n- if self.uri_format_type == 'compact':\n- props.append(item.compact_uri())\n- else:\n- props.append(item.absolute_uri())\n- else:\n- props.append(self.ori_uri(item))\n+ props.append(self.ori_uri(item))\n \n if len(props) == 1:\n return props[0]\n@@ -260,8 +296,7 @@ def serialize_prop(self, prop, value):\n return super(JsonSerializer, self).serialize_prop(prop, value)\n \n def ori_uri(self, item):\n- \"\"\"Creates a full uri to an ori resource since json doesn't do prefixes.\n- \"\"\"\n+ \"\"\"Creates a full uri to an ori resource since json doesn't do prefixes.\"\"\"\n return str(item.get_ori_identifier())\n \n \n@@ -284,15 +319,14 @@ def serialize(self, model_object):\n \n deflated = self.deflate(model_object, props=True, rels=True)\n deflated['@context'] = {k: v for k, v in context.items() if k in deflated}\n- deflated['@context']['ori_identifier'] = '@id'\n deflated['@context']['@base'] = Ori.uri\n deflated['@context'][model_object.verbose_name()] = model_object.absolute_uri()\n+ deflated['@id'] = model_object.get_short_identifier()\n deflated['@type'] = model_object.verbose_name()\n return deflated\n \n def serialize_prop(self, prop, value):\n- \"\"\"Serializes all OrderedRelation props with an @list attribute\n- \"\"\"\n+ \"\"\"Serializes all OrderedRelation props with an @list attribute.\"\"\"\n serialized = super(JsonLDSerializer, self).serialize_prop(prop, value)\n \n if type(prop) == OrderedRelation:\ndiff --git a/ocd_backend/pipeline.py b/ocd_backend/pipeline.py\n--- a/ocd_backend/pipeline.py\n+++ b/ocd_backend/pipeline.py\n@@ -9,18 +9,18 @@\n from ocd_backend.es import elasticsearch as es\n from ocd_backend.exceptions import ConfigurationError\n from ocd_backend.log import get_source_logger\n-from ocd_backend.models import Run\n from ocd_backend.utils.misc import load_object, propagate_chain_get\n \n logger = get_source_logger('pipeline')\n \n \n+@celery_app.task(autoretry_for=(Exception,), retry_backoff=True)\n def setup_pipeline(source_definition):\n- logger.info('Starting pipeline for source: %s' % source_definition.get('id'))\n+ logger.debug('[%s] Starting pipeline for source: %s' % (source_definition['sitename'], source_definition.get('id')))\n \n # index_name is an alias of the current version of the index\n index_alias = '{prefix}_{index_name}'.format(\n- prefix=settings.DEFAULT_INDEX_PREFIX,\n+ prefix=source_definition.get('es_prefix', settings.DEFAULT_INDEX_PREFIX),\n index_name=source_definition.get('index_name',\n source_definition.get('id'))\n )\n@@ -59,11 +59,7 @@ def setup_pipeline(source_definition):\n 'index_alias': index_alias\n }\n \n- logger.debug('Starting run with identifier %s' % params['run_identifier'])\n-\n- #run = Run(RunIdentifier, params['run_identifier'], 'ori')\n- #run.save()\n- #params['run_node'] = run\n+ logger.debug('[%s] Starting run with identifier %s' % (source_definition['sitename'], params['run_identifier']))\n \n celery_app.backend.set(params['run_identifier'], 'running')\n run_identifier_chains = '{}_chains'.format(params['run_identifier'])\n@@ -74,7 +70,6 @@ 
def setup_pipeline(source_definition):\n \n pipeline_definitions = {}\n pipeline_extractors = {}\n- pipeline_extensions = {}\n pipeline_transformers = {}\n pipeline_enrichers = {}\n pipeline_loaders = {}\n@@ -83,7 +78,7 @@ def setup_pipeline(source_definition):\n if 'id' not in pipeline:\n raise ConfigurationError(\"Each pipeline must have an id field.\")\n \n- # adjusted source definitionsv per pipeline. This way you can for\n+ # adjusted source definitions per pipeline. This way you can for\n # example change the index on a pipeline basis\n pipeline_definitions[pipeline['id']] = deepcopy(source_definition)\n pipeline_definitions[pipeline['id']].update(pipeline)\n@@ -92,23 +87,18 @@ def setup_pipeline(source_definition):\n pipeline_extractors[pipeline['id']] = load_object(\n pipeline_definitions[pipeline['id']]['extractor'])\n \n- pipeline_extensions[pipeline['id']] = [\n- load_object(cls) for cls in\n- pipeline_definitions[pipeline['id']].get('extensions', [])]\n-\n- if pipeline.get('transformer'):\n- pipeline_transformers[pipeline['id']] = load_object(\n- pipeline['transformer'])()\n+ pipeline_transformers[pipeline['id']] = load_object(\n+ pipeline_definitions[pipeline['id']]['transformer'])\n \n pipeline_enrichers[pipeline['id']] = [\n- (load_object(enricher[0])(), enricher[1] or {}) for enricher in\n+ (load_object(enricher[0]), enricher[1] or {}) for enricher in\n pipeline_definitions[pipeline['id']].get('enrichers', [])]\n \n pipeline_loaders[pipeline['id']] = list()\n for cls in pipeline_definitions[pipeline['id']].get('loaders', None) or \\\n [pipeline_definitions[pipeline['id']].get('loader', None)]:\n if cls:\n- pipeline_loaders[pipeline['id']].append(load_object(cls)())\n+ pipeline_loaders[pipeline['id']].append(load_object(cls))\n \n result = None\n for pipeline in pipelines:\n@@ -124,17 +114,6 @@ def setup_pipeline(source_definition):\n set_name=run_identifier_chains,\n value=params['chain_id'])\n \n- # Remaining extractors\n- for extension in pipeline_extensions[pipeline['id']]:\n- step_chain.append(extension().s(\n- *item,\n- source_definition=pipeline_definitions[pipeline['id']],\n- **params\n- )\n- )\n- # Prevent old item being passed down to next steps\n- item = []\n-\n # Transformers\n if pipeline_transformers.get(pipeline['id']):\n step_chain.append(pipeline_transformers[pipeline['id']].s(\n@@ -170,21 +149,23 @@ def setup_pipeline(source_definition):\n logger.warning('KeyboardInterrupt received. Stopping the program.')\n exit()\n except Exception, e:\n- logger.error('An exception has occured in the \"{extractor}\" extractor.'\n- ' Setting status of run identifier \"{run_identifier}\" to '\n- '\"error\":\\n{message}'\n+ logger.error('[{site_name}] Pipeline has failed. 
Setting status of '\n+ 'run identifier \"{run_identifier}\" to \"error\":\\n{message}'\n .format(index=params['new_index_name'],\n run_identifier=params['run_identifier'],\n extractor=pipeline_extractors[pipeline['id']],\n message=e,\n+ site_name=source_definition['sitename'],\n )\n )\n \n celery_app.backend.set(params['run_identifier'], 'error')\n+\n+ # Reraise the exception so celery can autoretry\n raise\n \n celery_app.backend.set(params['run_identifier'], 'done')\n if result and source_definition.get('wait_until_finished'):\n # Wait for last task chain to end before continuing\n- logger.info(\"Waiting for last chain to finish\")\n+ logger.info(\"[%s] Waiting for last chain to finish\" % source_definition['sitename'])\n propagate_chain_get(result)\ndiff --git a/ocd_backend/settings.py b/ocd_backend/settings.py\n--- a/ocd_backend/settings.py\n+++ b/ocd_backend/settings.py\n@@ -33,8 +33,9 @@\n # Use this timezone as default for timezone unaware dates\n TIMEZONE = 'Europe/Amsterdam'\n \n-fast_exchange = Exchange('fast', type='direct')\n-slow_exchange = Exchange('slow', type='direct')\n+transformers_exchange = Exchange('transformers', type='direct')\n+enrichers_exchange = Exchange('enrichers', type='direct')\n+loaders_exchange = Exchange('loaders', type='direct')\n \n CELERY_CONFIG = {\n 'BROKER_URL': REDIS_URL,\n@@ -56,33 +57,39 @@\n 'CELERY_REDIRECT_STDOUTS_LEVEL': 'INFO',\n 'CELERY_ROUTES': {\n 'ocd_backend.transformers.*': {\n- 'queue': 'slow',\n- 'routing_key': 'slow',\n+ 'queue': 'transformers',\n+ 'routing_key': 'transformers',\n 'priority': 9,\n },\n 'ocd_backend.enrichers.*': {\n- 'queue': 'slow',\n- 'routing_key': 'slow',\n+ 'queue': 'enrichers',\n+ 'routing_key': 'enrichers',\n 'priority': 6,\n },\n 'ocd_backend.loaders.*': {\n- 'queue': 'fast',\n- 'routing_key': 'fast',\n+ 'queue': 'loaders',\n+ 'routing_key': 'loaders',\n 'priority': 3,\n },\n 'ocd_backend.tasks.*': {\n- 'queue': 'fast',\n- 'routing_key': 'fast',\n+ 'queue': 'loaders',\n+ 'routing_key': 'loaders',\n+ 'priority': 0,\n+ },\n+ 'ocd_backend.pipeline.*': {\n+ 'queue': 'loaders',\n+ 'routing_key': 'loaders',\n 'priority': 0,\n },\n },\n 'CELERY_QUEUES': (\n- Queue('fast', fast_exchange, routing_key='fast'),\n- Queue('slow', slow_exchange, routing_key='slow'),\n+ Queue('transformers', transformers_exchange, routing_key='transformers'),\n+ Queue('enrichers', enrichers_exchange, routing_key='enrichers'),\n+ Queue('loaders', loaders_exchange, routing_key='loaders'),\n ),\n- 'CELERY_DEFAULT_QUEUE': 'slow',\n- 'CELERY_DEFAULT_EXCHANGE': 'slow',\n- 'CELERY_DEFAULT_ROUTING_KEY': 'slow',\n+ 'CELERY_DEFAULT_QUEUE': 'transformers',\n+ 'CELERY_DEFAULT_EXCHANGE': 'transformers',\n+ 'CELERY_DEFAULT_ROUTING_KEY': 'transformers',\n }\n \n \n@@ -160,22 +167,29 @@ def process_log_record(self, log_record):\n 'level': 'INFO',\n 'propagate': False,\n },\n+ 'celery.worker': {\n+ 'handlers': ['default'],\n+ 'level': 'DEBUG',\n+ 'propagate': False,\n+ 'filters': ['set_debug']\n+ },\n 'celery.worker.strategy': {\n 'handlers': ['default'],\n 'level': 'INFO',\n 'propagate': False,\n 'filters': ['set_debug']\n },\n- 'celery.app.trace': {\n+ 'celery.worker.control': {\n 'handlers': ['default'],\n- 'level': 'INFO',\n+ 'level': 'DEBUG',\n 'propagate': False,\n 'filters': ['set_debug']\n },\n- 'neo4j.bolt': {\n+ 'celery.app.trace': {\n 'handlers': ['default'],\n- 'level': 'WARNING',\n+ 'level': 'INFO',\n 'propagate': False,\n+ # 'filters': ['set_debug']\n },\n 'httpstream': {\n 'handlers': ['default'],\n@@ -253,12 +267,6 @@ def 
process_log_record(self, log_record):\n ELASTICSEARCH_HOST = os.getenv('ELASTICSEARCH_HOST', 'elastic')\n ELASTICSEARCH_PORT = os.getenv('ELASTICSEARCH_PORT', 9200)\n \n-NEO4J_URL = os.getenv('NEO4J_URL', 'bolt://neo4j:7687')\n-try:\n- NEO4J_USER, NEO4J_PASSWORD = os.getenv('NEO4J_AUTH', 'neo4j/development').split('/')\n-except (ValueError, AttributeError):\n- NEO4J_USER, NEO4J_PASSWORD = None, None\n-\n # The path of the directory used to store static files\n DATA_DIR_PATH = os.path.join(PROJECT_PATH, 'data')\n \n@@ -280,12 +288,6 @@ def process_log_record(self, log_record):\n # The User-Agent that is used when retrieving data from external sources\n USER_AGENT = 'Open Raadsinformatie/%s.%s (+http://www.openraadsinformatie.nl/)' % (MAJOR_VERSION, MINOR_VERSION)\n \n-# URL where of the API instance that should be used for management commands\n-# Should include API version and a trailing slash.\n-# Can be overridden in the CLI when required, for instance when the user wants\n-# to download dumps from another API instance than the one hosted by OpenState\n-API_URL = os.getenv('API_URL', 'http://frontend:5000/v%s/' % MAJOR_VERSION)\n-\n # The endpoint for the iBabs API\n IBABS_WSDL = u'https://www.mijnbabs.nl/iBabsWCFService/Public.svc?singleWsdl'\n \n@@ -296,6 +298,20 @@ def process_log_record(self, log_record):\n PDF_TO_TEXT = u'pdftotext'\n PDF_MAX_MEDIABOX_PIXELS = 5000000\n \n+# Kafka settings for DeltaLoader\n+KAFKA_HOST = os.getenv('KAFKA_HOST', 'localhost:9092')\n+KAFKA_USERNAME = os.getenv('KAFKA_USERNAME')\n+KAFKA_PASSWORD = os.getenv('KAFKA_PASSWORD')\n+KAFKA_SESSION_TIMEOUT = os.getenv('KAFKA_SESSION_TIMEOUT', 5000)\n+KAFKA_MESSAGE_KEY = os.getenv('KAFKA_MESSAGE_KEY', 'ori_delta_message')\n+KAFKA_TOPIC = os.getenv('KAFKA_TOPIC', 'ori-delta')\n+\n+# Postgres settings\n+POSTGRES_HOST = os.getenv('POSTGRES_HOST', 'postgres:5432')\n+POSTGRES_DATABASE = os.getenv('POSTGRES_DATABASE', 'ori')\n+POSTGRES_USERNAME = os.getenv('POSTGRES_USERNAME', 'ori_postgres_user')\n+POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD', 'ori_postgres_password')\n+\n # Allow any settings to be defined in local_settings.py which should be\n # ignored in your version control system allowing for settings to be\n # defined per machine.\ndiff --git a/ocd_backend/tasks.py b/ocd_backend/tasks.py\n--- a/ocd_backend/tasks.py\n+++ b/ocd_backend/tasks.py\n@@ -9,7 +9,7 @@\n class BaseCleanup(celery_app.Task):\n ignore_result = True\n \n- def run(self, *args, **kwargs):\n+ def start(self, *args, **kwargs):\n run_identifier = kwargs.get('run_identifier')\n run_identifier_chains = '{}_chains'.format(run_identifier)\n self._remove_chain(run_identifier_chains, kwargs.get('chain_id'))\n@@ -74,3 +74,13 @@ def run_finished(self, run_identifier, **kwargs):\n class DummyCleanup(BaseCleanup):\n def run_finished(self, run_identifier, **kwargs):\n log.info('Finished run {}.'.format(run_identifier))\n+\n+\n+@celery_app.task(bind=True, base=CleanupElasticsearch, autoretry_for=(Exception,), retry_backoff=True)\n+def cleanup_elasticsearch(self, *args, **kwargs):\n+ return self.start(*args, **kwargs)\n+\n+\n+@celery_app.task(bind=True, base=DummyCleanup, autoretry_for=(Exception,), retry_backoff=True)\n+def dummy_cleanup(self, *args, **kwargs):\n+ return self.start(*args, **kwargs)\ndiff --git a/ocd_backend/transformers/__init__.py b/ocd_backend/transformers/__init__.py\n--- a/ocd_backend/transformers/__init__.py\n+++ b/ocd_backend/transformers/__init__.py\n@@ -5,61 +5,17 @@\n from ocd_backend import celery_app\n from 
ocd_backend.exceptions import NoDeserializerAvailable\n from ocd_backend.mixins import OCDBackendTaskFailureMixin\n-from ocd_backend.utils.misc import load_object\n \n \n class BaseTransformer(OCDBackendTaskFailureMixin, celery_app.Task):\n \n- def run(self, *args, **kwargs):\n- \"\"\"Start transformation of a single item.\n-\n- This method is called by the extractor and expects args to\n- contain the content-type and the original item (as a string).\n- Kwargs should contain the ``source_definition`` dict.\n-\n- :returns: the output of :py:meth:`~BaseTransformer.transform_item`\n- \"\"\"\n- self.source_definition = kwargs['source_definition']\n- self.item_class = load_object(self.source_definition['item'])\n- self.run_node = kwargs.get('run_node')\n-\n- item = self.deserialize_item(*args) # pylint: disable=no-value-for-parameter\n- return self.transform_item(*args, item=item) # pylint: disable=no-value-for-parameter\n-\n @staticmethod\n- def deserialize_item(raw_item_content_type, raw_item):\n- if raw_item_content_type == 'application/json':\n+ def deserialize_item(content_type, raw_item):\n+ if content_type == 'application/json':\n return json.loads(raw_item)\n- elif raw_item_content_type == 'application/xml':\n+ elif content_type == 'application/xml':\n return etree.XML(raw_item)\n- elif raw_item_content_type == 'application/html':\n+ elif content_type == 'application/html':\n return etree.HTML(raw_item)\n else:\n- raise NoDeserializerAvailable('Item with content_type %s'\n- % raw_item_content_type)\n-\n- def transform_item(self, raw_item_content_type, raw_item, item):\n- \"\"\"Transforms a single item.\n-\n- The output of this method serves as input of a loader.\n-\n- :type raw_item_content_type: string\n- :param raw_item_content_type: the content-type of the data\n- retrieved from the source (e.g. 
``application/json``)\n- :type raw_item: string\n- :param raw_item: the data in it's original format, as retrieved\n- from the source (as a string)\n- :type item: dict\n- :param item: the deserialized item\n- :returns: a tuple containing the new object id, the item structured\n- for the combined index (as a dict) and the item item structured\n- for the source specific index.\n- \"\"\"\n-\n- transformed_item = self.item_class(source_definition=self.source_definition,\n- data_content_type=raw_item_content_type,\n- data=raw_item,\n- item=item,\n- run_node=self.run_node)\n-\n- return transformed_item.object_data\n+ raise NoDeserializerAvailable('Item with content_type %s' % content_type)\ndiff --git a/ocd_backend/transformers/ggm.py b/ocd_backend/transformers/ggm.py\ndeleted file mode 100644\n--- a/ocd_backend/transformers/ggm.py\n+++ /dev/null\n@@ -1,52 +0,0 @@\n-from lxml import etree\n-\n-from ocd_backend.log import get_source_logger\n-from ocd_backend.transformers import BaseTransformer\n-from ocd_backend.utils.misc import load_object, strip_namespaces\n-\n-log = get_source_logger('transformer')\n-\n-\n-class GegevensmagazijnTransformer(BaseTransformer):\n- def run(self, *args, **kwargs):\n- args = args[0]\n-\n- self.source_definition = kwargs['source_definition']\n- item = self.deserialize_item(*args)\n-\n- return self.transform_item(*args, item=strip_namespaces(item))\n-\n- def transform_item(self, raw_item_content_type, raw_item, item,\n- class_name=False):\n-\n- if not class_name:\n- class_name = item.xpath(\"local-name()\")\n-\n- if class_name in self.source_definition['mapping']:\n- item_source = self.source_definition['mapping'][class_name]\n- item_class = item_source['item']\n- else:\n- log.info('Skipping %s, does not exist in mapping' % class_name)\n- return []\n-\n- items = list()\n- if 'sub_items' in item_source:\n- for key, path in item_source['sub_items'].items():\n- for sub_item in item.xpath(path):\n- items += self.transform_item(raw_item_content_type,\n- etree.tostring(sub_item),\n- sub_item, class_name=key)\n-\n- item_class = load_object(item_class)\n- item = item_class(self.source_definition, raw_item_content_type,\n- raw_item, item, unicode(item_source['doc_type']))\n-\n- self.add_resolveable_media_urls(item)\n-\n- return [(\n- item.get_combined_object_id(),\n- item.get_object_id(),\n- item.get_combined_index_doc(),\n- item.get_index_doc(),\n- item.doc_type\n- )] + items\ndiff --git a/ocd_backend/transformers/goapi_committee.py b/ocd_backend/transformers/goapi_committee.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/transformers/goapi_committee.py\n@@ -0,0 +1,39 @@\n+from ocd_backend import celery_app\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('goapi_committee')\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def committee_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+ \n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'gemeenteoplossingen',\n+ 'collection': 'committee',\n+ }\n+\n+ committee = Organization(original_item['id'], **source_defaults)\n+ committee.canonical_id = original_item['id']\n+ committee.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ 
source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ committee.name = original_item['name']\n+ if original_item['name'] == 'Gemeenteraad':\n+ committee.classification = 'Council'\n+ else:\n+ committee.classification = 'Committee'\n+\n+ committee.subOrganizationOf = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ committee.save()\n+ return committee\ndiff --git a/ocd_backend/transformers/goapi_meeting.py b/ocd_backend/transformers/goapi_meeting.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/transformers/goapi_meeting.py\n@@ -0,0 +1,172 @@\n+import iso8601\n+\n+from ocd_backend import celery_app\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('goapi_meeting')\n+\n+\n+class GOAPITransformer(BaseTransformer):\n+ def get_current_permalink(self, original_item):\n+ api_version = self.source_definition.get('api_version', 'v1')\n+ base_url = '%s/%s' % (\n+ self.source_definition['base_url'], api_version,)\n+\n+ return u'%s/meetings/%i' % (base_url, original_item[u'id'],)\n+\n+ def get_documents_as_media_urls(self, original_item):\n+ current_permalink = self.get_current_permalink(original_item)\n+\n+ output = []\n+ for document in original_item.get('documents', []):\n+ # sleep(1)\n+ url = u\"%s/documents/%s\" % (current_permalink, document['id'])\n+ output.append({\n+ 'url': url,\n+ 'note': document[u'filename']})\n+ return output\n+\n+\n+# noinspection DuplicatedCode\n+@celery_app.task(bind=True, base=GOAPITransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def meeting_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+ \n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'gemeenteoplossingen',\n+ 'collection': 'meeting',\n+ }\n+\n+ event = Meeting(original_item[u'id'], **source_defaults)\n+ event.canonical_id = original_item[u'id']\n+ event.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ # dates in v1 have a time in them and in v2 they don't\n+ if ':' in original_item['date']:\n+ start_date = original_item['date']\n+ else:\n+ start_date = \"%sT%s:00\" % (\n+ original_item['date'],\n+ original_item.get('startTime', '00:00',))\n+\n+ event.start_date = iso8601.parse_date(start_date)\n+ event.end_date = event.start_date # ?\n+\n+ # Some meetings are missing a name because some municipalities do not always fill the description field.\n+ # In this case we create the name from the name of the commission and the start date of the meeting.\n+ # See issue #124.\n+ if original_item['description'] == '':\n+ event.name = 'Vergadering - %s - %s' % (original_item[u'dmu'][u'name'], event.start_date)\n+ else:\n+ event.name = original_item[u'description']\n+\n+ event.classification = [u'Agenda']\n+ event.description = original_item[u'description']\n+\n+ try:\n+ event.location = original_item[u'location'].strip()\n+ except (AttributeError, KeyError):\n+ pass\n+\n+ # Attach the meeting to the municipality node\n+ event.organization = 
TopLevelOrganization(self.source_definition['allmanak_id'],\n+                                               source=self.source_definition['key'],\n+                                               supplier='allmanak',\n+                                               collection=self.source_definition['source_type'])\n+\n+    # Attach the meeting to the committee node. GO always lists either the name of the committee or 'Raad'\n+    # if it is a non-committee meeting so we can attach it to a committee node without any extra checks.\n+    event.committee = Organization(original_item[u'dmu'][u'id'],\n+                                   source=self.source_definition['key'],\n+                                   supplier='gemeenteoplossingen',\n+                                   collection='committee')\n+    event.committee.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+                                                                 source=self.source_definition['key'],\n+                                                                 supplier='allmanak',\n+                                                                 collection=self.source_definition['source_type'])\n+    event.committee.subOrganizationOf = TopLevelOrganization(self.source_definition['allmanak_id'],\n+                                                             source=self.source_definition['key'],\n+                                                             supplier='allmanak',\n+                                                             collection=self.source_definition['source_type'])\n+\n+    # object_model['last_modified'] = iso8601.parse_date(\n+    #    original_item['last_modified'])\n+\n+    # TODO: This is untested so we log any cases that are not the default\n+    if 'canceled' in original_item and original_item['canceled']:\n+        log.info('Found a GOAPI event with status EventCancelled: %s' % str(event.values))\n+        event.status = EventCancelled\n+    elif 'inactive' in original_item and original_item['inactive']:\n+        log.info('Found a GOAPI event with status EventUnconfirmed: %s' % str(event.values))\n+        event.status = EventUnconfirmed\n+    else:\n+        event.status = EventConfirmed\n+\n+    event.agenda = []\n+    for item in original_item.get('items', []):\n+        if not item['sortorder']:\n+            continue\n+\n+        agendaitem = AgendaItem(item['id'],\n+                                source=self.source_definition['key'],\n+                                supplier='gemeenteoplossingen',\n+                                collection='agenda_item')\n+        agendaitem.canonical_id = item['id']\n+        agendaitem.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+                                                                source=self.source_definition['key'],\n+                                                                supplier='allmanak',\n+                                                                collection=self.source_definition['source_type'])\n+\n+        agendaitem.description = item['description']\n+        agendaitem.name = '%s: %s' % (item['number'], item['title'],)\n+        agendaitem.position = item['sortorder']\n+        agendaitem.parent = event\n+        agendaitem.start_date = event.start_date\n+        agendaitem.attachment = []\n+\n+        for doc in self.get_documents_as_media_urls(original_item):\n+            attachment = MediaObject(doc['url'].rpartition('/')[2],\n+                                     source=self.source_definition['key'],\n+                                     supplier='gemeenteoplossingen',\n+                                     collection='attachment')\n+            attachment.canonical_iri = doc['url']\n+            attachment.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+                                                                    source=self.source_definition['key'],\n+                                                                    supplier='allmanak',\n+                                                                    collection=self.source_definition['source_type'])\n+\n+            attachment.identifier_url = doc['url']  # Trick to use the self url for enrichment\n+            attachment.original_url = doc['url']\n+            attachment.name = doc['note']\n+            attachment.isReferencedBy = agendaitem\n+            agendaitem.attachment.append(attachment)\n+\n+        event.agenda.append(agendaitem)\n+\n+    event.attachment = []\n+    for doc in self.get_documents_as_media_urls(original_item):\n+        attachment = MediaObject(doc['url'].rpartition('/')[2],\n+                                 source=self.source_definition['key'],\n+                                 supplier='gemeenteoplossingen',\n+                                 collection='attachment')\n+        attachment.canonical_iri = doc['url']\n+        attachment.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+                                                                 source=self.source_definition['key'],\n+                                                                 supplier='allmanak',\n+                                                                 collection=self.source_definition['source_type'])\n+\n+        attachment.identifier_url = doc['url']  # Trick to use the self url for enrichment\n+        attachment.original_url = doc['url']\n+        attachment.name = doc['note']\n+        attachment.isReferencedBy = event\n+        event.attachment.append(attachment)\n+\n+    event.save()\n+    return event
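Since the transformers in this patch are Celery tasks, they can also be invoked synchronously, which is convenient in tests. A hedged sketch of calling the `meeting_item` task above directly; the payload and the reduced source definition are invented, and `save()` needs a configured Postgres database, so stub it out when running this standalone:

import json

from ocd_backend.transformers.goapi_meeting import meeting_item

raw_item = json.dumps({
    'id': 123,
    'date': '2019-01-15T19:30:00',
    'description': '',  # empty, so the fallback meeting name is generated
    'dmu': {'id': 7, 'name': 'Commissie Ruimte'},
    'documents': [],
    'items': [],
})

source_definition = {  # minimal; real definitions carry many more keys
    'key': 'example',
    'allmanak_id': '12345',
    'source_type': 'municipality',
    'base_url': 'https://example.gemeenteoplossingen.nl/api',
}

# Calling the task directly runs it in-process; production code would use
# .delay()/.apply_async() so it lands on the transformers queue.
event = meeting_item('application/json', raw_item, 123, None,
                     source_definition=source_definition)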
diff --git a/ocd_backend/transformers/gv.py b/ocd_backend/transformers/gv.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/transformers/gv.py\n@@ -0,0 +1,292 @@\n+from datetime import datetime\n+from hashlib import sha1\n+\n+from ocd_backend import celery_app\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('greenvalley')\n+\n+\n+class GreenValleyTransformer(BaseTransformer):\n+    def __init__(self, *args, **kwargs):\n+        self.classification_mapping = {\n+            'agenda': 'Agenda',\n+            'agendapage': 'Agendapunt',\n+            'bestuurlijkstuk': 'Bestuurlijk stuk',\n+            'notule': 'Verslag',\n+            'ingekomenstuk': 'Ingekomen stuk',\n+            'antwoordstuk': 'Antwoord'  # ?\n+        }\n+\n+    def get_meeting_dates(self, meeting):\n+        \"\"\"Determine meeting start and end dates.\"\"\"\n+\n+        start_date = None\n+        end_date = None\n+\n+        if meeting.get(u'bis_vergaderdatum', u'').strip() != u'':\n+            start_date = datetime.fromtimestamp(\n+                float(meeting[u'bis_vergaderdatum']) +\n+                (float(meeting.get(u'bis_starttijduren', '0') or '0') * 3600) +\n+                (float(meeting.get(u'bis_starttijdminuten', '0') or '0') * 60))\n+            end_date = datetime.fromtimestamp(\n+                float(meeting[u'bis_vergaderdatum']) +\n+                (float(meeting.get(u'bis_eindtijduren', '0') or '0') * 3600) +\n+                (float(meeting.get(u'bis_eindtijdminuten', '0') or '0') * 60))\n+        elif u'publishdate' in meeting:\n+            start_date = datetime.fromtimestamp(\n+                float(meeting[u'publishdate']))\n+            end_date = datetime.fromtimestamp(\n+                float(meeting[u'publishdate']))\n+\n+        return start_date, end_date\n+\n+    def _get_documents_as_media_urls(self, original_item):\n+        media_urls = {}\n+        if u'attachmentlist' in original_item:\n+            for att_key, att in original_item.get(u'attachmentlist', {}).iteritems():\n+                if att[u'objecttype'] == 'AGENDAPAGE':\n+                    continue\n+\n+                url = \"https://staten.zuid-holland.nl/dsresource?objectid=%s\" % (\n+                    att[u'objectid'].encode('utf8'),)\n+\n+                doc_hash = unicode(\n+                    sha1(url + ':' + att[u'objectname'].encode('utf8')).hexdigest())\n+                media_urls[doc_hash] = {\n+                    \"note\": att[u'objectname'],\n+                    \"original_url\": url\n+                }\n+        else:\n+            default = original_item['default']\n+            if default[u'objecttype'] != 'AGENDAPAGE':\n+                url = \"https://staten.zuid-holland.nl/dsresource?objectid=%s\" % (\n+                    default[u'objectid'].encode('utf8'),)\n+\n+                doc_hash = unicode(\n+                    sha1(url + ':' + default[u'objectname'].encode('utf8')).hexdigest()\n+                )\n+                media_urls[doc_hash] = {\n+                    \"note\": default[u'objectname'],\n+                    \"original_url\": url\n+                }\n+\n+        if media_urls:\n+            return media_urls.values()\n+        else:\n+            return []\n+\n+\n+@celery_app.task(bind=True, base=GreenValleyTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def greenvalley_report(self, content_type, raw_item, entity, source_item, **kwargs):\n+    original_item = self.deserialize_item(content_type, raw_item)\n+    self.source_definition = kwargs['source_definition']\n+\n+    source_defaults = {\n+        'source': self.source_definition['key'],\n+        'supplier': 'greenvalley',\n+        'collection': 'report',\n+    }\n+\n+    meeting = original_item[u'default']\n+\n+    
event = Meeting(meeting[u'objectid'], **source_defaults)\n+ event.canonical_id = meeting[u'objectid']\n+ event.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='province')\n+\n+ event.start_date, event.end_date = self.get_meeting_dates(meeting)\n+\n+ event.name = meeting[u'objectname']\n+ event.classification = [u'Agenda']\n+ try:\n+ event.classification = [unicode(\n+ self.classification_mapping[meeting[u'objecttype'].lower()])]\n+ except LookupError:\n+ event.classification = [unicode(\n+ meeting[u'objecttype'].capitalize())]\n+ event.description = meeting[u'objectname']\n+\n+ try:\n+ event.location = meeting[u'bis_locatie'].strip()\n+ except (AttributeError, KeyError):\n+ pass\n+\n+ event.organization = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='province')\n+\n+ if 'bis_orgaan' in meeting:\n+ if meeting['bis_orgaan'] != '':\n+ event.committee = Organization(meeting[u'bis_orgaan'],\n+ source=self.source_definition['key'],\n+ supplier='greenvalley',\n+ collection='committee')\n+ event.committee.canonical_id = meeting['bis_orgaan']\n+ event.committee.name = meeting['bis_orgaan']\n+ event.committee.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='province')\n+ event.committee.subOrganizationOf = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='province')\n+\n+ # object_model['last_modified'] = iso8601.parse_date(\n+ # original_item['last_modified'])\n+\n+ # if original_item['canceled']:\n+ # event.status = EventCancelled()\n+ # elif original_item['inactive']:\n+ # event.status = EventUnconfirmed()\n+ # else:\n+ # event.status = EventConfirmed()\n+ event.status = EventConfirmed\n+\n+ event.attachment = []\n+ for doc in self._get_documents_as_media_urls(original_item):\n+ attachment = MediaObject(doc['original_url'].rpartition('/')[2].split('=')[1],\n+ source=self.source_definition['key'],\n+ supplier='greenvalley',\n+ collection='attachment')\n+ attachment.canonical_iri = doc['original_url']\n+ attachment.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='province')\n+\n+ attachment.identifier_url = doc['original_url'] # Trick to use the self url for enrichment\n+ attachment.original_url = doc['original_url']\n+ attachment.name = doc['note']\n+ attachment.isReferencedBy = event\n+ event.attachment.append(attachment)\n+\n+ event.save()\n+ return event\n+\n+\n+@celery_app.task(bind=True, base=GreenValleyTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def meeting_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+ \n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'greenvalley',\n+ 'collection': 'meeting',\n+ }\n+\n+ meeting = original_item[u'default']\n+\n+ event = Meeting(meeting[u'objectid'], **source_defaults)\n+ event.canonical_id = meeting[u'objectid']\n+ event.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ 
supplier='allmanak',\n+ collection='province')\n+\n+ event.start_date, event.end_date = self.get_meeting_dates(meeting)\n+\n+ event.name = meeting[u'objectname']\n+ event.classification = [u'Agenda']\n+ try:\n+ event.classification = [unicode(\n+ self.classification_mapping[meeting[u'objecttype'].lower()])]\n+ except LookupError:\n+ event.classification = [unicode(\n+ meeting[u'objecttype'].capitalize())]\n+ event.description = meeting[u'objectname']\n+\n+ try:\n+ event.location = meeting[u'bis_locatie'].strip()\n+ except (AttributeError, KeyError):\n+ pass\n+\n+ event.organization = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='province')\n+\n+ if 'bis_orgaan' in meeting:\n+ if meeting['bis_orgaan'] != '':\n+ event.committee = Organization(meeting[u'bis_orgaan'],\n+ source=self.source_definition['key'],\n+ supplier='greenvalley',\n+ collection='committee')\n+ event.committee.canonical_id = meeting['bis_orgaan']\n+ event.committee.name = meeting['bis_orgaan']\n+ event.committee.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='province')\n+ event.committee.subOrganizationOf = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='province')\n+\n+ # object_model['last_modified'] = iso8601.parse_date(\n+ # original_item['last_modified'])\n+\n+ # if original_item['canceled']:\n+ # event.status = EventCancelled\n+ # elif original_item['inactive']:\n+ # event.status = EventUnconfirmed\n+ # else:\n+ # event.status = EventConfirmed\n+ event.status = EventConfirmed\n+\n+ event.attachment = []\n+ for doc in self._get_documents_as_media_urls(original_item):\n+ attachment = MediaObject(doc['original_url'].rpartition('/')[2].split('=')[1],\n+ source=self.source_definition['key'],\n+ supplier='greenvalley',\n+ collection='attachment')\n+ attachment.canonical_iri = doc['original_url']\n+ attachment.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='province')\n+\n+ attachment.identifier_url = doc['original_url'] # Trick to use the self url for enrichment\n+ attachment.original_url = doc['original_url']\n+ attachment.name = doc['note']\n+ attachment.isReferencedBy = event\n+ event.attachment.append(attachment)\n+\n+ event.agenda = []\n+\n+ children = []\n+ for a, v in original_item.get(u'SETS', {}).iteritems():\n+ if v[u'objecttype'].lower() == u'agendapage':\n+ result = {u'default': v}\n+ children.append(result)\n+\n+ for item in children:\n+ agenda_item = item[u'default']\n+ agendaitem = AgendaItem(agenda_item['objectid'],\n+ source=self.source_definition['key'],\n+ supplier='greenvalley',\n+ collection='agenda_item')\n+ agendaitem.canonical_id = agenda_item['objectid']\n+ agendaitem.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='province')\n+\n+ agendaitem.description = agenda_item[u'objectname']\n+ agendaitem.name = agenda_item[u'objectname']\n+ agendaitem.position = int(agenda_item['agendapagenumber'])\n+ agendaitem.parent = event\n+ # AgendaItem requires a start_date because it derives from Meeting\n+ agendaitem.start_date = event.start_date\n+\n+ event.agenda.append(agendaitem)\n+\n+ 
event.save()\n+ return event\ndiff --git a/ocd_backend/transformers/ibabs_committee.py b/ocd_backend/transformers/ibabs_committee.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/transformers/ibabs_committee.py\n@@ -0,0 +1,42 @@\n+from ocd_backend import celery_app\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('ibabs_committee')\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def committee_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+\n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'ibabs',\n+ 'collection': 'committee',\n+ }\n+\n+ committee = Organization(original_item['Id'], **source_defaults)\n+ committee.canonical_id = entity\n+ committee.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ committee.name = original_item['Meetingtype']\n+ committee.description = original_item['Abbreviation']\n+\n+ if 'sub' in original_item['Meetingtype']:\n+ committee.classification = u'Subcommittee'\n+ else:\n+ committee.classification = u'Committee'\n+\n+ # Attach the committee node to the municipality node\n+ committee.subOrganizationOf = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ committee.save()\n+ return committee\ndiff --git a/ocd_backend/transformers/ibabs_meeting.py b/ocd_backend/transformers/ibabs_meeting.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/transformers/ibabs_meeting.py\n@@ -0,0 +1,263 @@\n+import re\n+\n+import iso8601\n+\n+from ocd_backend import celery_app\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('ibabs_meeting')\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def meeting_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+\n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'ibabs',\n+ 'collection': 'meeting',\n+ }\n+\n+ # Sometimes the meeting is contained in a sub-dictionary called 'Meeting'\n+ if 'Meeting' in original_item:\n+ meeting = original_item['Meeting']\n+ else:\n+ meeting = original_item\n+\n+ item = Meeting(meeting['Id'], **source_defaults)\n+ item.canonical_id = entity\n+ item.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ item.name = meeting['Meetingtype']\n+ item.chair = meeting['Chairman']\n+ item.location = meeting['Location']\n+ item.start_date = iso8601.parse_date(meeting['MeetingDate'], ).strftime(\"%s\")\n+\n+ # TODO: This is untested so we log any cases that are not the default\n+ if 'canceled' in meeting and meeting['canceled']:\n+ log.info('Found an iBabs event with status 
EventCancelled: %s' % str(item.values))\n+ item.status = EventCancelled\n+ elif 'inactive' in meeting and meeting['inactive']:\n+ log.info('Found an iBabs event with status EventUnconfirmed: %s' % str(item.values))\n+ item.status = EventUnconfirmed\n+ else:\n+ item.status = EventConfirmed\n+\n+ item.organization = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ # Check if this is a committee meeting and if so connect it to the committee node.\n+ committee_designator = self.source_definition.get('committee_designator', 'commissie')\n+ if committee_designator in meeting['Meetingtype'].lower():\n+ # Attach the meeting to the committee node\n+ item.committee = Organization(meeting['MeetingtypeId'],\n+ source=self.source_definition['key'],\n+ supplier='ibabs',\n+ collection='committee')\n+ item.committee.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ item.committee.name = meeting['Meetingtype']\n+ # Mirror ibabs_committee: classify on the human-readable meeting type name\n+ if 'sub' in meeting['Meetingtype']:\n+ item.committee.classification = u'Subcommittee'\n+ else:\n+ item.committee.classification = u'Committee'\n+\n+ # Re-attach the committee node to the municipality node\n+ item.committee.subOrganizationOf = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ item.agenda = list()\n+ if meeting['MeetingItems'] and 'iBabsMeetingItem' in meeting['MeetingItems']:\n+ for mi in meeting['MeetingItems']['iBabsMeetingItem'] or []:\n+ agenda_item = AgendaItem(mi['Id'],\n+ source=self.source_definition['key'],\n+ supplier='ibabs',\n+ collection='agenda_item')\n+ agenda_item.canonical_id = mi['Id']\n+ agenda_item.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ agenda_item.parent = item\n+ agenda_item.name = mi['Title']\n+ agenda_item.start_date = item.start_date\n+\n+ if mi['Documents'] and 'iBabsDocument' in mi['Documents']:\n+ agenda_item.attachment = list()\n+ for document in mi['Documents']['iBabsDocument'] or []:\n+ attachment = MediaObject(document['Id'],\n+ source=self.source_definition['key'],\n+ supplier='ibabs',\n+ collection='attachment')\n+ attachment.canonical_id = document['Id']\n+ attachment.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ attachment.identifier_url = 'ibabs/agenda_item/%s' % document['Id']\n+ attachment.original_url = document['PublicDownloadURL']\n+ attachment.size_in_bytes = document['FileSize']\n+ attachment.name = document['DisplayName']\n+ attachment.isReferencedBy = agenda_item\n+ agenda_item.attachment.append(attachment)\n+\n+ item.agenda.append(agenda_item)\n+\n+ item.invitee = list()\n+ if meeting['Invitees'] and 'iBabsUserBasic' in meeting['Invitees']:\n+ for invitee in meeting['Invitees']['iBabsUserBasic'] or []:\n+ invitee_item = Person(invitee['UniqueId'],\n+ source=self.source_definition['key'],\n+ supplier='ibabs',\n+ collection='person')\n+ invitee_item.canonical_id = 
invitee['UniqueId']\n+ invitee_item.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+ item.invitee.append(invitee_item)\n+\n+ # Double-check: sometimes 'EndTime' is present in meeting but set to None\n+ if 'EndTime' in meeting and meeting['EndTime']:\n+ meeting_date, _, _ = meeting['MeetingDate'].partition('T')\n+ meeting_datetime = '%sT%s:00' % (meeting_date, meeting['EndTime'])\n+ item.end_date = iso8601.parse_date(meeting_datetime).strftime(\"%s\")\n+ else:\n+ item.end_date = iso8601.parse_date(meeting['MeetingDate']).strftime(\"%s\")\n+\n+ item.attachment = list()\n+ if meeting['Documents'] and 'iBabsDocument' in meeting['Documents']:\n+ for document in meeting['Documents']['iBabsDocument'] or []:\n+ attachment = MediaObject(document['Id'],\n+ source=self.source_definition['key'],\n+ supplier='ibabs',\n+ collection='attachment')\n+ attachment.canonical_id = document['Id']\n+ attachment.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ attachment.identifier_url = 'ibabs/meeting/%s' % document['Id']\n+ attachment.original_url = document['PublicDownloadURL']\n+ attachment.size_in_bytes = document['FileSize']\n+ attachment.name = document['DisplayName']\n+ attachment.isReferencedBy = item\n+ item.attachment.append(attachment)\n+\n+ item.save()\n+ return item\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def report_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+\n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'ibabs',\n+ 'collection': 'report',\n+ }\n+\n+ report = CreativeWork(original_item['id'][0],\n+ source=self.source_definition['key'],\n+ supplier='ibabs',\n+ collection='report')\n+ report.canonical_id = original_item['id'][0]\n+ report.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ report_name = original_item['_ReportName'].split()[0]\n+ report.classification = u'Report'\n+\n+ name_field = None\n+ try:\n+ name_field = self.source_definition['fields'][report_name]['name']\n+ except KeyError:\n+ for field in original_item.keys():\n+ # Search for fields that look like a title\n+ if field.lower()[0:3] == 'tit':\n+ name_field = field\n+ break\n+\n+ id_for_field = '%sIds' % (field,)\n+ if id_for_field in original_item and name_field is None:\n+ name_field = field\n+ break\n+\n+ report.name = original_item[name_field][0]\n+\n+ # Temporarily bind reports to the municipality as long as events and agenda items are not\n+ # referenced in the iBabs API\n+ report.creator = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ try:\n+ name_field = self.source_definition['fields'][report_name]['description']\n+ report.description = original_item[name_field][0]\n+ except KeyError:\n+ try:\n+ report.description = original_item['_Extra']['Values']['Toelichting']\n+ 
except KeyError:\n+ pass\n+\n+ try:\n+ datum_field = self.source_definition['fields'][report_name]['start_date']\n+ except KeyError:\n+ datum_field = 'datum'\n+\n+ datum = None\n+ if datum_field in original_item:\n+ if isinstance(original_item[datum_field], list):\n+ datum = original_item[datum_field][0]\n+ else:\n+ datum = original_item[datum_field]\n+\n+ if datum is not None:\n+ # msgpack does not like microseconds for some reason.\n+ # no biggie if we disregard it, though\n+ report.start_date = iso8601.parse_date(re.sub(r'\\.\\d+\\+', '+', datum))\n+ report.end_date = iso8601.parse_date(re.sub(r'\\.\\d+\\+', '+', datum))\n+\n+ report.status = EventConfirmed\n+\n+ report.attachment = list()\n+ for document in original_item['_Extra']['Documents'] or []:\n+ attachment_file = MediaObject(document['Id'],\n+ source=self.source_definition['key'],\n+ supplier='ibabs',\n+ collection='attachment')\n+ attachment_file.canonical_id = document['Id']\n+ attachment_file.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ attachment_file.original_url = document['PublicDownloadURL']\n+ attachment_file.size_in_bytes = document['FileSize']\n+ attachment_file.name = document['DisplayName']\n+ attachment_file.isReferencedBy = report\n+ report.attachment.append(attachment_file)\n+\n+ report.save()\n+ return report\ndiff --git a/ocd_backend/transformers/ibabs_person.py b/ocd_backend/transformers/ibabs_person.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/transformers/ibabs_person.py\n@@ -0,0 +1,100 @@\n+from ocd_backend import celery_app\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('ibabs_person')\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def person_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+ \n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'ibabs',\n+ 'collection': 'person',\n+ }\n+\n+ person = Person(original_item['UserId'], **source_defaults)\n+ person.canonical_id = entity\n+ person.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ person.name = original_item['Name']\n+ person.family_name = original_item['LastName']\n+ person.biography = original_item['AboutMe']\n+ person.email = original_item['Email']\n+ person.phone = original_item['Phone']\n+\n+ municipality = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ # The source ID for the municipality membership is constructed by combining the person's iBabs ID and the\n+ # key of the source\n+ municipality_membership_id = '%s_%s' % (original_item['UserId'], self.source_definition['key'])\n+ municipality_member = Membership(municipality_membership_id,\n+ source=self.source_definition['key'],\n+ supplier='ibabs',\n+ collection='municipality_membership')\n+ municipality_member.canonical_id = entity\n+ municipality_member.has_organization_name = 
TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ municipality_member.organization = municipality\n+ municipality_member.member = person\n+\n+ # FunctionName is often set to 'None' in the source, in that case we fall back to 'Member'\n+ if original_item['FunctionName'] == 'None':\n+ municipality_member.role = 'Member'\n+ else:\n+ municipality_member.role = original_item['FunctionName']\n+\n+ person.member_of = [municipality_member]\n+\n+ if original_item['PoliticalPartyId']:\n+ # Currently there is no way to merge parties from the Allmanak with parties from ibabs because\n+ # they do not share any consistent identifiers, so new nodes will be created for parties that ibabs\n+ # persons are linked to. This causes ibabs sources that have persons to have duplicate party nodes.\n+ # These duplicate nodes are necessary to cover ibabs sources that have no persons, otherwise those\n+ # sources would not have any parties.\n+ party = Organization(original_item['PoliticalPartyId'],\n+ source=self.source_definition['key'],\n+ supplier='ibabs',\n+ collection='party')\n+ party.canonical_id = original_item['PoliticalPartyId']\n+ party.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ party.name = original_item['PoliticalPartyName']\n+\n+ # The source ID for the party membership is constructed by combining the person's iBabs ID and the\n+ # name of the party\n+ party_membership_id = '%s_%s' % (original_item['UserId'], original_item['PoliticalPartyName'])\n+ party_member = Membership(party_membership_id,\n+ source=self.source_definition['key'],\n+ supplier='ibabs',\n+ collection='party_membership')\n+ party_member.canonical_id = entity\n+ party_member.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ party_member.organization = party\n+ party_member.member = person\n+ party_member.role = 'Member'\n+\n+ person.member_of.append(party_member)\n+\n+ person.save()\n+ return person\ndiff --git a/ocd_backend/transformers/notubiz_committee.py b/ocd_backend/transformers/notubiz_committee.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/transformers/notubiz_committee.py\n@@ -0,0 +1,39 @@\n+from ocd_backend import celery_app\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('notubiz_committee')\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def committee_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+\n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'notubiz',\n+ 'collection': 'committee',\n+ }\n+\n+ committee = Organization(original_item['id'], **source_defaults)\n+ committee.canonical_iri = entity\n+ committee.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ 
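# Like the other transformers in this module set, committee_item is a celery\n+ # task; the pipeline enqueues it rather than calling it directly, roughly as\n+ # follows (argument values are illustrative):\n+ #\n+ #     committee_item.delay(content_type, raw_item, entity, source_item,\n+ #                          source_definition=source_definition)\n+\n+ 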
committee.name = original_item['title']\n+ if original_item['title'] == 'Gemeenteraad':\n+ committee.classification = 'Council'\n+ else:\n+ committee.classification = 'Committee'\n+\n+ committee.subOrganizationOf = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ committee.save()\n+ return committee\ndiff --git a/ocd_backend/transformers/notubiz_meeting.py b/ocd_backend/transformers/notubiz_meeting.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/transformers/notubiz_meeting.py\n@@ -0,0 +1,136 @@\n+from ocd_backend import celery_app\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('notubiz_meeting')\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def meeting_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+ \n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'notubiz',\n+ 'collection': 'meeting',\n+ }\n+\n+ event = Meeting(original_item['id'], **source_defaults)\n+ event.canonical_iri = entity\n+ event.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+ event.start_date = original_item['plannings'][0]['start_date']\n+ event.end_date = original_item['plannings'][0]['end_date']\n+ event.name = original_item['attributes'].get('Titel', 'Vergadering %s' % event.start_date)\n+ event.classification = [u'Agenda']\n+ event.location = original_item['attributes'].get('Locatie')\n+\n+ event.organization = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ event.committee = Organization(original_item['gremium']['id'],\n+ source=self.source_definition['key'],\n+ supplier='notubiz',\n+ collection='committee')\n+ event.committee.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ # Re-attach the committee node to the municipality node\n+ event.committee.subOrganizationOf = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ event.agenda = []\n+ for item in original_item.get('agenda_items', []):\n+ if not item['order']:\n+ continue\n+\n+ # If it's a 'label' type skip the item for now, since it only gives little information about what is to come\n+ if item['type'] == 'label':\n+ continue\n+\n+ agendaitem = AgendaItem(item['id'],\n+ source=self.source_definition['key'],\n+ supplier='notubiz',\n+ collection='agenda_item')\n+ agendaitem.canonical_id = item['id']\n+ agendaitem.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ try:\n+ agendaitem.description = item['type_data']['attributes'][0]['value']\n+ 
except KeyError:\n+ try:\n+ agendaitem.description = item['type_data']['attributes'][1]['value']\n+ except KeyError:\n+ pass\n+ agendaitem.name = original_item['attributes']['Titel']\n+ agendaitem.position = item['order']\n+ agendaitem.parent = event\n+ agendaitem.start_date = event.start_date\n+\n+ agendaitem.attachment = []\n+ for doc in item.get('documents', []):\n+ attachment = MediaObject(doc['id'],\n+ source=self.source_definition['key'],\n+ supplier='notubiz',\n+ collection='attachment')\n+ attachment.canonical_iri = 'https://' + doc['self'] + '?format=json&version=1.10.8'\n+ attachment.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ attachment.identifier_url = doc['self'] # Trick to use the self url for enrichment\n+ attachment.original_url = doc['url']\n+ attachment.name = doc['title']\n+ attachment.date_modified = doc['last_modified']\n+ attachment.isReferencedBy = agendaitem\n+ agendaitem.attachment.append(attachment)\n+\n+ event.agenda.append(agendaitem)\n+\n+ # object_model['last_modified'] = iso8601.parse_date(\n+ # original_item['last_modified'])\n+\n+ if 'canceled' in original_item and original_item['canceled']:\n+ log.info('Found a Notubiz event with status EventCancelled: %s' % str(event.values))\n+ event.status = EventCancelled\n+ elif 'inactive' in original_item and original_item['inactive']:\n+ log.info('Found a Notubiz event with status EventUnconfirmed: %s' % str(event.values))\n+ event.status = EventUnconfirmed\n+ else:\n+ event.status = EventConfirmed\n+\n+ event.attachment = []\n+ for doc in original_item.get('documents', []):\n+ attachment = MediaObject(doc['id'],\n+ source=self.source_definition['key'],\n+ supplier='notubiz',\n+ collection='attachment')\n+ attachment.canonical_iri = 'https://' + doc['url'] + '?format=json&version=1.10.8'\n+ attachment.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ attachment.identifier_url = doc['self'] # Trick to use the self url for enrichment\n+ attachment.original_url = doc['url']\n+ attachment.name = doc['title']\n+ attachment.date_modified = doc['last_modified']\n+ attachment.isReferencedBy = event\n+ event.attachment.append(attachment)\n+\n+ event.save()\n+ return event\ndiff --git a/ocd_backend/transformers/organizations.py b/ocd_backend/transformers/organizations.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/transformers/organizations.py\n@@ -0,0 +1,98 @@\n+from ocd_backend import celery_app\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('organizations')\n+\n+\n+def transform_contact_details(data):\n+ \"\"\"\n+ Takes a dictionary of contact details and flattens every entry to {key: {label: label, value: value}}.\n+ \"\"\"\n+\n+ transformed_data = {}\n+ for key, value in data.items():\n+ if 'label' in value:\n+ transformed_data[key] = value\n+ else:\n+ for key2, value2 in value.items():\n+ transformed_data['%s_%s' % (key, key2)] = {'label': key2, 'value': value2}\n+\n+ return transformed_data\n+\n+\n+
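# A minimal usage sketch of the flattening above (hypothetical input, shaped\n+# like Allmanak contact data):\n+#\n+#     transform_contact_details({\n+#         'email': {'label': 'E-mail', 'value': 'info@example.org'},\n+#         'telefoon': {'kantoor': '0201234567'},\n+#     })\n+#     # -> {'email': {'label': 'E-mail', 'value': 'info@example.org'},\n+#     #     'telefoon_kantoor': {'label': 'kantoor', 'value': '0201234567'}}\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def municipality_organization_item(self, content_type, raw_item, 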
entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+\n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'allmanak',\n+ 'collection': 'municipality',\n+ }\n+\n+ object_model = TopLevelOrganization(original_item['systemid'], **source_defaults)\n+ object_model.canonical_iri = entity\n+ object_model.classification = u'Municipality'\n+ object_model.collection = self.source_definition['key']\n+ object_model.name = ' '.join(filter(None, [self.source_definition.get('municipality_prefix', ''), unicode(original_item['naam'])]))\n+ object_model.description = original_item['omvatplaats']\n+ # object_model.contact_details = transform_contact_details(original_item['contact'])\n+\n+ object_model.save()\n+ return object_model\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def province_organization_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+\n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'allmanak',\n+ 'collection': 'province',\n+ }\n+\n+ object_model = TopLevelOrganization(original_item['systemid'], **source_defaults)\n+ object_model.canonical_iri = entity\n+ object_model.classification = u'Province'\n+ object_model.collection = self.source_definition['key']\n+ object_model.name = unicode(original_item['naam'])\n+ object_model.description = original_item['omvatplaats']\n+ # object_model.contact_details = transform_contact_details(original_item['contact'])\n+\n+ object_model.save()\n+ return object_model\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def party_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+\n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'allmanak',\n+ 'collection': 'party',\n+ }\n+\n+ # When the Allmanak implements parties as entities, the entity ID should be used\n+ object_model = Organization(original_item['partij'], **source_defaults)\n+ object_model.canonical_id = original_item['partij']\n+ object_model.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+ object_model.collection = self.source_definition['key'] + '-' + original_item['partij']\n+ object_model.name = original_item['partij']\n+ object_model.classification = 'Party'\n+ object_model.subOrganizationOf = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ object_model.save()\n+ return object_model\n+\n+\n+
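# Note on graph merging: the 'collection' value set on these party nodes,\n+# '<source key>-<party name>', is exactly the value the Allmanak person\n+# transformer (persons.py) targets via merge_into=('collection', 'prop_string',\n+# ...), so person-to-party memberships merge into these nodes instead of\n+# creating duplicates. A sketch of the invariant (names are illustrative):\n+#\n+#     expected = self.source_definition['key'] + '-' + original_item['partij']\n+#     assert object_model.collection == expected\ndiff --git a/ocd_backend/items/partijgedrag.py b/ocd_backend/transformers/partijgedrag.py\nsimilarity index 77%\nrename from ocd_backend/items/partijgedrag.py\nrename to ocd_backend/transformers/partijgedrag.py\n--- a/ocd_backend/items/partijgedrag.py\n+++ b/ocd_backend/transformers/partijgedrag.py\n@@ -1,20 +1,13 @@\n-from ocd_backend.items import BaseItem\n-from ocd_backend.models import Motion, Organization, VoteEvent, ResultPass,\\\n- ResultFail, Vote, Person, 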
VoteOptionYes, VoteOptionNo, VoteOptionAbsent\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n \n \n-class PartijgedragMotion(BaseItem):\n- def get_rights(self):\n- return u'undefined'\n-\n- def get_collection(self):\n- return unicode(self.source_definition['index_name'])\n-\n- def get_object_model(self):\n+class MotionItem(BaseTransformer):\n+ def transform(self):\n source_defaults = {\n 'source': 'partijgedrag',\n- 'source_id_key': 'identifier',\n- 'organization': 'ggm',\n+ 'supplier': 'gegevensmagazijn',\n+ 'collection': 'motion',\n }\n \n motion = Motion(self.original_item['identifier'], **source_defaults)\n@@ -45,11 +38,12 @@ def get_object_model(self):\n motion.cocreator.append(cocreator)\n \n vote_event = VoteEvent(self.original_item['identifier'], **source_defaults)\n+ vote_event.start_date = self.original_item.get('issuedate')\n \n if self.original_item['uitslag']:\n- vote_event.result = ResultPass()\n+ vote_event.result = ResultPassed\n elif not self.original_item['uitslag']:\n- vote_event.result = ResultFail()\n+ vote_event.result = ResultFailed\n \n if 'votes' in self.original_item:\n votes = list()\n@@ -69,11 +63,11 @@ def get_object_model(self):\n vote.weight = vote_party['aantal']\n \n if vote_option == 'voor':\n- vote.option = VoteOptionYes()\n+ vote.option = VoteOptionYes\n elif vote_option == 'tegen':\n- vote.option = VoteOptionNo()\n+ vote.option = VoteOptionNo\n elif vote_option == 'afwezig':\n- vote.option = VoteOptionAbsent()\n+ vote.option = VoteOptionAbsent\n \n votes.append(vote)\n \ndiff --git a/ocd_backend/transformers/persons.py b/ocd_backend/transformers/persons.py\nnew file mode 100644\n--- /dev/null\n+++ b/ocd_backend/transformers/persons.py\n@@ -0,0 +1,90 @@\n+from ocd_backend import celery_app\n+from ocd_backend.transformers import BaseTransformer\n+from ocd_backend.models import *\n+from ocd_backend.log import get_source_logger\n+\n+log = get_source_logger('persons')\n+\n+\n+@celery_app.task(bind=True, base=BaseTransformer, autoretry_for=(Exception,), retry_backoff=True)\n+def allmanak_person_item(self, content_type, raw_item, entity, source_item, **kwargs):\n+ original_item = self.deserialize_item(content_type, raw_item)\n+ self.source_definition = kwargs['source_definition']\n+ \n+ source_defaults = {\n+ 'source': self.source_definition['key'],\n+ 'supplier': 'allmanak',\n+ 'collection': 'person',\n+ }\n+\n+ person = Person(original_item['systemid'], **source_defaults)\n+ person.canonical_id = original_item['systemid']\n+ person.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ person.name = original_item['naam']\n+ if 'Dhr.' in original_item['naam']:\n+ person.gender = 'Man'\n+ elif 'Mw.' 
in original_item['naam']:\n+ person.gender = 'Vrouw'\n+\n+ municipality = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ # The source ID for the municipality membership is constructed by combining the person's Allmanak ID and the\n+ # key of the source\n+ municipality_membership_id = '%s_%s' % (original_item['systemid'], self.source_definition['key'])\n+ municipality_member = Membership(municipality_membership_id,\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='municipality_membership')\n+ municipality_member.canonical_iri = entity\n+\n+ municipality_member.has_organization_name = municipality\n+ municipality_member.organization = municipality\n+\n+ municipality_member.member = person\n+ municipality_member.role = 'Raadslid'\n+\n+ person.member_of = [municipality_member]\n+\n+ if original_item['partij']:\n+ party = Organization(original_item['partij'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='party',\n+ merge_into=('collection',\n+ 'prop_string',\n+ self.source_definition['key'] + '-' + original_item['partij']))\n+ party.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ party.name = original_item['partij']\n+\n+ # The source ID for the party membership is constructed by combining the person's Allmanak ID and the\n+ # name of the party\n+ party_membership_id = '%s_%s' % (original_item['systemid'], original_item['partij'])\n+ party_member = Membership(party_membership_id,\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection='party_membership')\n+ party_member.canonical_iri = entity\n+ party_member.has_organization_name = TopLevelOrganization(self.source_definition['allmanak_id'],\n+ source=self.source_definition['key'],\n+ supplier='allmanak',\n+ collection=self.source_definition['source_type'])\n+\n+ party_member.organization = party\n+ party_member.member = person\n+ party_member.role = 'Lid'\n+\n+ person.member_of.append(party_member)\n+\n+ person.save()\n+ return person\ndiff --git a/ocd_backend/utils/api.py b/ocd_backend/utils/api.py\n--- a/ocd_backend/utils/api.py\n+++ b/ocd_backend/utils/api.py\n@@ -1,29 +1,13 @@\n-import json\n-\n-import requests\n-\n-from ocd_backend import settings\n+from ocd_backend.es import elasticsearch as es\n \n \n class FrontendAPIMixin(object):\n \"\"\"\n- Interface for the frontend API.\n+ Deprecated. 
Legacy interface for emulating the old frontend API.\n \"\"\"\n \n def api_request(self, index_name, doc_type, query=None, *args, **kwargs):\n-\n- if doc_type:\n- api_url = u'%s%s/%s/search' % (\n- self.source_definition.get('frontend_api_url', settings.API_URL),\n- index_name, doc_type,)\n- else:\n- api_url = u'%s%s/search' % (\n- self.source_definition.get('frontend_api_url', settings.API_URL),\n- index_name,)\n-\n- # TODO: facets (better), sorting\n api_query = {\n- \"facets\": {},\n \"filters\": {},\n \"from\": 0,\n \"size\": 10,\n@@ -31,6 +15,8 @@ def api_request(self, index_name, doc_type, query=None, *args, **kwargs):\n \"order\": \"asc\"\n }\n \n+ kwargs['@type'] = doc_type\n+\n if query is not None:\n api_query[\"query\"] = query\n \n@@ -45,34 +31,4 @@ def api_request(self, index_name, doc_type, query=None, *args, **kwargs):\n else:\n api_query[\"filters\"][k] = v\n \n- r = self.http_session.post(\n- api_url,\n- data=json.dumps(api_query)\n- )\n- try:\n- r.raise_for_status()\n- except requests.HTTPError:\n- return None\n-\n- if doc_type:\n- try:\n- return r.json()[doc_type]\n- except KeyError:\n- return None\n- return r.json()\n-\n- def api_request_object(self, index_name, doc_type, object_id, *args,\n- **kwargs):\n- api_url = u'%s%s/%s/%s' % (\n- self.source_definition.get('frontend_api_url',\n- settings.API_URL), index_name, doc_type,\n- object_id)\n- r = self.http_session.get(\n- api_url\n- )\n- try:\n- r.raise_for_status()\n- except requests.HTTPError:\n- return None\n-\n- return r.json()\n+ return es.search(index=index_name, body=api_query)\ndiff --git a/ocd_backend/utils/file_parsing.py b/ocd_backend/utils/file_parsing.py\n--- a/ocd_backend/utils/file_parsing.py\n+++ b/ocd_backend/utils/file_parsing.py\n@@ -15,7 +15,7 @@ def file_parser(fname, pages=None):\n try:\n result_pages = []\n i = 0\n- d = pdf.Document(fname)\n+ d = pdf.Document(fname, quiet=True)\n for i, p in enumerate(d, start=1):\n text_array = []\n for f in p:\n@@ -55,7 +55,9 @@ def file_get_contents(self, url, max_pages=20):\n \n tf = self.file_download(url)\n if tf is not None:\n- return self.file_to_text(tf.name, max_pages)\n+ name = tf.name\n+ tf.close()\n+ return self.file_to_text(name, max_pages)\n else:\n return [] # FIXME: should be something else ...\n \n@@ -74,8 +76,10 @@ def file_download(self, url):\n return tf\n except HTTPError as e:\n log.info(\"Something went wrong downloading %s\", url)\n+ raise\n except Exception as e:\n log.warning(\"Some other exception %s\", url)\n+ raise\n \n def file_to_text(self, path, max_pages=20):\n \"\"\"\ndiff --git a/ocd_backend/utils/http.py b/ocd_backend/utils/http.py\n--- a/ocd_backend/utils/http.py\n+++ b/ocd_backend/utils/http.py\n@@ -80,7 +80,9 @@ def fetch(self, url, path, modified_date):\n \n def fetch_data(self, url, path, modified_date):\n _, _, media_file = self.fetch(url, path, modified_date)\n- return media_file.read()\n+ data = media_file.read()\n+ media_file.close()\n+ return data\n \n def download_url(self, url, partial_fetch=False):\n http_resp = self.http_session.get(url, stream=True, timeout=(60, 120))\n@@ -127,6 +129,10 @@ def download_url(self, url, partial_fetch=False):\n media_file\n )\n \n+ def save(self, path, data, content_type=None):\n+ \"\"\"Save is only implemented for GCSCachingMixin\"\"\"\n+ pass\n+\n \n class LocalCachingMixin(HttpRequestMixin):\n \n@@ -164,7 +170,10 @@ def _check_path(path):\n raise InvalidFile\n \n def fetch(self, url, path, modified_date):\n- modified_date = localize_datetime(str_to_datetime(modified_date))\n+ if 
modified_date:\n+ modified_date = localize_datetime(str_to_datetime(modified_date))\n+ else:\n+ modified_date = None\n \n url_hash = base64.urlsafe_b64encode(path)\n base_path = self.base_path(url_hash)\n@@ -307,6 +316,10 @@ def save(self, path, data, content_type=None):\n will default to default_content_type.\n \"\"\"\n \n+ # If the storage_client has not been loaded fall back to HttpRequestMixin save\n+ if not self.storage_client:\n+ return super(GCSCachingMixin, self).save(path, data, content_type)\n+\n bucket = self.get_bucket()\n blob = bucket.get_blob(path)\n self.compressed_upload(blob, data, content_type)\ndiff --git a/ocd_backend/utils/ibabs.py b/ocd_backend/utils/ibabs.py\n--- a/ocd_backend/utils/ibabs.py\n+++ b/ocd_backend/utils/ibabs.py\n@@ -135,18 +135,6 @@ def meeting_item_to_dict(m):\n return _ibabs_to_dict(m, fields)\n \n \n-def meeting_type_to_dict(mt):\n- \"\"\"\n- Converts an iBabsMeetingType to a JSON serializable dict\n- \"\"\"\n- fields = {\n- 'Id': None,\n- 'Meetingtype': None,\n- 'Abbreviation': None,\n- }\n- return _ibabs_to_dict(mt, fields)\n-\n-\n def _list_response_field_to_val(r):\n if isinstance(r, list):\n return [unicode(l) for l in r]\n@@ -178,10 +166,3 @@ def list_entry_response_to_dict(m):\n unicode(y.Key): unicode(y.Value) if y.Value is not None else None for y in x[0]}\n }\n return _ibabs_to_dict(m, fields)\n-\n-\n-def person_profile_to_dict(p):\n- \"\"\"\n- Converts an iBabsListEntryBasic to a JSON serializable dict\n- \"\"\"\n- return {k[0]: _list_response_field_to_val(k[1]) for k in p}\ndiff --git a/ocd_frontend/rest/__init__.py b/ocd_frontend/rest/__init__.py\ndeleted file mode 100644\n--- a/ocd_frontend/rest/__init__.py\n+++ /dev/null\n@@ -1,68 +0,0 @@\n-import json\n-from functools import wraps\n-\n-from flask import jsonify, request\n-\n-from factory import create_app_factory\n-\n-\n-def create_app(settings_override=None):\n- \"\"\"Returns the REST API application instance.\"\"\"\n- app = create_app_factory(__name__, __path__, settings_override)\n- app.errorhandler(OcdApiError)(OcdApiError.serialize_error)\n-\n- def add_cors_headers(resp):\n- resp.headers['Access-Control-Allow-Origin'] = '*'\n- # See https://stackoverflow.com/questions/12630231/how-do-cors-and-access-control-allow-headers-work\n- resp.headers['Access-Control-Allow-Headers'] = 'origin, content-type, accept'\n- return resp\n-\n- app.after_request(add_cors_headers)\n-\n- return app\n-\n-\n-class OcdApiError(Exception):\n- \"\"\"API error class.\n-\n- :param msg: the message that should be returned to the API user.\n- :param status_code: the HTTP status code of the response\n- \"\"\"\n-\n- def __init__(self, msg, status_code):\n- self.msg = msg\n- self.status_code = status_code\n-\n- def __str__(self):\n- return repr(self.msg)\n-\n- @staticmethod\n- def serialize_error(e):\n- return jsonify(dict(status='error', error=e.msg)), e.status_code\n-\n-\n-def decode_json_post_data(fn):\n- \"\"\"Decorator that parses POSTed JSON and attaches it to the request\n- object (:obj:`request.data`).\"\"\"\n-\n- @wraps(fn)\n- def wrapped_function(*args, **kwargs):\n- if request.method == 'POST':\n- data = request.get_data(cache=False)\n- if not data:\n- raise OcdApiError('No data was POSTed', 400)\n-\n- try:\n- request_charset = request.mimetype_params.get('charset')\n- if request_charset is not None:\n- data = json.loads(data, encoding=request_charset)\n- else:\n- data = json.loads(data)\n- except:\n- raise OcdApiError('Unable to parse POSTed JSON', 400)\n-\n- request.data = data\n-\n- return 
fn(*args, **kwargs)\n-\n- return wrapped_function\ndiff --git a/ocd_frontend/rest/es.py b/ocd_frontend/rest/es.py\ndeleted file mode 100644\n--- a/ocd_frontend/rest/es.py\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-from elasticsearch import Elasticsearch\n-\n-\n-class ElasticsearchService(object):\n- def __init__(self, host, port):\n- self._es = Elasticsearch([{'host': host, 'port': port}])\n-\n- def search(self, *args, **kwargs):\n- return self._es.search(*args, **kwargs)\n-\n- def create(self, *args, **kwargs):\n- return self._es.index(*args, **kwargs)\n-\n- def get(self, *args, **kwargs):\n- return self._es.get(*args, **kwargs)\n-\n- def exists(self, *args, **kwargs):\n- return self._es.exists(*args, **kwargs)\n-\n- def msearch(self, *args, **kwargs):\n- return self._es.msearch(*args, **kwargs)\n-\n- def get_esclient(self):\n- return self._es\ndiff --git a/ocd_frontend/rest/factory.py b/ocd_frontend/rest/factory.py\ndeleted file mode 100644\n--- a/ocd_frontend/rest/factory.py\n+++ /dev/null\n@@ -1,31 +0,0 @@\n-from flask import Flask\n-\n-import settings\n-from es import ElasticsearchService\n-from helpers import register_blueprints\n-from settings import BUGSNAG_APIKEY\n-\n-\n-def create_app_factory(package_name, package_path, settings_override=None):\n- \"\"\"Returns a :class:`Flask` application instance configured with\n- project-wide functionality.\n-\n- :param package_name: application package name.\n- :param package_path: application package path.\n- :param settings_override: a dictionary of settings to override.\n- \"\"\"\n- app = Flask(package_name, instance_relative_config=True)\n-\n- app.config.from_object(settings)\n- app.config.from_object(settings_override)\n-\n- app.es = ElasticsearchService(app.config['ELASTICSEARCH_HOST'],\n- app.config['ELASTICSEARCH_PORT'])\n-\n- register_blueprints(app, package_name, package_path)\n-\n- if BUGSNAG_APIKEY:\n- from bugsnag.flask import handle_exceptions\n- handle_exceptions(app)\n-\n- return app\ndiff --git a/ocd_frontend/rest/helpers.py b/ocd_frontend/rest/helpers.py\ndeleted file mode 100644\n--- a/ocd_frontend/rest/helpers.py\n+++ /dev/null\n@@ -1,25 +0,0 @@\n-import importlib\n-import pkgutil\n-\n-from flask import Blueprint\n-\n-\n-def register_blueprints(app, package_name, package_path):\n- \"\"\"Register all Blueprint instances on the specified Flask\n- application found in all modules for the specified package.\n-\n- :param app: Flask application.\n- :param package_name: package name.\n- :param package_path: package path.\n- \"\"\"\n- rv = []\n-\n- for _, name, _ in pkgutil.iter_modules(package_path):\n- m = importlib.import_module('%s.%s' % (package_name, name))\n- for item in dir(m):\n- item = getattr(m, item)\n- if isinstance(item, Blueprint):\n- app.register_blueprint(item)\n- rv.append(item)\n-\n- return rv\ndiff --git a/ocd_frontend/rest/log.py b/ocd_frontend/rest/log.py\ndeleted file mode 100644\n--- a/ocd_frontend/rest/log.py\n+++ /dev/null\n@@ -1,10 +0,0 @@\n-import logging\n-\n-\n-def get_source_logger(name=None):\n- logger = logging.getLogger('ocd_frontend')\n-\n- if name:\n- logger = logging.LoggerAdapter(logger, {'source': name})\n-\n- return logger\ndiff --git a/ocd_frontend/rest/settings.py b/ocd_frontend/rest/settings.py\ndeleted file mode 100644\n--- a/ocd_frontend/rest/settings.py\n+++ /dev/null\n@@ -1,533 +0,0 @@\n-import logging.config\n-import os.path\n-\n-from bugsnag.handlers import BugsnagHandler\n-from pythonjsonlogger import jsonlogger\n-\n-DEBUG = True\n-\n-APP_VERSION = os.getenv('APP_VERSION', 
None)\n-\n-BUGSNAG_APIKEY = os.getenv('BUGSNAG_APIKEY')\n-\n-RELEASE_STAGE = os.getenv('RELEASE_STAGE', 'production')\n-\n-# Elasticsearch\n-ELASTICSEARCH_HOST = os.getenv('ELASTICSEARCH_HOST', 'elastic')\n-ELASTICSEARCH_PORT = os.getenv('ELASTICSEARCH_PORT', 9200)\n-\n-# The default number of hits to return for a search request via the REST API\n-DEFAULT_SEARCH_SIZE = 10\n-\n-# The max. number of hits to return for a search request via the REST API\n-MAX_SEARCH_SIZE = 100\n-\n-# The default prefix used for all data\n-DEFAULT_INDEX_PREFIX = 'ori'\n-\n-# The fields which can be used for sorting results via the REST API\n-SORTABLE_FIELDS = {\n- 'person': [\n- 'meta.source_id', 'meta.processing_started',\n- 'meta.processing_finished',\n- 'start_date', '_score', 'gender', 'name'],\n- 'organization': [\n- 'meta.source_id', 'meta.processing_started',\n- 'meta.processing_finished',\n- 'start_date', '_score', 'classification', 'name'],\n- 'meeting': [\n- 'meta.source_id', 'meta.processing_started',\n- 'meta.processing_finished',\n- 'start_date', '_score', 'classification', 'name', 'start_date',\n- 'location'],\n- 'agenda_item': [\n- 'meta.source_id', 'meta.processing_started',\n- 'meta.processing_finished',\n- 'start_date', '_score', 'classification', 'name', 'start_date',\n- 'location'],\n- 'motion': [\n- 'meta.source_id', 'meta.processing_started',\n- 'meta.processing_finished',\n- 'start_date', '_score', 'classification', 'name', 'date'],\n- 'vote_event': [\n- 'meta.source_id', 'meta.processing_started',\n- 'meta.processing_finished',\n- 'start_date', '_score', 'classification', 'name', 'start_date'],\n- 'items': [\n- 'meta.source_id', 'meta.processing_started',\n- 'meta.processing_finished',\n- 'start_date', '_score']\n-}\n-\n-# EXCLUDED_FIELDS_DEFAULT = ['all_text', 'source_data',\n-# 'media_urls.original_url']\n-# EXCLUDED_FIELDS_SEARCH = ['all_text', 'media_urls.original_url']\n-#\n-# ALLOWED_INCLUDE_FIELDS_DEFAULT = ['all_text', 'source_data']\n-# ALLOWED_INCLUDE_FIELDS_SEARCH = ['all_text']\n-\n-EXCLUDED_FIELDS_ALWAYS = ['enrichments', 'hidden']\n-EXCLUDED_FIELDS_DEFAULT = ['all_text', 'source_data',\n- 'media_urls.original_url']\n-EXCLUDED_FIELDS_SEARCH = ['all_text', 'media_urls.original_url']\n-\n-ALLOWED_INCLUDE_FIELDS_DEFAULT = []\n-ALLOWED_INCLUDE_FIELDS_SEARCH = []\n-\n-SIMPLE_QUERY_FIELDS = {\n- 'person': [\n- 'biography^4', 'name^3', 'other_names^2',\n- 'memberships.organization.name^2',\n- 'memberships.role'],\n- 'organization': ['name^4', 'description'],\n- 'meeting': [\n- 'name^4', 'description^3', 'location', 'organization.name',\n- 'organization.description', 'sources.note^2', 'sources.description'],\n- 'agenda_item': [\n- 'name^4', 'description^3', 'location', 'organization.name',\n- 'organization.description', 'sources.note^2', 'sources.description'],\n- 'motion': [\n- 'name^4', 'text^3', 'organization.name', 'sources.note^2',\n- 'sources.description'],\n- 'vote_event': [\n- 'name^4', 'motion.text^3', 'organization.name', 'sources.note^2',\n- 'sources.description'],\n- 'items': [\n- 'name^4', 'description^3', 'location', 'organization.name',\n- 'organization.description', 'sources.note^2', 'sources.description',\n- 'biography^4', 'other_names^2', 'memberships.organization.name^2']\n-}\n-\n-DOC_TYPE_DEFAULT = u'items'\n-\n-# Definition of the ES facets (and filters) that are accessible through\n-# the REST API\n-COMMON_FACETS = {\n- 'processing_started': {\n- 'date_histogram': {\n- 'field': 'meta.processing_started',\n- 'interval': 'month'\n- }\n- },\n- 
'processing_finished': {\n- 'date_histogram': {\n- 'field': 'meta.processing_finished',\n- 'interval': 'month'\n- }\n- },\n- 'source': {\n- 'terms': {\n- 'field': 'meta.source_id',\n- 'size': 10\n- }\n- },\n- 'collection': {\n- 'terms': {\n- 'field': 'meta.collection',\n- 'size': 10\n- }\n- },\n- 'rights': {\n- 'terms': {\n- 'field': 'meta.rights',\n- 'size': 10\n- }\n- },\n- 'index': {\n- 'terms': {\n- 'field': '_index',\n- 'size': 10\n- }\n- },\n- 'types': {\n- 'terms': {\n- 'field': '_type',\n- 'size': 10\n- }\n- },\n- 'start_date': {\n- 'date_histogram': {\n- 'field': 'start_date',\n- 'interval': 'month'\n- }\n- }\n-}\n-\n-AVAILABLE_FACETS = {\n- 'organization': {\n- 'classification': {\n- 'terms': {\n- 'field': 'classification',\n- 'size': 10\n- }\n- }\n- },\n- 'person': {\n- 'gender': {\n- 'terms': {\n- 'field': 'gender',\n- 'size': 2\n- }\n- },\n- 'organization': {\n- 'terms': {\n- 'field': 'memberships.organization_id',\n- 'size': 10\n- }\n- }\n- },\n- 'meeting': {\n- 'classification': {\n- 'terms': {\n- 'field': 'classification',\n- 'size': 10\n- }\n- },\n- 'organization_id': {\n- 'terms': {\n- 'field': 'organization_id',\n- 'size': 10\n- }\n- },\n- 'location': {\n- 'terms': {\n- 'field': 'location',\n- 'size': 10\n- }\n- },\n- 'status': {\n- 'terms': {\n- 'field': 'status',\n- 'size': 10\n- }\n- },\n- 'start_date': {\n- 'date_histogram': {\n- 'field': 'start_date',\n- 'interval': 'month'\n- }\n- },\n- 'end_date': {\n- 'date_histogram': {\n- 'field': 'end_date',\n- 'interval': 'month'\n- }\n- }\n- },\n- 'agenda_item': {\n- 'classification': {\n- 'terms': {\n- 'field': 'classification',\n- 'size': 10\n- }\n- },\n- 'organization_id': {\n- 'terms': {\n- 'field': 'organization_id',\n- 'size': 10\n- }\n- },\n- 'location': {\n- 'terms': {\n- 'field': 'location',\n- 'size': 10\n- }\n- },\n- 'status': {\n- 'terms': {\n- 'field': 'status',\n- 'size': 10\n- }\n- },\n- 'start_date': {\n- 'date_histogram': {\n- 'field': 'start_date',\n- 'interval': 'month'\n- }\n- },\n- 'end_date': {\n- 'date_histogram': {\n- 'field': 'end_date',\n- 'interval': 'month'\n- }\n- }\n- },\n- 'motion': {\n- 'classification': {\n- 'terms': {\n- 'field': 'classification',\n- 'size': 10\n- }\n- },\n- 'organization_id': {\n- 'terms': {\n- 'field': 'organization_id',\n- 'size': 10\n- }\n- },\n- 'legislative_session_id': {\n- 'terms': {\n- 'field': 'legislative_session_id',\n- 'size': 10\n- }\n- },\n- 'creator_id': {\n- 'terms': {\n- 'field': 'creator_id',\n- 'size': 10\n- }\n- },\n- 'date': {\n- 'date_histogram': {\n- 'field': 'date',\n- 'interval': 'month'\n- }\n- },\n- 'requirement': {\n- 'terms': {\n- 'field': 'requirement',\n- 'size': 10\n- }\n- },\n- 'result': {\n- 'terms': {\n- 'field': 'result',\n- 'size': 10\n- }\n- }\n- },\n- 'vote_event': {\n- 'classification': {\n- 'terms': {\n- 'field': 'classification',\n- 'size': 10\n- }\n- },\n- 'organization_id': {\n- 'terms': {\n- 'field': 'organization_id',\n- 'size': 10\n- }\n- },\n- 'start_date': {\n- 'date_histogram': {\n- 'field': 'start_date',\n- 'interval': 'month'\n- }\n- },\n- 'end_date': {\n- 'date_histogram': {\n- 'field': 'end_date',\n- 'interval': 'month'\n- }\n- },\n- 'legislative_session_id': {\n- 'terms': {\n- 'field': 'legislative_session_id',\n- 'size': 10\n- }\n- }\n- },\n- 'items': {\n- 'classification': {\n- 'terms': {\n- 'field': 'classification',\n- 'size': 10\n- }\n- }\n- }\n-}\n-\n-\n-# For highlighting\n-COMMON_HIGHLIGHTS = {\n- 'source': {},\n- 'collection': {},\n- 'rights': {}\n-}\n-\n-AVAILABLE_HIGHLIGHTS = {\n- 'organization': {\n- 
'classification': {},\n- 'name': {},\n- 'description': {}\n- },\n- 'person': {\n- 'name': {},\n- 'memberships.role': {},\n- 'area.name': {}\n- },\n- 'meeting': {\n- 'classification': {},\n- 'location': {},\n- 'organization.name': {},\n- 'description': {},\n- 'sources.note': {},\n- 'sources.description': {}\n- },\n- 'agenda_item': {\n- 'classification': {},\n- 'location': {},\n- 'organization.name': {},\n- 'description': {},\n- 'sources.note': {},\n- 'sources.description': {}\n- },\n- 'motion': {\n- 'classification': {},\n- 'organization.name': {},\n- 'creator.name': {},\n- 'text': {},\n- 'sources.description': {}\n- },\n- 'vote_event': {\n- 'classification': {},\n- 'organization.name': {},\n- 'creator.name': {},\n- 'text': {},\n- 'sources.description': {}\n- },\n- 'items': {\n- 'classification': {},\n- 'name': {},\n- 'description': {}\n- }\n-}\n-\n-# The allowed date intervals for an ES data_histogram that can be\n-# requested via the REST API\n-ALLOWED_DATE_INTERVALS = ('day', 'week', 'month', 'quarter', 'year')\n-\n-# Name of the Elasticsearch index used to store URL resolve documnts\n-RESOLVER_URL_INDEX = 'resolver'\n-\n-# Determines if API usage events should be logged\n-USAGE_LOGGING_ENABLED = True\n-# Name of the Elasticsearch index used to store logged events\n-USAGE_LOGGING_INDEX = 'usage_logs'\n-\n-ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n-PROJECT_PATH = os.path.dirname(ROOT_PATH)\n-DATA_DIR_PATH = os.path.join(PROJECT_PATH, 'data')\n-STATIC_DIR_PATH = os.path.join(DATA_DIR_PATH, 'static')\n-LOCAL_DUMPS_DIR = os.path.join(PROJECT_PATH, 'local_dumps')\n-DUMPS_DIR = os.path.join(PROJECT_PATH, 'dumps')\n-\n-# URL where of the API instance that should be used for management commands\n-# Should include API version and a trailing slash.\n-# Can be overridden in the CLI when required, for instance when the user wants\n-# to download dumps from another API instance than the one hosted by OpenState\n-API_URL = os.getenv('API_URL', 'http://frontend:5000/v1/')\n-\n-# URL where collection dumps are hosted. 
This is used for generating full URLs\n-# to dumps in the /dumps endpoint\n-DUMP_URL = 'http://dumps.opencultuurdata.nl/'\n-\n-\n-class StackdriverJsonFormatter(jsonlogger.JsonFormatter, object):\n- \"\"\" Formats the record to a Google Stackdriver compatible json string \"\"\"\n-\n- def __init__(self, fmt=\"%(levelname) %(message)\", *args, **kwargs):\n- jsonlogger.JsonFormatter.__init__(self, fmt=fmt, *args, **kwargs)\n-\n- def process_log_record(self, log_record):\n- log_record['severity'] = log_record['levelname']\n- del log_record['levelname']\n- return super(StackdriverJsonFormatter, self).process_log_record(log_record)\n-\n-\n-LOGGING = {\n- 'version': 1,\n- 'disable_existing_loggers': False,\n- 'formatters': {\n- 'basic': {\n- 'format': '[%(module)s] %(levelname)s %(message)s'\n- },\n- 'advanced': {\n- 'format': '[%(asctime)s] [%(name)s] [%(levelname)s] - %(message)s',\n- 'datefmt': '%Y-%m-%d %H:%M:%S'\n- },\n- 'stackdriver': {\n- '()': StackdriverJsonFormatter,\n- }\n- },\n- 'handlers': {\n- 'default': {\n- 'level': 'DEBUG',\n- 'class': 'logging.StreamHandler',\n- 'formatter': 'basic',\n- 'stream': 'ext://sys.stdout'\n- },\n- 'bugsnag': {\n- 'level': 'WARNING',\n- '()': BugsnagHandler,\n- }\n- },\n- 'loggers': {\n- 'ocd_frontend': {\n- 'handlers': ['default'],\n- 'level': 'DEBUG',\n- 'propagate': False,\n- },\n- 'bugsnag': {\n- 'handlers': ['bugsnag']\n- }\n- },\n- 'root': {\n- 'handlers': ['default', 'bugsnag'],\n- 'level': 'INFO',\n- },\n-}\n-\n-if os.getenv('GCE_STACKDRIVER'):\n- # Set default handler to format for Google Stackdriver logging\n- LOGGING['handlers']['default'] = {\n- 'level': 'DEBUG',\n- 'class': 'logging.StreamHandler',\n- 'formatter': 'stackdriver',\n- 'stream': 'ext://sys.stdout',\n- }\n-\n-if BUGSNAG_APIKEY:\n- import bugsnag\n- from bugsnag.handlers import BugsnagHandler\n-\n- # Needs to be called before dictConfig\n- bugsnag.configure(\n- api_key=BUGSNAG_APIKEY,\n- project_root=ROOT_PATH,\n- release_stage=RELEASE_STAGE,\n- app_version=APP_VERSION,\n- )\n-\n-# Configure python logging system with LOGGING dict\n-logging.config.dictConfig(LOGGING)\n-\n-THUMBNAILS_TEMP_DIR = '/tmp'\n-\n-THUMBNAILS_MEDIA_TYPES = {'image/jpeg', 'image/png', 'image/tiff'}\n-THUMBNAILS_DIR = os.path.join(ROOT_PATH, '.thumbnail-cache')\n-\n-THUMBNAIL_SMALL = 250\n-THUMBNAIL_MEDIUM = 500\n-THUMBNAIL_LARGE = 1000\n-\n-THUMBNAIL_SIZES = {\n- 'large': {'size': (THUMBNAIL_LARGE, THUMBNAIL_LARGE), 'type': 'aspect'},\n- 'medium': {'size': (THUMBNAIL_MEDIUM, THUMBNAIL_MEDIUM), 'type': 'aspect'},\n- 'small': {'size': (THUMBNAIL_SMALL, THUMBNAIL_SMALL), 'type': 'aspect'},\n- 'large_sq': {'size': (THUMBNAIL_LARGE, THUMBNAIL_LARGE), 'type': 'crop'},\n- 'medium_sq': {'size': (THUMBNAIL_MEDIUM, THUMBNAIL_MEDIUM),\n- 'type': 'crop'},\n- 'small_sq': {'size': (THUMBNAIL_SMALL, THUMBNAIL_SMALL), 'type': 'crop'},\n-}\n-\n-THUMBNAIL_URL = '/media/'\n-\n-# Allow any settings to be defined in local_settings.py which should be\n-# ignored in your version control system allowing for settings to be\n-# defined per machine.\n-try:\n- from local_settings import *\n-except ImportError:\n- pass\ndiff --git a/ocd_frontend/rest/tasks.py b/ocd_frontend/rest/tasks.py\ndeleted file mode 100644\n--- a/ocd_frontend/rest/tasks.py\n+++ /dev/null\n@@ -1,162 +0,0 @@\n-from datetime import datetime\n-\n-from flask import current_app\n-\n-\n-def log_event(user_agent, referer, user_ip, created_at, event_type, **kwargs):\n- \"\"\"Log user activity events to the specified 'usage logging'\n- ElasticSearch index.\n-\n- :param 
user_agent: the user's raw user agent string\n- :type user_agent: str\n- :param referer: the contents of the HTTP referer request header\n- :type referer: str or None\n- :param user_ip: the user's IP address\n- :type user_ip: str\n- :param created_at: the datetime when the event was created (in UTC)\n- :type created_at: datetime.datetime\n- :param event_type: the name of the event type; available event types are\n- specified under ``available_event_types``\n- :type event_type: str\n- :param kwargs: any additional arguments will be passed on to the\n- function responsible for processing the event\n- \"\"\"\n-\n- if not current_app.config['USAGE_LOGGING_ENABLED']:\n- return\n-\n- available_event_types = {\n- 'search': search_event,\n- 'search_similar': search_similar_event,\n- 'sources': sources_event,\n- 'get_object': get_object_event,\n- 'get_object_source': get_object_event,\n- 'resolve_redirect': resolve_event,\n- 'resolve_filepath': resolve_event,\n- }\n-\n- if event_type not in available_event_types.keys():\n- raise ValueError('\"%s\" is an unknown event type' % event_type)\n-\n- # Base structure of an event\n- event = {\n- 'created_at': created_at,\n- 'processed_at': datetime.utcnow(),\n- 'user_properties': {\n- 'user_agent': user_agent,\n- 'referer': referer,\n- 'ip': user_ip\n- },\n- 'event_properties': available_event_types[event_type](**kwargs)\n- }\n-\n- current_app.es.create(index=current_app.config['USAGE_LOGGING_INDEX'],\n- doc_type=event_type, body=event)\n-\n- return event\n-\n-\n-def search_event(query, hits, n_total_hits, query_time_ms, doc_type, source_id=None):\n- \"\"\"Format the properties of the ``search`` event.\n-\n- :param query: a dictionary that specifies the query and its options\n- :type query: dict\n- :param hits: a list of the returned hits. Each item in the list should\n- contain a dictionary with the document and source ID.\n- :type hits: list\n- :param n_total_hits: number of total hits that matched the query\n- :type n_total_hits: int\n- :param query_time_ms: duration of the query in milliseconds\n- :type query_time_ms: int\n- :param doc_type: the type of the document\n- :type doc_type: str\n- :param source_id: specifies which index was targeted. If ``source_id``\n- is ``None``, the search was executed against the\n- combined index.\n- :type source_id: str or None\n- \"\"\"\n-\n- return {\n- 'source_id': source_id,\n- 'query': query,\n- 'hits': hits,\n- 'n_total_hits': n_total_hits,\n- 'query_time_ms': query_time_ms,\n- 'doc_type': doc_type,\n- }\n-\n-\n-def search_similar_event(similar_to_source_id, similar_to_object_id, query,\n- hits, n_total_hits, query_time_ms, doc_type):\n- \"\"\"Format the properties of the ``search_similar`` event.\n-\n- :param similar_to_source_id: specifies which index was targeted. If\n- ``similar_to_source_id`` is ``None``, the\n- similarity search was executed against the\n- combined index.\n- :type similar_to_source_id: str or None\n- :param similar_to_object_id: the ID of the object for which similar\n- items are requested.\n- :type similar_to_object_id: str\n- :param query: a dictionary that specifies the query and its options\n- :type query: dict\n- :param hits: a list of the returned hits.
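A minimal sketch of the dispatch pattern used by log_event above: the event type selects a formatter and the remaining keyword arguments are passed through to it. format_event and EVENT_FORMATTERS are hypothetical stand-ins, not this module's API:

def sources_event(query_time_ms):
    # Same shape as the formatter defined further below
    return {'query_time_ms': query_time_ms}

EVENT_FORMATTERS = {'sources': sources_event}

def format_event(event_type, **kwargs):
    # Unknown event types fail loudly, mirroring log_event
    if event_type not in EVENT_FORMATTERS:
        raise ValueError('"%s" is an unknown event type' % event_type)
    return EVENT_FORMATTERS[event_type](**kwargs)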
Each item in the list should\n- contain a dictionary with the document and source ID.\n- :type hits: list\n- :param n_total_hits: number of total hits that matched the query\n- :type n_total_hits: int\n- :param query_time_ms: duration of the query in milliseconds\n- :type query_time_ms: int\n- :param doc_type: the type of the document\n- :type doc_type: str\n- \"\"\"\n- return {\n- 'similar_to_object': {\n- 'source_id': similar_to_source_id,\n- 'object_id': similar_to_object_id\n- },\n- 'query': query,\n- 'hits': hits,\n- 'n_total_hits': n_total_hits,\n- 'query_time_ms': query_time_ms,\n- 'doc_type': doc_type,\n- }\n-\n-\n-def sources_event(query_time_ms):\n- \"\"\"Format the properties of the ``sources`` event.\n-\n- :param query_time_ms: duration of the query in milliseconds\n- :type query_time_ms: int\n- \"\"\"\n- return {\n- 'query_time_ms': query_time_ms\n- }\n-\n-\n-def get_object_event(source_id, object_id, doc_type):\n- \"\"\"Format the properties of the ``get_object`` event.\n-\n- :param source_id: the ID of the source to which the document belongs\n- :type source_id: str\n- :param object_id: the ID of the requested object\n- :type object_id: str\n- :param doc_type: the type of the document\n- :type doc_type: str\n- \"\"\"\n- return {\n- 'source_id': source_id,\n- 'object_id': object_id,\n- 'doc_type': doc_type,\n- }\n-\n-\n-def resolve_event(url_id):\n- \"\"\"Format the properties of the ``resolve`` event.\n-\n- :param url_id: the resolve ID of the URL that was resolved\n- :type url_id: str\n- \"\"\"\n- return {\n- 'url_id': url_id\n- }\ndiff --git a/ocd_frontend/rest/views.py b/ocd_frontend/rest/views.py\ndeleted file mode 100644\n--- a/ocd_frontend/rest/views.py\n+++ /dev/null\n@@ -1,843 +0,0 @@\n-import copy\n-import glob\n-import os\n-from datetime import datetime\n-from hashlib import sha1\n-from urlparse import urljoin\n-\n-from elasticsearch import NotFoundError\n-from flask import (\n- Blueprint, current_app, request, jsonify, redirect, url_for, send_file, )\n-\n-import settings\n-from .
import OcdApiError, decode_json_post_data, tasks\n-\n-bp = Blueprint('api', __name__)\n-\n-\n-def validate_from_and_size(data):\n- # Check if 'size' was specified, if not, fallback to default\n- try:\n- n_size = int(\n- data.get('size', current_app.config['DEFAULT_SEARCH_SIZE']))\n- except ValueError:\n- raise OcdApiError('Invalid value for \\'size\\'', 400)\n- if n_size < 0 or n_size > current_app.config['MAX_SEARCH_SIZE']:\n- raise OcdApiError('Value of \\'size\\' must be between 0 and %s' %\n- current_app.config['MAX_SEARCH_SIZE'], 400)\n-\n- # Check if 'from' was specified, if not, fallback to zero\n- try:\n- n_from = int(data.get('from', 0))\n- except ValueError:\n- raise OcdApiError('Invalid value for \\'from\\'', 400)\n- if n_from < 0:\n- raise OcdApiError('Value of \\'from\\' must be 0 or larger', 400)\n-\n- return n_from, n_size\n-\n-\n-def parse_search_request(data, doc_type, mlt=False):\n- # Return an error when no query or an empty query string is provided\n- query = data.get('query', None)\n-\n- scroll = data.get('scroll', None)\n- scroll_id = data.get('scroll_id', None)\n-\n- # if not query and not mlt:\n- # raise OcdApiError('Missing \\'query\\'', 400)\n-\n- # Additional fields requested to include in the response\n- include_fields = [\n- f.strip() for f in data.get('include_fields', []) if f.strip()]\n-\n- n_from, n_size = validate_from_and_size(data)\n-\n- # Check if 'sort' was specified, if not, fallback to '_score'\n- sort = '_score'\n- if data.get('sort'):\n- sort = data.get('sort', '_score')\n- if sort not in current_app.config['SORTABLE_FIELDS'].get(doc_type, []):\n- raise OcdApiError(\n- 'Invalid value for \\'sort\\', sortable fields are: %s'\n- % ', '.join(current_app.config['SORTABLE_FIELDS'].get(doc_type, [])), 400)\n-\n- # Check if 'order' was specified, if not, fallback to desc\n- order = data.get('order', 'desc')\n- if order not in ['asc', 'desc']:\n- raise OcdApiError(\n- 'Invalid value for \\'order\\', must be asc or desc', 400)\n-\n- # Check which 'facets' are requested\n- req_facets = data.get('facets', {})\n- if type(req_facets) is not dict:\n- raise OcdApiError('\\'facets\\' should be an object', 400)\n-\n- facets = {}\n- available_facets = copy.deepcopy(\n- current_app.config['AVAILABLE_FACETS'].get(doc_type, {}))\n- available_facets.update(current_app.config['COMMON_FACETS'])\n-\n- # Inspect all requested facets and override the default settings\n- # where necessary\n- for facet, facet_opts in req_facets.iteritems():\n- if facet not in available_facets:\n- raise OcdApiError('\\'%s\\' is not a valid facet' % facet, 400)\n-\n- if type(facet_opts) is not dict:\n- raise OcdApiError('\\'facets.%s\\' should contain an object' % facet,\n- 400)\n-\n- # Take the default facet options from the settings\n- facets[facet] = available_facets[facet]\n- f_type = facets[facet].keys()[0]\n- if f_type == 'terms':\n- if 'size' in facet_opts.keys():\n- size = facet_opts['size']\n- if type(size) is not int:\n- raise OcdApiError('\\'facets.%s.size\\' should be an '\n- 'integer' % facet, 400)\n-\n- facets[facet][f_type]['size'] = size\n-\n- elif f_type == 'date_histogram':\n- if 'interval' in facet_opts.keys():\n- interval = facet_opts['interval']\n- if type(interval) is not unicode:\n- raise OcdApiError('\\'facets.%s.interval\\' should be '\n- 'a string' % facet, 400)\n-\n- if interval not in current_app.config[\n- 'ALLOWED_DATE_INTERVALS'\n- ]:\n- raise OcdApiError('\\'%s\\' is an invalid interval for '\n- '\\'facets.%s.interval\\''\n- % (interval, facet), 400)\n-\n-
facets[facet][f_type]['interval'] = interval\n-\n- # Check which 'filters' are requested\n- requested_filters = data.get('filters', {})\n- if type(requested_filters) is not dict:\n- raise OcdApiError('\\'filters\\' should be an object', 400)\n-\n- filters = []\n- # Inspect all requested filters and add them to the list of filters\n- for r_filter, filter_opts in requested_filters.iteritems():\n- # Use facet definitions to check if the requested filter can be used\n- if r_filter not in available_facets:\n- raise OcdApiError('\\'%s\\' is not a valid filter' % r_filter, 400)\n-\n- f_type = available_facets[r_filter].keys()[0]\n- if f_type == 'terms':\n- if 'terms' not in filter_opts:\n- raise OcdApiError(\n- 'Missing \\'filters.%s.terms\\'' % r_filter, 400)\n-\n- if type(filter_opts['terms']) is not list:\n- raise OcdApiError('\\'filters.%s.terms\\' should be an array'\n- % r_filter, 400)\n-\n- # Check the type of each item in the list\n- for term in filter_opts['terms']:\n- if type(term) is not unicode and type(term) is not int:\n- raise OcdApiError('\\'filters.%s.terms\\' should only '\n- 'contain strings and integers'\n- % r_filter, 400)\n-\n- filters.append({\n- 'terms': {\n- available_facets[r_filter]['terms']['field']: filter_opts[\n- 'terms']\n- }\n- })\n- elif f_type == 'date_histogram':\n- if type(filter_opts) is not dict:\n- raise OcdApiError('\\'filters.%s\\' should be an object'\n- % r_filter, 400)\n-\n- field = available_facets[r_filter]['date_histogram']['field']\n- r_filter = {'range': {field: {}}}\n-\n- if 'from' in filter_opts:\n- r_filter['range'][field]['from'] = filter_opts['from']\n-\n- if 'to' in filter_opts:\n- r_filter['range'][field]['to'] = filter_opts['to']\n-\n- filters.append(r_filter)\n-\n- return {\n- 'query': query,\n- 'n_size': n_size,\n- 'n_from': n_from,\n- 'sort': sort,\n- 'order': order,\n- 'facets': facets,\n- 'filters': filters,\n- 'include_fields': include_fields,\n- 'scroll': scroll,\n- 'scroll_id': scroll_id\n- }\n-\n-\n-def format_search_results(results, doc_type='items'):\n- del results['_shards']\n- del results['timed_out']\n-\n- for hit in results['hits']['hits']:\n- # del hit['_index']\n- # del hit['_type']\n- # del hit['_source']['hidden']\n- # kwargs = {\n- # 'object_id': hit['_id'],\n- # 'source_id': hit['_source']['meta']['collection'],\n- # 'doc_type': hit['_type'],\n- # '_external': True\n- # }\n- #hit['_source']['meta']['ocd_url'] = url_for('api.get_object', **kwargs)\n- for key in current_app.config['EXCLUDED_FIELDS_ALWAYS']:\n- try:\n- del hit['_source'][key]\n- except KeyError as e:\n- pass\n-\n- formatted_results = {}\n- for hit in results['hits']['hits']:\n- formatted_results.setdefault(hit['_type'], [])\n- for fld in ['_score', '_type', '_index', 'highlight']:\n- try:\n- hit['_source']['meta'][fld] = hit[fld]\n- except Exception as e:\n- pass\n- formatted_results[hit['_type']].append(hit['_source'])\n- del hit['_type']\n- del hit['_index']\n-\n- if 'aggregations' in results:\n- formatted_results['facets'] = results['aggregations']\n-\n- # we need this to keep the API backwards compatible\n- for f_name, f in formatted_results['facets'].iteritems():\n- f['terms'] = []\n- for b in f['buckets']:\n- if ('key' in b) and ('doc_count' in b):\n- f['terms'].append({\n- 'term': b['key'], 'count': b['doc_count']})\n-\n- formatted_results['meta'] = {\n- 'total': results['hits']['total'],\n- 'took': results['took']\n- }\n-\n- if '_scroll_id' in results:\n- formatted_results['meta']['scroll'] = results['_scroll_id']\n-\n- return 
formatted_results\n-\n-\n-def validate_included_fields(include_fields, excluded_fields,\n- allowed_to_include):\n- \"\"\"\n- Utility method that determines if the requested fields that the user wants\n- to see included may actually be included.\n-\n- :param include_fields: Fields requested to be included\n- :param excluded_fields: Fields that are excluded by default\n- :param allowed_to_include: Fields that the user is allowed to include\n- :return:\n- \"\"\"\n- for field in include_fields:\n- if field and field in excluded_fields and field in allowed_to_include:\n- excluded_fields.remove(field)\n- return excluded_fields\n-\n-\n-def format_sources_results(results):\n- sources = []\n-\n- for bucket in results['aggregations']['index']['buckets']:\n- source = {d['key']: d['doc_count']\n- for d in bucket['doc_type']['buckets']}\n- source['id'] = u'_'.join(bucket['key'].split('_')[1:-1])\n- sources.append(source)\n-\n- return {\n- 'sources': sources\n- }\n-\n-\n-# Retrieve the indices/sources and the total number of documents per\n-# type (counting only documents which are not hidden!)\n-@bp.route('/sources', methods=['GET'])\n-def list_sources():\n- es_q = {\n- 'query': {\n- 'bool': {\n- 'must_not': {\n- 'terms': {'_type': ['url', 'search']}\n- }\n- }\n- },\n- 'aggregations': {\n- 'index': {\n- 'terms': {\n- 'field': '_index'\n- },\n- 'aggregations': {\n- 'doc_type': {\n- 'terms': {\n- 'field': '_type'\n- }\n- }\n- }\n- }\n- },\n- \"size\": 0\n- }\n-\n- es_r = current_app.es.search(body=es_q)\n-\n- # Log a 'sources' event if usage logging is enabled\n- tasks.log_event(\n- user_agent=request.user_agent.string,\n- referer=request.headers.get('Referer', None),\n- user_ip=request.remote_addr,\n- created_at=datetime.utcnow(),\n- event_type='sources',\n- query_time_ms=es_r['took']\n- )\n-\n- return jsonify(format_sources_results(es_r))\n-\n-\n-@bp.route('/search', methods=['POST', 'GET'])\n-@bp.route('/search/<doc_type>', methods=['POST', 'GET'])\n-@decode_json_post_data\n-def search(doc_type=u'items'):\n- data = request.data or request.args\n- search_req = parse_search_request(data, doc_type)\n-\n- excluded_fields = validate_included_fields(\n- include_fields=search_req['include_fields'],\n- excluded_fields=current_app.config['EXCLUDED_FIELDS_SEARCH'],\n- allowed_to_include=current_app.config['ALLOWED_INCLUDE_FIELDS_SEARCH']\n- )\n-\n- # the fields we want to highlight in the Elasticsearch response\n- highlighted_fields = current_app.config['COMMON_HIGHLIGHTS']\n- highlighted_fields.update(\n- current_app.config['AVAILABLE_HIGHLIGHTS'].get(doc_type, {})\n- )\n-\n- # Construct the query we are going to send to Elasticsearch\n- es_q = {\n- 'query': {\n- 'bool': {\n- 'must': {\n- 'simple_query_string': {\n- 'query': search_req['query'],\n- 'default_operator': 'AND',\n- 'fields': current_app.config['SIMPLE_QUERY_FIELDS'].get(doc_type)\n- }\n- },\n- 'filter': {}\n- }\n- },\n- 'aggregations': search_req['facets'],\n- 'size': search_req['n_size'],\n- 'from': search_req['n_from'],\n- 'sort': {\n- search_req['sort']: {'order': search_req['order']}\n- },\n- '_source': {\n- 'excludes': excluded_fields\n- },\n- 'highlight': {\n- 'fields': highlighted_fields\n- }\n- }\n-\n- if not search_req['query']:\n- es_q['query']['bool']['must'] = {'match_all': {}}\n-\n- if search_req['filters']:\n- es_q['query']['bool']['filter'] = search_req['filters']\n-\n- if doc_type != settings.DOC_TYPE_DEFAULT:\n- request_doc_type = doc_type\n- else:\n- request_doc_type = None\n-\n- scroll = search_req['scroll']\n- scroll_id =
search_req['scroll_id']\n-\n- if scroll is not None:\n- if scroll_id is None:\n- es_r = current_app.es.search(\n- body=es_q,\n- index='_all',\n- doc_type=request_doc_type, scroll=scroll)\n- scroll_id = es_r['_scroll_id']\n- es_r = current_app.es.get_esclient().scroll(scroll=scroll, scroll_id=scroll_id)\n- else:\n- es_r = current_app.es.search(\n- body=es_q,\n- index='_all',\n- doc_type=request_doc_type)\n-\n- # Log a 'search' event if usage logging is enabled\n- hit_log = []\n- for hit in es_r['hits']['hits']:\n- hit_log.append({\n- # 'source_id': hit['_source']['meta']['source_id'],\n- 'object_id': hit['_id'],\n- 'score': hit['_score']\n- })\n-\n- tasks.log_event(\n- user_agent=request.user_agent.string,\n- referer=request.headers.get('Referer', None),\n- user_ip=request.remote_addr,\n- created_at=datetime.utcnow(),\n- event_type='search',\n- doc_type=doc_type,\n- query=search_req,\n- hits=hit_log,\n- n_total_hits=es_r['hits']['total'],\n- query_time_ms=es_r['took']\n- )\n-\n- return jsonify(format_search_results(es_r, doc_type))\n-\n-\n-@bp.route('/<source_id>/search', methods=['POST', 'GET'])\n-@bp.route('/<source_id>/<doc_type>/search', methods=['POST', 'GET'])\n-@decode_json_post_data\n-def search_source(source_id, doc_type=u'items'):\n- # Disallow searching in multiple indexes by providing a wildcard\n- if '*' in source_id:\n- raise OcdApiError('Invalid \\'source_id\\'', 400)\n-\n- index_name = '%s_%s' % (\n- current_app.config['DEFAULT_INDEX_PREFIX'], source_id)\n-\n- data = request.data or request.args\n- search_req = parse_search_request(data, doc_type)\n-\n- excluded_fields = validate_included_fields(\n- include_fields=search_req['include_fields'],\n- excluded_fields=current_app.config['EXCLUDED_FIELDS_DEFAULT'],\n- allowed_to_include=current_app.config['ALLOWED_INCLUDE_FIELDS_DEFAULT']\n- )\n-\n- # Construct the query we are going to send to Elasticsearch\n- es_q = {\n- 'query': {\n- 'bool': {\n- 'must': {\n- 'simple_query_string': {\n- 'query': search_req['query'],\n- 'default_operator': 'AND',\n- 'fields': current_app.config['SIMPLE_QUERY_FIELDS'].get(doc_type),\n- # 'fields': [\n- # 'title^3',\n- # 'authors^2',\n- # 'description^2',\n- # 'meta.original_object_id',\n- # 'all_text'\n- # ]\n- }\n- },\n- 'filter': {}\n- }\n- },\n- 'aggregations': search_req['facets'],\n- 'size': search_req['n_size'],\n- 'from': search_req['n_from'],\n- 'sort': {\n- search_req['sort']: {'order': search_req['order']}\n- },\n- '_source': {\n- 'excludes': excluded_fields\n- }\n- }\n-\n- if not search_req['query']:\n- es_q['query']['bool']['must'] = {'match_all': {}}\n-\n- if search_req['filters']:\n- es_q['query']['bool']['filter'] = search_req['filters']\n-\n- if doc_type != settings.DOC_TYPE_DEFAULT:\n- request_doc_type = doc_type\n- else:\n- request_doc_type = None\n-\n- try:\n- es_r = current_app.es.search(\n- body=es_q, index=index_name, doc_type=request_doc_type)\n- except NotFoundError:\n- raise OcdApiError('Source \\'%s\\' does not exist' % source_id, 404)\n-\n- # Log a 'search' event if usage logging is enabled\n- hit_log = []\n- for hit in es_r['hits']['hits']:\n- hit_log.append({\n- # 'source_id': hit['_source']['meta']['source_id'],\n- 'object_id': hit['_id'],\n- 'score': hit['_score']\n- })\n-\n- tasks.log_event(\n- user_agent=request.user_agent.string,\n- referer=request.headers.get('Referer', None),\n- user_ip=request.remote_addr,\n- created_at=datetime.utcnow(),\n- event_type='search',\n- source_id=source_id,\n- doc_type=doc_type,\n- query=search_req,\n- hits=hit_log,\n- n_total_hits=es_r['hits']['total'],\n-
query_time_ms=es_r['took']\n- )\n-\n- return jsonify(format_search_results(es_r, doc_type))\n-\n-\n-@bp.route('/<source_id>/<object_id>', methods=['GET'])\n-@bp.route('/<source_id>/<doc_type>/<object_id>', methods=['GET'])\n-def get_object(source_id, object_id, doc_type=u'items'):\n- index_name = '%s_%s' % (current_app.config['DEFAULT_INDEX_PREFIX'],\n- source_id)\n-\n- include_fields = [f.strip() for f in\n- request.args.get('include_fields', '').split(',') if\n- f.strip()]\n-\n- excluded_fields = validate_included_fields(\n- include_fields=include_fields,\n- excluded_fields=current_app.config['EXCLUDED_FIELDS_DEFAULT'],\n- allowed_to_include=current_app.config['ALLOWED_INCLUDE_FIELDS_DEFAULT']\n- )\n-\n- try:\n- obj = current_app.es.get(index=index_name, id=object_id,\n- doc_type=doc_type,\n- _source_exclude=excluded_fields)\n- except NotFoundError, e:\n- if e.error.startswith('IndexMissingException'):\n- message = 'Source \\'%s\\' does not exist' % source_id\n- else:\n- message = 'Document not found.'\n-\n- raise OcdApiError(message, 404)\n-\n- # Log a 'get_object' event if usage logging is enabled\n- tasks.log_event(\n- user_agent=request.user_agent.string,\n- referer=request.headers.get('Referer', None),\n- user_ip=request.remote_addr,\n- created_at=datetime.utcnow(),\n- event_type='get_object',\n- source_id=source_id,\n- doc_type=doc_type,\n- object_id=object_id\n- )\n-\n- for key in current_app.config['EXCLUDED_FIELDS_ALWAYS']:\n- try:\n- del obj['_source'][key]\n- except KeyError as e:\n- pass\n-\n- return jsonify(obj['_source'])\n-\n-\n-@bp.route('/<source_id>/<object_id>/source')\n-@bp.route('/<source_id>/<doc_type>/<object_id>/source')\n-def get_object_source(source_id, object_id, doc_type=u'items'):\n- index_name = '%s_%s' % (current_app.config['DEFAULT_INDEX_PREFIX'],\n- source_id)\n-\n- try:\n- obj = current_app.es.get(index=index_name, id=object_id,\n- doc_type=doc_type,\n- _source_include=['source_data'])\n- except NotFoundError, e:\n- if e.error.startswith('IndexMissingException'):\n- message = 'Source \\'%s\\' does not exist' % source_id\n- else:\n- message = 'Document not found.'\n-\n- raise OcdApiError(message, 404)\n-\n- resp = current_app.make_response(obj['_source']['source_data']['data'])\n- resp.mimetype = obj['_source']['source_data']['content_type']\n-\n- # Log a 'get_object_source' event if usage logging is enabled\n- tasks.log_event(\n- user_agent=request.user_agent.string,\n- referer=request.headers.get('Referer', None),\n- user_ip=request.remote_addr,\n- created_at=datetime.utcnow(),\n- event_type='get_object_source',\n- source_id=source_id,\n- doc_type=doc_type,\n- object_id=object_id\n- )\n-\n- return resp\n-\n-\n-@bp.route('/<source_id>/<object_id>/stats')\n-@bp.route('/<source_id>/<doc_type>/<object_id>/stats')\n-def get_object_stats(source_id, object_id, doc_type=u'items'):\n- index_name = '%s_%s' % (current_app.config['DEFAULT_INDEX_PREFIX'],\n- source_id)\n-\n- object_exists = current_app.es.exists(index=index_name, doc_type=doc_type,\n- id=object_id)\n- if not object_exists:\n- raise OcdApiError('Document or source not found.', 404)\n-\n- queries = [\n- (\n- 'n_appeared_in_search_results',\n- 'search',\n- {\n- \"query\": {\n- \"constant_score\": {\n- \"filter\": {\n- \"term\": {\n- \"event_properties.hits.object_id\": object_id\n- }\n- }\n- }\n- }\n- }\n- ),\n- (\n- 'n_appeared_in_similar_results',\n- 'similar',\n- {\n- \"query\": {\n- \"constant_score\": {\n- \"filter\": {\n- \"term\": {\n- \"event_properties.hits.object_id\": object_id\n- }\n- }\n- }\n- }\n- }\n- ),\n- (\n- 'n_get',\n- 'get_object',\n- {\n- \"query\": {\n- \"constant_score\": {\n- \"filter\": {\n- \"term\": {\n-
\"event_properties.object_id\": object_id\n- }\n- }\n- }\n- }\n- }\n- ),\n- (\n- 'n_get_source',\n- 'get_object_source',\n- {\n- \"query\": {\n- \"constant_score\": {\n- \"filter\": {\n- \"term\": {\n- \"event_properties.object_id\": object_id\n- }\n- }\n- }\n- }\n- }\n- )\n- ]\n-\n- search_body = []\n-\n- for query in queries:\n- search_body.append({\n- 'index': current_app.config['USAGE_LOGGING_INDEX'],\n- 'type': query[1],\n- 'size': 0\n- })\n- search_body.append(query[2])\n-\n- es_r = current_app.es.msearch(search_body)\n-\n- stats = {}\n- for query_i, result in enumerate(es_r['responses']):\n- stats[queries[query_i][0]] = result['hits']['total']\n-\n- return jsonify(stats)\n-\n-\n-@bp.route('//similar/', methods=['POST'])\n-@bp.route('/similar/', methods=['POST'])\n-@bp.route('///similar/', methods=['POST'])\n-@bp.route('/similar//', methods=['POST'])\n-@decode_json_post_data\n-def similar(object_id, source_id=None, doc_type=u'items'):\n- search_params = parse_search_request(request.data, doc_type, mlt=True)\n- # not relevant, as mlt already creates the query for us\n- search_params.pop('query')\n-\n- if source_id:\n- index_name = '%s_%s' % (current_app.config['DEFAULT_INDEX_PREFIX'],\n- source_id)\n- else:\n- index_name = '_all'\n-\n- excluded_fields = validate_included_fields(\n- include_fields=search_params['include_fields'],\n- excluded_fields=current_app.config['EXCLUDED_FIELDS_DEFAULT'],\n- allowed_to_include=current_app.config['ALLOWED_INCLUDE_FIELDS_DEFAULT']\n- )\n-\n- # FIXME: should do here something with the fields ...\n- es_q = {\n- 'query': {\n- 'bool': {\n- 'must': {\n- 'more_like_this': {\n- 'docs': [{\n- '_index': index_name,\n- '_type': doc_type,\n- '_id': object_id\n- }],\n- 'fields': [\n- 'title',\n- 'authors',\n- 'description',\n- 'meta.original_object_id',\n- 'all_text'\n- ]\n- }\n- },\n- 'filter': {}\n- }\n- },\n- 'aggregations': search_params['facets'],\n- 'size': search_params['n_size'],\n- 'from': search_params['n_from'],\n- 'sort': {\n- search_params['sort']: {'order': search_params['order']}\n- },\n- '_source': {\n- 'excludes': excluded_fields\n- }\n- }\n-\n- if search_params['filters']:\n- es_q['query']['bool']['filter'] = search_params['filters']\n-\n- try:\n- es_r = current_app.es.search(body=es_q, index=index_name,\n- _source_exclude=excluded_fields)\n- except NotFoundError:\n- raise OcdApiError('Source \\'%s\\' does not exist' % source_id, 404)\n-\n- # Log a 'search_similar' event if usage logging is enabled\n- hit_log = []\n- for hit in es_r['hits']['hits']:\n- hit_log.append({\n- 'source_id': hit['_source']['meta']['source_id'],\n- 'object_id': hit['_id'],\n- 'score': hit['_score']\n- })\n-\n- tasks.log_event(\n- user_agent=request.user_agent.string,\n- referer=request.headers.get('Referer', None),\n- user_ip=request.remote_addr,\n- created_at=datetime.utcnow(),\n- event_type='search_similar',\n- similar_to_source_id=source_id,\n- similar_to_object_id=object_id,\n- doc_type=doc_type,\n- query=search_params,\n- hits=hit_log,\n- n_total_hits=es_r['hits']['total'],\n- query_time_ms=es_r['took']\n- )\n-\n- return jsonify(format_search_results(es_r, doc_type))\n-\n-\n-@bp.route('/resolve/', methods=['GET'])\n-def resolve(url_id):\n- try:\n- resp = current_app.es.get(\n- index=current_app.config['RESOLVER_URL_INDEX'],\n- doc_type='url', id=url_id)\n-\n- file_hash = sha1(resp['_source']['original_url']).hexdigest()\n-\n- if not os.path.exists(settings.DATA_DIR_PATH):\n- raise OSError('DATA_DIR_PATH does not exist: %s' % settings.DATA_DIR_PATH)\n-\n- path = 
os.path.join(settings.DATA_DIR_PATH, file_hash)\n- if os.path.exists(path):\n- # Log a 'resolve_filepath' event if usage logging is enabled\n- tasks.log_event(\n- user_agent=request.user_agent.string,\n- referer=request.headers.get('Referer', None),\n- user_ip=request.remote_addr,\n- created_at=datetime.utcnow(),\n- event_type='resolve_filepath',\n- url_id=url_id,\n- )\n- return send_file(\n- path,\n- mimetype=resp['_source'].get('content_type'),\n- attachment_filename=resp['_source'].get('file_name',\n- file_hash),\n- )\n-\n- # Log a 'resolve' event if usage logging is enabled\n- tasks.log_event(\n- user_agent=request.user_agent.string,\n- referer=request.headers.get('Referer', None),\n- user_ip=request.remote_addr,\n- created_at=datetime.utcnow(),\n- event_type='resolve_redirect',\n- url_id=url_id,\n- )\n- return redirect(resp['_source']['original_url'])\n-\n- except NotFoundError:\n- if request.mimetype == 'application/json':\n- raise OcdApiError('URL is not available; the source may no longer '\n- 'be available', 404)\n-\n- return 'There is no original url available. You may ' \\\n- 'have an outdated URL, or the resolve id is incorrect.' \\\n- '', 404\n-\n-\n-@bp.route('/dumps', methods=['GET'])\n-def list_dumps():\n- dump_list = glob.glob('%s/*/*.gz' % current_app.config.get('DUMPS_DIR'))\n- dumps = {}\n-\n- for dump in dump_list:\n- index_name, dump_file = dump.replace(\n- '%s/' % current_app.config.get('DUMPS_DIR'), '').split('/')\n- if index_name not in dumps:\n- dumps[index_name] = []\n- dumps[index_name].append(urljoin(current_app.config['DUMP_URL'],\n- dump_file))\n-\n- return jsonify({'dumps': dumps})\ndiff --git a/ocd_frontend/wsgi.py b/ocd_frontend/wsgi.py\ndeleted file mode 100644\n--- a/ocd_frontend/wsgi.py\n+++ /dev/null\n@@ -1,33 +0,0 @@\n-import os.path\n-\n-from werkzeug.serving import run_simple\n-from werkzeug.wsgi import DispatcherMiddleware\n-\n-import rest\n-\n-application = DispatcherMiddleware(rest.create_app(), {\n- '/v1': rest.create_app()\n-})\n-\n-# For testing purposes, add a route that serves static files from a directory.\n-# DO NOT USE IN PRODUCTION. 
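A minimal sketch of the prefix dispatching used by the wsgi module above, assuming create_app() returns a Flask WSGI app as in ocd_frontend.rest (in recent Werkzeug releases DispatcherMiddleware lives in werkzeug.middleware.dispatcher instead):

from flask import Flask
from werkzeug.wsgi import DispatcherMiddleware

def create_app():
    # Stand-in for ocd_frontend.rest.create_app
    return Flask(__name__)

# Serve the same app both at the root and under the /v1 prefix
application = DispatcherMiddleware(create_app(), {'/v1': create_app()})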
Serve static files through your webserver instead.\n-if application.app.config.get('DEBUG', False):\n- from flask import send_from_directory\n-\n-\n- @application.app.route('/data/<filename>')\n- def download_dump(filename):\n- collection_name = '_'.join(filename.split('_')[:2])\n- base_dir = os.path.join(application.app.config.get('DUMPS_DIR'),\n- collection_name)\n- return send_from_directory(base_dir, filename, as_attachment=True)\n-\n-\n- @application.app.route('/media/<path:filename>')\n- def serve_media(filename):\n- base_dir = os.path.join(application.app.config.get('THUMBNAILS_DIR'),\n- os.path.dirname(filename))\n- return send_from_directory(base_dir, os.path.basename(filename))\n-\n-if __name__ == '__main__':\n- run_simple('0.0.0.0', 5000, application, processes=8, use_reloader=True, use_debugger=True)\ndiff --git a/scripts/generate.py b/scripts/generate.py\n--- a/scripts/generate.py\n+++ b/scripts/generate.py\n@@ -27,32 +27,26 @@ def decorator(f):\n def _generate_for_organisations(name, almanak):\n organisations = [{\n \"id\": \"%s_municipality\" % (name.lower(),),\n- \"extractor\": \"ocd_backend.extractors.odata.ODataExtractor\",\n+ \"extractor\": \"ocd_backend.extractors.allmanak.AllmanakMunicipalityExtractor\",\n \"transformer\": \"ocd_backend.transformers.BaseTransformer\",\n- \"item\": \"ocd_backend.items.organisations.MunicipalityOrganisationItem\",\n+ \"item\": \"ocd_backend.items.organizations.MunicipalityOrganizationItem\",\n \"enrichers\": [],\n- \"loader\": \"ocd_backend.loaders.ElasticsearchLoader\",\n- \"cleanup\": \"ocd_backend.tasks.CleanupElasticsearch\",\n+ \"loader\": \"ocd_backend.loaders.elasticsearch.elasticsearch_loader\",\n+ \"cleanup\": \"ocd_backend.tasks.cleanup_elasticsearch\",\n \"hidden\": False,\n \"index_name\": name.lower(),\n- \"file_url\": (\n- \"http://dataderden.cbs.nl/ODataApi/OData/45006NED/Gemeenten\"),\n \"doc_type\": \"organizations\",\n- \"filter\": {\n- \"Title\": name.lower()\n- },\n \"keep_index_on_update\": True\n }, {\n \"id\": \"%s_organisations\" % (name.lower(),),\n- \"extractor\": \"ocd_backend.extractors.almanak.OrganisationsExtractor\",\n+ \"extractor\": \"ocd_backend.extractors.allmanak.AllmanakMunicipalityExtractor\",\n \"transformer\": \"ocd_backend.transformers.BaseTransformer\",\n- \"item\": \"ocd_backend.items.organisations.AlmanakOrganisationItem\",\n+ \"item\": \"ocd_backend.items.organizations.MunicipalityOrganizationItem\",\n \"enrichers\": [],\n- \"loader\": \"ocd_backend.loaders.ElasticsearchLoader\",\n- \"cleanup\": \"ocd_backend.tasks.CleanupElasticsearch\",\n+ \"loader\": \"ocd_backend.loaders.elasticsearch.elasticsearch_loader\",\n+ \"cleanup\": \"ocd_backend.tasks.cleanup_elasticsearch\",\n \"hidden\": False,\n \"index_name\": name.lower(),\n- \"file_url\": almanak,\n \"doc_type\": \"organizations\",\n \"item_xpath\": \"//\",\n \"keep_index_on_update\": True\n@@ -63,12 +57,12 @@ def _generate_for_organisations(name, almanak):\n def _generate_for_persons(name, almanak):\n persons = [{\n \"id\": \"%s_persons\" % (name.lower(),),\n- \"extractor\": \"ocd_backend.extractors.almanak.PersonsExtractor\",\n- \"transformer\": \"ocd_backend.transformers.BaseTransformer\",\n- \"item\": \"ocd_backend.items.persons.AlmanakPersonItem\",\n+ \"extractor\": \"ocd_backend.extractors.allmanak.AllmanakPersonsExtractor\",\n+ \"transformer\": \"ocd_backend.transformers.transformer\",\n+ \"item\": \"ocd_backend.items.persons.AllmanakPersonItem\",\n \"enrichers\": [],\n- \"loader\": \"ocd_backend.loaders.ElasticsearchLoader\",\n- \"cleanup\":
\"ocd_backend.tasks.CleanupElasticsearch\",\n+ \"loader\": \"ocd_backend.loaders.elasticsearch.elasticsearch_loader\",\n+ \"cleanup\": \"ocd_backend.tasks.cleanup_elasticsearch\",\n \"hidden\": False,\n \"index_name\": name.lower(),\n \"file_url\": almanak,\n@@ -83,11 +77,11 @@ def _generate_for_msi(name, almanak):\n sources = [{\n \"id\": \"%s_meetings\" % (name.lower(),),\n \"extractor\": \"ocd_backend.extractors.ibabs.IBabsMeetingsExtractor\",\n- \"transformer\": \"ocd_backend.transformers.BaseTransformer\",\n+ \"transformer\": \"ocd_backend.transformers.transformer\",\n \"item\": \"ocd_backend.items.ibabs_meeting.IBabsMeetingItem\",\n \"enrichers\": [],\n- \"loader\": \"ocd_backend.loaders.ElasticsearchLoader\",\n- \"cleanup\": \"ocd_backend.tasks.CleanupElasticsearch\",\n+ \"loader\": \"ocd_backend.loaders.elasticsearch.elasticsearch_loader\",\n+ \"cleanup\": \"ocd_backend.tasks.cleanup_elasticsearch\",\n \"hidden\": False,\n \"index_name\": name.lower(),\n \"doc_type\": \"events\",\n@@ -96,11 +90,11 @@ def _generate_for_msi(name, almanak):\n }, {\n \"id\": \"%s_reports\" % (name.lower(),),\n \"extractor\": \"ocd_backend.extractors.ibabs.IBabsReportsExtractor\",\n- \"transformer\": \"ocd_backend.transformers.BaseTransformer\",\n+ \"transformer\": \"ocd_backend.transformers.transformer\",\n \"item\": \"ocd_backend.items.ibabs_meeting.IBabsReportItem\",\n \"enrichers\": [],\n- \"loader\": \"ocd_backend.loaders.ElasticsearchLoader\",\n- \"cleanup\": \"ocd_backend.tasks.CleanupElasticsearch\",\n+ \"loader\": \"ocd_backend.loaders.elasticsearch.elasticsearch_loader\",\n+ \"cleanup\": \"ocd_backend.tasks.cleanup_elasticsearch\",\n \"hidden\": False,\n \"index_name\": name.lower(),\n \"doc_type\": \"events\",\n@@ -110,11 +104,11 @@ def _generate_for_msi(name, almanak):\n }, {\n \"id\": \"%s_committees\" % (name.lower(),),\n \"extractor\": \"ocd_backend.extractors.ibabs.IBabsCommitteesExtractor\",\n- \"transformer\": \"ocd_backend.transformers.BaseTransformer\",\n+ \"transformer\": \"ocd_backend.transformers.transformer\",\n \"item\": \"ocd_backend.items.ibabs_committee.CommitteeItem\",\n \"enrichers\": [],\n- \"loader\": \"ocd_backend.loaders.ElasticsearchLoader\",\n- \"cleanup\": \"ocd_backend.tasks.CleanupElasticsearch\",\n+ \"loader\": \"ocd_backend.loaders.elasticsearch.elasticsearch_loader\",\n+ \"cleanup\": \"ocd_backend.tasks.cleanup_elasticsearch\",\n \"hidden\": False,\n \"index_name\": name.lower(),\n \"doc_type\": \"organizations\",\ndiff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -1,2 +1,2 @@\n-__version_info__ = ('1', '2', '0')\n+__version_info__ = ('1', '3', '0')\n __version__ = '.'.join(__version_info__)\n", "test_patch": "diff --git a/bin/run_tests.sh b/bin/run_tests.sh\n--- a/bin/run_tests.sh\n+++ b/bin/run_tests.sh\n@@ -1,4 +1,4 @@\n #!/bin/sh\n \n-#nosetests -l debug --nocapture --with-coverage --cover-package=ocd_backend,ocd_frontend --cover-inclusive\n-nosetests --with-coverage --cover-package=ocd_backend,ocd_frontend --cover-inclusive\n+cd /opt/ori\n+nose2 -v -s tests\ndiff --git a/docker-compose.test.yml b/docker-compose.test.yml\n--- a/docker-compose.test.yml\n+++ b/docker-compose.test.yml\n@@ -5,14 +5,6 @@ services:\n context: .\n dockerfile: ocd_backend/Dockerfile.test\n image: \"openstatefoundation/open-raadsinformatie-backend:${BRANCH_NAME-latest}-test\"\n- frontend:\n- build:\n- context: .\n- dockerfile: ocd_frontend/Dockerfile.test\n- image: \"openstatefoundation/open-raadsinformatie-frontend:${BRANCH_NAME-latest}-test\"\n- neo4j:\n- 
environment:\n- - \"NEO4J_dbms_active__database=test.db\"\n bootstrap:\n image: appropriate/curl\n volumes:\ndiff --git a/ocd_backend/Dockerfile.test b/ocd_backend/Dockerfile.test\n--- a/ocd_backend/Dockerfile.test\n+++ b/ocd_backend/Dockerfile.test\n@@ -7,7 +7,7 @@ USER root\n RUN apk --update add nano\n \n # Install backend testing dependencies\n-RUN pip install --no-warn-conflicts pylint==1.8.4 mock==1.0.1 nose==1.3.4 coverage==4.5.1\n+RUN pip install --no-warn-conflicts pylint==1.8.4 nose2==0.9.1 coverage==4.5.1\n \n # Copy backend testing files\n COPY .pylintrc /opt/ori/.pylintrc\ndiff --git a/ocd_frontend/Dockerfile.test b/ocd_frontend/Dockerfile.test\ndeleted file mode 100644\n--- a/ocd_frontend/Dockerfile.test\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-FROM openstatefoundation/open-raadsinformatie-frontend\n-MAINTAINER Jurrian Tromp \n-\n-# Change to root for installing\n-USER root\n-\n-RUN apk --update add nano\n-\n-# Install backend testing dependencies\n-RUN pip install pylint==1.8.4 mock==1.0.1 nose==1.3.4 coverage==4.5.1 Flask-Testing==0.4.2\n-\n-# Copy frontend testing files\n-COPY .pylintrc /opt/ori/.pylintrc\n-COPY tests/__init__.py /opt/ori/tests/__init__.py\n-COPY tests/ocd_frontend /opt/ori/tests/ocd_frontend\n-\n-RUN chown -R 1000:1000 tests .pylintrc\n-\n-# Switching back to gunicorn user\n-USER gunicorn\ndiff --git a/tests/ocd_backend/__init__.py b/tests/ocd_backend/__init__.py\n--- a/tests/ocd_backend/__init__.py\n+++ b/tests/ocd_backend/__init__.py\n@@ -1,6 +0,0 @@\n-from .extractors import *\n-from .items import *\n-from .loaders import *\n-from .models import *\n-from .misc import *\n-from .transformers import *\ndiff --git a/tests/ocd_backend/extractors/__init__.py b/tests/ocd_backend/extractors/__init__.py\n--- a/tests/ocd_backend/extractors/__init__.py\n+++ b/tests/ocd_backend/extractors/__init__.py\n@@ -25,9 +25,9 @@ def setUp(self):\n self.source_definition = {\n 'id': 'test_definition',\n 'extractor': 'ocd_backend.extractors.staticfile.StaticJSONDumpExtractor',\n- 'transformer': 'ocd_backend.transformers.BaseTransformer',\n+ 'transformer': 'ocd_backend.transformers.transformer',\n 'item': 'ocd_backend.items.LocalDumpItem',\n- 'loader': 'ocd_backend.loaders.ElasticsearchLoader',\n+ 'loader': 'ocd_backend.loaders.elasticsearch.elasticsearch_loader',\n 'dump_path': dump_path,\n 'index_name': 'test_index',\n 'force_old_files': True,\n@@ -164,10 +164,3 @@ def test_check_path_file_too_small(self):\n \n with self.assertRaises(InvalidFile):\n self.mixin._check_path(self.test_cache_path + \"ab/ab/small_file\")\n-\n-\n-# Import test modules here so the noserunner can pick them up, and the\n-# ExtractorTestCase is parsed. Add additional testcases when required\n-from .staticfile import (\n- StaticfileExtractorTestCase, StaticJSONExtractorTestCase\n-)\ndiff --git a/tests/ocd_backend/extractors/staticfile.py b/tests/ocd_backend/extractors/test_staticfile.py\nsimilarity index 95%\nrename from tests/ocd_backend/extractors/staticfile.py\nrename to tests/ocd_backend/extractors/test_staticfile.py\n--- a/tests/ocd_backend/extractors/staticfile.py\n+++ b/tests/ocd_backend/extractors/test_staticfile.py\n@@ -2,9 +2,7 @@\n import json\n \n from ocd_backend.exceptions import ConfigurationError\n-from ocd_backend.extractors.staticfile import (\n- StaticJSONDumpExtractor, StaticJSONExtractor\n-)\n+from ocd_backend.extractors.staticfile import StaticJSONDumpExtractor, StaticJSONExtractor\n from . 
import ExtractorTestCase\n \n \ndiff --git a/tests/ocd_backend/items/__init__.py b/tests/ocd_backend/items/__init__.py\n--- a/tests/ocd_backend/items/__init__.py\n+++ b/tests/ocd_backend/items/__init__.py\n@@ -13,17 +13,9 @@ def setUp(self):\n 'extractor': (\n 'ocd_backend.extractors.staticfile.StaticJSONDumpExtractor'\n ),\n- 'transformer': 'ocd_backend.transformers.BaseTransformer',\n+ 'transformer': 'ocd_backend.transformers.transformer',\n 'item': 'ocd_backend.items.LocalDumpItem',\n- 'loader': 'ocd_backend.loaders.ElasticsearchLoader',\n+ 'loader': 'ocd_backend.loaders.elasticsearch.elasticsearch_loader',\n 'dump_path': dump_path,\n 'index_name': 'openbeelden'\n }\n-\n-\n-# Import test modules here so the noserunner can pick them up, and the\n-# ExtractorTestCase is parsed. Add additional testcases when required\n-#from .localdump import LocalDumpItemTestCase\n-#from .go_meeting import MeetingItemTestCase\n-from .notubiz_meeting import NotubizMeetingTestCase\n-#from .go_report import ReportItemTestCase\ndiff --git a/tests/ocd_backend/items/notubiz_meeting.py b/tests/ocd_backend/items/notubiz_meeting.py\ndeleted file mode 100644\n--- a/tests/ocd_backend/items/notubiz_meeting.py\n+++ /dev/null\n@@ -1,154 +0,0 @@\n-import json\n-import os\n-\n-from ocd_backend import celery_app\n-from ocd_backend.items.notubiz_meeting import NotubizMeetingItem\n-from ocd_backend.models import Meeting\n-from ocd_backend.models.database import Neo4jDatabase\n-from ocd_backend.models.serializers import Neo4jSerializer, JsonLDSerializer, JsonSerializer\n-from . import ItemTestCase\n-\n-\n-class NotubizMeetingTestCase(ItemTestCase):\n- def setUp(self):\n- self.PWD = os.path.dirname(__file__)\n- dump_path = os.path.abspath(os.path.join(self.PWD, '../test_dumps/notubiz_meeting_amsterdam.json'))\n-\n- self.source_definition = {\n- 'organisation_id': 281,\n- 'keep_index_on_update': True,\n- 'enrichers': [['ocd_backend.enrichers.media_enricher.static.LocalStaticMediaEnricher', None]],\n- 'cleanup': 'ocd_backend.tasks.CleanupElasticsearch',\n- 'doc_type': 'events',\n- 'sitename': 'Amsterdam',\n- 'municipality': 'Amsterdam',\n- 'id': 'amsterdam_meetings',\n- 'index_name': 'amsterdam',\n- 'base_url': 'https://api.notubiz.nl',\n- 'entity': 'meetings',\n- 'extractor': 'ocd_backend.extractors.notubiz.NotubizMeetingExtractor',\n- 'key': 'amsterdam',\n- 'wait_until_finished': True,\n- 'hidden': False,\n- 'loader': 'ocd_backend.loaders.ElasticsearchLoader',\n- 'item': 'ocd_backend.items.notubiz_meeting.Meeting',\n- }\n-\n- self.db = Neo4jDatabase(Neo4jSerializer())\n- self.cleanup_neo4j()\n-\n- celery_app.backend.remove(\"ori_identifier_autoincrement\")\n-\n- with open(dump_path, 'r') as f:\n- self.raw_item = f.read()\n-\n- self.meeting = json.loads(self.raw_item)\n-\n- self.meeting_ins = self._instantiate_meeting()\n-\n- jsonld_serializer = JsonLDSerializer()\n- self.jsonld_data = jsonld_serializer.serialize(self.meeting_ins.object_data)\n-\n- json_serializer = JsonSerializer()\n- self.json_data = json_serializer.serialize(self.meeting_ins.object_data)\n-\n- self.expected_jsonld = {\n- 'ori_identifier': 'https://id.openraadsinformatie.nl/1',\n- 'status': 'https://argu.co/ns/meeting/EventConfirmed',\n- 'name': u'raadscommissie Financi\\xebn',\n- 'classification': [u'Agenda'],\n- 'had_primary_source': u'https://argu.co/voc/mapping/amsterdam/notubiz/identifier/458902',\n- '@type': 'Meeting',\n- 'attachment': ['3', '4'],\n- 'agenda': {\n- '@list': ['5', '6', '7', '8', '9', '10', '12', '14', '20', '22', '23', '24', '25', '26', 
'27', '30',\n- '33', '34', '37', '40', '41', '42', '43', '46', '51', '52', '55', '58', '61', '64', '69',\n- '72', '76', '77', '80', '81', '85', '88', '92']\n- },\n- '@context': {\n- 'status': {'@id': 'http://schema.org/eventStatus', '@type': '@id'},\n- 'name': {'@id': 'http://schema.org/name'},\n- 'classification': {'@id': 'http://www.semanticdesktop.org/ontologies/2007/04/02/ncal#categories'},\n- 'had_primary_source': {'@id': 'http://www.w3.org/ns/prov#hadPrimarySource'},\n- '@base': 'https://id.openraadsinformatie.nl/',\n- 'attachment': {'@id': 'https://argu.co/ns/meeting/attachment', '@type': '@id'},\n- 'agenda': {'@id': 'https://argu.co/ns/meeting/agenda', '@type': '@id'},\n- 'organization': {'@id': 'http://schema.org/organizer', '@type': '@id'},\n- 'Meeting': 'https://argu.co/ns/meeting/Meeting',\n- 'start_date': {'@id': 'http://schema.org/startDate'},\n- 'committee': {'@id': 'https://argu.co/ns/meeting/committee', '@type': '@id'}\n- },\n- 'organization': '95',\n- 'start_date': '2018-02-08T13:30:00+01:00',\n- 'committee': '93'\n- }\n-\n- self.expected_json = {\n- 'ori_identifier': 'https://id.openraadsinformatie.nl/1',\n- 'status': 'https://argu.co/ns/meeting/EventConfirmed',\n- 'name': u'raadscommissie Financi\\xebn',\n- 'classification': [u'Agenda'],\n- 'had_primary_source': u'https://argu.co/voc/mapping/amsterdam/notubiz/identifier/458902',\n- 'attachment': [\n- 'https://id.openraadsinformatie.nl/3',\n- 'https://id.openraadsinformatie.nl/4'\n- ],\n- 'agenda': ['https://id.openraadsinformatie.nl/5', 'https://id.openraadsinformatie.nl/6',\n- 'https://id.openraadsinformatie.nl/7', 'https://id.openraadsinformatie.nl/8',\n- 'https://id.openraadsinformatie.nl/9', 'https://id.openraadsinformatie.nl/10',\n- 'https://id.openraadsinformatie.nl/12', 'https://id.openraadsinformatie.nl/14',\n- 'https://id.openraadsinformatie.nl/20', 'https://id.openraadsinformatie.nl/22',\n- 'https://id.openraadsinformatie.nl/23', 'https://id.openraadsinformatie.nl/24',\n- 'https://id.openraadsinformatie.nl/25', 'https://id.openraadsinformatie.nl/26',\n- 'https://id.openraadsinformatie.nl/27', 'https://id.openraadsinformatie.nl/30',\n- 'https://id.openraadsinformatie.nl/33', 'https://id.openraadsinformatie.nl/34',\n- 'https://id.openraadsinformatie.nl/37', 'https://id.openraadsinformatie.nl/40',\n- 'https://id.openraadsinformatie.nl/41', 'https://id.openraadsinformatie.nl/42',\n- 'https://id.openraadsinformatie.nl/43', 'https://id.openraadsinformatie.nl/46',\n- 'https://id.openraadsinformatie.nl/51', 'https://id.openraadsinformatie.nl/52',\n- 'https://id.openraadsinformatie.nl/55', 'https://id.openraadsinformatie.nl/58',\n- 'https://id.openraadsinformatie.nl/61', 'https://id.openraadsinformatie.nl/64',\n- 'https://id.openraadsinformatie.nl/69', 'https://id.openraadsinformatie.nl/72',\n- 'https://id.openraadsinformatie.nl/76', 'https://id.openraadsinformatie.nl/77',\n- 'https://id.openraadsinformatie.nl/80', 'https://id.openraadsinformatie.nl/81',\n- 'https://id.openraadsinformatie.nl/85', 'https://id.openraadsinformatie.nl/88',\n- 'https://id.openraadsinformatie.nl/92'],\n- 'organization': 'https://id.openraadsinformatie.nl/95',\n- 'start_date': '2018-02-08T13:30:00+01:00',\n- 'committee': 'https://id.openraadsinformatie.nl/93'\n- }\n-\n- self.rights = u'undefined' # for now ...\n- self.collection = u'amsterdam'\n-\n- def tearDown(self):\n- self.cleanup_neo4j()\n-\n- def cleanup_neo4j(self):\n- self.db.query('MATCH (n) DETACH DELETE n')\n-\n- def _instantiate_meeting(self):\n- \"\"\"\n- Instantiate the item 
from the raw and parsed item we have\n- \"\"\"\n- meeting = NotubizMeetingItem(self.source_definition, 'application/json', self.raw_item, self.meeting, None)\n- return meeting\n-\n- def test_meeting_get_ori_id(self):\n- self.assertEqual('https://id.openraadsinformatie.nl/1', self.meeting_ins.object_data.get_ori_identifier())\n-\n- def test_meeting_get_rights(self):\n- item = self._instantiate_meeting()\n- self.assertEqual(self.rights, item.get_rights())\n-\n- def test_meeting_get_collection(self):\n- item = self._instantiate_meeting()\n- self.assertEqual(self.collection, item.get_collection())\n-\n- def test_meeting_json(self):\n- for name, _ in Meeting.definitions(props=True, rels=True):\n- self.assertEqual(self.expected_json.get(name), self.json_data.get(name))\n-\n- def test_meeting_jsonld(self):\n- for name, _ in Meeting.definitions(props=True, rels=True):\n- self.assertEqual(self.expected_jsonld.get(name), self.jsonld_data.get(name))\ndiff --git a/tests/ocd_backend/items/go_meeting.py b/tests/ocd_backend/items/test_goapi_meeting.py\nsimilarity index 94%\nrename from tests/ocd_backend/items/go_meeting.py\nrename to tests/ocd_backend/items/test_goapi_meeting.py\n--- a/tests/ocd_backend/items/go_meeting.py\n+++ b/tests/ocd_backend/items/test_goapi_meeting.py\n@@ -23,9 +23,9 @@\n # self.source_definition = {\n # 'id': 'test_definition',\n # 'extractor': 'ocd_backend.extractors.staticfile.StaticJSONDumpExtractor',\n-# 'transformer': 'ocd_backend.transformers.BaseTransformer',\n+# 'transformer': 'ocd_backend.transformers.transformer',\n # 'item': 'ocd_backend.items.go_meeting.MeetingItem',\n-# 'loader': 'ocd_backend.loaders.ElasticsearchLoader',\n+# 'loader': 'ocd_backend.loaders.elasticsearch.elasticsearch_loader',\n # 'hidden': False,\n # 'index_name': 'den_helder',\n # 'base_url': 'https://gemeenteraad.denhelder.nl'\n@@ -213,22 +213,6 @@\n # # self.assertDictEqual(\n # # item.get_original_object_urls(), self.meeting_item_object_urls)\n # #\n-# # def test_meeting_get_rights(self):\n-# # item = self._instantiate_meeting()\n-# # self.assertEqual(item.get_rights(), self.rights)\n-# #\n-# # def test_meeting_item_get_rights(self):\n-# # item = self._instantiate_meeting_item()\n-# # self.assertEqual(item.get_rights(), self.rights)\n-# #\n-# # def test_meeting_get_collection(self):\n-# # item = self._instantiate_meeting()\n-# # self.assertEqual(item.get_collection(), self.collection)\n-# #\n-# # def test_meeting_item_get_collection(self):\n-# # item = self._instantiate_meeting_item()\n-# # self.assertEqual(item.get_collection(), self.collection)\n-# #\n # # def test_meeting_name(self):\n # # item = self._instantiate_meeting()\n # # data = item.get_object_model()\ndiff --git a/tests/ocd_backend/items/go_report.py b/tests/ocd_backend/items/test_goapi_report.py\nsimilarity index 92%\nrename from tests/ocd_backend/items/go_report.py\nrename to tests/ocd_backend/items/test_goapi_report.py\n--- a/tests/ocd_backend/items/go_report.py\n+++ b/tests/ocd_backend/items/test_goapi_report.py\n@@ -19,11 +19,11 @@\n # self.source_definition = {\n # \"id\": \"den_helder_resolutions\",\n # \"extractor\": \"ocd_backend.extractors.go.GemeenteOplossingenResolutionsExtractor\",\n-# \"transformer\": \"ocd_backend.transformers.BaseTransformer\",\n+# \"transformer\": \"ocd_backend.transformers.transformer\",\n # \"item\": \"ocd_backend.items.go_meeting.ReportItem\",\n # \"enrichers\": [],\n-# \"loader\": \"ocd_backend.loaders.ElasticsearchLoader\",\n-# \"cleanup\": \"ocd_backend.tasks.CleanupElasticsearch\",\n+# 
\"loader\": \"ocd_backend.loaders.elasticsearch.elasticsearch_loader\",\n+# \"cleanup\": \"ocd_backend.tasks.cleanup_elasticsearch\",\n # \"hidden\": False,\n # \"index_name\": \"den_helder\",\n # \"doc_type\": \"events\",\n@@ -119,14 +119,6 @@\n # self.assertDictEqual(\n # item.get_original_object_urls(), self.meeting_object_urls)\n #\n-# def test_meeting_get_rights(self):\n-# item = self._instantiate_meeting()\n-# self.assertEqual(item.get_rights(), self.rights)\n-#\n-# def test_meeting_get_collection(self):\n-# item = self._instantiate_meeting()\n-# self.assertEqual(item.get_collection(), self.collection)\n-#\n # def test_meeting_name(self):\n # item = self._instantiate_meeting()\n # data = item.get_object_model()\ndiff --git a/tests/ocd_backend/items/localdump.py b/tests/ocd_backend/items/test_localdump.py\nsimilarity index 83%\nrename from tests/ocd_backend/items/localdump.py\nrename to tests/ocd_backend/items/test_localdump.py\n--- a/tests/ocd_backend/items/localdump.py\n+++ b/tests/ocd_backend/items/test_localdump.py\n@@ -14,9 +14,9 @@\n # self.source_definition = {\n # 'id': 'test_definition',\n # 'extractor': 'ocd_backend.extractors.staticfile.StaticJSONDumpExtractor',\n-# 'transformer': 'ocd_backend.transformers.BaseTransformer',\n+# 'transformer': 'ocd_backend.transformers.transformer',\n # 'item': 'ocd_backend.items.LocalDumpItem',\n-# 'loader': 'ocd_backend.loaders.ElasticsearchLoader',\n+# 'loader': 'ocd_backend.loaders.elasticsearch.elasticsearch_loader',\n # 'dump_path': dump_path,\n # 'index_name': 'openbeelden'\n # }\n@@ -35,17 +35,6 @@\n # u'html': u'http://openbeelden.nl/media/749181/'\n # }\n #\n-#\n-# def test_item_collection(self):\n-# item = LocalDumpItem(self.source_definition, 'application/json',\n-# self.raw_item, self.item, None)\n-# self.assertEqual(item.get_collection(), self.collection)\n-#\n-# def test_get_rights(self):\n-# item = LocalDumpItem(self.source_definition, 'application/json',\n-# self.raw_item, self.item, None)\n-# self.assertEqual(item.get_rights(), self.rights)\n-#\n # def test_get_original_object_id(self):\n # item = LocalDumpItem(self.source_definition, 'application/json',\n # self.raw_item, self.item, None)\ndiff --git a/tests/ocd_backend/items/test_notubiz_meeting.py b/tests/ocd_backend/items/test_notubiz_meeting.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/ocd_backend/items/test_notubiz_meeting.py\n@@ -0,0 +1,169 @@\n+# TODO: Rewrite for Postgres\n+\n+# import json\n+# import os\n+# \n+# from ocd_backend import celery_app\n+# from ocd_backend.items.notubiz_meeting import NotubizMeetingItem\n+# from ocd_backend.models import Meeting\n+# from ocd_backend.models.serializers import JsonLDSerializer, JsonSerializer\n+# from . 
import ItemTestCase\n+#\n+#\n+# class NotubizMeetingTestCase(ItemTestCase):\n+# def setUp(self):\n+# self.PWD = os.path.dirname(__file__)\n+# dump_path = os.path.abspath(os.path.join(self.PWD, '../test_dumps/notubiz_meeting_amsterdam.json'))\n+#\n+# self.source_definition = {\n+# 'organisation_id': 281,\n+# 'keep_index_on_update': True,\n+# 'enrichers': [['ocd_backend.enrichers.media_enricher.static.local_static_media_enricher', None]],\n+# 'cleanup': 'ocd_backend.tasks.cleanup_elasticsearch',\n+# 'doc_type': 'events',\n+# 'sitename': 'Amsterdam',\n+# 'municipality': 'Amsterdam',\n+# 'id': 'amsterdam_meetings',\n+# 'index_name': 'amsterdam',\n+# 'base_url': 'https://api.notubiz.nl',\n+# 'entity': 'meetings',\n+# 'extractor': 'ocd_backend.extractors.notubiz.NotubizMeetingExtractor',\n+# 'key': 'amsterdam',\n+# 'wait_until_finished': True,\n+# 'hidden': False,\n+# 'loader': 'ocd_backend.loaders.elasticsearch.elasticsearch_loader',\n+# 'item': 'ocd_backend.items.notubiz_meeting.Meeting',\n+# }\n+#\n+# self.db = Neo4jDatabase(Neo4jSerializer())\n+# self.cleanup_neo4j()\n+#\n+# celery_app.backend.remove(\"ori_identifier_autoincrement\")\n+#\n+# with open(dump_path, 'r') as f:\n+# self.raw_item = f.read()\n+#\n+# self.meeting = json.loads(self.raw_item)\n+#\n+# self.meeting_ins = self._instantiate_meeting()\n+#\n+# jsonld_serializer = JsonLDSerializer()\n+# self.jsonld_data = jsonld_serializer.serialize(self.meeting_ins.object_data)\n+#\n+# json_serializer = JsonSerializer()\n+# self.json_data = json_serializer.serialize(self.meeting_ins.object_data)\n+#\n+# self.expected_jsonld = {\n+# 'ori_identifier': 'https://id.openraadsinformatie.nl/1',\n+# 'status': 'https://argu.co/ns/meeting/EventConfirmed',\n+# 'name': u'raadscommissie Financi\\xebn',\n+# 'has_organization_name': '3',\n+# 'classification': [u'Agenda'],\n+# 'had_primary_source': u'https://argu.co/voc/mapping/amsterdam/notubiz/identifier/458902',\n+# '@type': 'Meeting',\n+# 'attachment': ['4', '5'],\n+# 'agenda': {\n+# '@list': ['6', '7', '8', '9', '10', '11', '13', '15', '21', '23', '24', '25', '26', '27', '28',\n+# '31', '34', '35', '38', '41', '42', '43', '44', '47', '52', '53', '56', '59', '62', '65',\n+# '70', '73', '77', '78', '81', '82', '86', '89', '93']\n+# },\n+# '@context': {\n+# 'status': {'@id': 'http://schema.org/eventStatus', '@type': '@id'},\n+# 'name': {'@id': 'http://schema.org/name'},\n+# 'has_organization_name': {'@id': 'http://www.w3.org/2006/vcard/ns#hasOrganizationName', '@type': '@id'},\n+# 'classification': {'@id': 'http://www.semanticdesktop.org/ontologies/2007/04/02/ncal#categories'},\n+# 'had_primary_source': {'@id': 'http://www.w3.org/ns/prov#hadPrimarySource'},\n+# '@base': 'https://id.openraadsinformatie.nl/',\n+# 'attachment': {'@id': 'https://argu.co/ns/meeting/attachment', '@type': '@id'},\n+# 'agenda': {'@id': 'https://argu.co/ns/meeting/agenda', '@type': '@id'},\n+# 'organization': {'@id': 'http://schema.org/organizer', '@type': '@id'},\n+# 'Meeting': 'https://argu.co/ns/meeting/Meeting',\n+# 'start_date': {'@id': 'http://schema.org/startDate'},\n+# 'committee': {'@id': 'https://argu.co/ns/meeting/committee', '@type': '@id'}\n+# },\n+# 'organization': '3',\n+# 'start_date': '2018-02-08T13:30:00+01:00',\n+# 'committee': '94'\n+# }\n+#\n+# self.expected_json = {\n+# 'ori_identifier': 'https://id.openraadsinformatie.nl/1',\n+# 'status': 'https://argu.co/ns/meeting/EventConfirmed',\n+# 'name': u'raadscommissie Financi\\xebn',\n+# 'has_organization_name': 'https://id.openraadsinformatie.nl/3',\n+# 
'classification': [u'Agenda'],\n+# 'had_primary_source': u'https://argu.co/voc/mapping/amsterdam/notubiz/identifier/458902',\n+# 'attachment': [\n+# 'https://id.openraadsinformatie.nl/4',\n+# 'https://id.openraadsinformatie.nl/5'\n+# ],\n+# 'agenda': ['https://id.openraadsinformatie.nl/6',\n+# 'https://id.openraadsinformatie.nl/7',\n+# 'https://id.openraadsinformatie.nl/8',\n+# 'https://id.openraadsinformatie.nl/9',\n+# 'https://id.openraadsinformatie.nl/10',\n+# 'https://id.openraadsinformatie.nl/11',\n+# 'https://id.openraadsinformatie.nl/13',\n+# 'https://id.openraadsinformatie.nl/15',\n+# 'https://id.openraadsinformatie.nl/21',\n+# 'https://id.openraadsinformatie.nl/23',\n+# 'https://id.openraadsinformatie.nl/24',\n+# 'https://id.openraadsinformatie.nl/25',\n+# 'https://id.openraadsinformatie.nl/26',\n+# 'https://id.openraadsinformatie.nl/27',\n+# 'https://id.openraadsinformatie.nl/28',\n+# 'https://id.openraadsinformatie.nl/31',\n+# 'https://id.openraadsinformatie.nl/34',\n+# 'https://id.openraadsinformatie.nl/35',\n+# 'https://id.openraadsinformatie.nl/38',\n+# 'https://id.openraadsinformatie.nl/41',\n+# 'https://id.openraadsinformatie.nl/42',\n+# 'https://id.openraadsinformatie.nl/43',\n+# 'https://id.openraadsinformatie.nl/44',\n+# 'https://id.openraadsinformatie.nl/47',\n+# 'https://id.openraadsinformatie.nl/52',\n+# 'https://id.openraadsinformatie.nl/53',\n+# 'https://id.openraadsinformatie.nl/56',\n+# 'https://id.openraadsinformatie.nl/59',\n+# 'https://id.openraadsinformatie.nl/62',\n+# 'https://id.openraadsinformatie.nl/65',\n+# 'https://id.openraadsinformatie.nl/70',\n+# 'https://id.openraadsinformatie.nl/73',\n+# 'https://id.openraadsinformatie.nl/77',\n+# 'https://id.openraadsinformatie.nl/78',\n+# 'https://id.openraadsinformatie.nl/81',\n+# 'https://id.openraadsinformatie.nl/82',\n+# 'https://id.openraadsinformatie.nl/86',\n+# 'https://id.openraadsinformatie.nl/89',\n+# 'https://id.openraadsinformatie.nl/93'],\n+# 'organization': 'https://id.openraadsinformatie.nl/3',\n+# 'start_date': '2018-02-08T13:30:00+01:00',\n+# 'committee': 'https://id.openraadsinformatie.nl/94'\n+# }\n+#\n+# self.rights = u'undefined' # for now ...\n+# self.collection = u'amsterdam'\n+#\n+# def tearDown(self):\n+# self.cleanup_neo4j()\n+#\n+# def cleanup_neo4j(self):\n+# self.db.query('MATCH (n) DETACH DELETE n')\n+#\n+# def _instantiate_meeting(self):\n+# \"\"\"\n+# Instantiate the item from the raw and parsed item we have\n+# \"\"\"\n+# meeting = NotubizMeetingItem(self.source_definition, 'application/json', self.raw_item, self.meeting, None)\n+# return meeting\n+#\n+# def test_meeting_get_ori_id(self):\n+# self.assertEqual('https://id.openraadsinformatie.nl/1', self.meeting_ins.object_data.get_ori_identifier())\n+#\n+# def test_meeting_json(self):\n+# for name, _ in Meeting.definitions(props=True, rels=True):\n+# self.assertEqual(self.expected_json.get(name), self.json_data.get(name))\n+#\n+# def test_meeting_jsonld(self):\n+# for name, _ in Meeting.definitions(props=True, rels=True):\n+# self.assertEqual(self.expected_jsonld.get(name), self.jsonld_data.get(name))\ndiff --git a/tests/ocd_backend/loaders/__init__.py b/tests/ocd_backend/loaders/__init__.py\n--- a/tests/ocd_backend/loaders/__init__.py\n+++ b/tests/ocd_backend/loaders/__init__.py\n@@ -9,14 +9,9 @@ def setUp(self):\n self.source_definition = {\n 'id': 'test_definition',\n 'extractor': 'ocd_backend.extractors.staticfile.StaticJSONDumpExtractor',\n- 'transformer': 'ocd_backend.transformers.BaseTransformer',\n+ 'transformer': 
'ocd_backend.transformers.transformer',\n 'item': 'ocd_backend.items.LocalDumpItem',\n- 'loader': 'ocd_backend.loaders.ElasticsearchLoader',\n+ 'loader': 'ocd_backend.loaders.elasticsearch.elasticsearch_loader',\n 'dump_path': dump_path,\n 'index_name': 'openbeelden'\n }\n-\n-\n-# Import test modules here so the noserunner can pick them up, and the\n-# ExtractorTestCase is parsed. Add additional testcases when required\n-from .es_loader import ESLoaderTestCase\ndiff --git a/tests/ocd_backend/loaders/es_loader.py b/tests/ocd_backend/loaders/test_elasticsearch_loader.py\nsimilarity index 82%\nrename from tests/ocd_backend/loaders/es_loader.py\nrename to tests/ocd_backend/loaders/test_elasticsearch_loader.py\n--- a/tests/ocd_backend/loaders/es_loader.py\n+++ b/tests/ocd_backend/loaders/test_elasticsearch_loader.py\n@@ -2,7 +2,7 @@\n import os.path\n \n from ocd_backend.exceptions import ConfigurationError\n-from ocd_backend.loaders import ElasticsearchLoader\n+from ocd_backend.loaders.elasticsearch import elasticsearch_loader\n from . import LoaderTestCase\n \n \n@@ -23,12 +23,12 @@ def setUp(self):\n self.source_definition = {\n 'id': 'test_definition',\n 'extractor': 'ocd_backend.extractors.staticfile.StaticJSONDumpExtractor',\n- 'transformer': 'ocd_backend.transformers.BaseTransformer',\n+ 'transformer': 'ocd_backend.transformers.transformer',\n 'item': 'ocd_backend.items.LocalDumpItem',\n- 'loader': 'ocd_backend.loaders.ElasticsearchLoader',\n+ 'loader': 'ocd_backend.loaders.elasticsearch.elasticsearch_loader',\n 'dump_path': dump_path\n }\n- self.loader = ElasticsearchLoader()\n+ self.loader = elasticsearch_loader\n \n def test_throws_configuration_error_without_index_name(self):\n # self.loader.run(source_definition=self.source_definition)\ndiff --git a/tests/ocd_backend/models/__init__.py b/tests/ocd_backend/models/__init__.py\n--- a/tests/ocd_backend/models/__init__.py\n+++ b/tests/ocd_backend/models/__init__.py\n@@ -1,5 +0,0 @@\n-# Import test modules here so the noserunner can pick them up, and the\n-# ModelsTestCase is parsed. Add additional testcases when required\n-from .model import ModelTestCase\n-from .database import DatabaseTestCase\n-from .serializers import SerializersTestCase\ndiff --git a/tests/ocd_backend/models/database.py b/tests/ocd_backend/models/database.py\ndeleted file mode 100644\n--- a/tests/ocd_backend/models/database.py\n+++ /dev/null\n@@ -1,118 +0,0 @@\n-from unittest import TestCase\n-from ocd_backend.models.database import Neo4jDatabase\n-from ocd_backend.models.serializers import Neo4jSerializer\n-from ocd_backend.models.model import Model\n-from ocd_backend.models import Organization\n-from ocd_backend.models.misc import Uri\n-from ocd_backend.models.definitions import Mapping\n-\n-\n-class DatabaseTestCase(TestCase):\n- def setUp(self):\n- self.db = Neo4jDatabase(Neo4jSerializer())\n- self.db.query('MATCH (n) DETACH DELETE n')\n-\n- # Ensure we are using the test.db\n- result = self.db.query('CALL dbms.listConfig()')\n- config = {x['name']: x['value'] for x in result}\n- assert config['dbms.active_database'] == 'test.db', \\\n- 'Neo4j selected database should be test.db. ' 
\\\n- 'Make sure to use docker-compose.test.yaml'\n-\n- self.db.create_constraints()\n-\n- def tearDown(self):\n- self.db.query('MATCH (n) DETACH DELETE n')\n-\n- # def test_replace(self):\n- # object_model = Organization('CBS', 'GM0361', 'Alkmaar')\n- # object_model.name = 'Alkmaar'\n- # object_model.classification = u'Municipality'\n- # object_model.description = 'De gemeente Alkmaar'\n- #\n- # a = Organization('CBS', 'CBSa', 'Alkmaar')\n- # a.name = 'a'\n- #\n- # b = Organization('CBS', 'CBSb', 'Alkmaar')\n- # b.name = 'b'\n- #\n- # object_model.parent = [a, b]\n- # # object_model.parent = Relationship(a, b, rel=c)\n- #\n- # object_model.save()\n- #\n- # result = self.db.query('MATCH (n) WITH COUNT(n) AS nodes '\n- # 'MATCH (m)-->() RETURN nodes, COUNT(m) AS rels')\n- # self.assertEqual(result[0]['nodes'], 5)\n- # self.assertEqual(result[0]['rels'], 6)\n-\n- def test_replace_nodes(self):\n- source_defaults = {\n- 'source': 'cbs',\n- 'source_id_key': 'identifier',\n- 'organization': 'alkmaar',\n- }\n-\n- object_model = Organization('GM0361', **source_defaults)\n- object_model.name = 'Alkmaar'\n- object_model.classification = u'Municipality'\n- object_model.description = 'De gemeente Alkmaar'\n-\n- a = Organization('CBSa', **source_defaults)\n- a.name = 'a'\n-\n- b = Organization('CBSb', **source_defaults)\n- b.name = 'b'\n-\n- object_model.subOrganizationOf = [a, b]\n- object_model.save()\n-\n- first_identifier = object_model.get_ori_identifier()\n-\n- result = self.db.query('MATCH (n) WITH COUNT(n) AS nodes '\n- 'MATCH (m)-->() RETURN nodes, COUNT(m) AS rels')\n- self.assertEqual(3, result[0]['nodes'])\n- self.assertEqual(2, result[0]['rels'])\n-\n- # Make a new object that matches everything but description\n- object_model = Organization('GM0361', **source_defaults)\n- object_model.name = 'Alkmaar'\n- object_model.classification = u'Municipality'\n- object_model.description = 'De gemeente Alkmaar bestaat al lang'\n-\n- a = Organization('CBSa', **source_defaults)\n- a.name = 'a'\n-\n- b = Organization('CBSb', **source_defaults)\n- b.name = 'b'\n-\n- object_model.subOrganizationOf = [a, b]\n- object_model.save()\n-\n- result = self.db.query('MATCH (n) WITH COUNT(n) AS nodes '\n- 'MATCH (m)-->() RETURN nodes, COUNT(m) AS rels')\n- self.assertEqual(3, result[0]['nodes'])\n- self.assertEqual(2, result[0]['rels'])\n-\n- second_identifier = object_model.get_ori_identifier()\n-\n- object_model = Organization('GM0361', **source_defaults)\n- object_model.name = 'Alkmaar'\n- object_model.classification = u'Municipality'\n- object_model.description = 'MAAR NU CAPS'\n-\n- a = Organization('CBSa', **source_defaults)\n- a.name = 'a'\n-\n- b = Organization('CBSb', **source_defaults)\n- b.name = 'b'\n-\n- object_model.subOrganizationOf = [a, b]\n- object_model.save()\n-\n- result = self.db.query('MATCH (n) WITH COUNT(n) AS nodes '\n- 'MATCH (m)-->() RETURN nodes, COUNT(m) AS rels')\n- self.assertEqual(3, result[0]['nodes'])\n- self.assertEqual(2, result[0]['rels'])\n-\n- self.assertEqual(first_identifier, second_identifier)\ndiff --git a/tests/ocd_backend/models/model.py b/tests/ocd_backend/models/model.py\ndeleted file mode 100644\n--- a/tests/ocd_backend/models/model.py\n+++ /dev/null\n@@ -1,131 +0,0 @@\n-import copy\n-import datetime\n-from unittest import TestCase\n-\n-from nose.tools import eq_, assert_raises\n-\n-from ocd_backend.models import Meeting, Organization, AgendaItem, Person\n-from ocd_backend.models.model import Model\n-from ocd_backend.models.properties import Property, 
Namespace\n-from ocd_backend.models.exceptions import RequiredProperty\n-from ocd_backend.models.serializers import Neo4jSerializer, JsonLDSerializer\n-from ocd_backend.models.database import Neo4jDatabase\n-\n-\n-def get_event():\n- source_defaults = {\n- 'source': 'notubiz',\n- 'source_id_key': 'identifier',\n- 'organization': 'alkmaar',\n- }\n-\n- item = Meeting('104ce628-b453-4fc1-9ab5-61383b6c9ab4', **source_defaults)\n- item.name = 'Test iBabs event'\n- item.chair = 'Chairman'\n- item.location = 'Somewhere'\n- item.start_date = datetime.datetime(2019, 3, 1, 12, 15)\n-\n-\n- organization = Organization('MeetingtypeId', **source_defaults)\n- item.organization = organization\n-\n- agenda_item1 = AgendaItem('204ce628-b453-4fc1-9ab5-61383b6c9ab4', **source_defaults)\n- agenda_item1.name = 'Test 1'\n- agenda_item1.start_date = datetime.datetime(2019, 3, 1, 12, 15)\n-\n- agenda_item2 = AgendaItem('304ce628-b453-4fc1-9ab5-61383b6c9ab4', **source_defaults)\n- agenda_item2.name = 'Test 2'\n- agenda_item2.start_date = datetime.datetime(2019, 3, 1, 12, 15)\n-\n- item.agenda = [agenda_item1, agenda_item2]\n-\n- item.invitee = [\n- Person('404ce628-b453-4fc1-9ab5-61383b6c9ab4', **source_defaults),\n- Person('504ce628-b453-4fc1-9ab5-61383b6c9ab4', **source_defaults),\n- ]\n-\n- return item\n-\n-\n-class ModelTestCase(TestCase):\n- def setUp(self):\n- self.db = Neo4jDatabase(Neo4jSerializer())\n- self.cleanup_neo4j()\n-\n- def tearDown(self):\n- self.cleanup_neo4j()\n-\n- def cleanup_neo4j(self):\n- self.db.query('MATCH (n) DETACH DELETE n')\n-\n- def results_neo4j(self):\n- result = self.db.query('MATCH (n) WITH COUNT(n) AS nodes '\n- 'OPTIONAL MATCH (m)-->() RETURN nodes, COUNT(m) AS rels')\n- return result[0]['nodes'], result[0]['rels'],\n-\n- def test_properties(self):\n- mapping = {\n- 'http://schema.org/name': 'Test iBabs event',\n- 'http://www.w3.org/ns/prov#hadPrimarySource': 'https://argu.co/voc/mapping/alkmaar/notubiz/identifier/104ce628-b453-4fc1-9ab5-61383b6c9ab4',\n- 'https://argu.co/voc/mapping/ori/identifier': None,\n- 'https://argu.co/ns/meeting/chair': 'Chairman',\n- 'http://schema.org/invitee': Person,\n- 'http://schema.org/location': 'Somewhere',\n- 'https://argu.co/ns/meeting/agenda': AgendaItem,\n- 'http://schema.org/organizer': Organization,\n- 'http://schema.org/startDate': datetime.datetime(2019, 3, 1, 12, 15),\n- }\n- expected = copy.copy(mapping)\n-\n- item = get_event()\n- item.save()\n-\n- for key, value in item.properties():\n- mapping.pop(key, None)\n- if isinstance(value, Model):\n- # If value is an instance, return its class for easy comparison\n- value = type(value)\n-\n- # Values should be the same as in the mapping\n- if expected[key]:\n- eq_(expected[key], value)\n-\n- # All pairs should be popped from the mapping\n- eq_(0, len(mapping))\n-\n- def test_required_props(self):\n- source_defaults = {\n- 'source': 'notubiz',\n- 'source_id_key': 'identifier',\n- 'organization': 'alkmaar',\n- }\n-\n- class TestNamespace(Namespace):\n- uri = 'http://example.com'\n- prefix = 'ex'\n-\n- class RequiredModel(TestNamespace, Model):\n- oriIdentifier = Property(TestNamespace, 'oriIdentifier')\n- ibabsIdentifier = Property(TestNamespace, 'ibabsIdentifier')\n- required_prop = Property(TestNamespace, 'required_prop', required=True)\n-\n- item = RequiredModel('104ce628-b453-4fc1-9ab5-61383b6c9ab4', **source_defaults)\n-\n- with assert_raises(RequiredProperty):\n- item.save()\n-\n- # def test_model_meta(self):\n- # item = get_event()\n- # .save()\n- #\n- # # Is the identifier_key set in 
Meta\n- # eq_(item.Meta.identifier_key, 'ibabsIdentifier')\n-\n- def test_replace(self):\n- item = get_event()\n- item.db.replace(item)\n-\n- # Todo test if the number of first-level attributes is 5\n- nodes, rels = self.results_neo4j()\n- self.assertEqual(1, nodes)\n- self.assertEqual(0, rels)\ndiff --git a/tests/ocd_backend/models/serializers.py b/tests/ocd_backend/models/serializers.py\ndeleted file mode 100644\n--- a/tests/ocd_backend/models/serializers.py\n+++ /dev/null\n@@ -1,61 +0,0 @@\n-from unittest import TestCase\n-from ocd_backend.models.serializers import Neo4jSerializer, RdfSerializer\n-from ocd_backend.models import Organization\n-from ocd_backend.models.misc import Uri\n-from ocd_backend.models.definitions import Mapping\n-from ocd_backend.models.database import Neo4jDatabase\n-from ocd_backend.models.model import Model\n-from ocd_backend.models.exceptions import MissingProperty\n-\n-\n-class SerializersTestCase(TestCase):\n- def setUp(self):\n- self.db = Neo4jDatabase(Neo4jSerializer())\n- self.cleanup_neo4j()\n-\n- def tearDown(self):\n- self.cleanup_neo4j()\n-\n- def cleanup_neo4j(self):\n- self.db.query('MATCH (n) DETACH DELETE n')\n-\n- def test_unsaved_model(self):\n- source_defaults = {\n- 'source': 'cbs',\n- 'source_id_key': 'identifier',\n- 'organization': 'alkmaar',\n- }\n-\n- model = Organization('SomeID0123', **source_defaults)\n-\n- with self.assertRaises(MissingProperty):\n- serializer = RdfSerializer()\n- serializer.serialize(model)\n-\n- def test_rdf_serializer(self):\n- source_defaults = {\n- 'source': 'cbs',\n- 'source_id_key': 'identifier',\n- 'organization': 'alkmaar',\n- }\n-\n- model = Organization('GM0361', **source_defaults)\n- model.save()\n-\n- serializer = RdfSerializer()\n- serializer.serialize(model)\n-\n- # def test_deflate(self):\n- # item = get_event()\n- # item.save()\n- #\n- # serializer = JsonLDSerializer()\n- # deflated = serializer.deflate(item, props=True, rels=True)\n- #\n- # # Delete ori_identifier since this will be different every time\n- # del deflated['ori_identifier']\n- #\n- # expected = {'name': u'Test iBabs event', 'had_primary_source': u'https://argu.co/voc/mapping/alkmaar/notubiz/identifier/104ce628-b453-4fc1-9ab5-61383b6c9ab4', 'invitee': ['https://id.openraadsinformatie.nl/364', 'https://id.openraadsinformatie.nl/365'], 'location': u'Somewhere', 'agenda': ['https://id.openraadsinformatie.nl/366', 'https://id.openraadsinformatie.nl/367'], 'organization': 'https://id.openraadsinformatie.nl/368', 'chair': u'Chairman'}\n- #\n- # # Deflate output should match expected dict\n- # eq_(deflated, expected)\ndiff --git a/tests/ocd_backend/models/test_postgres_database.py b/tests/ocd_backend/models/test_postgres_database.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/ocd_backend/models/test_postgres_database.py\n@@ -0,0 +1,4 @@\n+import datetime\n+from unittest import TestCase\n+\n+from ocd_backend.models.postgres_database import PostgresDatabase\ndiff --git a/tests/ocd_backend/models/test_serializers.py b/tests/ocd_backend/models/test_serializers.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/ocd_backend/models/test_serializers.py\n@@ -0,0 +1,50 @@\n+# TODO: Needs to be rewritten for Postgres\n+\n+# from unittest import TestCase\n+# from ocd_backend.models.serializers import RdfSerializer\n+# from ocd_backend.models import Organization\n+# from ocd_backend.models.exceptions import MissingProperty\n+#\n+#\n+# class SerializersTestCase(TestCase):\n+#\n+# def test_unsaved_model(self):\n+# source_defaults = {\n+# 'source': 
'cbs',\n+# 'source_id_key': 'identifier',\n+# 'organization': 'alkmaar',\n+# }\n+#\n+# model = Organization('SomeID0123', **source_defaults)\n+#\n+# with self.assertRaises(MissingProperty):\n+# serializer = RdfSerializer()\n+# serializer.serialize(model)\n+#\n+# def test_rdf_serializer(self):\n+# source_defaults = {\n+# 'source': 'cbs',\n+# 'source_id_key': 'identifier',\n+# 'organization': 'alkmaar',\n+# }\n+#\n+# model = Organization('GM0361', **source_defaults)\n+# model.save()\n+#\n+# serializer = RdfSerializer()\n+# serializer.serialize(model)\n+#\n+# # def test_deflate(self):\n+# # item = get_event()\n+# # item.save()\n+# #\n+# # serializer = JsonLDSerializer()\n+# # deflated = serializer.deflate(item, props=True, rels=True)\n+# #\n+# # # Delete ori_identifier since this will be different every time\n+# # del deflated['ori_identifier']\n+# #\n+# # expected = {'name': u'Test iBabs event', 'had_primary_source': u'https://argu.co/voc/mapping/alkmaar/notubiz/identifier/104ce628-b453-4fc1-9ab5-61383b6c9ab4', 'invitee': ['https://id.openraadsinformatie.nl/364', 'https://id.openraadsinformatie.nl/365'], 'location': u'Somewhere', 'agenda': ['https://id.openraadsinformatie.nl/366', 'https://id.openraadsinformatie.nl/367'], 'organization': 'https://id.openraadsinformatie.nl/368', 'chair': u'Chairman'}\n+# #\n+# # # Deflate output should match expected dict\n+# # eq_(deflated, expected)\ndiff --git a/tests/ocd_backend/misc.py b/tests/ocd_backend/test_misc.py\nsimilarity index 100%\nrename from tests/ocd_backend/misc.py\nrename to tests/ocd_backend/test_misc.py\ndiff --git a/tests/ocd_backend/transformers/__init__.py b/tests/ocd_backend/transformers/__init__.py\n--- a/tests/ocd_backend/transformers/__init__.py\n+++ b/tests/ocd_backend/transformers/__init__.py\n@@ -1,8 +0,0 @@\n-from unittest import TestCase\n-\n-\n-class TransformerTestCase(TestCase):\n- pass\n-\n-\n-from .base import BaseTransformerTestCase\ndiff --git a/tests/ocd_backend/transformers/base.py b/tests/ocd_backend/transformers/base.py\ndeleted file mode 100644\n--- a/tests/ocd_backend/transformers/base.py\n+++ /dev/null\n@@ -1,44 +0,0 @@\n-import os.path\n-\n-from ocd_backend.exceptions import NoDeserializerAvailable\n-from ocd_backend.transformers import BaseTransformer\n-from . 
import TransformerTestCase\n-\n-\n-class BaseTransformerTestCase(TransformerTestCase):\n- def setUp(self):\n- super(BaseTransformerTestCase, self).setUp()\n- self.PWD = os.path.dirname(__file__)\n- dump_path = os.path.abspath(os.path.join(self.PWD, '../test_dumps/ocd_openbeelden_test.gz'))\n- self.source_definition = {\n- 'id': 'test_definition',\n- 'extractor': 'ocd_backend.extractors.staticfile.StaticJSONDumpExtractor',\n- 'transformer': 'ocd_backend.transformers.BaseTransformer',\n- 'item': 'ocd_backend.items.LocalDumpItem',\n- 'loader': 'ocd_backend.loaders.ElasticsearchLoader',\n- 'dump_path': dump_path,\n- 'index_name': 'openbeelden'\n- }\n- with open(os.path.abspath(os.path.join(self.PWD, '../test_dumps/item.json')), 'r') as f:\n- self.item = ('application/json', f.read())\n- self.transformer = BaseTransformer()\n- self.deserialized_item = self.transformer.deserialize_item(*self.item)\n-\n- def test_deserializer(self):\n- deserialized_item = self.transformer.deserialize_item(*self.item)\n- self.assertEqual(deserialized_item, self.deserialized_item)\n-\n- def test_no_deserializer_available(self):\n- with self.assertRaises(NoDeserializerAvailable):\n- item = self.transformer.deserialize_item('application/test',\n- self.item[1])\n-\n- # todo needs to be rewritten with new data\n- # def test_run(self):\n- # # This implicitly tests item functionality too. Perhaps we want to mock\n- # # this?\n- # combined_object_id, object_id, combi_doc, doc, doc_type = self.transformer.run(\n- # *self.item, source_definition=self.source_definition)\n- # self.assertIsNotNone(object_id)\n- # self.assertIsNotNone(combi_doc)\n- # self.assertIsNotNone(doc)\ndiff --git a/tests/ocd_frontend/__init__.py b/tests/ocd_frontend/__init__.py\ndeleted file mode 100644\n--- a/tests/ocd_frontend/__init__.py\n+++ /dev/null\n@@ -1,1023 +0,0 @@\n-import json\n-import random\n-from unittest import TestCase as UnittestTestCase\n-\n-import mock\n-from flask import url_for, current_app\n-from flask_testing import TestCase\n-\n-from ocd_frontend.rest import tasks\n-from .mixins import OcdRestTestCaseMixin\n-\n-\n-class RestApiSearchTestCase(OcdRestTestCaseMixin, TestCase):\n- endpoint_url = 'api.search'\n- endpoint_url_args = {}\n- required_indexes = [\n- 'ori_test_combined_index'\n- ]\n-\n- def test_valid_search(self):\n- \"\"\"Tests if a valid search request responds with a JSON and\n- status 200 OK.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de'}))\n- self.assert_ok_json(response)\n-\n- def test_missing_query(self):\n- \"\"\"Tests if a 200 response is returned when the required\n- ``query`` attribute is missing.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'not-a-query': 'de'}))\n- self.assert_ok(response)\n-\n- def test_sort_option_is_accepted(self):\n- \"\"\"Tests if valid use of the ``sort`` option results in a\n- JSON response with a 200 OK.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- sort_field = random.choice(current_app.config['SORTABLE_FIELDS']['items'])\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'sort': sort_field}))\n- self.assert_ok_json(response)\n-\n- def test_sort_order_option_is_accepted(self):\n- \"\"\"Test if valid use of the ``sort`` and ``order`` options\n- result in a JSON response with a 200 OK.\"\"\"\n- 
url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- available_sort_fields = current_app.config['SORTABLE_FIELDS']['items']\n- try:\n- available_sort_fields.remove('start_date')\n- available_sort_fields.remove('end_date')\n- except ValueError as e:\n- pass\n- sort_field = random.choice(available_sort_fields)\n- sort_order = random.choice(['asc', 'desc'])\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'order': sort_order,\n- 'sort': sort_field}))\n- self.assert_ok_json(response)\n-\n- def test_sort_option_with_invalid_field(self):\n- \"\"\"Tests if sorting on an invalid field results in a response\n- with status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'sort': 'not-a-sort-field'}))\n- self.assert_bad_request_json(response)\n-\n- def test_sort_option_with_invalid_order(self):\n- \"\"\"Test if supplying an invalid order option results in a\n- response with status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- sort_field = random.choice(current_app.config['SORTABLE_FIELDS']['items'])\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'order': 'upsidedown',\n- 'sort': sort_field}))\n- self.assert_bad_request_json(response)\n-\n- def test_facets(self):\n- \"\"\"Test if requesting facets results in a 200 OK, and if the\n- facets are actually present in the response.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- available_facets = current_app.config['AVAILABLE_FACETS']\n- facet_keys = random.sample(available_facets['items'].keys(), 1)\n- facets = {fk: available_facets['items'][fk] for fk in facet_keys}\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'facets': facets}))\n-\n- self.assert_ok_json(response)\n- self.assertIn(u'facets', response.json)\n- for fk in facet_keys:\n- self.assertIn(fk, response.json.get(u'facets', {}))\n-\n- def test_invalid_facet_option_value(self):\n- \"\"\"Tests if requesting a facet with invalid value (not dict)\n- results in a response with status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n-\n- facets = {\n- 'rights': []\n- }\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'facets': facets}))\n- self.assert_bad_request_json(response)\n-\n- def test_not_available_facet(self):\n- \"\"\"Tests if requesting a facet that is not available results\n- in a response with status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n-\n- facets = {\n- 'rights-that-are-not-a-facet': {}\n- }\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'facets': facets}))\n- self.assert_bad_request_json(response)\n-\n- def test_facet_size(self):\n- \"\"\"Tests if valid use of the facet ``size`` attribute results in\n- a 200 OK JSON response.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n-\n- facets = {\n- 'classification': {\n- 'size': 10\n- }\n- }\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'facets': facets}))\n- self.assert_ok_json(response)\n-\n- def test_invalid_facet_size(self):\n- \"\"\"Tests if supplying an invalid facet ``size`` value results in\n- a response with status code 
400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n-\n- facets = {\n- 'source': {\n- 'size': 'abc'\n- }\n- }\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'facets': facets}))\n- self.assert_bad_request_json(response)\n-\n- # def test_datetime_facet(self):\n- # \"\"\"Tests if valid use of the ``date`` facet results in a 200 OK\n- # JSON response.\"\"\"\n- # url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- #\n- # facets = {\n- # 'date': {\n- # 'interval': 'month'\n- # }\n- # }\n- #\n- # response = self.post(url, content_type='application/json',\n- # data=json.dumps({'query': 'de',\n- # 'facets': facets}))\n- # self.assert_ok_json(response)\n- # self.assertEqual(response.json['facets']['date']['_type'],\n- # 'date_histogram')\n-\n- def test_datetime_facet_interval_not_string(self):\n- \"\"\"Test if supplying an invalid interval type (i.e. integer)\n- results in a response with status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n-\n- facets = {\n- 'date': {\n- 'date_histogram': {\n- 'interval': 123\n- }\n- }\n- }\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'facets': facets}))\n- self.assert_bad_request_json(response)\n-\n- def test_datetime_facet_interval_not_allowed(self):\n- \"\"\"Tests if supplying an invalid interval size results in\n- a response with a status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n-\n- facets = {\n- 'date': {\n- 'date_histogram': {\n- 'interval': 'millennium'\n- }\n- }\n- }\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'facets': facets}))\n- self.assert_bad_request_json(response)\n-\n- def test_facet_should_be_dict(self):\n- \"\"\"Tests if supplying a list as facet request description\n- results in a response with status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- facets = ['some facet']\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de',\n- 'facets': facets}))\n- self.assert_bad_request_json(response)\n-\n- def test_from(self):\n- \"\"\"Test if setting the ``from`` attribute responds with JSON\n- and status 200 OK.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de', 'from': 10}))\n- self.assert_ok_json(response)\n-\n- def test_invalid_value_from(self):\n- \"\"\"Tests if supplying an invalid data type for the ``from``\n- attribute results in a response with status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de', 'from': 'abc'}))\n- self.assert_bad_request_json(response)\n-\n- def test_negative_value_from(self):\n- \"\"\"Test if supplying a negative value for the ``from`` attribute\n- results in a response with status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de', 'from': -1}))\n- self.assert_bad_request_json(response)\n-\n- def test_size(self):\n- \"\"\"Test if supplying a valid value for the ``size`` attribute\n- results in a 200 OK JSON response.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- response = self.post(url, 
content_type='application/json',\n- data=json.dumps({'query': 'de', 'size': 10}))\n- self.assert_ok_json(response)\n-\n- def test_invalid_value_size(self):\n- \"\"\"Test if supplying an invalid type for the ``size`` attribute\n- results in a response with status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de', 'size': 'abc'}))\n- self.assert_bad_request_json(response)\n-\n- def test_negative_value_size(self):\n- \"\"\"Test if supplying a negative value for the ``size`` attribute\n- results in a response with status code 400.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de', 'size': -1}))\n- self.assert_bad_request_json(response)\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_search_logging_called_if_enabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is called when usage\n- logging is enabled.\"\"\"\n- # Enable usage logging for this test\n- self.app.config['USAGE_LOGGING_ENABLED'] = True\n-\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- self.post(url, content_type='application/json', data=json.dumps({'query': 'de'}))\n- self.assertTrue(mocked_es_create.called)\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_search_logging_not_called_if_disabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is not called when\n- usage logging is disabled.\"\"\"\n- # Make sure usage logging is disabled\n- self.app.config['USAGE_LOGGING_ENABLED'] = False\n-\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- self.post(url, content_type='application/json', data=json.dumps({'query': 'de'}))\n- self.assertFalse(mocked_es_create.called)\n-\n-\n-class RestApiSearchSourceTestCase(RestApiSearchTestCase):\n- endpoint_url = 'api.search_source'\n- endpoint_url_args = {'source_id': 'test_collection_index'}\n- required_indexes = [\n- 'ori_test_collection_index'\n- ]\n-\n- def test_nonexistent_source_id(self):\n- \"\"\"Test if supplying a nonexistent ``source_id`` returns a 404\n- JSON response.\"\"\"\n-\n- url = url_for(self.endpoint_url, source_id='i-do-not-exist')\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'query': 'de'}))\n- self.assert_not_found_request_json(response)\n-\n-\n-class RestApiSearchSimilarTestCase(OcdRestTestCaseMixin, TestCase):\n- required_indexes = [\n- 'ori_test_combined_index',\n- 'ori_test_collection_index'\n- ]\n-\n- def test_valid_search(self):\n- \"\"\"Tests if a valid search request responds with a JSON and\n- status 200 OK.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({}))\n-\n- self.assert_ok_json(response)\n-\n- def test_valid_search_source(self):\n- doc_id = self.doc_ids['ori_test_collection_index']['items'][0]\n- url = url_for('api.similar', source_id='test_collection_index',\n- object_id=doc_id)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({}))\n-\n- self.assert_ok_json(response)\n-\n- def test_search_nonexistent_source(self):\n- \"\"\"Test if finding similar objects within a source index that\n- doesn't exist returns a 404 JSON response (with the appropriate\n- error message).\"\"\"\n- 
source_id = 'i-do-not-exist'\n- doc_id = self.doc_ids['ori_test_collection_index']['items'][0]\n- url = url_for('api.get_object', source_id=source_id, object_id=doc_id)\n- response = self.get(url)\n-\n- self.assert_not_found_request_json(response)\n- self.assertEqual(response.json['error'],\n- 'Document not found.')\n-\n- def test_sort_option_is_accepted(self):\n- \"\"\"Tests if valid use of the ``sort`` option results in a\n- JSON response with a 200 OK.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- available_sort_fields = current_app.config['SORTABLE_FIELDS']['items']\n- try:\n- available_sort_fields.remove('start_date')\n- available_sort_fields.remove('end_date')\n- except ValueError as e:\n- pass\n- sort_field = random.choice(available_sort_fields)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'sort': sort_field}))\n- self.assert_ok_json(response)\n-\n- def test_sort_order_option_is_accepted(self):\n- \"\"\"Test if valid use of the ``sort`` and ``order`` options\n- result in a JSON response with a 200 OK.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- available_sort_fields = current_app.config['SORTABLE_FIELDS']['items']\n- try:\n- available_sort_fields.remove('start_date')\n- available_sort_fields.remove('end_date')\n- except ValueError as e:\n- pass\n- sort_field = random.choice(available_sort_fields)\n- sort_order = random.choice(['asc', 'desc'])\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'order': sort_order,\n- 'sort': sort_field}))\n- self.assert_ok_json(response)\n-\n- def test_sort_option_with_invalid_field(self):\n- \"\"\"Tests if sorting on an invalid field results in a response\n- with status code 400.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'sort': 'not-a-sort-field'}))\n- self.assert_bad_request_json(response)\n-\n- def test_sort_option_with_invalid_order(self):\n- \"\"\"Test if supplying an invalid order option results in a\n- response with status code 400.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- available_sort_fields = current_app.config['SORTABLE_FIELDS']['items']\n- try:\n- available_sort_fields.remove('start_date')\n- available_sort_fields.remove('end_date')\n- except ValueError as e:\n- pass\n- sort_field = random.choice(available_sort_fields)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'order': 'upsidedown',\n- 'sort': sort_field}))\n- self.assert_bad_request_json(response)\n-\n- # def test_facets(self):\n- # \"\"\"Test if requesting facets results in a 200 OK, and if the\n- # facets are actually present in the response.\"\"\"\n- # doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- # url = url_for('api.similar', object_id=doc_id)\n- #\n- # available_facets = current_app.config['AVAILABLE_FACETS']\n- # facet_keys = random.sample(available_facets.keys(), 3)\n- # facets = {fk: available_facets[fk] for fk in facet_keys}\n- #\n- # response = self.post(url, content_type='application/json',\n- # data=json.dumps({'facets': facets}))\n- #\n- # self.assert_ok_json(response)\n- # self.assertIn('facets', response.json)\n- # for fk in facet_keys:\n- # self.assertIn(fk, 
response.json.get('facets', {}))\n-\n- def test_not_available_facet(self):\n- \"\"\"Tests if requesting a facet that is not available results\n- in a response with status code 400.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n-\n- facets = {\n- 'rights-that-are-not-a-facet': {\n- 'terms': {\n- 'field': 'meta.rights'\n- }\n- }\n- }\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'facets': facets}))\n- self.assert_bad_request_json(response)\n-\n- def test_facet_size(self):\n- \"\"\"Tests if valid use of the facet ``size`` attribute results in\n- a 200 OK JSON response.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n-\n- facets = {\n- 'classification': {\n- 'size': 10\n- }\n- }\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'facets': facets}))\n- self.assert_ok_json(response)\n-\n- def test_invalid_facet_size(self):\n- \"\"\"Tests if supplying an invalid facet ``size`` value results in\n- a response with status code 400.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n-\n- facets = {\n- 'rights': {\n- 'size': 'abc'\n- }\n- }\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'facets': facets}))\n- self.assert_bad_request_json(response)\n-\n- # def test_datetime_facet(self):\n- # \"\"\"Tests if valid use of the ``date`` facet results in a 200 OK\n- # JSON response.\"\"\"\n- # doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- # url = url_for('api.similar', object_id=doc_id)\n- #\n- # facets = {\n- # 'date': {\n- # 'date_histogram': {\n- # 'field': 'date',\n- # 'interval': 'month'\n- # }\n- # }\n- # }\n- #\n- # response = self.post(url, content_type='application/json',\n- # data=json.dumps({'facets': facets}))\n- # self.assert_ok_json(response)\n- # self.assertEqual(response.json['facets']['date']['_type'],\n- # 'date_histogram')\n- #\n- # def test_datetime_facet_interval_not_string(self):\n- # \"\"\"Test if supplying an invalid interval type (i.e. 
integer)\n- # results in a response with status code 400.\"\"\"\n- # doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- # url = url_for('api.similar', object_id=doc_id)\n- #\n- # facets = {\n- # 'date': {\n- # 'date_histogram': {\n- # 'field': 'date',\n- # 'interval': 123\n- # }\n- # }\n- # }\n- #\n- # response = self.post(url, content_type='application/json',\n- # data=json.dumps({'facets': facets}))\n- # self.assert_bad_request_json(response)\n- #\n- # def test_datetime_facet_interval_not_allowed(self):\n- # \"\"\"Tests if supplying an invalid interval size results in\n- # a response with a status code 400.\"\"\"\n- # doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- # url = url_for('api.similar', object_id=doc_id)\n- #\n- # facets = {\n- # 'date': {\n- # 'date_histogram': {\n- # 'field': 'date',\n- # 'interval': 'millennium'\n- # }\n- # }\n- # }\n- #\n- # response = self.post(url, content_type='application/json',\n- # data=json.dumps({'facets': facets}))\n- # self.assert_bad_request_json(response)\n-\n- def test_facet_should_be_dict(self):\n- \"\"\"Tests if supplying a list as facet request description\n- results in a response with status code 400.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- facets = ['some facet']\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'facets': facets}))\n- self.assert_bad_request_json(response)\n-\n- def test_from(self):\n- \"\"\"Test if setting the ``from`` attribute responds with JSON\n- and status 200 OK.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'from': 10}))\n- self.assert_ok_json(response)\n-\n- def test_invalid_value_from(self):\n- \"\"\"Tests if supplying an invalid data type for the ``from``\n- attribute results in a response with status code 400.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'from': 'abc'}))\n- self.assert_bad_request_json(response)\n-\n- def test_negative_value_from(self):\n- \"\"\"Test if supplying a negative value for the ``from`` attribute\n- results in a response with status code 400.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'from': -1}))\n- self.assert_bad_request_json(response)\n-\n- def test_size(self):\n- \"\"\"Test if supplying a valid value for the ``size`` attribute\n- results in a 200 OK JSON response.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'size': 10}))\n- self.assert_ok_json(response)\n-\n- def test_invalid_value_size(self):\n- \"\"\"Test if supplying an invalid type for the ``size`` attribute\n- results in a response with status code 400.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'size': 'abc'}))\n- self.assert_bad_request_json(response)\n-\n- def test_negative_value_size(self):\n- \"\"\"Test if 
supplying a negative value for the ``size`` attribute\n- results in a response with status code 400.\"\"\"\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'size': -1}))\n- self.assert_bad_request_json(response)\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_search_logging_called_if_enabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is called when usage\n- logging is enabled.\"\"\"\n- # Enable usage logging for this test\n- self.app.config['USAGE_LOGGING_ENABLED'] = True\n-\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- self.post(url, content_type='application/json', data=json.dumps({}))\n- self.assertTrue(mocked_es_create.called)\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_search_logging_not_called_if_disabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is not called when\n- usage logging is disabled.\"\"\"\n- # Make sure usage logging is disabled\n- self.app.config['USAGE_LOGGING_ENABLED'] = False\n-\n- doc_id = self.doc_ids['ori_test_combined_index']['item'][0]\n- url = url_for('api.similar', object_id=doc_id)\n- self.post(url, content_type='application/json', data=json.dumps({}))\n- self.assertFalse(mocked_es_create.called)\n-\n-\n-class RestApiSourcesTestCase(OcdRestTestCaseMixin, TestCase):\n- required_indexes = [\n- 'ori_test_combined_index'\n- ]\n-\n- # todo needs to be revised\n- # def test_response_format(self):\n- # url = url_for('api.list_sources')\n- # response = self.get(url)\n- #\n- # self.assert_ok_json(response)\n- #\n- # self.assertIn('sources', response.json)\n- #\n- # # There might be no sources in an empty test database\n- # source_attrs = response.json['sources'][0].keys()\n- # self.assertIn('id', source_attrs)\n- # self.assertIn('organizations', source_attrs)\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_logging_called_if_enabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is called when usage\n- logging is enabled.\"\"\"\n- # Enable usage logging for this test\n- self.app.config['USAGE_LOGGING_ENABLED'] = True\n-\n- url = url_for('api.list_sources')\n- self.get(url)\n- self.assertTrue(mocked_es_create.called)\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_logging_not_called_if_disabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is not called when\n- usage logging is disabled.\"\"\"\n- # Make sure usage logging is disabled\n- self.app.config['USAGE_LOGGING_ENABLED'] = False\n-\n- url = url_for('api.list_sources')\n- self.get(url)\n- self.assertFalse(mocked_es_create.called)\n-\n-\n-class RestApiGetObjectTestCase(OcdRestTestCaseMixin, TestCase):\n- required_indexes = [\n- 'ori_test_collection_index'\n- ]\n-\n- def test_get_existing_object(self):\n- \"\"\"Test getting an index document.\"\"\"\n- doc_id = self.doc_ids['ori_test_collection_index']['items'][0]\n- url = url_for('api.get_object', source_id='test_collection_index',\n- object_id=doc_id)\n- response = self.get(url)\n-\n- self.assert_ok_json(response)\n-\n- def test_get_nonexistent_object(self):\n- \"\"\"Test if getting an object that doesn't exist returns a 404\n- JSON response (with the appropriate error message).\"\"\"\n- url = url_for('api.get_object', 
source_id='test_collection_index',\n- object_id='i-do-not-exist')\n- response = self.get(url)\n-\n- self.assert_not_found_request_json(response)\n- self.assertEqual(response.json['error'], 'Document not found.')\n-\n- def test_get_nonexistent_source(self):\n- \"\"\"Test if getting an object from a source index that doesn't\n- exist returns a 404 JSON response (with the appropriate error\n- message).\"\"\"\n- source_id = 'i-do-not-exist'\n- url = url_for('api.get_object', source_id=source_id,\n- object_id='i-do-not-exist')\n- response = self.get(url)\n-\n- self.assert_not_found_request_json(response)\n- self.assertEqual(response.json['error'],\n- 'Document not found.')\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_logging_called_if_enabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is called when usage\n- logging is enabled.\"\"\"\n- # Enable usage logging for this test\n- self.app.config['USAGE_LOGGING_ENABLED'] = True\n-\n- doc_id = self.doc_ids['ori_test_collection_index']['items'][0]\n- url = url_for('api.get_object', source_id='test_collection_index',\n- object_id=doc_id)\n- self.get(url)\n- self.assertTrue(mocked_es_create.called)\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_search_logging_not_called_if_disabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is not called when\n- usage logging is disabled.\"\"\"\n- # Make sure usage logging is disabled\n- self.app.config['USAGE_LOGGING_ENABLED'] = False\n-\n- doc_id = self.doc_ids['ori_test_collection_index']['items'][0]\n- self.get(url_for('api.get_object', source_id='test_collection_index',\n- object_id=doc_id))\n- self.assertFalse(mocked_es_create.called)\n-\n-\n-class RestApiGetObjectSourceTestCase(OcdRestTestCaseMixin, TestCase):\n- required_indexes = [\n- 'ori_test_collection_index'\n- ]\n-\n- def test_get_existing_object(self):\n- \"\"\"Test getting an index document.\"\"\"\n- doc_id = self.doc_ids['ori_test_collection_index']['items'][0]\n- url = url_for('api.get_object_source',\n- source_id='test_collection_index',\n- doc_type='items', object_id=doc_id)\n- response = self.get(url)\n-\n- self.assert_ok_json(response)\n-\n- def test_get_nonexistent_object(self):\n- \"\"\"Test if getting an object that doesn't exist returns a 404\n- JSON response (with the appropriate error message).\"\"\"\n- url = url_for('api.get_object_source',\n- source_id='test_collection_index',\n- object_id='i-do-not-exist')\n- response = self.get(url)\n-\n- self.assert_not_found_request_json(response)\n- self.assertEqual(response.json['error'], 'Document not found.')\n-\n- def test_get_nonexistent_source(self):\n- \"\"\"Test if getting an object from a source index that doesn't\n- exist returns a 404 JSON response (with the appropriate error\n- message).\"\"\"\n- source_id = 'i-do-not-exist'\n- url = url_for('api.get_object_source', source_id=source_id,\n- object_id='i-do-not-exist')\n- response = self.get(url)\n-\n- self.assert_not_found_request_json(response)\n- self.assertEqual(response.json['error'],\n- 'Document not found.')\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_logging_called_if_enabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is called when usage\n- logging is enabled.\"\"\"\n- # Enable usage logging for this test\n- self.app.config['USAGE_LOGGING_ENABLED'] = True\n-\n- doc_id = self.doc_ids['ori_test_collection_index']['items'][0]\n- url = url_for('api.get_object_source',\n- 
source_id='test_collection_index', object_id=doc_id)\n- self.get(url)\n- self.assertTrue(mocked_es_create.called)\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_search_logging_not_called_if_disabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is not called when\n- usage logging is disabled.\"\"\"\n- # Make sure usage logging is disabled\n- self.app.config['USAGE_LOGGING_ENABLED'] = False\n-\n- doc_id = self.doc_ids['ori_test_collection_index']['items'][0]\n- url = url_for('api.get_object_source',\n- source_id='test_collection_index', object_id=doc_id)\n- self.get(url)\n- self.assertFalse(mocked_es_create.called)\n-\n-\n-class RestApiGetObjectStatsTestCase(OcdRestTestCaseMixin, TestCase):\n- required_indexes = [\n- 'ori_test_usage_logging_index',\n- 'ori_test_collection_index'\n- ]\n-\n- def test_get_existing_object(self):\n- \"\"\"Test getting the stats of an indexed document.\"\"\"\n- doc_id = self.doc_ids['ori_test_collection_index']['items'][0]\n- url = url_for('api.get_object_stats',\n- source_id='test_collection_index', object_id=doc_id)\n- response = self.get(url)\n-\n- self.assert_ok_json(response)\n-\n- def test_get_nonexistent_object(self):\n- \"\"\"Test if getting an object that doesn't exist returns a 404\n- JSON response.\"\"\"\n- url = url_for('api.get_object_source',\n- source_id='test_collection_index',\n- object_id='i-do-not-exist')\n- response = self.get(url)\n-\n- self.assert_not_found_request_json(response)\n-\n- def test_get_nonexistent_source(self):\n- \"\"\"Test if getting an object from a source index that doesn't\n- exist returns a 404 JSON response.\"\"\"\n- url = url_for('api.get_object_stats', source_id='i-do-not-exist',\n- object_id='i-do-not-exist')\n- response = self.get(url)\n-\n- self.assert_not_found_request_json(response)\n-\n-\n-class RestApiResolveTestCase(OcdRestTestCaseMixin, TestCase):\n- required_indexes = [\n- 'ori_test_resolver_index'\n- ]\n-\n- def test_successful_resolve(self):\n- \"\"\"Test if a valid URL resolves and returns a redirect with the\n- correct status, location and content type.\"\"\"\n- doc_id = self.doc_ids['ori_test_resolver_index']['url'][0]\n- url = url_for('api.resolve', url_id=doc_id)\n-\n- response = self.get(url, follow_redirects=False)\n-\n- self.assert_status_code(response, 302)\n- self.assert_content_type(response, 'text/html; charset=utf-8')\n- self.assertIn('location', response.headers)\n- self.assertTrue(response.headers['location'].startswith('http://'))\n-\n- def test_resolve_not_whitelisted_content_type(self):\n- \"\"\"Test that a resolve document with an incorrect content_type resolves\n- to the original URL.\"\"\"\n- doc_id = self.doc_ids['ori_test_resolver_index']['url'][1]\n- url = url_for('api.resolve', url_id=doc_id)\n-\n- response = self.get(url, follow_redirects=False)\n-\n- self.assert_status_code(response, 302)\n- self.assert_content_type(response, 'text/html; charset=utf-8')\n- self.assertIn('location', response.headers)\n- self.assertTrue(response.headers['location'].startswith('http://'))\n-\n- # def test_successful_thumbnail_resolve(self):\n- # \"\"\"Test if a valid URL resolves and returns a redirect to a thumbnailed\n- # image.\n- # \"\"\"\n- # doc_id = self.doc_ids['ori_test_resolver_index']['url'][0]\n- # url = url_for('api.resolve', url_id=doc_id, size='large')\n- #\n- # response = self.get(url, follow_redirects=False)\n- #\n- # self.assert_status_code(response, 302)\n- # self.assert_content_type(response, 'text/html; charset=utf-8')\n- # 
self.assertIn('location', response.headers)\n- # self.assertIn('large', response.headers['location'])\n- # self.assertIn(self.app.config.get('THUMBNAIL_URL'), response.headers['location'])\n- #\n- # def test_invalid_thumbnail_size_json(self):\n- # \"\"\"Test if a request with an invalid thumbnail size returns a 400 with\n- # proper content type\"\"\"\n- # doc_id = self.doc_ids['ori_test_resolver_index']['url'][0]\n- # url = url_for('api.resolve', url_id=doc_id, size='humongous')\n- #\n- # response = self.get(url, follow_redirects=False)\n- #\n- # self.assert_bad_request(response)\n- # self.assert_content_type(response, 'application/json')\n- # self.assertEqual(response.json.get('status'), 'error')\n- # self.assertIn('appropriate thumbnail size', response.json.get('error'))\n- #\n- # def test_invalid_thumbnail_size_html(self):\n- # \"\"\"Test if a request with an invalid thumbnail size returns a 400 with\n- # proper content type\"\"\"\n- # doc_id = self.doc_ids['ori_test_resolver_index']['url'][0]\n- # url = url_for('api.resolve', url_id=doc_id, size='humongous')\n- #\n- # response = self.get(url, follow_redirects=False,\n- # content_type='text/html')\n- #\n- # self.assert_bad_request(response)\n- # self.assert_content_type(response, 'text/html; charset=utf-8')\n- # self.assertIn('You did not provide an appropriate '\n- # 'thumbnail size', response.data)\n-\n- def test_invalid_resolve_json(self):\n- \"\"\"Tests if a request to resolve an invalid URL results in a\n- 404 response with the proper content type.\"\"\"\n- url = url_for('api.resolve', url_id='i-do-not-exist')\n- response = self.get(url, follow_redirects=False,\n- content_type='application/json')\n- self.assert_not_found_request_json(response)\n-\n- def test_invalid_resolve_html(self):\n- \"\"\"Tests if a request to resolve an invalid URL results in a\n- 404 response with the proper content type.\"\"\"\n- url = url_for('api.resolve', url_id='i-do-not-exist')\n- response = self.get(url, follow_redirects=False,\n- content_type='text/html')\n-\n- self.assert_not_found(response)\n- self.assert_content_type(response, 'text/html; charset=utf-8')\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_logging_called_if_enabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is called when usage\n- logging is enabled.\"\"\"\n- # Enable usage logging for this test\n- self.app.config['USAGE_LOGGING_ENABLED'] = True\n-\n- doc_id = self.doc_ids['ori_test_resolver_index']['url'][0]\n- url = url_for('api.resolve', url_id=doc_id)\n- self.get(url, follow_redirects=False)\n-\n- self.assertTrue(mocked_es_create.called)\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_search_logging_not_called_if_disabled(self, mocked_es_create):\n- \"\"\"Test if the event log storage function is not called when\n- usage logging is disabled.\"\"\"\n- # Make sure usage logging is disabled\n- self.app.config['USAGE_LOGGING_ENABLED'] = False\n-\n- doc_id = self.doc_ids['ori_test_resolver_index']['url'][0]\n- url = url_for('api.resolve', url_id=doc_id)\n- self.get(url, follow_redirects=False)\n- self.assertFalse(mocked_es_create.called)\n-\n-\n-class LogEventTaskTestCase(OcdRestTestCaseMixin, TestCase):\n- default_args = {\n- 'user_agent': 'abc',\n- 'referer': 'def',\n- 'user_ip': '127.0.0.1',\n- 'created_at': '2015-01-01',\n- 'event_type': 'get_object'\n- }\n-\n- @mock.patch('tests.ocd_frontend.current_app.es.create')\n- def test_unknown_event_raises_exception(self, _):\n- task_args = 
self.default_args\n- task_args['event_type'] = 'unknown-test-event'\n-\n- self.app.config['USAGE_LOGGING_ENABLED'] = True\n- self.assertRaises(ValueError, tasks.log_event, **task_args)\n-\n-\n-class RestApiScrollTestCase(OcdRestTestCaseMixin, TestCase):\n- endpoint_url = 'api.search'\n- endpoint_url_args = {}\n- required_indexes = [\n- 'ori_test_scroll_index'\n- ]\n-\n- def test_scroll_valid_search(self):\n- \"\"\"Tests if a valid search request responds with a JSON and\n- status 200 OK.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- current_app.config['COMBINED_INDEX'] = 'ori_test_scroll_index'\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'scroll': '1m', 'size': 1}))\n- self.assert_ok_json(response)\n- self.assertTrue('scroll' in response.json['meta'])\n- self.assertEqual(int(response.json['meta']['total']), 3)\n-\n- def test_scroll_valid_search_no_scroll(self):\n- \"\"\"Tests if a valid search request responds with a JSON and\n- status 200 OK.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- current_app.config['COMBINED_INDEX'] = 'ori_test_scroll_index'\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'size': 1}))\n- self.assert_ok_json(response)\n- self.assertTrue('scroll' not in response.json['meta'])\n- self.assertEqual(int(response.json['meta']['total']), 3)\n-\n- def test_scroll_valid_search_full(self):\n- \"\"\"Tests if a valid search request responds with a JSON and\n- status 200 OK and check all pages.\"\"\"\n- url = url_for(self.endpoint_url, **self.endpoint_url_args)\n- current_app.config['COMBINED_INDEX'] = 'ori_test_scroll_index'\n-\n- response = self.post(url, content_type='application/json',\n- data=json.dumps({'scroll': '1m', 'size': 1}))\n- self.assert_ok_json(response)\n- self.assertTrue('scroll' in response.json['meta'])\n- self.assertEqual(int(response.json['meta']['total']), 3)\n- self.assertEqual(len(response.json['item']), 1)\n- scroll_id = response.json['meta']['scroll']\n- while (\n- ('item' in response.json) and\n- (len(response.json['item']) > 0)\n- ):\n- response = self.post(\n- url, content_type='application/json',\n- data=json.dumps({'scroll': '1m', 'scroll_id': scroll_id}))\n- self.assert_ok_json(response)\n- self.assertTrue('scroll' in response.json['meta'])\n- self.assertEqual(int(response.json['meta']['total']), 3)\n- if 'item' in response.json:\n- self.assertEqual(len(response.json['item']), 1)\ndiff --git a/tests/ocd_frontend/mixins.py b/tests/ocd_frontend/mixins.py\ndeleted file mode 100644\n--- a/tests/ocd_frontend/mixins.py\n+++ /dev/null\n@@ -1,222 +0,0 @@\n-import json\n-import os\n-import shutil\n-from glob import glob\n-\n-from werkzeug.utils import parse_cookie\n-\n-import ocd_frontend\n-\n-\n-class OcdRestTestCaseMixin(object):\n- required_indexes = []\n-\n- def create_app(self):\n- \"\"\"Create instance of Flask application for testing.\"\"\"\n-\n- app = ocd_frontend.rest.create_app()\n- app.config['TESTING'] = True\n- app.config['PRESERVE_CONTEXT_ON_EXCEPTION'] = False\n- app.config['COMBINED_INDEX'] = 'ori_test_combined_index'\n- app.config['RESOLVER_URL_INDEX'] = 'ori_test_resolver_index'\n- app.config['USAGE_LOGGING_INDEX'] = 'ori_test_usage_logging_index'\n- app.config['USAGE_LOGGING_ENABLED'] = False\n-\n- self.es_client = app.es.get_esclient()\n- self.PWD = os.path.dirname(__file__)\n- self.thumbnail_cache = os.path.join(os.path.abspath(self.PWD),\n- 'test-thumbnail-cache')\n-\n- app.config['THUMBNAIL_DIR'] = 
self.thumbnail_cache\n-\n- return app\n-\n- def setUp(self):\n- # If ES indexes are required, Elasticsearch should be running\n- if self.required_indexes:\n- self.assertTrue(self.es_client.ping(),\n- msg='Elasticsearch cluster is not running')\n-\n- # Add the specified Elasticsearch indexes\n- self.es_add_indices(self.required_indexes)\n-\n- # Add test documents to the specified indexes\n- self.es_index_docs(self.required_indexes)\n-\n- self.create_thumbnail_cache()\n-\n- def create_thumbnail_cache(self):\n- if not os.path.exists(self.thumbnail_cache):\n- os.makedirs(self.thumbnail_cache)\n- self.addCleanup(self.remove_thumbnail_cache)\n-\n- def es_add_indices(self, indices):\n- \"\"\"Create the ES indexes given in ``indices``. A cleanup is\n- also registered for each index.\"\"\"\n-\n- for index in indices:\n- if not self.es_client.indices.exists(index):\n- self.es_client.indices.create(index)\n-\n- # Try to remove, just in case\n- self.addCleanup(self.es_remove_index, index)\n-\n- def es_index_docs(self, indices):\n- \"\"\"Index test documents for each index specified in ``indices``.\"\"\"\n-\n- # Put the IDs of the indexed docs in a dict that is grouped by\n- # index so the tests can use it to fetch sample docs by ID\n- self.doc_ids = {}\n-\n- for index in indices:\n- self.doc_ids[index] = {}\n-\n- files_path = os.path.join(self.PWD, 'test_data', index, '*.json')\n- test_file_paths = glob(files_path)\n-\n- for doc_file_path in test_file_paths:\n- # The doc_type is determined by the first part of the filename\n- doc_type = os.path.split(doc_file_path)[-1].split('_')[0]\n-\n- with open(doc_file_path, 'rb') as doc_file:\n- doc = json.load(doc_file)\n-\n- # Explicitly refresh index, as the upcoming count\n- # request can be faster to execute than the refresh\n- # rate of the ES instance\n- i_doc = self.es_client.index(index=index, body=doc,\n- doc_type=doc_type,\n- refresh=True)\n-\n- if doc_type not in self.doc_ids[index]:\n- self.doc_ids[index][doc_type] = []\n- self.doc_ids[index][doc_type].append(i_doc['_id'])\n-\n- self.assertEqual(\n- self.es_client.count(index=index).get('count'),\n- len(test_file_paths),\n- msg='Incorrect doc count in ES index %s' % index\n- )\n-\n- def es_remove_index(self, index_name):\n- self.es_client.indices.delete(index_name)\n-\n- def remove_thumbnail_cache(self):\n- \"\"\"Remove thumbnail cache directory\"\"\"\n- shutil.rmtree(self.thumbnail_cache)\n-\n- @staticmethod\n- def _request(method, *args, **kwargs):\n- \"\"\"Execute HTTP method with provided args and kwargs.\n-\n- The ``content_type`` is set to \"application/json\" by default,\n- as the API expects JSON in most requests. Also, the test cases\n- should follow redirects by default.\n-\n- :param method: HTTP method to use (i.e. 
'get', 'post', 'put', etc.)\n- :return:\n- \"\"\"\n- kwargs.setdefault('content_type', 'application/json')\n- kwargs.setdefault('follow_redirects', True)\n- return method(*args, **kwargs)\n-\n- def get(self, *args, **kwargs):\n- return self._request(self.client.get, *args, **kwargs)\n-\n- def post(self, *args, **kwargs):\n- return self._request(self.client.post, *args, **kwargs)\n-\n- def put(self, *args, **kwargs):\n- return self._request(self.client.put, *args, **kwargs)\n-\n- def delete(self, *args, **kwargs):\n- return self._request(self.client.delete, *args, **kwargs)\n-\n- @staticmethod\n- def get_cookies(response):\n- \"\"\"Parse cookies from Flask Response.\n-\n- :param response:\n- :return: dict with cookie values\n- \"\"\"\n- cookies = {}\n- for value in response.headers.get_all('Set-Cookie'):\n- cookies.update(parse_cookie(value))\n- return cookies\n-\n- def assert_status_code(self, response, status_code):\n- \"\"\"Assert status code of a Flask test client response\n-\n- :param response: test client response object\n- :param status_code: expected status code\n- :return: Flask.Response\n- \"\"\"\n- self.assertEquals(status_code, response.status_code)\n- return response\n-\n- def assert_content_type(self, response, content_type):\n- \"\"\"Assert the content-type of a Flask test client response\n-\n- :param response: The test client response object\n- :param content_type: The expected content type\n- :return: Flask.Response\n- \"\"\"\n- self.assertEquals(content_type, response.headers.get('Content-Type'))\n- return response\n-\n- def assert_ok(self, response):\n- return self.assert_status_code(response, 200)\n-\n- def assert_bad_request(self, response):\n- return self.assert_status_code(response, 400)\n-\n- def assert_unauthorized(self, response):\n- return self.assert_status_code(response, 401)\n-\n- def assert_forbidden(self, response):\n- return self.assert_status_code(response, 403)\n-\n- def assert_not_found(self, response):\n- return self.assert_status_code(response, 404)\n-\n- def assert_json(self, response):\n- \"\"\"JSON response\n-\n- :param response: Flask.Response\n- :return: Flask.Response\n- \"\"\"\n- return self.assert_content_type(response, 'application/json')\n-\n- def assert_ok_html(self, response):\n- \"\"\"200 OK HTML response\n-\n- :param response: Flask.Response\n- :return: Flask.Response\n- \"\"\"\n- return self.assert_ok(\n- self.assert_content_type(response, 'text/html; charset=utf-8')\n- )\n-\n- def assert_ok_json(self, response):\n- \"\"\"200 OK JSON response\n-\n- :param response: Flask.Response\n- :return: Flask.Response\n- \"\"\"\n- return self.assert_ok(self.assert_json(response))\n-\n- def assert_bad_request_json(self, response):\n- \"\"\"Assert 400 Bad Request JSON response\n-\n- :param response: Flask.Response\n- :return: Flask.Response\n- \"\"\"\n- return self.assert_bad_request(self.assert_json(response))\n-\n- def assert_not_found_request_json(self, response):\n- \"\"\"Assert 404 Not Found JSON response\n-\n- :param response: Flask.Response\n- :return: Flask.Response\n- \"\"\"\n- return self.assert_not_found(self.assert_json(response))\ndiff --git a/tests/ocd_frontend/test_data/ori_test_collection_index/items_1.json b/tests/ocd_frontend/test_data/ori_test_collection_index/items_1.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_collection_index/items_1.json\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-{\n- \"date\": \"1642-01-01T00:00:00\",\n- \"description\": \"Het korporaalschap van kapitein Frans Banninck Cocq en luitenant 
Willem van Ruytenburch, bekend als de 'Nachtwacht'. Schutters van de kloveniersdoelen uit een poort naar buiten tredend. Op een schild aangebracht naast de poort staan de namen van de afgebeelde personen: Frans Banninck Cocq, heer van purmerlant en Ilpendam, Capiteijn Willem van Ruijtenburch van Vlaerdingen, heer van Vlaerdingen, Lu[ij]tenant, Jan Visscher Cornelisen Vaendrich, Rombout Kemp Sergeant, Reijnier Engelen Sergeant, Barent Harmansen, Jan Adriaensen Keyser, Elbert Willemsen, Jan Clasen Leydeckers, Jan Ockersen, Jan Pietersen bronchorst, Harman Iacobsen wormskerck, Jacob Dircksen de Roy, Jan vander heede, Walich Schellingwou, Jan brugman, Claes van Cruysbergen, Paulus Schoonhoven. De schutters zijn gewapend met lansen, musketten en hellebaarden. Rechts de tamboer met een grote trommel. Tussen de soldaten links staat een meisje met een dode kip om haar middel, rechts een blaffende hond. Linksboven de vaandrig met de uitgestoken vaandel.\",\n- \"source_data\": {\n- \"data\": \"{\\\"webImage\\\": {\\\"url\\\": \\\"http://lh6.ggpht.com/ZYWwML8mVFonXzbmg2rQBulNuCSr3rAaf5ppNcUc2Id8qXqudDL1NSYxaqjEXyDLSbeNFzOHRu0H7rbIws0Js4d7s_M=s0\\\", \\\"height\\\": 2034, \\\"width\\\": 2500, \\\"offsetPercentageX\\\": 50, \\\"offsetPercentageY\\\": 100, \\\"guid\\\": \\\"3ae88fe0-021c-41ae-a4ce-cc70b7bc6295\\\"}, \\\"principalMakers\\\": [{\\\"dateOfBirthPrecision\\\": null, \\\"dateOfDeath\\\": \\\"1669-10-08\\\", \\\"name\\\": \\\"Rembrandt Harmensz. van Rijn\\\", \\\"roles\\\": [\\\"schilder\\\"], \\\"dateOfDeathPrecision\\\": null, \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Rembrandt Harmensz. van Rijn\\\", \\\"placeOfDeath\\\": \\\"Amsterdam\\\", \\\"dateOfBirth\\\": \\\"1606-07-15\\\", \\\"qualification\\\": null, \\\"nationality\\\": \\\"Noord-Nederlands\\\", \\\"productionPlaces\\\": [\\\"Amsterdam\\\"], \\\"placeOfBirth\\\": \\\"Leiden\\\", \\\"biography\\\": null, \\\"occupation\\\": [\\\"prentmaker\\\", \\\"tekenaar\\\", \\\"schilder\\\"]}], \\\"historicalPersons\\\": [\\\"Banning Cocq, Frans\\\", \\\"Ruytenburch, Willem van\\\", \\\"Visscher Cornelisen, Jan\\\", \\\"Kemp, Rombout\\\", \\\"Engelen, Reijnier Janszn\\\", \\\"Bolhamer, Barent Harmansen\\\", \\\"Keijser, Jan Adriaensen\\\", \\\"Willemsen, Elbert\\\", \\\"Leijdeckers, Jan Claesen\\\", \\\"Ockersen, Jan\\\", \\\"Bronchorst, Jan Pietersen\\\", \\\"Wormskerck, Harman Jacobsen\\\", \\\"Roy, Jacob Dircksen de\\\", \\\"Heede, Jan van der\\\"], \\\"exhibitions\\\": [], \\\"links\\\": {\\\"search\\\": \\\"https://www.rijksmuseum.nl/api/nl/collection\\\"}, \\\"objectTypes\\\": [\\\"schilderij\\\"], \\\"priref\\\": \\\"5216\\\", \\\"objectCollection\\\": [\\\"schilderijen\\\"], \\\"physicalProperties\\\": [], \\\"productionPlaces\\\": [\\\"Amsterdam\\\"], \\\"colors\\\": [\\\"#261808\\\", \\\" #5E3C14\\\", \\\" #9C8238\\\", \\\" #885617\\\", \\\" #AF9F6B\\\", \\\" #6C6238\\\", \\\" #D7CB9E\\\"], \\\"titles\\\": [\\\"Officieren en andere schutters van wijk II in Amsterdam onder leiding van kapitein Frans Banninck Cocq en luitenant Willem van Ruytenburch, bekend als de \\\\u2018Nachtwacht\\\\u2019\\\", \\\"Het korporaalschap van kapitein Frans Banninck Cocq en luitenant Willem van Ruytenburch, bekend als de 'Nachtwacht'\\\"], \\\"scLabelLine\\\": \\\"Rembrandt Harmensz van Rijn (1606\\\\u20131669), olieverf op doek, 1642\\\", \\\"label\\\": {\\\"date\\\": \\\"2013-05-22\\\", \\\"notes\\\": null, \\\"makerLine\\\": \\\"Rembrandt Harmensz van Rijn (1606\\\\u20131669), olieverf op doek, 1642\\\", \\\"description\\\": null, 
\\\"title\\\": \\\"Schutters van wijk II onder leiding van kapitein Frans Banninck Cocq, bekend als de \\\\u2018Nachtwacht\\\\u2019\\\"}, \\\"makers\\\": [{\\\"dateOfBirthPrecision\\\": null, \\\"dateOfDeath\\\": \\\"1669-10-08\\\", \\\"name\\\": \\\"Rembrandt Harmensz. van Rijn\\\", \\\"roles\\\": [\\\"schilder\\\"], \\\"dateOfDeathPrecision\\\": null, \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Rembrandt Harmensz. van Rijn\\\", \\\"placeOfDeath\\\": \\\"Amsterdam\\\", \\\"dateOfBirth\\\": \\\"1606-07-15\\\", \\\"qualification\\\": null, \\\"nationality\\\": \\\"Noord-Nederlands\\\", \\\"productionPlaces\\\": [\\\"Amsterdam\\\"], \\\"placeOfBirth\\\": \\\"Leiden\\\", \\\"biography\\\": null, \\\"occupation\\\": [\\\"prentmaker\\\", \\\"tekenaar\\\", \\\"schilder\\\"]}], \\\"plaqueDescriptionEnglish\\\": null, \\\"techniques\\\": [], \\\"principalMaker\\\": \\\"Rembrandt Harmensz. van Rijn\\\", \\\"labelText\\\": null, \\\"title\\\": \\\"Schutters van wijk II onder leiding van kapitein Frans Banninck Cocq, bekend als de \\\\u2018Nachtwacht\\\\u2019\\\", \\\"id\\\": \\\"nl-SK-C-5\\\", \\\"materials\\\": [\\\"doek\\\", \\\"olieverf\\\"], \\\"dating\\\": {\\\"late\\\": null, \\\"earlyPrecision\\\": null, \\\"yearLate\\\": 1642, \\\"period\\\": 17, \\\"early\\\": null, \\\"yearEarly\\\": 1642, \\\"latePrecision\\\": null, \\\"year\\\": 1642}, \\\"objectNumber\\\": \\\"SK-C-5\\\", \\\"hasImage\\\": true, \\\"associations\\\": [], \\\"classification\\\": {\\\"iconClassIdentifier\\\": [\\\"45(+26)\\\", \\\"45C1\\\", \\\"48C7341\\\", \\\"31D11222\\\", \\\"45D12\\\", \\\"34B11\\\"], \\\"places\\\": [\\\"AmsterdamAmsterdam\\\"], \\\"people\\\": [\\\"Banning Cocq, Frans\\\", \\\"Ruytenburch, Willem van\\\", \\\"Visscher Cornelisen, Jan\\\", \\\"Kemp, Rombout\\\", \\\"Engelen, Reijnier Janszn\\\", \\\"Bolhamer, Barent Harmansen\\\", \\\"Keijser, Jan Adriaensen\\\", \\\"Willemsen, Elbert\\\", \\\"Leijdeckers, Jan Claesen\\\", \\\"Ockersen, Jan\\\", \\\"Bronchorst, Jan Pietersen\\\", \\\"Wormskerck, Harman Jacobsen\\\", \\\"Roy, Jacob Dircksen de\\\", \\\"Heede, Jan van der\\\"], \\\"objectNumbers\\\": [\\\"SK-C-5\\\"], \\\"iconClassDescription\\\": [\\\"warfare; military affairs (+ citizen soldiery, civil guard, citizen militia)\\\", \\\"weapons\\\", \\\"drum (musical instrument)\\\", \\\"girl (child between toddler and youth)\\\", \\\"(military) standard-bearer\\\", \\\"dog\\\"], \\\"periods\\\": [], \\\"motifs\\\": [], \\\"events\\\": []}, \\\"description\\\": \\\"Het korporaalschap van kapitein Frans Banninck Cocq en luitenant Willem van Ruytenburch, bekend als de 'Nachtwacht'. Schutters van de kloveniersdoelen uit een poort naar buiten tredend. Op een schild aangebracht naast de poort staan de namen van de afgebeelde personen: Frans Banninck Cocq, heer van purmerlant en Ilpendam, Capiteijn Willem van Ruijtenburch van Vlaerdingen, heer van Vlaerdingen, Lu[ij]tenant, Jan Visscher Cornelisen Vaendrich, Rombout Kemp Sergeant, Reijnier Engelen Sergeant, Barent Harmansen, Jan Adriaensen Keyser, Elbert Willemsen, Jan Clasen Leydeckers, Jan Ockersen, Jan Pietersen bronchorst, Harman Iacobsen wormskerck, Jacob Dircksen de Roy, Jan vander heede, Walich Schellingwou, Jan brugman, Claes van Cruysbergen, Paulus Schoonhoven. De schutters zijn gewapend met lansen, musketten en hellebaarden. Rechts de tamboer met een grote trommel. Tussen de soldaten links staat een meisje met een dode kip om haar middel, rechts een blaffende hond. 
Linksboven de vaandrig met de uitgestoken vaandel.\\\", \\\"colorsWithNormalization\\\": [{\\\"normalizedHex\\\": \\\"#000000\\\", \\\"originalHex\\\": \\\"#261808\\\"}, {\\\"normalizedHex\\\": \\\"#B35A1F\\\", \\\"originalHex\\\": \\\" #5E3C14\\\"}, {\\\"normalizedHex\\\": \\\"#E09714\\\", \\\"originalHex\\\": \\\" #9C8238\\\"}, {\\\"normalizedHex\\\": \\\"#B35A1F\\\", \\\"originalHex\\\": \\\" #885617\\\"}, {\\\"normalizedHex\\\": \\\"#E0CC91\\\", \\\"originalHex\\\": \\\" #AF9F6B\\\"}, {\\\"normalizedHex\\\": \\\"#367614\\\", \\\"originalHex\\\": \\\" #6C6238\\\"}, {\\\"normalizedHex\\\": \\\"#E0CC91\\\", \\\"originalHex\\\": \\\" #D7CB9E\\\"}], \\\"normalized32Colors\\\": [\\\"#000000\\\", \\\" #B35A1F\\\", \\\" #E09714\\\", \\\" #E0CC91\\\", \\\" #367614\\\"], \\\"longTitle\\\": \\\"Schutters van wijk II onder leiding van kapitein Frans Banninck Cocq, bekend als de \\\\u2018Nachtwacht\\\\u2019, Rembrandt Harmensz. van Rijn, 1642\\\", \\\"copyrightHolder\\\": null, \\\"showImage\\\": true, \\\"subTitle\\\": \\\"h 379,5cm \\\\u00d7 b 453,5cm \\\\u00d7 g 337kg\\\", \\\"principalOrFirstMaker\\\": \\\"Rembrandt Harmensz. van Rijn\\\", \\\"dimensions\\\": [{\\\"part\\\": null, \\\"type\\\": \\\"hoogte\\\", \\\"value\\\": \\\"379,5\\\", \\\"unit\\\": \\\"cm\\\"}, {\\\"part\\\": null, \\\"type\\\": \\\"breedte\\\", \\\"value\\\": \\\"453,5\\\", \\\"unit\\\": \\\"cm\\\"}, {\\\"part\\\": null, \\\"type\\\": \\\"gewicht\\\", \\\"value\\\": \\\"337\\\", \\\"unit\\\": \\\"kg\\\"}, {\\\"part\\\": null, \\\"type\\\": \\\"gewicht\\\", \\\"value\\\": \\\"170\\\", \\\"unit\\\": \\\"kg\\\"}], \\\"inscriptions\\\": [\\\"signatuur en datum\\\", \\\"Rembrandt f 1642\\\", \\\"inscriptie\\\", \\\"Frans Banning Cocq, heer van purmerlant en Ilpendam, Capiteijn Willem van Ruijtenburch van Vlaerdingen, heer van Vlaerdingen, Lu[ij]tenant, Jan Visscher Cornelisen Vaendrich, Rombout Kemp Sergeant, Reijnier Engelen Sergeant, Barent Harmansen, Jan Adriaensen Keyser, Elbert Willemsen, Jan Clasen Leydeckers, Jan Ockersen, Jan Pietersen bronchorst, Harman Iacobsen wormskerck, Jacob Dircksen de Roy, Jan vander heede, Walich Schellingwou, Jan brugman, Claes van Cruysbergen, Paulus Schoonhoven\\\"], \\\"language\\\": \\\"nl\\\", \\\"artistRole\\\": null, \\\"documentation\\\": [\\\"Inzoomer object op zaal, 2013 (Nederlands/English).\\\", \\\"A. Jensen Adams, Public Faces and Private Identities in Seventeenth-Century Holland, Portraiture and the Production of Community, New York 2009, p. 211-217, fig. 60.\\\", \\\"M. Rayssac, 'l'Exode des Mus\\\\u00e9es, Histoire des oeuvres d'art sous l'Occupation', Parijs 2007.\\\", \\\"K.M. Groen, 'Earth Matters, The origin of the material used for the preparation of the Nightwatch and many other canvases in Rembrandt's workshop after 1640', Art Matters, volume 3, p. 138.\\\", \\\"E. Runia. A. van Suchtelen, Rembrandt, Den Haag 2006, p. 14.\\\", \\\"Y. van Veelen, 'Work in progress. De strijd om de Nachtwacht', Kunstbeeld nr. 12/1 (dec. 2004/jan. 2005), p. 44-47.\\\", \\\"'Omtrents Rembrandts Nachtwacht', Kunstkrant Rijksmuseum Amsterdam nr. 1 (1998), p. 14-20.\\\", \\\"H. van Os, in: Bulletin van het Rijksmuseum nr. 4 (1996), p. 309-320 + afb.\\\", \\\"J. Boomgaard, 'De Verloren Zoon. Rembrandt en de Nederlandse Kunstgeschiedenisbeschrijving', Kunstreeks (1995).\\\", \\\"P. Taylor, 'Darkness at Noon. Rembrandts Nachtwacht', Kunstschrift 6 (1994), p. 22-27 + afb.\\\", \\\"Fieke Tissink, 'Hoofdstuk uit het Rijksmuseum', Rijksmuseum Kunstkrant 19 (1993) nr. 3, p. 14-18.\\\", \\\"O. 
Pacht, 'Rembrandt', Munchen 1991, p. 19-30 + ill.\\\", \\\"A.K. Wheelock, 'The Age of Rembrandt', Studies in seventeenth Century Dutch Painting (The Pennsylvania State University, 1988), p. 215, 223 (afb.).\\\", \\\"J.B. Bedaux, 'Een achttiende eeuwse kunsttheoretische discussie', Kunstlicht 15 (1985), p. 25-28.\\\", \\\"C. Grimm, 'Handschrift, schildertechniek en beeldstructuur. Bijdrage tot het onderzoek naar toeschrijving, I: de helmen van Rembrandt', Tableau (1982/83), p. 246-248, afb. 5.\\\", \\\"E. van de Wetering, 'Ontrouw aan Rembrandt', Kunstschrift (Openbaar Kunstbezit) (1982), p. 166-167, 171, afb. 24-26.\\\", \\\"K. Clark, 'What is a masterpiece?', Portfolio. The Magazine of the Visual Arts, 2 (1980), p. 51 + afb.\\\", \\\"H. G\\\\u00fcnther, 'Damals oder heute unverstanden. Zum Problem von Rembrandts Nachtwache', Welkunst 50 (1980), p. 1848-1850 + afb.\\\", \\\"U. Schumacher, 'Gruppenportr\\\\u00e4t und Genrebild. Zur Deutung der Photographie fur die franzosische Malerei des 19. Jahrhunderts, Giessener Beitr\\\\u00e4ge zur Kunstgeschichte 4 (1979), p. 29, afb. 11.\\\", \\\"E. van de Wetering, 'De jonge Rembrandt aan het werk. Materiaalgebruik en schildertechniek in het begin van zijn Leidse periode, Oud Holland 91 (1977), p. 55, 58.\\\", \\\"E.K.J. Reznicek, 'Opmerkingen bij Rembrandt', Oud Holland 91 (1977), p. 99-103, afb. 22.\\\", \\\"M.M. Toth-Ubbens, 'De barbier van Amsterdam. Aantekeningen over de relaties tussen het Waaggebouw en de Schouwburg in de zeventiende eeuw', Antiek 10 (1975), p. 388, afb. 12.\\\", \\\"H. Gerson, 'De Nachtwacht', Openbaar Kunstbezit, 10e jaargang, januari (1966).\\\", \\\"Chr. White, 'Rembrandt', Den Haag 1964, p. 65, 66 + afb.\\\", \\\"A.J. Moes-Veth, 'Rembrandt's Claudius Civilis en de Nachtwacht van terzijde beschouwd, Oud Holland LXXV (1960), p. 143.\\\", \\\"A.J. Moes-Veth, 'De Nachtwacht en haar oude copieen', Oud Holland LXII (1947), p. 188.\\\"], \\\"physicalMedium\\\": \\\"olieverf op doek\\\", \\\"catRefRPK\\\": [], \\\"normalizedColors\\\": [\\\"#000000\\\", \\\" #8B4513\\\", \\\" #B8860B\\\", \\\" #BDB76B\\\", \\\" #556B2F\\\", \\\" #F5DEB3\\\"], \\\"acquisition\\\": {\\\"date\\\": \\\"1808-01-01T00:00:00Z\\\", \\\"method\\\": null, \\\"creditLine\\\": \\\"Bruikleen van de gemeente Amsterdam\\\"}, \\\"plaqueDescriptionDutch\\\": \\\"Rembrandts beroemdste en grootste doek werd gemaakt voor de Kloveniersdoelen. Dit was een van de verenigingsgebouwen van de Amsterdamse schutterij, de burgerwacht van de stad. \\\\r\\\\nRembrandt was de eerste die op een groepsportret de figuren in actie weergaf. De kapitein, in het zwart, geeft zijn luitenant opdracht dat de compagnie moet gaan marcheren. De schutters stellen zich op. Met behulp van licht vestigde Rembrandt de aandacht op belangrijke details, zoals het handgebaar van de kapitein en het kleine meisje op de achtergrond. 
Zij is de mascotte van de schutters.\\\"}\",\n- \"content_type\": \"application/json\"\n- },\n- \"title\": \"Schutters van wijk II onder leiding van kapitein Frans Banninck Cocq, bekend als de \\u2018Nachtwacht\\u2019\",\n- \"date_granularity\": 4,\n- \"meta\": {\n- \"processing_started\": \"2015-01-13T22:22:08.083732\",\n- \"processing_finished\": \"2015-01-13T22:22:08.088014\",\n- \"rights\": \"Creative Commons Zero\",\n- \"collection\": \"Rijksmuseum\",\n- \"original_object_id\": \"SK-C-5\",\n- \"source_id\": \"rijksmuseum\",\n- \"original_object_urls\": {\n- \"json\": \"https://www.rijksmuseum.nl/api/nl/collection/SK-C-5?format=json\",\n- \"html\": \"https://www.rijksmuseum.nl/nl/collectie/SK-C-5\"\n- }\n- },\n- \"authors\": [\n- \"Rembrandt Harmensz. van Rijn\"\n- ],\n- \"combined_index_data\": \"{\\\"date_granularity\\\": 4, \\\"media_urls\\\": [{\\\"url\\\": \\\"http://localhost:5000/v0/resolve/2f9f9b83798bba5226be78e497f9047cfa7eb190\\\", \\\"width\\\": 2500, \\\"height\\\": 2034, \\\"content_type\\\": \\\"image/jpeg\\\", \\\"original_url\\\": \\\"http://lh6.ggpht.com/ZYWwML8mVFonXzbmg2rQBulNuCSr3rAaf5ppNcUc2Id8qXqudDL1NSYxaqjEXyDLSbeNFzOHRu0H7rbIws0Js4d7s_M=s0\\\"}], \\\"meta\\\": {\\\"processing_started\\\": \\\"2015-01-13T22:22:08.083732\\\", \\\"rights\\\": \\\"Creative Commons Zero\\\", \\\"collection\\\": \\\"Rijksmuseum\\\", \\\"original_object_id\\\": \\\"SK-C-5\\\", \\\"source_id\\\": \\\"rijksmuseum\\\", \\\"original_object_urls\\\": {\\\"json\\\": \\\"https://www.rijksmuseum.nl/api/nl/collection/SK-C-5?format=json\\\", \\\"html\\\": \\\"https://www.rijksmuseum.nl/nl/collectie/SK-C-5\\\"}}, \\\"description\\\": \\\"Het korporaalschap van kapitein Frans Banninck Cocq en luitenant Willem van Ruytenburch, bekend als de 'Nachtwacht'. Schutters van de kloveniersdoelen uit een poort naar buiten tredend. Op een schild aangebracht naast de poort staan de namen van de afgebeelde personen: Frans Banninck Cocq, heer van purmerlant en Ilpendam, Capiteijn Willem van Ruijtenburch van Vlaerdingen, heer van Vlaerdingen, Lu[ij]tenant, Jan Visscher Cornelisen Vaendrich, Rombout Kemp Sergeant, Reijnier Engelen Sergeant, Barent Harmansen, Jan Adriaensen Keyser, Elbert Willemsen, Jan Clasen Leydeckers, Jan Ockersen, Jan Pietersen bronchorst, Harman Iacobsen wormskerck, Jacob Dircksen de Roy, Jan vander heede, Walich Schellingwou, Jan brugman, Claes van Cruysbergen, Paulus Schoonhoven. De schutters zijn gewapend met lansen, musketten en hellebaarden. Rechts de tamboer met een grote trommel. Tussen de soldaten links staat een meisje met een dode kip om haar middel, rechts een blaffende hond. Linksboven de vaandrig met de uitgestoken vaandel.\\\", \\\"title\\\": \\\"Schutters van wijk II onder leiding van kapitein Frans Banninck Cocq, bekend als de \\\\u2018Nachtwacht\\\\u2019\\\", \\\"date\\\": \\\"1642-01-01T00:00:00\\\", \\\"all_text\\\": \\\"Officieren en andere schutters van wijk II in Amsterdam onder leiding van kapitein Frans Banninck Cocq en luitenant Willem van Ruytenburch, bekend als de \\\\u2018Nachtwacht\\\\u2019 Het korporaalschap van kapitein Frans Banninck Cocq en luitenant Willem van Ruytenburch, bekend als de 'Nachtwacht' Het korporaalschap van kapitein Frans Banninck Cocq en luitenant Willem van Ruytenburch, bekend als de 'Nachtwacht'. Schutters van de kloveniersdoelen uit een poort naar buiten tredend. 
Op een schild aangebracht naast de poort staan de namen van de afgebeelde personen: Frans Banninck Cocq, heer van purmerlant en Ilpendam, Capiteijn Willem van Ruijtenburch van Vlaerdingen, heer van Vlaerdingen, Lu[ij]tenant, Jan Visscher Cornelisen Vaendrich, Rombout Kemp Sergeant, Reijnier Engelen Sergeant, Barent Harmansen, Jan Adriaensen Keyser, Elbert Willemsen, Jan Clasen Leydeckers, Jan Ockersen, Jan Pietersen bronchorst, Harman Iacobsen wormskerck, Jacob Dircksen de Roy, Jan vander heede, Walich Schellingwou, Jan brugman, Claes van Cruysbergen, Paulus Schoonhoven. De schutters zijn gewapend met lansen, musketten en hellebaarden. Rechts de tamboer met een grote trommel. Tussen de soldaten links staat een meisje met een dode kip om haar middel, rechts een blaffende hond. Linksboven de vaandrig met de uitgestoken vaandel. schilderij schilderijen Rembrandt Harmensz. van Rijn Leiden Amsterdam prentmaker tekenaar schilder schilder Noord-Nederlands Amsterdam Rembrandts beroemdste en grootste doek werd gemaakt voor de Kloveniersdoelen. Dit was een van de verenigingsgebouwen van de Amsterdamse schutterij, de burgerwacht van de stad. \\\\r\\\\nRembrandt was de eerste die op een groepsportret de figuren in actie weergaf. De kapitein, in het zwart, geeft zijn luitenant opdracht dat de compagnie moet gaan marcheren. De schutters stellen zich op. Met behulp van licht vestigde Rembrandt de aandacht op belangrijke details, zoals het handgebaar van de kapitein en het kleine meisje op de achtergrond. Zij is de mascotte van de schutters. Bruikleen van de gemeente Amsterdam doek olieverf Amsterdam schilderij\\\", \\\"authors\\\": [\\\"Rembrandt Harmensz. van Rijn\\\"]}\",\n- \"media_urls\": [\n- {\n- \"url\": \"http://localhost:5000/v0/resolve/2f9f9b83798bba5226be78e497f9047cfa7eb190\",\n- \"width\": 2500,\n- \"original_url\": \"http://lh6.ggpht.com/ZYWwML8mVFonXzbmg2rQBulNuCSr3rAaf5ppNcUc2Id8qXqudDL1NSYxaqjEXyDLSbeNFzOHRu0H7rbIws0Js4d7s_M=s0\",\n- \"content_type\": \"image/jpeg\",\n- \"height\": 2034\n- }\n- ]\n-}\n\\ No newline at end of file\ndiff --git a/tests/ocd_frontend/test_data/ori_test_collection_index/items_2.json b/tests/ocd_frontend/test_data/ori_test_collection_index/items_2.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_collection_index/items_2.json\n+++ /dev/null\n@@ -1,32 +0,0 @@\n-{\n- \"date\": \"1475-01-01T00:00:00\",\n- \"description\": \"Graffiguren, tien, zogenaamde rouwdragers of plorannen, afkomstig van het praalgraf van Isabella van Bourbon.\",\n- \"source_data\": {\n- \"data\": \"{\\\"webImage\\\": {\\\"url\\\": \\\"http://lh5.ggpht.com/LE3ggDaLAIwTIOknJm06U2Y_qNffQ0133bWQxnWfDtzi12uWE85MYWtYwvo8eQZx1tc-U-vH_-2ZIoDGYUUrs8YklQ=s0\\\", \\\"height\\\": 1328, \\\"width\\\": 2500, \\\"offsetPercentageX\\\": 50, \\\"offsetPercentageY\\\": 50, \\\"guid\\\": \\\"5b29ba1c-4c23-49c6-8bb6-2253ddbb560b\\\"}, \\\"principalMakers\\\": [], \\\"historicalPersons\\\": [\\\"Bourbon, Isabella van\\\"], \\\"exhibitions\\\": [], \\\"links\\\": {\\\"search\\\": \\\"https://www.rijksmuseum.nl/api/nl/collection\\\"}, \\\"objectTypes\\\": [\\\"graffiguur\\\"], \\\"priref\\\": \\\"437391\\\", \\\"objectCollection\\\": [\\\"beeldhouwwerken\\\"], \\\"physicalProperties\\\": [], \\\"productionPlaces\\\": [\\\"Brussel\\\", \\\"Brussel\\\", \\\"Doornik\\\", \\\"Brussel\\\"], \\\"colors\\\": [\\\"#D4D3D2\\\", \\\" #ACABA8\\\", \\\" #191813\\\", \\\" #565249\\\", \\\" #39362F\\\", \\\" #8E8D89\\\", \\\" #706E69\\\"], \\\"titles\\\": [\\\"Graffiguren Isabella van Bourbon\\\"], 
\\\"scLabelLine\\\": \\\"modellen toegeschreven aan Jan Borman II (werkzaam ca. 1475\\\\u20131520), uitgevoerd door Renier van Thiene (werkzaam ca. 1465\\\\u20131498), Brussel, ca. 1475-1476, brons\\\", \\\"label\\\": {\\\"date\\\": \\\"2012-11-21\\\", \\\"notes\\\": null, \\\"makerLine\\\": \\\"modellen toegeschreven aan Jan Borman II (werkzaam ca. 1475\\\\u20131520), uitgevoerd door Renier van Thiene (werkzaam ca. 1465\\\\u20131498), Brussel, ca. 1475-1476, brons\\\", \\\"description\\\": \\\"In 1465 overleed Isabella van Bourbon, de vrouw van Karel de Stoute, hertog van Bourgondi\\\\u00eb. Hun dochter Maria liet voor haar een praalgraf maken in de Michielsabdij in Antwerpen. Om de tombe stonden 24 beeldjes van rouwende familieleden en voorouders, zogenoemde pleurants. Ze symboliseerden het belang van de Bourgondische dynastie. Deze tien bleven als enige bewaard.\\\", \\\"title\\\": \\\"Tien pleurants van het praalgraf van Isabella van Bourbon\\\"}, \\\"makers\\\": [{\\\"dateOfBirthPrecision\\\": null, \\\"dateOfDeath\\\": \\\"1498-06\\\", \\\"name\\\": \\\"Renier van Thienen\\\", \\\"roles\\\": [\\\"bronsgieter\\\"], \\\"dateOfDeathPrecision\\\": \\\"voor\\\", \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Thienen, Renier van\\\", \\\"placeOfDeath\\\": null, \\\"dateOfBirth\\\": null, \\\"qualification\\\": \\\"toegeschreven aan\\\", \\\"nationality\\\": \\\"Vlaams\\\", \\\"productionPlaces\\\": [\\\"Brussel\\\"], \\\"placeOfBirth\\\": \\\"Tienen\\\", \\\"biography\\\": null, \\\"occupation\\\": [\\\"gieter\\\", \\\"bronsgieter\\\"]}, {\\\"dateOfBirthPrecision\\\": \\\"in of voor\\\", \\\"dateOfDeath\\\": \\\"1520\\\", \\\"name\\\": \\\"Jan Borman (II)\\\", \\\"roles\\\": [\\\"beeldhouwer\\\"], \\\"dateOfDeathPrecision\\\": \\\"ca.\\\", \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Borman, Jan (II)\\\", \\\"placeOfDeath\\\": \\\"Brussel\\\", \\\"dateOfBirth\\\": \\\"1479\\\", \\\"qualification\\\": \\\"toegeschreven aan\\\", \\\"nationality\\\": \\\"Vlaams\\\", \\\"productionPlaces\\\": [\\\"Brussel\\\"], \\\"placeOfBirth\\\": null, \\\"biography\\\": null, \\\"occupation\\\": [\\\"beeldhouwer\\\", \\\"steenhouwer\\\"]}, {\\\"dateOfBirthPrecision\\\": null, \\\"dateOfDeath\\\": null, \\\"name\\\": \\\"Jean Delemer\\\", \\\"roles\\\": [\\\"beeldhouwer\\\"], \\\"dateOfDeathPrecision\\\": null, \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Delemer, Jean\\\", \\\"placeOfDeath\\\": null, \\\"dateOfBirth\\\": null, \\\"qualification\\\": \\\"verworpen toeschrijving\\\", \\\"nationality\\\": null, \\\"productionPlaces\\\": [\\\"Doornik\\\"], \\\"placeOfBirth\\\": null, \\\"biography\\\": null, \\\"occupation\\\": [\\\"beeldhouwer\\\"]}, {\\\"dateOfBirthPrecision\\\": null, \\\"dateOfDeath\\\": null, \\\"name\\\": \\\"Jean Delemer\\\", \\\"roles\\\": [\\\"beeldhouwer\\\"], \\\"dateOfDeathPrecision\\\": null, \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Delemer, Jean\\\", \\\"placeOfDeath\\\": null, \\\"dateOfBirth\\\": null, \\\"qualification\\\": \\\"verworpen toeschrijving\\\", \\\"nationality\\\": null, \\\"productionPlaces\\\": [\\\"Brussel\\\"], \\\"placeOfBirth\\\": null, \\\"biography\\\": null, \\\"occupation\\\": [\\\"beeldhouwer\\\"]}], \\\"plaqueDescriptionEnglish\\\": null, \\\"techniques\\\": [], \\\"principalMaker\\\": \\\"anoniem\\\", \\\"labelText\\\": null, \\\"title\\\": \\\"Tien pleurants van het praalgraf van Isabella van Bourbon\\\", \\\"id\\\": \\\"nl-BK-AM-33\\\", \\\"materials\\\": [\\\"brons\\\"], \\\"dating\\\": {\\\"late\\\": null, 
\\\"earlyPrecision\\\": \\\"ca.\\\", \\\"yearLate\\\": 1476, \\\"period\\\": 15, \\\"early\\\": null, \\\"yearEarly\\\": 1475, \\\"latePrecision\\\": \\\"ca.\\\", \\\"year\\\": 1475}, \\\"objectNumber\\\": \\\"BK-AM-33\\\", \\\"hasImage\\\": true, \\\"associations\\\": [], \\\"classification\\\": {\\\"iconClassIdentifier\\\": [\\\"42E224\\\", \\\"46A12\\\", \\\"41D2(+81)\\\", \\\"41D2(+82)\\\"], \\\"places\\\": [], \\\"people\\\": [\\\"Bourbon, Isabella van\\\"], \\\"objectNumbers\\\": [\\\"BK-AM-33\\\"], \\\"iconClassDescription\\\": [\\\"'pleurants'\\\", \\\"nobility and patriciate; chivalry, knighthood\\\", \\\"clothes, costume (+ men's clothes)\\\", \\\"clothes, costume (+ women's clothes)\\\"], \\\"periods\\\": [\\\"14761476\\\"], \\\"motifs\\\": [], \\\"events\\\": []}, \\\"description\\\": \\\"Graffiguren, tien, zogenaamde rouwdragers of plorannen, afkomstig van het praalgraf van Isabella van Bourbon.\\\", \\\"colorsWithNormalization\\\": [{\\\"normalizedHex\\\": \\\"#F6ECF3\\\", \\\"originalHex\\\": \\\"#D4D3D2\\\"}, {\\\"normalizedHex\\\": \\\"#B5BFCC\\\", \\\"originalHex\\\": \\\" #ACABA8\\\"}, {\\\"normalizedHex\\\": \\\"#000000\\\", \\\"originalHex\\\": \\\" #191813\\\"}, {\\\"normalizedHex\\\": \\\"#737C84\\\", \\\"originalHex\\\": \\\" #565249\\\"}, {\\\"normalizedHex\\\": \\\"#000000\\\", \\\"originalHex\\\": \\\" #39362F\\\"}, {\\\"normalizedHex\\\": \\\"#737C84\\\", \\\"originalHex\\\": \\\" #8E8D89\\\"}, {\\\"normalizedHex\\\": \\\"#737C84\\\", \\\"originalHex\\\": \\\" #706E69\\\"}], \\\"normalized32Colors\\\": [\\\"#F6ECF3\\\", \\\" #000000\\\", \\\" #737C84\\\", \\\" #B5BFCC\\\"], \\\"longTitle\\\": \\\"Tien pleurants van het praalgraf van Isabella van Bourbon, toegeschreven aan Renier van Thienen, ca. 1475 - ca. 1476\\\", \\\"copyrightHolder\\\": null, \\\"showImage\\\": true, \\\"subTitle\\\": \\\"\\\", \\\"principalOrFirstMaker\\\": \\\"toegeschreven aan Renier van Thienen\\\", \\\"dimensions\\\": [], \\\"inscriptions\\\": [], \\\"language\\\": \\\"nl\\\", \\\"artistRole\\\": null, \\\"documentation\\\": [\\\"Inzoomer object op zaal, 2013 (Nederlands/English).\\\", \\\"Frits Scholten, 'Isabella's pleurants : een Bourgondische stamboom in beeld', Rijksmuseum Kunstkrant 33 (2007), nr. 2, p. 10-13, afb..\\\", \\\"K.G. van Acker, 'Iconografische beschouwingen in verband met de 16de eeuwse gegraveerde 'portretten' der graven van Vlaanderen', Oud Holland LXXXIII (1968), p. 95-115, afb. 18, 29.\\\", \\\"F.W.S. van Thienen, \\\\\\\"Drie beeldjes van een graftombe\\\\\\\", Openbaar Kunstbezit (19XX) p. 9a-b.\\\", \\\"J. Leeuwenberg, 'Nogmaals de Amsterdamse gravenbeeldjes', Oud Holland LXXIII (1958), p. 156-159, afb. 1f.\\\", \\\"R. van Luttervelt, 'Bijdragen tot de Iconographie van de Graven van Holland, naar aanleiding van de beelden uit de Amsterdamse vierschaar', Oud Holland LXXII (1957), p. 73 e.v., 141 e.v., afb. 29.\\\", \\\"D. Roggen, 'Prae-sluteriaanse, sluteriaanse, post-sluteriaanse Nederlandse sculptuur', Gentse Bijdragen tot de Kunstgeschiedenis XVI (1955/56), p. 179, afb. 39.\\\", \\\"J. Leeuwenberg, 'De tien bronzen 'plorannen' in het Rijksmuseum te Amsterdam, hun herkomst en de voorbeelden waaraan zij zijn ontleend', Gentse Bijdragen tot de Kunstgeschiedenis XIII (1951), p. 13-57.\\\", \\\"C.M.A.A. Lindeman, 'De datering, herkomst en identificatie der 'Gravenbeeldjes' van Jacques de G\\\\u00e9rines', Oud Holland LVIII (1941), p. 49-57, 97-105, p. 161-186, 93-219, afb. 8, 10, 12, 22, 27, 29.\\\", \\\"R. 
Gavelle, 'Le tombeau de Louis de Male', Societ\\\\u00e9 des sciences, de l'griculture et des arts de Lille, 1935.\\\", \\\"J. Six, 'De 'Gravenbeeldjes' te Amsterdam', Onze Kunst XL (1922), p. 65-84.\\\", \\\"M. Devigne, 'Een nieuw document voor de geschiedenis der beeldjes van Jan van Gerines in het Nederlansch Museum te Amsterdam', Onze Kunst XXXIX (1922), p. 49-76.\\\", \\\"F. Donnet, 'Les aventures posthumes d'une princesse bourguignonne', Annales de l'Acad\\\\u00e9mie Royale de l 'Arch\\\\u00e9ologie de Belgique III (1919), p. 44-80.\\\", \\\"F. Schmidt-Degener, 'De 'Zeven Deugden' van Johannes van Eyck in het Nederlandsch Museum te Amsterdam', Onze Kunst XI (1907), p. 18-32, 70-82.\\\", \\\"F. Schmidt-Degener, 'Rembrandt imitateur de Claus Sluter et de Jean van Eyck, Gazette des Beaux Arts XXXVI (1906), p. 89-108.\\\", \\\"J. Destr\\\\u00e9e, 'Het oude koperwerk op de tentoonstelling te Dinant en te Middelburg', Onze Kunst IV (1905), p. 44, afb. 7-16.\\\", \\\"J. Destr\\\\u00e9e, 'Etude sur la sculpture braban\\\\u00e7onne au Moyen Age IV, Annales de la Soci\\\\u00e9t\\\\u00e9 d'Archeologie de Bruxelles XIII (1899), p. 310-330, pl. XVI-XVII.\\\", \\\"J. Six, 'Les bronzes de Jacques de Gerines', Gazette des Beaux Arts XV (1896), p. 388-404.\\\", \\\"N. de Roever, 'De Rarieteiten-kamer verbonden aan 't Amsterdamsche Gemeente-archief', Oud Holland VI (1888), p. 209.\\\", \\\"F.H. Mertens en E. Buschmann, 'Annales Antverpienses ab urbe condita ad annum MDCC collecti ex ipsius civitatis monumentis ..., auctore Daniele Papebrochio II, Antwerpen, 1845, p. 67-69.\\\", \\\"D. papebrochius, Acta Sanctorum Junii I, Antwerpen, 1695, p. 943.\\\"], \\\"physicalMedium\\\": \\\"brons\\\", \\\"catRefRPK\\\": [], \\\"normalizedColors\\\": [\\\"#D3D3D3\\\", \\\" #696969\\\", \\\" #A9A9A9\\\", \\\" #000000\\\", \\\" #808080\\\"], \\\"acquisition\\\": {\\\"date\\\": \\\"1887-01-01T00:00:00Z\\\", \\\"method\\\": null, \\\"creditLine\\\": \\\"Bruikleen van de gemeente Amsterdam\\\"}, \\\"plaqueDescriptionDutch\\\": null}\",\n- \"content_type\": \"application/json\"\n- },\n- \"title\": \"Tien pleurants van het praalgraf van Isabella van Bourbon\",\n- \"date_granularity\": 4,\n- \"meta\": {\n- \"processing_started\": \"2015-01-13T22:22:03.549390\",\n- \"processing_finished\": \"2015-01-13T22:22:03.553327\",\n- \"rights\": \"Creative Commons Zero\",\n- \"collection\": \"Rijksmuseum\",\n- \"original_object_id\": \"BK-AM-33\",\n- \"source_id\": \"rijksmuseum\",\n- \"original_object_urls\": {\n- \"json\": \"https://www.rijksmuseum.nl/api/nl/collection/BK-AM-33?format=json\",\n- \"html\": \"https://www.rijksmuseum.nl/nl/collectie/BK-AM-33\"\n- }\n- },\n- \"combined_index_data\": \"{\\\"date_granularity\\\": 4, \\\"meta\\\": {\\\"processing_started\\\": \\\"2015-01-13T22:22:03.549390\\\", \\\"rights\\\": \\\"Creative Commons Zero\\\", \\\"collection\\\": \\\"Rijksmuseum\\\", \\\"original_object_id\\\": \\\"BK-AM-33\\\", \\\"source_id\\\": \\\"rijksmuseum\\\", \\\"original_object_urls\\\": {\\\"json\\\": \\\"https://www.rijksmuseum.nl/api/nl/collection/BK-AM-33?format=json\\\", \\\"html\\\": \\\"https://www.rijksmuseum.nl/nl/collectie/BK-AM-33\\\"}}, \\\"description\\\": \\\"Graffiguren, tien, zogenaamde rouwdragers of plorannen, afkomstig van het praalgraf van Isabella van Bourbon.\\\", \\\"title\\\": \\\"Tien pleurants van het praalgraf van Isabella van Bourbon\\\", \\\"date\\\": \\\"1475-01-01T00:00:00\\\", \\\"all_text\\\": \\\"Graffiguren Isabella van Bourbon Graffiguren, tien, zogenaamde rouwdragers of 
plorannen, afkomstig van het praalgraf van Isabella van Bourbon. graffiguur beeldhouwwerken Renier van Thienen Tienen gieter bronsgieter bronsgieter Vlaams Brussel toegeschreven aan Jan Borman (II) Brussel beeldhouwer steenhouwer beeldhouwer Vlaams Brussel toegeschreven aan Jean Delemer beeldhouwer beeldhouwer Doornik verworpen toeschrijving Jean Delemer beeldhouwer beeldhouwer Brussel verworpen toeschrijving Bruikleen van de gemeente Amsterdam brons Brussel Brussel Doornik Brussel graffiguur\\\", \\\"media_urls\\\": [{\\\"url\\\": \\\"http://localhost:5000/v0/resolve/74d7b006e2d1fbcfc41fccfbcf3b068ccea78ce0\\\", \\\"width\\\": 2500, \\\"height\\\": 1328, \\\"content_type\\\": \\\"image/jpeg\\\", \\\"original_url\\\": \\\"http://lh5.ggpht.com/LE3ggDaLAIwTIOknJm06U2Y_qNffQ0133bWQxnWfDtzi12uWE85MYWtYwvo8eQZx1tc-U-vH_-2ZIoDGYUUrs8YklQ=s0\\\"}]}\",\n- \"media_urls\": [\n- {\n- \"url\": \"http://localhost:5000/v0/resolve/74d7b006e2d1fbcfc41fccfbcf3b068ccea78ce0\",\n- \"width\": 2500,\n- \"original_url\": \"http://lh5.ggpht.com/LE3ggDaLAIwTIOknJm06U2Y_qNffQ0133bWQxnWfDtzi12uWE85MYWtYwvo8eQZx1tc-U-vH_-2ZIoDGYUUrs8YklQ=s0\",\n- \"content_type\": \"image/jpeg\",\n- \"height\": 1328\n- }\n- ]\n-}\n\\ No newline at end of file\ndiff --git a/tests/ocd_frontend/test_data/ori_test_collection_index/items_3.json b/tests/ocd_frontend/test_data/ori_test_collection_index/items_3.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_collection_index/items_3.json\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-{\n- \"date\": \"1648-01-01T00:00:00\",\n- \"description\": \"De schuttersmaaltijd in de Voetboogdoelen of St. Jorisdoelen te Amsterdam ter viering van het sluiten van de vrede van Munster, 18 juni 1648. Voorgesteld zijn: kapitein Cornelis Jansz Witsen (met de zilveren drinkhoorn), luitenant Johan Oetgens van Waveren (die zijn hand schudt), de vaandrig Jacob Banningh (zittend naast de grote trommel), Dirck Claesz Thoveling en Thomas Hartog (sergeanten), Pieter van Hoorn, Willem Pietersz van der Voort, Adriaen Dirck Sparwer, Hendrick Calaber, Govert van der Mij, Johannes Calaber, Benedictus Schaeck, Jan Maes, Jacob van Diemen, Jan van Ommeren, Isaac Ooyens, Gerrit Pietersz van Anstenraadt, Herman Teunisz de Kluyter, Andries van Anstenraadt, Christoffel Poock, Hendrick Dommer Wz, Paulus Hennekijn, Lambregt van den Bos en Willem (de trommelslager). Op de grote trommel hangt een papier met een gedicht van Jan Vos. Door de openstaande ramen is de gevel van de brouwerij 'het Lam' aan de Singel zichtbaar. Rechts brengt een vrouwen een kalkoenpastei binnen. Op tafel staan tinnen borden, roemers en andere glazen. 
Links staat op de vloer een grote metalen koeler met een wijnvat.\",\n- \"source_data\": {\n- \"data\": \"{\\\"webImage\\\": {\\\"url\\\": \\\"http://lh5.ggpht.com/dMaR9T0-0j9erOeI3dFRnTt4L7UeL1qtt-IA1_Kj-WDLwt5RHFc45I5n6aluaMFL1b8gZIYIIYsHavL-FWpl-gOQpw=s0\\\", \\\"height\\\": 1034, \\\"width\\\": 2500, \\\"offsetPercentageX\\\": 50, \\\"offsetPercentageY\\\": 50, \\\"guid\\\": \\\"a7e3db70-7efc-42d8-8ba5-e810bec9b0fc\\\"}, \\\"principalMakers\\\": [{\\\"dateOfBirthPrecision\\\": null, \\\"dateOfDeath\\\": \\\"1670-12-16\\\", \\\"name\\\": \\\"Bartholomeus van der Helst\\\", \\\"roles\\\": [\\\"schilder\\\"], \\\"dateOfDeathPrecision\\\": null, \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Helst, Bartholomeus van der\\\", \\\"placeOfDeath\\\": \\\"Amsterdam\\\", \\\"dateOfBirth\\\": \\\"1613\\\", \\\"qualification\\\": null, \\\"nationality\\\": \\\"Noord-Nederlands\\\", \\\"productionPlaces\\\": [], \\\"placeOfBirth\\\": \\\"Haarlem\\\", \\\"biography\\\": null, \\\"occupation\\\": [\\\"tekenaar\\\", \\\"schilder\\\"]}], \\\"historicalPersons\\\": [\\\"Witsen, Cornelis\\\"], \\\"exhibitions\\\": [], \\\"links\\\": {\\\"search\\\": \\\"https://www.rijksmuseum.nl/api/nl/collection\\\"}, \\\"objectTypes\\\": [\\\"schilderij\\\"], \\\"priref\\\": \\\"8662\\\", \\\"objectCollection\\\": [\\\"schilderijen\\\"], \\\"physicalProperties\\\": [], \\\"productionPlaces\\\": [], \\\"colors\\\": [\\\"#120F09\\\", \\\" #4E402C\\\", \\\" #856E47\\\", \\\" #9F8E6A\\\", \\\" #ABA892\\\", \\\" #425763\\\", \\\" #C7C5B1\\\"], \\\"titles\\\": [\\\"De schuttersmaaltijd in de Voetboog- of St. Jorisdoelen te Amsterdam ter viering van het sluiten van de vrede van Munster, 18 juni 1648\\\"], \\\"scLabelLine\\\": \\\"Bartholomeus van der Helst (1613\\\\u20131670), olieverf op doek, 1648\\\", \\\"label\\\": {\\\"date\\\": \\\"2013-04-08\\\", \\\"notes\\\": null, \\\"makerLine\\\": \\\"Bartholomeus van der Helst (1613\\\\u20131670), olieverf op doek, 1648\\\", \\\"description\\\": \\\"18 juni 1648: het is feest bij de Amsterdamse voetboogschutters. De aanleiding is de Vrede van Munster, het einde van de oorlog met Spanje. De aanvoerders van de schutterij schudden elkaar de hand als vredesteken, de drinkhoorn doet de ronde. 
De gewapende macht van Amsterdam is blij dat de wapens voortaan rusten, zo blijkt uit het gedicht op de trommel.\\\", \\\"title\\\": \\\"Schuttersmaaltijd ter viering van de Vrede van Munster\\\"}, \\\"makers\\\": [{\\\"dateOfBirthPrecision\\\": null, \\\"dateOfDeath\\\": \\\"1670-12-16\\\", \\\"name\\\": \\\"Bartholomeus van der Helst\\\", \\\"roles\\\": [\\\"schilder\\\"], \\\"dateOfDeathPrecision\\\": null, \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Helst, Bartholomeus van der\\\", \\\"placeOfDeath\\\": \\\"Amsterdam\\\", \\\"dateOfBirth\\\": \\\"1613\\\", \\\"qualification\\\": null, \\\"nationality\\\": \\\"Noord-Nederlands\\\", \\\"productionPlaces\\\": [], \\\"placeOfBirth\\\": \\\"Haarlem\\\", \\\"biography\\\": null, \\\"occupation\\\": [\\\"tekenaar\\\", \\\"schilder\\\"]}], \\\"plaqueDescriptionEnglish\\\": null, \\\"techniques\\\": [], \\\"principalMaker\\\": \\\"Bartholomeus van der Helst\\\", \\\"labelText\\\": null, \\\"title\\\": \\\"Schuttersmaaltijd ter viering van de Vrede van Munster\\\", \\\"id\\\": \\\"nl-SK-C-2\\\", \\\"materials\\\": [\\\"doek\\\", \\\"olieverf\\\"], \\\"dating\\\": {\\\"late\\\": null, \\\"earlyPrecision\\\": null, \\\"yearLate\\\": 1648, \\\"period\\\": 17, \\\"early\\\": null, \\\"yearEarly\\\": 1648, \\\"latePrecision\\\": null, \\\"year\\\": 1648}, \\\"objectNumber\\\": \\\"SK-C-2\\\", \\\"hasImage\\\": true, \\\"associations\\\": [], \\\"classification\\\": {\\\"iconClassIdentifier\\\": [\\\"41C5\\\", \\\"43A2\\\", \\\"45(+26)\\\", \\\"41C323\\\", \\\"45D1\\\"], \\\"places\\\": [\\\"AmsterdamAmsterdam\\\"], \\\"people\\\": [\\\"Witsen, Cornelis\\\"], \\\"objectNumbers\\\": [\\\"SK-C-2\\\"], \\\"iconClassDescription\\\": [\\\"celebration meal, feast, banquet\\\", \\\"private festivities, merry company\\\", \\\"warfare; military affairs (+ citizen soldiery, civil guard, citizen militia)\\\", \\\"glass, rummer\\\", \\\"(military) flags and standards\\\"], \\\"periods\\\": [\\\"1648-06-181648-06-18\\\"], \\\"motifs\\\": [], \\\"events\\\": [\\\"Vrede van M\\\\u00fcnster\\\", \\\"Viering van de Vrede van Munster te Amsterdam\\\"]}, \\\"description\\\": \\\"De schuttersmaaltijd in de Voetboogdoelen of St. Jorisdoelen te Amsterdam ter viering van het sluiten van de vrede van Munster, 18 juni 1648. Voorgesteld zijn: kapitein Cornelis Jansz Witsen (met de zilveren drinkhoorn), luitenant Johan Oetgens van Waveren (die zijn hand schudt), de vaandrig Jacob Banningh (zittend naast de grote trommel), Dirck Claesz Thoveling en Thomas Hartog (sergeanten), Pieter van Hoorn, Willem Pietersz van der Voort, Adriaen Dirck Sparwer, Hendrick Calaber, Govert van der Mij, Johannes Calaber, Benedictus Schaeck, Jan Maes, Jacob van Diemen, Jan van Ommeren, Isaac Ooyens, Gerrit Pietersz van Anstenraadt, Herman Teunisz de Kluyter, Andries van Anstenraadt, Christoffel Poock, Hendrick Dommer Wz, Paulus Hennekijn, Lambregt van den Bos en Willem (de trommelslager). Op de grote trommel hangt een papier met een gedicht van Jan Vos. Door de openstaande ramen is de gevel van de brouwerij 'het Lam' aan de Singel zichtbaar. Rechts brengt een vrouwen een kalkoenpastei binnen. Op tafel staan tinnen borden, roemers en andere glazen. 
Links staat op de vloer een grote metalen koeler met een wijnvat.\\\", \\\"colorsWithNormalization\\\": [{\\\"normalizedHex\\\": \\\"#000000\\\", \\\"originalHex\\\": \\\"#120F09\\\"}, {\\\"normalizedHex\\\": \\\"#737C84\\\", \\\"originalHex\\\": \\\" #4E402C\\\"}, {\\\"normalizedHex\\\": \\\"#B35A1F\\\", \\\"originalHex\\\": \\\" #856E47\\\"}, {\\\"normalizedHex\\\": \\\"#E0CC91\\\", \\\"originalHex\\\": \\\" #9F8E6A\\\"}, {\\\"normalizedHex\\\": \\\"#E0CC91\\\", \\\"originalHex\\\": \\\" #ABA892\\\"}, {\\\"normalizedHex\\\": \\\"#2F4F4F\\\", \\\"originalHex\\\": \\\" #425763\\\"}, {\\\"normalizedHex\\\": \\\"#FBF6E1\\\", \\\"originalHex\\\": \\\" #C7C5B1\\\"}], \\\"normalized32Colors\\\": [\\\"#000000\\\", \\\" #737C84\\\", \\\" #B35A1F\\\", \\\" #E0CC91\\\", \\\" #2F4F4F\\\", \\\" #FBF6E1\\\"], \\\"longTitle\\\": \\\"Schuttersmaaltijd ter viering van de Vrede van Munster, Bartholomeus van der Helst, 1648\\\", \\\"copyrightHolder\\\": null, \\\"showImage\\\": true, \\\"subTitle\\\": \\\"h 232cm \\\\u00d7 b 547cm\\\", \\\"principalOrFirstMaker\\\": \\\"Bartholomeus van der Helst\\\", \\\"dimensions\\\": [{\\\"part\\\": null, \\\"type\\\": \\\"hoogte\\\", \\\"value\\\": \\\"232\\\", \\\"unit\\\": \\\"cm\\\"}, {\\\"part\\\": null, \\\"type\\\": \\\"breedte\\\", \\\"value\\\": \\\"547\\\", \\\"unit\\\": \\\"cm\\\"}], \\\"inscriptions\\\": [\\\"signatuur en datum\\\", \\\"Bartholomeus Vander Helst, fecit A\\\\u00ba 1648.\\\", \\\"inscriptie\\\", \\\"Belloone walgt van Bloedt / ja Mars vervloeckt het daveren / Van't zwangere metaal, / en 't zwaardt bemint de schee: / Dies biedt de dapp're Wits / aan d'eedele van Waveren / Op 't eeuwige verbondt, / den hooren van de Vree.\\\"], \\\"language\\\": \\\"nl\\\", \\\"artistRole\\\": null, \\\"documentation\\\": [\\\"Inzoomer object op zaal, 2013 (Nederlands/English).\\\", \\\"Jaarverslag Rijksmuseum (2011), afb. p. 86, afb. p. 96.\\\", \\\"Judith van Gent, 'Een 'vleyend penceel' : Bertholomeus van der Helst : begaafd schilder van de Amsterdamse elite', Ons Amsterdam 63 (2011) nr. 10, p. 408-409.\\\", \\\"Rijksmuseum Kunstkrant 33 (2007) sept.-okt., nr. 5, p. 9\\\", \\\"Rijksmuseum Amsterdam Jaarverslag 2006, p. 53\\\", \\\"Annual report : Rijksmuseum Amsterdam (1999), p. 38-39, afb.\\\", \\\"P. Jeroense, 'Govert Flinck. Eine K\\\\u00fcnstlerbiographie', Niederdeutsche Beitrage zur Kunstgeschichte 36 (1997), p. 90.\\\", \\\"J. Bos, 'Capitaele Stucken', Jaarboek Amstelodamum 88 (1996), p. 65-102.\\\", \\\"R. Jellema, 'De Schuttersmaaltijd door Van der Helst in het Teylers Museum?', Teylers Museum Magazijn 16 (1987), p. 1-4.\\\", \\\"R.B.F. van der Sloot en J.B. Kist, 'Iets over de degenvesten in Hoorn rond het jaar 1650', Armamentaria 5 (1970), p. 16.\\\", \\\"A.-E. Theuerkauff-Liederwald, 'Die niederl\\\\u00e4ndischen Gl\\\\u00e4ser aus dem Gem\\\\u00e4lde von Bathelom\\\\u00e4us van der Helst 'Sch\\\\u00fctsenmahlzeit am 18. Juni 1648 in der St. Jorisdoelen zu Amsterdam'', Rijksmuseum Amsterdam, ICOM glass Congress, Brussel 1965, paper 266\\\", \\\"A.J de Bull, 'Van der Helst - Kaiser', Kunstkronijk (1856), p. 59-60.\\\", \\\"Documentatiemap: aantekeningen R. 
van Luttervelt.\\\"], \\\"physicalMedium\\\": \\\"olieverf op doek\\\", \\\"catRefRPK\\\": [], \\\"normalizedColors\\\": [\\\"#000000\\\", \\\" #696969\\\", \\\" #D2B48C\\\", \\\" #A9A9A9\\\", \\\" #2F4F4F\\\", \\\" #C0C0C0\\\"], \\\"acquisition\\\": {\\\"date\\\": \\\"1808-01-01T00:00:00Z\\\", \\\"method\\\": null, \\\"creditLine\\\": \\\"Bruikleen van de gemeente Amsterdam\\\"}, \\\"plaqueDescriptionDutch\\\": \\\"Schutters vormden de gewapende macht van de stad. Zij waren vrijwilligers. In Amsterdam had elk stadsdeel een eigen schutterij met een eigen verenigingsgebouw. In de 17de eeuw werden die gebouwen groter en mooier. De interieurs werden versierd met groepsportretten van de leden. In 1648 legde Van der Helst Amsterdamse schutters vast in hun verenigingsgebouw, waar zij de vrede tussen Nederland en Spanje vierden.\\\"}\",\n- \"content_type\": \"application/json\"\n- },\n- \"title\": \"Schuttersmaaltijd ter viering van de Vrede van Munster\",\n- \"date_granularity\": 4,\n- \"meta\": {\n- \"processing_started\": \"2015-01-13T22:22:01.865630\",\n- \"processing_finished\": \"2015-01-13T22:22:02.036465\",\n- \"rights\": \"Creative Commons Zero\",\n- \"collection\": \"Rijksmuseum\",\n- \"original_object_id\": \"SK-C-2\",\n- \"source_id\": \"rijksmuseum\",\n- \"original_object_urls\": {\n- \"json\": \"https://www.rijksmuseum.nl/api/nl/collection/SK-C-2?format=json\",\n- \"html\": \"https://www.rijksmuseum.nl/nl/collectie/SK-C-2\"\n- }\n- },\n- \"authors\": [\n- \"Bartholomeus van der Helst\"\n- ],\n- \"combined_index_data\": \"{\\\"date_granularity\\\": 4, \\\"media_urls\\\": [{\\\"url\\\": \\\"http://localhost:5000/v0/resolve/02cf328932c52e2086454f29478c45ae3c66fd49\\\", \\\"width\\\": 2500, \\\"height\\\": 1034, \\\"content_type\\\": \\\"image/jpeg\\\", \\\"original_url\\\": \\\"http://lh5.ggpht.com/dMaR9T0-0j9erOeI3dFRnTt4L7UeL1qtt-IA1_Kj-WDLwt5RHFc45I5n6aluaMFL1b8gZIYIIYsHavL-FWpl-gOQpw=s0\\\"}], \\\"meta\\\": {\\\"processing_started\\\": \\\"2015-01-13T22:22:01.865630\\\", \\\"rights\\\": \\\"Creative Commons Zero\\\", \\\"collection\\\": \\\"Rijksmuseum\\\", \\\"original_object_id\\\": \\\"SK-C-2\\\", \\\"source_id\\\": \\\"rijksmuseum\\\", \\\"original_object_urls\\\": {\\\"json\\\": \\\"https://www.rijksmuseum.nl/api/nl/collection/SK-C-2?format=json\\\", \\\"html\\\": \\\"https://www.rijksmuseum.nl/nl/collectie/SK-C-2\\\"}}, \\\"description\\\": \\\"De schuttersmaaltijd in de Voetboogdoelen of St. Jorisdoelen te Amsterdam ter viering van het sluiten van de vrede van Munster, 18 juni 1648. Voorgesteld zijn: kapitein Cornelis Jansz Witsen (met de zilveren drinkhoorn), luitenant Johan Oetgens van Waveren (die zijn hand schudt), de vaandrig Jacob Banningh (zittend naast de grote trommel), Dirck Claesz Thoveling en Thomas Hartog (sergeanten), Pieter van Hoorn, Willem Pietersz van der Voort, Adriaen Dirck Sparwer, Hendrick Calaber, Govert van der Mij, Johannes Calaber, Benedictus Schaeck, Jan Maes, Jacob van Diemen, Jan van Ommeren, Isaac Ooyens, Gerrit Pietersz van Anstenraadt, Herman Teunisz de Kluyter, Andries van Anstenraadt, Christoffel Poock, Hendrick Dommer Wz, Paulus Hennekijn, Lambregt van den Bos en Willem (de trommelslager). Op de grote trommel hangt een papier met een gedicht van Jan Vos. Door de openstaande ramen is de gevel van de brouwerij 'het Lam' aan de Singel zichtbaar. Rechts brengt een vrouwen een kalkoenpastei binnen. Op tafel staan tinnen borden, roemers en andere glazen. 
Links staat op de vloer een grote metalen koeler met een wijnvat.\\\", \\\"title\\\": \\\"Schuttersmaaltijd ter viering van de Vrede van Munster\\\", \\\"date\\\": \\\"1648-01-01T00:00:00\\\", \\\"all_text\\\": \\\"De schuttersmaaltijd in de Voetboog- of St. Jorisdoelen te Amsterdam ter viering van het sluiten van de vrede van Munster, 18 juni 1648 De schuttersmaaltijd in de Voetboogdoelen of St. Jorisdoelen te Amsterdam ter viering van het sluiten van de vrede van Munster, 18 juni 1648. Voorgesteld zijn: kapitein Cornelis Jansz Witsen (met de zilveren drinkhoorn), luitenant Johan Oetgens van Waveren (die zijn hand schudt), de vaandrig Jacob Banningh (zittend naast de grote trommel), Dirck Claesz Thoveling en Thomas Hartog (sergeanten), Pieter van Hoorn, Willem Pietersz van der Voort, Adriaen Dirck Sparwer, Hendrick Calaber, Govert van der Mij, Johannes Calaber, Benedictus Schaeck, Jan Maes, Jacob van Diemen, Jan van Ommeren, Isaac Ooyens, Gerrit Pietersz van Anstenraadt, Herman Teunisz de Kluyter, Andries van Anstenraadt, Christoffel Poock, Hendrick Dommer Wz, Paulus Hennekijn, Lambregt van den Bos en Willem (de trommelslager). Op de grote trommel hangt een papier met een gedicht van Jan Vos. Door de openstaande ramen is de gevel van de brouwerij 'het Lam' aan de Singel zichtbaar. Rechts brengt een vrouwen een kalkoenpastei binnen. Op tafel staan tinnen borden, roemers en andere glazen. Links staat op de vloer een grote metalen koeler met een wijnvat. schilderij schilderijen Bartholomeus van der Helst Haarlem Amsterdam tekenaar schilder schilder Noord-Nederlands Schutters vormden de gewapende macht van de stad. Zij waren vrijwilligers. In Amsterdam had elk stadsdeel een eigen schutterij met een eigen verenigingsgebouw. In de 17de eeuw werden die gebouwen groter en mooier. De interieurs werden versierd met groepsportretten van de leden. In 1648 legde Van der Helst Amsterdamse schutters vast in hun verenigingsgebouw, waar zij de vrede tussen Nederland en Spanje vierden. Bruikleen van de gemeente Amsterdam doek olieverf schilderij\\\", \\\"authors\\\": [\\\"Bartholomeus van der Helst\\\"]}\",\n- \"media_urls\": [\n- {\n- \"url\": \"http://localhost:5000/v0/resolve/02cf328932c52e2086454f29478c45ae3c66fd49\",\n- \"width\": 2500,\n- \"original_url\": \"http://lh5.ggpht.com/dMaR9T0-0j9erOeI3dFRnTt4L7UeL1qtt-IA1_Kj-WDLwt5RHFc45I5n6aluaMFL1b8gZIYIIYsHavL-FWpl-gOQpw=s0\",\n- \"content_type\": \"image/jpeg\",\n- \"height\": 1034\n- }\n- ]\n-}\n\\ No newline at end of file\ndiff --git a/tests/ocd_frontend/test_data/ori_test_collection_index/items_4.json b/tests/ocd_frontend/test_data/ori_test_collection_index/items_4.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_collection_index/items_4.json\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-{\n- \"date\": \"1668-01-01T00:00:00\",\n- \"description\": \"De molen bij Wijk bij Duurstede. Links de rivier de Lek met een bootje, rechts de molen nabij de oever. In de verte de torens van kasteel Duurstede, rechts de toren van de Sint-Janskerk. 
Langs de oever lopen enkele vrouwen.\",\n- \"source_data\": {\n- \"data\": \"{\\\"webImage\\\": {\\\"url\\\": \\\"http://lh6.ggpht.com/1gH99j2GD85SW4r3CA18uwTDuRioMYTNZlH5N2xuZsbh_4QUnzxettm6WqCsLa_ciGCzhWwLzF35QtHEpz4M9LWv_yvl=s0\\\", \\\"height\\\": 2376, \\\"width\\\": 2880, \\\"offsetPercentageX\\\": 50, \\\"offsetPercentageY\\\": 50, \\\"guid\\\": \\\"61d85969-9cb6-41ce-b216-0f19e06e198a\\\"}, \\\"principalMakers\\\": [{\\\"dateOfBirthPrecision\\\": \\\"ca.\\\", \\\"dateOfDeath\\\": \\\"1682-03-14\\\", \\\"name\\\": \\\"Jacob Isaacksz. van Ruisdael\\\", \\\"roles\\\": [\\\"schilder\\\"], \\\"dateOfDeathPrecision\\\": null, \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Ruisdael, Jacob Isaacksz. van\\\", \\\"placeOfDeath\\\": \\\"Amsterdam\\\", \\\"dateOfBirth\\\": \\\"1629-06\\\", \\\"qualification\\\": null, \\\"nationality\\\": \\\"Noord-Nederlands\\\", \\\"productionPlaces\\\": [], \\\"placeOfBirth\\\": \\\"Haarlem\\\", \\\"biography\\\": null, \\\"occupation\\\": [\\\"prentmaker\\\", \\\"tekenaar\\\", \\\"schilder\\\"]}], \\\"historicalPersons\\\": [], \\\"exhibitions\\\": [{\\\"end\\\": \\\"2011-06-06T00:00:00Z\\\", \\\"startYear\\\": 2011, \\\"title\\\": \\\"The Golden Age of Dutch Painting\\\", \\\"organiser\\\": null, \\\"start\\\": \\\"2011-03-09T00:00:00Z\\\", \\\"endYear\\\": 2011, \\\"place\\\": \\\"Doha\\\"}, {\\\"end\\\": \\\"2006-06-04T00:00:00Z\\\", \\\"startYear\\\": 2006, \\\"title\\\": \\\"Master of Landscape. Jacob van Ruisdael's Paintings, Drawings and Etchings - III\\\", \\\"organiser\\\": null, \\\"start\\\": \\\"2006-02-25T00:00:00Z\\\", \\\"endYear\\\": 2006, \\\"place\\\": \\\"Londen\\\"}, {\\\"end\\\": \\\"2005-01-23T00:00:00Z\\\", \\\"startYear\\\": 2004, \\\"title\\\": \\\"Van der Hoop - C\\\", \\\"organiser\\\": null, \\\"start\\\": \\\"2004-10-15T00:00:00Z\\\", \\\"endYear\\\": 2005, \\\"place\\\": \\\"Amsterdam\\\"}], \\\"links\\\": {\\\"search\\\": \\\"https://www.rijksmuseum.nl/api/nl/collection\\\"}, \\\"objectTypes\\\": [\\\"schilderij\\\"], \\\"priref\\\": \\\"5333\\\", \\\"objectCollection\\\": [\\\"schilderijen\\\"], \\\"physicalProperties\\\": [], \\\"productionPlaces\\\": [], \\\"colors\\\": [\\\"#65563B\\\", \\\" #77705A\\\", \\\" #898D83\\\", \\\" #231E12\\\", \\\" #3C3828\\\", \\\" #A7A58F\\\", \\\" #988561\\\"], \\\"titles\\\": [\\\"De molen bij Wijk bij Duurstede\\\"], \\\"scLabelLine\\\": \\\"Jacob Isaacksz van Ruisdael (ca. 1628\\\\u20131682), olieverf op doek, ca. 1668\\\\u20131670\\\", \\\"label\\\": {\\\"date\\\": \\\"2013-04-11\\\", \\\"notes\\\": null, \\\"makerLine\\\": \\\"Jacob Isaacksz van Ruisdael (ca. 1628\\\\u20131682), olieverf op doek, ca. 1668\\\\u20131670\\\", \\\"description\\\": \\\"Majesteitelijk rijst de molen omhoog, hij trotseert de donkere regenwolken en maakt het slot en de kerk van Wijk bij Duurstede nietig. Op de voorgrond stroomt de Lek. Terecht is dit een wereldberoemd schilderij. Ruisdael verenigde er op een indrukwekkende wijze alle Hollandse elementen in \\\\u2013 het laagland, het water en de lucht \\\\u2013 en hij laat ze samenkomen in de al even Hollandse watermolen.\\\", \\\"title\\\": \\\"De molen bij Wijk bij Duurstede\\\"}, \\\"makers\\\": [{\\\"dateOfBirthPrecision\\\": \\\"ca.\\\", \\\"dateOfDeath\\\": \\\"1682-03-14\\\", \\\"name\\\": \\\"Jacob Isaacksz. van Ruisdael\\\", \\\"roles\\\": [\\\"schilder\\\"], \\\"dateOfDeathPrecision\\\": null, \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Ruisdael, Jacob Isaacksz. 
van\\\", \\\"placeOfDeath\\\": \\\"Amsterdam\\\", \\\"dateOfBirth\\\": \\\"1629-06\\\", \\\"qualification\\\": null, \\\"nationality\\\": \\\"Noord-Nederlands\\\", \\\"productionPlaces\\\": [], \\\"placeOfBirth\\\": \\\"Haarlem\\\", \\\"biography\\\": null, \\\"occupation\\\": [\\\"prentmaker\\\", \\\"tekenaar\\\", \\\"schilder\\\"]}], \\\"plaqueDescriptionEnglish\\\": null, \\\"techniques\\\": [], \\\"principalMaker\\\": \\\"Jacob Isaacksz. van Ruisdael\\\", \\\"labelText\\\": null, \\\"title\\\": \\\"De molen bij Wijk bij Duurstede\\\", \\\"id\\\": \\\"nl-SK-C-211\\\", \\\"materials\\\": [\\\"doek\\\", \\\"olieverf\\\"], \\\"dating\\\": {\\\"late\\\": null, \\\"earlyPrecision\\\": \\\"ca.\\\", \\\"yearLate\\\": 1670, \\\"period\\\": 17, \\\"early\\\": null, \\\"yearEarly\\\": 1668, \\\"latePrecision\\\": \\\"ca.\\\", \\\"year\\\": 1668}, \\\"objectNumber\\\": \\\"SK-C-211\\\", \\\"hasImage\\\": true, \\\"associations\\\": [], \\\"classification\\\": {\\\"iconClassIdentifier\\\": [\\\"25I41\\\", \\\"25H213\\\", \\\"46C24\\\"], \\\"places\\\": [\\\"Wijk bij DuurstedeWijk bij Duurstede\\\", \\\"Kasteel Duurstede (Wijk bij Duurstede)\\\", \\\"Lek\\\"], \\\"people\\\": [], \\\"objectNumbers\\\": [\\\"SK-C-211\\\"], \\\"iconClassDescription\\\": [\\\"windmill in landscape\\\", \\\"river\\\", \\\"sailing-ship, sailing-boat\\\"], \\\"periods\\\": [], \\\"motifs\\\": [], \\\"events\\\": []}, \\\"description\\\": \\\"De molen bij Wijk bij Duurstede. Links de rivier de Lek met een bootje, rechts de molen nabij de oever. In de verte de torens van kasteel Duurstede, rechts de toren van de Sint-Janskerk. Langs de oever lopen enkele vrouwen.\\\", \\\"colorsWithNormalization\\\": [{\\\"normalizedHex\\\": \\\"#B35A1F\\\", \\\"originalHex\\\": \\\"#65563B\\\"}, {\\\"normalizedHex\\\": \\\"#737C84\\\", \\\"originalHex\\\": \\\" #77705A\\\"}, {\\\"normalizedHex\\\": \\\"#737C84\\\", \\\"originalHex\\\": \\\" #898D83\\\"}, {\\\"normalizedHex\\\": \\\"#000000\\\", \\\"originalHex\\\": \\\" #231E12\\\"}, {\\\"normalizedHex\\\": \\\"#2F4F4F\\\", \\\"originalHex\\\": \\\" #3C3828\\\"}, {\\\"normalizedHex\\\": \\\"#E0CC91\\\", \\\"originalHex\\\": \\\" #A7A58F\\\"}, {\\\"normalizedHex\\\": \\\"#E0CC91\\\", \\\"originalHex\\\": \\\" #988561\\\"}], \\\"normalized32Colors\\\": [], \\\"longTitle\\\": \\\"De molen bij Wijk bij Duurstede, Jacob Isaacksz. van Ruisdael, ca. 1668 - ca. 1670\\\", \\\"copyrightHolder\\\": null, \\\"showImage\\\": true, \\\"subTitle\\\": \\\"h 83cm \\\\u00d7 b 101cm\\\", \\\"principalOrFirstMaker\\\": \\\"Jacob Isaacksz. van Ruisdael\\\", \\\"dimensions\\\": [{\\\"part\\\": null, \\\"type\\\": \\\"hoogte\\\", \\\"value\\\": \\\"83\\\", \\\"unit\\\": \\\"cm\\\"}, {\\\"part\\\": null, \\\"type\\\": \\\"breedte\\\", \\\"value\\\": \\\"101\\\", \\\"unit\\\": \\\"cm\\\"}], \\\"inscriptions\\\": [\\\"signatuur\\\", \\\"Ruisdael\\\"], \\\"language\\\": \\\"nl\\\", \\\"artistRole\\\": null, \\\"documentation\\\": [\\\"Ulrich Kuder, 'Ein Van Gogh nach Jacob van Ruisdael in Schleswig-holsteinischem Privatbesitz', Nordelbingen : Beitr\\\\u00e4ge zur Kunst- und Kulturgeschichte Schleswig-Holsteins 78 (2009), p. 56, afb. 1.\\\", \\\"Rijksmuseum Kunstkrant, Jaargang 33 (sept.-okt. 2007), nr. 5, p. 6.\\\", \\\"Pieter van der Heijden, 'Oud & Nieuw', Rijksmuseum Kunstkrant 16 (1989/1990) nr. 1, p. 
11.\\\"], \\\"physicalMedium\\\": \\\"olieverf op doek\\\", \\\"catRefRPK\\\": [], \\\"normalizedColors\\\": [\\\"#696969\\\", \\\" #808080\\\", \\\" #000000\\\", \\\" #A9A9A9\\\", \\\" #D2B48C\\\"], \\\"acquisition\\\": {\\\"date\\\": \\\"1885-06-30T00:00:00Z\\\", \\\"method\\\": null, \\\"creditLine\\\": \\\"Bruikleen van de gemeente Amsterdam (legaat A. van der Hoop)\\\"}, \\\"plaqueDescriptionDutch\\\": \\\"De molen is van onderaf weergegeven en steekt majestueus af tegen de donkere lucht. De gebouwen op de achtergrond zijn het kasteel en de Sint Maartenskerk van Wijk bij Duurstede, een belangrijke stad in de Gouden Eeuw. Op de voorgrond stroomt de rivier de Lek. Dit schilderij laat het ultieme Hollandse landschap zien: vlak, met veel water, lucht en molens.\\\"}\",\n- \"content_type\": \"application/json\"\n- },\n- \"title\": \"De molen bij Wijk bij Duurstede\",\n- \"date_granularity\": 4,\n- \"meta\": {\n- \"processing_started\": \"2015-01-13T22:22:05.625489\",\n- \"processing_finished\": \"2015-01-13T22:22:05.629028\",\n- \"rights\": \"Creative Commons Zero\",\n- \"collection\": \"Rijksmuseum\",\n- \"original_object_id\": \"SK-C-211\",\n- \"source_id\": \"rijksmuseum\",\n- \"original_object_urls\": {\n- \"json\": \"https://www.rijksmuseum.nl/api/nl/collection/SK-C-211?format=json\",\n- \"html\": \"https://www.rijksmuseum.nl/nl/collectie/SK-C-211\"\n- }\n- },\n- \"authors\": [\n- \"Jacob Isaacksz. van Ruisdael\"\n- ],\n- \"combined_index_data\": \"{\\\"date_granularity\\\": 4, \\\"media_urls\\\": [{\\\"url\\\": \\\"http://localhost:5000/v0/resolve/54e3f802fee41a205b456ef9a208ad63a22807a4\\\", \\\"width\\\": 2880, \\\"height\\\": 2376, \\\"content_type\\\": \\\"image/jpeg\\\", \\\"original_url\\\": \\\"http://lh6.ggpht.com/1gH99j2GD85SW4r3CA18uwTDuRioMYTNZlH5N2xuZsbh_4QUnzxettm6WqCsLa_ciGCzhWwLzF35QtHEpz4M9LWv_yvl=s0\\\"}], \\\"meta\\\": {\\\"processing_started\\\": \\\"2015-01-13T22:22:05.625489\\\", \\\"rights\\\": \\\"Creative Commons Zero\\\", \\\"collection\\\": \\\"Rijksmuseum\\\", \\\"original_object_id\\\": \\\"SK-C-211\\\", \\\"source_id\\\": \\\"rijksmuseum\\\", \\\"original_object_urls\\\": {\\\"json\\\": \\\"https://www.rijksmuseum.nl/api/nl/collection/SK-C-211?format=json\\\", \\\"html\\\": \\\"https://www.rijksmuseum.nl/nl/collectie/SK-C-211\\\"}}, \\\"description\\\": \\\"De molen bij Wijk bij Duurstede. Links de rivier de Lek met een bootje, rechts de molen nabij de oever. In de verte de torens van kasteel Duurstede, rechts de toren van de Sint-Janskerk. Langs de oever lopen enkele vrouwen.\\\", \\\"title\\\": \\\"De molen bij Wijk bij Duurstede\\\", \\\"date\\\": \\\"1668-01-01T00:00:00\\\", \\\"all_text\\\": \\\"De molen bij Wijk bij Duurstede De molen bij Wijk bij Duurstede. Links de rivier de Lek met een bootje, rechts de molen nabij de oever. In de verte de torens van kasteel Duurstede, rechts de toren van de Sint-Janskerk. Langs de oever lopen enkele vrouwen. schilderij schilderijen Jacob Isaacksz. van Ruisdael Haarlem Amsterdam prentmaker tekenaar schilder schilder Noord-Nederlands De molen is van onderaf weergegeven en steekt majestueus af tegen de donkere lucht. De gebouwen op de achtergrond zijn het kasteel en de Sint Maartenskerk van Wijk bij Duurstede, een belangrijke stad in de Gouden Eeuw. Op de voorgrond stroomt de rivier de Lek. Dit schilderij laat het ultieme Hollandse landschap zien: vlak, met veel water, lucht en molens. Bruikleen van de gemeente Amsterdam (legaat A. van der Hoop) The Golden Age of Dutch Painting Doha Master of Landscape. 
Jacob van Ruisdael's Paintings, Drawings and Etchings - III Londen Van der Hoop - C Amsterdam doek olieverf schilderij\\\", \\\"authors\\\": [\\\"Jacob Isaacksz. van Ruisdael\\\"]}\",\n- \"media_urls\": [\n- {\n- \"url\": \"http://localhost:5000/v0/resolve/54e3f802fee41a205b456ef9a208ad63a22807a4\",\n- \"width\": 2880,\n- \"original_url\": \"http://lh6.ggpht.com/1gH99j2GD85SW4r3CA18uwTDuRioMYTNZlH5N2xuZsbh_4QUnzxettm6WqCsLa_ciGCzhWwLzF35QtHEpz4M9LWv_yvl=s0\",\n- \"content_type\": \"image/jpeg\",\n- \"height\": 2376\n- }\n- ]\n-}\n\\ No newline at end of file\ndiff --git a/tests/ocd_frontend/test_data/ori_test_collection_index/items_5.json b/tests/ocd_frontend/test_data/ori_test_collection_index/items_5.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_collection_index/items_5.json\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-{\n- \"date\": \"1677-01-01T00:00:00\",\n- \"description\": \"Het hoofd met gesloten ogen, driekwart naar links gewend, vertoont imperiale en knevel; op de linkerwang een wrat. Het golvende haar valt tot op de schouders. Boven het harnas steekt een geplooid kraagje uit. Het stuk eindigt even onder de hals.\",\n- \"source_data\": {\n- \"data\": \"{\\\"webImage\\\": {\\\"url\\\": \\\"http://lh3.ggpht.com/7eeRiDQLguCwPA7159-wn9Jd3AhGzmFzra0kda7SX5Vm1TqTW3sLf6UksceWq6MUJmrUSxVC5bCviXAMX1S0qd0lUi0=s0\\\", \\\"height\\\": 2500, \\\"width\\\": 2083, \\\"offsetPercentageX\\\": 50, \\\"offsetPercentageY\\\": 50, \\\"guid\\\": \\\"dfb56cbe-6e82-4f6f-aa7a-b560ed91f07f\\\"}, \\\"principalMakers\\\": [{\\\"dateOfBirthPrecision\\\": null, \\\"dateOfDeath\\\": \\\"1698-11-27\\\", \\\"name\\\": \\\"Rombout Verhulst\\\", \\\"roles\\\": [\\\"beeldhouwer\\\"], \\\"dateOfDeathPrecision\\\": null, \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Verhulst, Rombout\\\", \\\"placeOfDeath\\\": \\\"Den Haag\\\", \\\"dateOfBirth\\\": \\\"1624-01-15\\\", \\\"qualification\\\": null, \\\"nationality\\\": \\\"Noord-Nederlands\\\", \\\"productionPlaces\\\": [], \\\"placeOfBirth\\\": \\\"Mechelen\\\", \\\"biography\\\": null, \\\"occupation\\\": [\\\"beeldhouwer\\\"]}], \\\"historicalPersons\\\": [\\\"Ruyter, Michiel Adriaansz. 
de\\\"], \\\"exhibitions\\\": [{\\\"end\\\": \\\"2007-11-11T00:00:00Z\\\", \\\"startYear\\\": 2007, \\\"title\\\": \\\"Held - B\\\", \\\"organiser\\\": null, \\\"start\\\": \\\"2007-08-11T00:00:00Z\\\", \\\"endYear\\\": 2007, \\\"place\\\": \\\"Amsterdam\\\"}, {\\\"end\\\": \\\"1957-06-17T00:00:00Z\\\", \\\"startYear\\\": 1957, \\\"title\\\": \\\"Tentoonstelling ter herdenking van Michiel de Ruyter geboren 24 maart 1607 (1957-03-24)\\\", \\\"organiser\\\": null, \\\"start\\\": \\\"1957-03-24T00:00:00Z\\\", \\\"endYear\\\": 1957, \\\"place\\\": \\\"Amsterdam\\\"}, {\\\"end\\\": \\\"1950-03-29T00:00:00Z\\\", \\\"startYear\\\": 1950, \\\"title\\\": \\\"De Stadhouder-Koning en zijn tijd : 1650-1950\\\", \\\"organiser\\\": null, \\\"start\\\": \\\"1950-03-18T00:00:00Z\\\", \\\"endYear\\\": 1950, \\\"place\\\": \\\"Amsterdam\\\"}], \\\"links\\\": {\\\"search\\\": \\\"https://www.rijksmuseum.nl/api/nl/collection\\\"}, \\\"objectTypes\\\": [\\\"beeldhouwwerk\\\"], \\\"priref\\\": \\\"24803\\\", \\\"objectCollection\\\": [\\\"beeldhouwwerken\\\"], \\\"physicalProperties\\\": [], \\\"productionPlaces\\\": [], \\\"colors\\\": [\\\"#101209\\\", \\\" #4F4528\\\", \\\" #332C14\\\", \\\" #81764F\\\", \\\" #645A3B\\\", \\\" #9A8F63\\\", \\\" #B2AD89\\\"], \\\"titles\\\": [\\\"Michiel Adriaanszoon de Ruyter\\\"], \\\"scLabelLine\\\": \\\"Rombout Verhulst (1624\\\\u20131698), gesausde terracotta, 1677\\\\u20131681\\\", \\\"label\\\": {\\\"date\\\": \\\"2013-03-27\\\", \\\"notes\\\": null, \\\"makerLine\\\": \\\"Rombout Verhulst (1624\\\\u20131698), gesausde terracotta, 1677\\\\u20131681\\\", \\\"description\\\": \\\"Dit portret boetseerde Rombout Verhulst in klei toen hij het grootse grafmonument voor Michiel de Ruyter in de Nieuwe Kerk in Amsterdam ontwierp. Verhulst keek goed naar geschilderde portretten van de admiraal. Hij hield deze schets tot zijn dood in eigen bezit.\\\", \\\"title\\\": \\\"Portret van Michiel de Ruyter\\\"}, \\\"makers\\\": [{\\\"dateOfBirthPrecision\\\": null, \\\"dateOfDeath\\\": \\\"1698-11-27\\\", \\\"name\\\": \\\"Rombout Verhulst\\\", \\\"roles\\\": [\\\"beeldhouwer\\\"], \\\"dateOfDeathPrecision\\\": null, \\\"schoolStyles\\\": [], \\\"unFixedName\\\": \\\"Verhulst, Rombout\\\", \\\"placeOfDeath\\\": \\\"Den Haag\\\", \\\"dateOfBirth\\\": \\\"1624-01-15\\\", \\\"qualification\\\": null, \\\"nationality\\\": \\\"Noord-Nederlands\\\", \\\"productionPlaces\\\": [], \\\"placeOfBirth\\\": \\\"Mechelen\\\", \\\"biography\\\": null, \\\"occupation\\\": [\\\"beeldhouwer\\\"]}], \\\"plaqueDescriptionEnglish\\\": null, \\\"techniques\\\": [\\\"boetseren\\\", \\\"sausen\\\"], \\\"principalMaker\\\": \\\"Rombout Verhulst\\\", \\\"labelText\\\": null, \\\"title\\\": \\\"Portret van Michiel de Ruyter\\\", \\\"id\\\": \\\"nl-BK-NM-13150\\\", \\\"materials\\\": [\\\"terracotta\\\"], \\\"dating\\\": {\\\"late\\\": null, \\\"earlyPrecision\\\": null, \\\"yearLate\\\": 1681, \\\"period\\\": 17, \\\"early\\\": null, \\\"yearEarly\\\": 1677, \\\"latePrecision\\\": null, \\\"year\\\": 1677}, \\\"objectNumber\\\": \\\"BK-NM-13150\\\", \\\"hasImage\\\": true, \\\"associations\\\": [], \\\"classification\\\": {\\\"iconClassIdentifier\\\": [\\\"61B2(+51)\\\"], \\\"places\\\": [], \\\"people\\\": [\\\"Ruyter, Michiel Adriaansz. 
de\\\"], \\\"objectNumbers\\\": [\\\"BK-NM-13150\\\"], \\\"iconClassDescription\\\": [\\\"historical persons (+ head (and shoulders) (portrait))\\\"], \\\"periods\\\": [\\\"16761676\\\"], \\\"motifs\\\": [], \\\"events\\\": []}, \\\"description\\\": \\\"Het hoofd met gesloten ogen, driekwart naar links gewend, vertoont imperiale en knevel; op de linkerwang een wrat. Het golvende haar valt tot op de schouders. Boven het harnas steekt een geplooid kraagje uit. Het stuk eindigt even onder de hals.\\\", \\\"colorsWithNormalization\\\": [{\\\"normalizedHex\\\": \\\"#000000\\\", \\\"originalHex\\\": \\\"#101209\\\"}, {\\\"normalizedHex\\\": \\\"#2F4F4F\\\", \\\"originalHex\\\": \\\" #4F4528\\\"}, {\\\"normalizedHex\\\": \\\"#000000\\\", \\\"originalHex\\\": \\\" #332C14\\\"}, {\\\"normalizedHex\\\": \\\"#E0CC91\\\", \\\"originalHex\\\": \\\" #81764F\\\"}, {\\\"normalizedHex\\\": \\\"#367614\\\", \\\"originalHex\\\": \\\" #645A3B\\\"}, {\\\"normalizedHex\\\": \\\"#E0CC91\\\", \\\"originalHex\\\": \\\" #9A8F63\\\"}, {\\\"normalizedHex\\\": \\\"#E0CC91\\\", \\\"originalHex\\\": \\\" #B2AD89\\\"}], \\\"normalized32Colors\\\": [], \\\"longTitle\\\": \\\"Portret van Michiel de Ruyter, Rombout Verhulst, 1677 - 1681\\\", \\\"copyrightHolder\\\": null, \\\"showImage\\\": true, \\\"subTitle\\\": \\\"h 35cm \\\\u00d7 b 32cm \\\\u00d7 d 30cm\\\", \\\"principalOrFirstMaker\\\": \\\"Rombout Verhulst\\\", \\\"dimensions\\\": [{\\\"part\\\": null, \\\"type\\\": \\\"hoogte\\\", \\\"value\\\": \\\"35\\\", \\\"unit\\\": \\\"cm\\\"}, {\\\"part\\\": null, \\\"type\\\": \\\"breedte\\\", \\\"value\\\": \\\"32\\\", \\\"unit\\\": \\\"cm\\\"}, {\\\"part\\\": null, \\\"type\\\": \\\"diepte\\\", \\\"value\\\": \\\"30\\\", \\\"unit\\\": \\\"cm\\\"}], \\\"inscriptions\\\": [], \\\"language\\\": \\\"nl\\\", \\\"artistRole\\\": null, \\\"documentation\\\": [\\\"Jaarverslag Rijksmuseum (1973), p. 27 (restauratie).\\\", \\\"Catalogus Mauritshuis (1869), cat.nr. 369.\\\", \\\"Documentatiemap: aantekeningen R. van Luttervelt (literatuur met opmerkingen); foto.\\\"], \\\"physicalMedium\\\": \\\"terracotta, geel gesaust\\\", \\\"catRefRPK\\\": [], \\\"normalizedColors\\\": [\\\"#000000\\\", \\\" #556B2F\\\", \\\" #BDB76B\\\"], \\\"acquisition\\\": {\\\"date\\\": \\\"1923-01-01T00:00:00Z\\\", \\\"method\\\": null, \\\"creditLine\\\": \\\"Bruikleen van het Koninklijk Kabinet van Schilderijen Mauritshuis\\\"}, \\\"plaqueDescriptionDutch\\\": \\\"Admiraal De Ruyter raakte in 1676 zwaar gewond in een zeeslag tegen de Fransen in de Middellandse Zee. Hij bezweek aan zijn verwondingen en werd begraven op een prominente plaats in de Nieuwe Kerk in Amsterdam. 
Dit portret was een model voor het graf.\\\"}\",\n- \"content_type\": \"application/json\"\n- },\n- \"title\": \"Portret van Michiel de Ruyter\",\n- \"date_granularity\": 4,\n- \"meta\": {\n- \"processing_started\": \"2015-01-13T22:22:09.955206\",\n- \"processing_finished\": \"2015-01-13T22:22:09.959300\",\n- \"rights\": \"Creative Commons Zero\",\n- \"collection\": \"Rijksmuseum\",\n- \"original_object_id\": \"BK-NM-13150\",\n- \"source_id\": \"rijksmuseum\",\n- \"original_object_urls\": {\n- \"json\": \"https://www.rijksmuseum.nl/api/nl/collection/BK-NM-13150?format=json\",\n- \"html\": \"https://www.rijksmuseum.nl/nl/collectie/BK-NM-13150\"\n- }\n- },\n- \"authors\": [\n- \"Rombout Verhulst\"\n- ],\n- \"combined_index_data\": \"{\\\"date_granularity\\\": 4, \\\"media_urls\\\": [{\\\"url\\\": \\\"http://localhost:5000/v0/resolve/059262e18e177b10b9a09ef13082e07bda0554a7\\\", \\\"width\\\": 2083, \\\"height\\\": 2500, \\\"content_type\\\": \\\"image/jpeg\\\", \\\"original_url\\\": \\\"http://lh3.ggpht.com/7eeRiDQLguCwPA7159-wn9Jd3AhGzmFzra0kda7SX5Vm1TqTW3sLf6UksceWq6MUJmrUSxVC5bCviXAMX1S0qd0lUi0=s0\\\"}], \\\"meta\\\": {\\\"processing_started\\\": \\\"2015-01-13T22:22:09.955206\\\", \\\"rights\\\": \\\"Creative Commons Zero\\\", \\\"collection\\\": \\\"Rijksmuseum\\\", \\\"original_object_id\\\": \\\"BK-NM-13150\\\", \\\"source_id\\\": \\\"rijksmuseum\\\", \\\"original_object_urls\\\": {\\\"json\\\": \\\"https://www.rijksmuseum.nl/api/nl/collection/BK-NM-13150?format=json\\\", \\\"html\\\": \\\"https://www.rijksmuseum.nl/nl/collectie/BK-NM-13150\\\"}}, \\\"description\\\": \\\"Het hoofd met gesloten ogen, driekwart naar links gewend, vertoont imperiale en knevel; op de linkerwang een wrat. Het golvende haar valt tot op de schouders. Boven het harnas steekt een geplooid kraagje uit. Het stuk eindigt even onder de hals.\\\", \\\"title\\\": \\\"Portret van Michiel de Ruyter\\\", \\\"date\\\": \\\"1677-01-01T00:00:00\\\", \\\"all_text\\\": \\\"Michiel Adriaanszoon de Ruyter Het hoofd met gesloten ogen, driekwart naar links gewend, vertoont imperiale en knevel; op de linkerwang een wrat. Het golvende haar valt tot op de schouders. Boven het harnas steekt een geplooid kraagje uit. Het stuk eindigt even onder de hals. beeldhouwwerk beeldhouwwerken Rombout Verhulst Mechelen Den Haag beeldhouwer beeldhouwer Noord-Nederlands Admiraal De Ruyter raakte in 1676 zwaar gewond in een zeeslag tegen de Fransen in de Middellandse Zee. Hij bezweek aan zijn verwondingen en werd begraven op een prominente plaats in de Nieuwe Kerk in Amsterdam. Dit portret was een model voor het graf. 
Bruikleen van het Koninklijk Kabinet van Schilderijen Mauritshuis Held - B Amsterdam Tentoonstelling ter herdenking van Michiel de Ruyter geboren 24 maart 1607 (1957-03-24) Amsterdam De Stadhouder-Koning en zijn tijd : 1650-1950 Amsterdam terracotta boetseren sausen beeldhouwwerk\\\", \\\"authors\\\": [\\\"Rombout Verhulst\\\"]}\",\n- \"media_urls\": [\n- {\n- \"url\": \"http://localhost:5000/v0/resolve/059262e18e177b10b9a09ef13082e07bda0554a7\",\n- \"width\": 2083,\n- \"original_url\": \"http://lh3.ggpht.com/7eeRiDQLguCwPA7159-wn9Jd3AhGzmFzra0kda7SX5Vm1TqTW3sLf6UksceWq6MUJmrUSxVC5bCviXAMX1S0qd0lUi0=s0\",\n- \"content_type\": \"image/jpeg\",\n- \"height\": 2500\n- }\n- ]\n-}\n\\ No newline at end of file\ndiff --git a/tests/ocd_frontend/test_data/ori_test_combined_index/item_1.json b/tests/ocd_frontend/test_data/ori_test_combined_index/item_1.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_combined_index/item_1.json\n+++ /dev/null\n@@ -1,23 +0,0 @@\n-{\n- \"description\": \"Omdat bij de bevrijding van Den Bosch in 1944 door de 53e Welsh Division onder commando van generaal-majoor Ross er geen mogelijkheid was voor een feestelijke ontvangst, wordt het nu, \u00e9\u00e9n jaar later, alsnog gedaan. De daarvoor speciaal uit Duitsland overgekomen divisie maakt een korte mars door de stad m\u00e9t de regimentsbok voorop. Generaal Ross biedt de burgemeester, mr. C.A.F.H.W.B. van den Clooster baron Sloet tot Everloo, een herdenkingsschild aan op het bordes van het stadhuis. 01:18 Sloet tot Everloo. Aantekening : BRON: Gemeente Archief Den Bosch en Instituut voor Genealogie Den Haag.\",\n- \"title\": \"Een jaar bevrijd\",\n- \"date_granularity\": 8,\n- \"meta\": {\n- \"processing_started\": \"2014-12-09T10:52:25.385822\",\n- \"processing_finished\": \"2014-12-09T10:52:37.305056\",\n- \"rights\": \"Creative Commons Attribution-ShareAlike\",\n- \"collection\": \"Open Beelden\",\n- \"original_object_id\": \"oai:openimages.eu:21484\",\n- \"source_id\": \"ocd_openbeelden\",\n- \"original_object_urls\": {\n- \"xml\": \"http://openbeelden.nl/feeds/oai/?verb=GetRecord&identifier=oai:openimages.eu:21484&metadataPrefix=oai_oi\",\n- \"html\": \"http://openbeelden.nl/media/21484/\"\n- }\n- },\n- \"authors\": [\n- \"Polygoon-Profilti (producent) / Nederlands Instituut voor Beeld en Geluid (beheerder)\"\n- ],\n- \"date\": \"1945-10-27T00:00:00\",\n- \"all_text\": \"Een jaar bevrijd Weeknummer 45-44 Polygoon-Profilti (producent) / Nederlands Instituut voor Beeld en Geluid (beheerder) bevrijding militairen mascottes defil\u00e9s Bioscoopjournaals waarin Nederlandse onderwerpen van een bepaalde week worden gepresenteerd. Omdat bij de bevrijding van Den Bosch in 1944 door de 53e Welsh Division onder commando van generaal-majoor Ross er geen mogelijkheid was voor een feestelijke ontvangst, wordt het nu, \u00e9\u00e9n jaar later, alsnog gedaan. De daarvoor speciaal uit Duitsland overgekomen divisie maakt een korte mars door de stad m\u00e9t de regimentsbok voorop. Generaal Ross biedt de burgemeester, mr. C.A.F.H.W.B. van den Clooster baron Sloet tot Everloo, een herdenkingsschild aan op het bordes van het stadhuis. 01:18 Sloet tot Everloo. Aantekening : BRON: Gemeente Archief Den Bosch en Instituut voor Genealogie Den Haag. 
Nederlands Instituut voor Beeld en Geluid 520992 WEEKNUMMER454-HRE0001B040 Moving Image\",\n- \"media_urls\": []\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_combined_index/item_2.json b/tests/ocd_frontend/test_data/ori_test_combined_index/item_2.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_combined_index/item_2.json\n+++ /dev/null\n@@ -1,23 +0,0 @@\n-{\n- \"description\": \"Rotganzen grazend op graslanden.\",\n- \"title\": \"Rotganzen grazend op graslanden\",\n- \"date_granularity\": 8,\n- \"meta\": {\n- \"processing_started\": \"2014-12-09T10:52:25.335257\",\n- \"processing_finished\": \"2014-12-09T10:52:37.111356\",\n- \"rights\": \"Creative Commons Attribution-ShareAlike\",\n- \"collection\": \"Open Beelden\",\n- \"original_object_id\": \"oai:openimages.eu:725170\",\n- \"source_id\": \"ocd_openbeelden\",\n- \"original_object_urls\": {\n- \"xml\": \"http://openbeelden.nl/feeds/oai/?verb=GetRecord&identifier=oai:openimages.eu:725170&metadataPrefix=oai_oi\",\n- \"html\": \"http://openbeelden.nl/media/725170/\"\n- }\n- },\n- \"authors\": [\n- \"Natuur Digitaal (Marc Plomp); Stichting Natuurbeelden\"\n- ],\n- \"date\": \"2012-04-19T00:00:00\",\n- \"all_text\": \"Rotganzen grazend op graslanden Natuur Digitaal (Marc Plomp); Stichting Natuurbeelden bewolkt branta bernicla buitenopname close cultuurlandschappen dag eten fauna foerageren ganzenschade grazen landbouwschade rotganzen totaal vogels wijd winter wintergast zonnig Clip gemaakt in de Nederlandse natuur uit de collectie van Stichting Natuurbeelden (www.natuurbeelden.nl). Rotganzen grazend op graslanden. Stichting Natuurbeelden Plomp, Marc (camera) 4961929 0000ROTGANS00-NAT00Z03NT6 Moving Image\",\n- \"media_urls\": []\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_combined_index/item_3.json b/tests/ocd_frontend/test_data/ori_test_combined_index/item_3.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_combined_index/item_3.json\n+++ /dev/null\n@@ -1,23 +0,0 @@\n-{\n- \"description\": \"Grote groep tureluurs op hoogwatervluchtplaats.\",\n- \"title\": \"Grote groep tureluurs\",\n- \"date_granularity\": 8,\n- \"meta\": {\n- \"processing_started\": \"2014-12-09T10:52:25.606450\",\n- \"processing_finished\": \"2014-12-09T10:52:38.251339\",\n- \"rights\": \"Creative Commons Attribution-ShareAlike\",\n- \"collection\": \"Open Beelden\",\n- \"original_object_id\": \"oai:openimages.eu:728496\",\n- \"source_id\": \"ocd_openbeelden\",\n- \"original_object_urls\": {\n- \"xml\": \"http://openbeelden.nl/feeds/oai/?verb=GetRecord&identifier=oai:openimages.eu:728496&metadataPrefix=oai_oi\",\n- \"html\": \"http://openbeelden.nl/media/728496/\"\n- }\n- },\n- \"authors\": [\n- \"Natuur Digitaal (Marc Plomp); Stichting Natuurbeelden\"\n- ],\n- \"date\": \"2012-08-20T00:00:00\",\n- \"all_text\": \"Grote groep tureluurs Natuur Digitaal (Marc Plomp); Stichting Natuurbeelden bewolkt buitenopname close dag fauna getijdengebieden hoogwatervluchtplaats ondiep overtijen slapen slik staan totaal tringa totanus tureluurs vogels weidevogel wijd zomer Clip gemaakt in de Nederlandse natuur uit de collectie van Stichting Natuurbeelden (www.natuurbeelden.nl). Grote groep tureluurs op hoogwatervluchtplaats. 
Stichting Natuurbeelden Plomp, Marc (camera) 4962032 0000TURELUUR0-NAT00Z03NTZ Moving Image\",\n- \"media_urls\": []\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_resolver_index/url_1.json b/tests/ocd_frontend/test_data/ori_test_resolver_index/url_1.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_resolver_index/url_1.json\n+++ /dev/null\n@@ -1,4 +0,0 @@\n-{\n- \"original_url\": \"http://lh4.ggpht.com/geMErXtSoypSyBaGkBOYHG8XxO1sP2MoYtfbs70fRbPbpWjvP04jEvhlamJ0kmbuo6C2UZYCBZQumngASkyZjO4MRgI=s0\",\n- \"content_type\": \"image/jpeg\"\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_resolver_index/url_2.json b/tests/ocd_frontend/test_data/ori_test_resolver_index/url_2.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_resolver_index/url_2.json\n+++ /dev/null\n@@ -1,4 +0,0 @@\n-{\n- \"original_url\": \"http://lh5.ggpht.com/H-KfOaNgW2an_g0kODWKua5BELckMTr7zauQZCbnOZ69fyNlr67uavKaDmvSawg8j6TB88abmtAjNbcMjbOdU94zuzM=s0\",\n- \"content_type\": \"does-not-exist\"\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_resolver_index/url_3.json b/tests/ocd_frontend/test_data/ori_test_resolver_index/url_3.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_resolver_index/url_3.json\n+++ /dev/null\n@@ -1,4 +0,0 @@\n-{\n- \"original_url\": \"http://lh3.ggpht.com/lAJ1wnr_hEOncOfh9eKzvaS8w-fhLLq5yGlzHBctnjgyOzsbuP4cGIqP4q0A-YvnyXBhJi96il6NIZNhRVW-BVg2lW0=s0\",\n- \"content_type\": \"image/jpeg\"\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_resolver_index/url_4.json b/tests/ocd_frontend/test_data/ori_test_resolver_index/url_4.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_resolver_index/url_4.json\n+++ /dev/null\n@@ -1,4 +0,0 @@\n-{\n- \"original_url\": \"http://lh4.ggpht.com/NwCWmjro4h__Ord5RqicIJsJbTY104UditPHR-swB9a7pQRt67KfneX_tBEazLnkNGsWqCvfsZam8Pxj1Ixiqbne7Q=s0\",\n- \"content_type\": \"image/jpeg\"\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_resolver_index/url_5.json b/tests/ocd_frontend/test_data/ori_test_resolver_index/url_5.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_resolver_index/url_5.json\n+++ /dev/null\n@@ -1,4 +0,0 @@\n-{\n- \"original_url\": \"http://lh6.ggpht.com/gOTbLnfHUVFp3PgQQSNiEmQ0fjVAPCNJbO8ofTXlFJMpUWDye9ernn75qmkGj8KqAQTr60cyOHiXK3LnWwhwvc1mGQ=s0\",\n- \"content_type\": \"image/jpeg\"\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_scroll_index/item_1.json b/tests/ocd_frontend/test_data/ori_test_scroll_index/item_1.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_scroll_index/item_1.json\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-{\n- \"description\": \"Omdat bij de bevrijding van Den Bosch in 1944 door de 53e Welsh Division onder commando van generaal-majoor Ross er geen mogelijkheid was voor een feestelijke ontvangst, wordt het nu, \u00e9\u00e9n jaar later, alsnog gedaan. De daarvoor speciaal uit Duitsland overgekomen divisie maakt een korte mars door de stad m\u00e9t de regimentsbok voorop. Generaal Ross biedt de burgemeester, mr. C.A.F.H.W.B. van den Clooster baron Sloet tot Everloo, een herdenkingsschild aan op het bordes van het stadhuis. 01:18 Sloet tot Everloo. 
Aantekening : BRON: Gemeente Archief Den Bosch en Instituut voor Genealogie Den Haag.\",\n- \"title\": \"Een jaar bevrijd\",\n- \"date_granularity\": 8,\n- \"hidden\": false,\n- \"meta\": {\n- \"processing_started\": \"2014-12-09T10:52:25.385822\",\n- \"processing_finished\": \"2014-12-09T10:52:37.305056\",\n- \"rights\": \"Creative Commons Attribution-ShareAlike\",\n- \"collection\": \"Open Beelden\",\n- \"original_object_id\": \"oai:openimages.eu:21484\",\n- \"source_id\": \"ocd_openbeelden\",\n- \"original_object_urls\": {\n- \"xml\": \"http://openbeelden.nl/feeds/oai/?verb=GetRecord&identifier=oai:openimages.eu:21484&metadataPrefix=oai_oi\",\n- \"html\": \"http://openbeelden.nl/media/21484/\"\n- }\n- },\n- \"authors\": [\n- \"Polygoon-Profilti (producent) / Nederlands Instituut voor Beeld en Geluid (beheerder)\"\n- ],\n- \"date\": \"1945-10-27T00:00:00\",\n- \"all_text\": \"Een jaar bevrijd Weeknummer 45-44 Polygoon-Profilti (producent) / Nederlands Instituut voor Beeld en Geluid (beheerder) bevrijding militairen mascottes defil\u00e9s Bioscoopjournaals waarin Nederlandse onderwerpen van een bepaalde week worden gepresenteerd. Omdat bij de bevrijding van Den Bosch in 1944 door de 53e Welsh Division onder commando van generaal-majoor Ross er geen mogelijkheid was voor een feestelijke ontvangst, wordt het nu, \u00e9\u00e9n jaar later, alsnog gedaan. De daarvoor speciaal uit Duitsland overgekomen divisie maakt een korte mars door de stad m\u00e9t de regimentsbok voorop. Generaal Ross biedt de burgemeester, mr. C.A.F.H.W.B. van den Clooster baron Sloet tot Everloo, een herdenkingsschild aan op het bordes van het stadhuis. 01:18 Sloet tot Everloo. Aantekening : BRON: Gemeente Archief Den Bosch en Instituut voor Genealogie Den Haag. Nederlands Instituut voor Beeld en Geluid 520992 WEEKNUMMER454-HRE0001B040 Moving Image\",\n- \"media_urls\": []\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_scroll_index/item_2.json b/tests/ocd_frontend/test_data/ori_test_scroll_index/item_2.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_scroll_index/item_2.json\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-{\n- \"description\": \"Rotganzen grazend op graslanden.\",\n- \"title\": \"Rotganzen grazend op graslanden\",\n- \"date_granularity\": 8,\n- \"hidden\": false,\n- \"meta\": {\n- \"processing_started\": \"2014-12-09T10:52:25.335257\",\n- \"processing_finished\": \"2014-12-09T10:52:37.111356\",\n- \"rights\": \"Creative Commons Attribution-ShareAlike\",\n- \"collection\": \"Open Beelden\",\n- \"original_object_id\": \"oai:openimages.eu:725170\",\n- \"source_id\": \"ocd_openbeelden\",\n- \"original_object_urls\": {\n- \"xml\": \"http://openbeelden.nl/feeds/oai/?verb=GetRecord&identifier=oai:openimages.eu:725170&metadataPrefix=oai_oi\",\n- \"html\": \"http://openbeelden.nl/media/725170/\"\n- }\n- },\n- \"authors\": [\n- \"Natuur Digitaal (Marc Plomp); Stichting Natuurbeelden\"\n- ],\n- \"date\": \"2012-04-19T00:00:00\",\n- \"all_text\": \"Rotganzen grazend op graslanden Natuur Digitaal (Marc Plomp); Stichting Natuurbeelden bewolkt branta bernicla buitenopname close cultuurlandschappen dag eten fauna foerageren ganzenschade grazen landbouwschade rotganzen totaal vogels wijd winter wintergast zonnig Clip gemaakt in de Nederlandse natuur uit de collectie van Stichting Natuurbeelden (www.natuurbeelden.nl). Rotganzen grazend op graslanden. 
Stichting Natuurbeelden Plomp, Marc (camera) 4961929 0000ROTGANS00-NAT00Z03NT6 Moving Image\",\n- \"media_urls\": []\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_scroll_index/item_3.json b/tests/ocd_frontend/test_data/ori_test_scroll_index/item_3.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_scroll_index/item_3.json\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-{\n- \"description\": \"Grote groep tureluurs op hoogwatervluchtplaats.\",\n- \"title\": \"Grote groep tureluurs\",\n- \"date_granularity\": 8,\n- \"hidden\": false,\n- \"meta\": {\n- \"processing_started\": \"2014-12-09T10:52:25.606450\",\n- \"processing_finished\": \"2014-12-09T10:52:38.251339\",\n- \"rights\": \"Creative Commons Attribution-ShareAlike\",\n- \"collection\": \"Open Beelden\",\n- \"original_object_id\": \"oai:openimages.eu:728496\",\n- \"source_id\": \"ocd_openbeelden\",\n- \"original_object_urls\": {\n- \"xml\": \"http://openbeelden.nl/feeds/oai/?verb=GetRecord&identifier=oai:openimages.eu:728496&metadataPrefix=oai_oi\",\n- \"html\": \"http://openbeelden.nl/media/728496/\"\n- }\n- },\n- \"authors\": [\n- \"Natuur Digitaal (Marc Plomp); Stichting Natuurbeelden\"\n- ],\n- \"date\": \"2012-08-20T00:00:00\",\n- \"all_text\": \"Grote groep tureluurs Natuur Digitaal (Marc Plomp); Stichting Natuurbeelden bewolkt buitenopname close dag fauna getijdengebieden hoogwatervluchtplaats ondiep overtijen slapen slik staan totaal tringa totanus tureluurs vogels weidevogel wijd zomer Clip gemaakt in de Nederlandse natuur uit de collectie van Stichting Natuurbeelden (www.natuurbeelden.nl). Grote groep tureluurs op hoogwatervluchtplaats. Stichting Natuurbeelden Plomp, Marc (camera) 4962032 0000TURELUUR0-NAT00Z03NTZ Moving Image\",\n- \"media_urls\": []\n-}\ndiff --git a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/get_object_1.json b/tests/ocd_frontend/test_data/ori_test_usage_logging_index/get_object_1.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/get_object_1.json\n+++ /dev/null\n@@ -1,13 +0,0 @@\n-{\n- \"processed_at\": \"2015-01-18T19:09:31.318591\",\n- \"user_properties\": {\n- \"ip\": \"10.0.2.2\",\n- \"referer\": null,\n- \"user_agent\": \"curl/7.37.1\"\n- },\n- \"created_at\": \"2015-01-18T19:09:31.315919\",\n- \"event_properties\": {\n- \"source_id\": \"rijksmuseum\",\n- \"object_id\": \"73baee365e56903b90eb0aba279735211df2daee\"\n- }\n-}\n\\ No newline at end of file\ndiff --git a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/get_object_2.json b/tests/ocd_frontend/test_data/ori_test_usage_logging_index/get_object_2.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/get_object_2.json\n+++ /dev/null\n@@ -1,13 +0,0 @@\n-{\n- \"processed_at\": \"2015-01-18T20:26:39.331090\",\n- \"user_properties\": {\n- \"ip\": \"10.0.2.2\",\n- \"referer\": null,\n- \"user_agent\": \"curl/7.37.1\"\n- },\n- \"created_at\": \"2015-01-18T20:26:39.327927\",\n- \"event_properties\": {\n- \"source_id\": \"rijksmuseum\",\n- \"object_id\": \"73baee365e56903b90eb0aba279735211df2daee\"\n- }\n-}\n\\ No newline at end of file\ndiff --git a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/get_object_3.json b/tests/ocd_frontend/test_data/ori_test_usage_logging_index/get_object_3.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/get_object_3.json\n+++ /dev/null\n@@ -1,13 +0,0 @@\n-{\n- \"processed_at\": \"2015-01-18T20:26:40.316745\",\n- \"user_properties\": 
{\n- \"ip\": \"10.0.2.2\",\n- \"referer\": null,\n- \"user_agent\": \"curl/7.37.1\"\n- },\n- \"created_at\": \"2015-01-18T20:26:40.312621\",\n- \"event_properties\": {\n- \"source_id\": \"rijksmuseum\",\n- \"object_id\": \"73baee365e56903b90eb0aba279735211df2daee\"\n- }\n-}\n\\ No newline at end of file\ndiff --git a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/search_1.json b/tests/ocd_frontend/test_data/ori_test_usage_logging_index/search_1.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/search_1.json\n+++ /dev/null\n@@ -1,76 +0,0 @@\n-{\n- \"processed_at\": \"2015-01-18T18:52:20.799698\",\n- \"user_properties\": {\n- \"ip\": \"10.0.2.2\",\n- \"referer\": null,\n- \"user_agent\": \"curl/7.37.1\"\n- },\n- \"created_at\": \"2015-01-18T18:52:20.796455\",\n- \"event_properties\": {\n- \"source_id\": null,\n- \"query\": {\n- \"sort\": \"_score\",\n- \"include_fields\": [],\n- \"n_from\": 0,\n- \"facets\": {},\n- \"n_size\": 10,\n- \"filters\": [],\n- \"query\": \"de\",\n- \"order\": \"desc\"\n- },\n- \"hits\": [\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.8734272,\n- \"object_id\": \"ac6e75cfc5c5b20c1ac9b79f283c67e8c2f0b24f\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.6956471,\n- \"object_id\": \"17807adadb9c2c449cc4837bb26c1c20f8ed89bf\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.6906533,\n- \"object_id\": \"233fe7b59bb29534ff2ce58e6f7ac20d255ba9f7\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.1869588,\n- \"object_id\": \"77547d252ba8e4cd2f413f499f6ac9f1aa8e7fef\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.95091164,\n- \"object_id\": \"3fc39c98b3296c145d7da2bd96e99464c12ce55b\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.85171604,\n- \"object_id\": \"7e6e1a49e96fa141efb34e0980571151f7b144b2\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.778896,\n- \"object_id\": \"86699c235a0c950cb8e0ba908fc8dd8faf715b58\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.76340187,\n- \"object_id\": \"b7a8609bdbe0a60b5b1eb6b3b7e708b06435d217\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.73292685,\n- \"object_id\": \"327465a9f71878f30db527d8a9672a1821668de5\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.72893363,\n- \"object_id\": \"73baee365e56903b90eb0aba279735211df2daee\"\n- }\n- ],\n- \"n_total_hits\": 95,\n- \"query_time_ms\": 4\n- }\n-}\n\\ No newline at end of file\ndiff --git a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/search_2.json b/tests/ocd_frontend/test_data/ori_test_usage_logging_index/search_2.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/search_2.json\n+++ /dev/null\n@@ -1,76 +0,0 @@\n-{\n- \"processed_at\": \"2015-01-18T18:52:22.135165\",\n- \"user_properties\": {\n- \"ip\": \"10.0.2.2\",\n- \"referer\": null,\n- \"user_agent\": \"curl/7.37.1\"\n- },\n- \"created_at\": \"2015-01-18T18:52:22.132652\",\n- \"event_properties\": {\n- \"source_id\": null,\n- \"query\": {\n- \"sort\": \"_score\",\n- \"include_fields\": [],\n- \"n_from\": 0,\n- \"facets\": {},\n- \"n_size\": 10,\n- \"filters\": [],\n- \"query\": \"de\",\n- \"order\": \"desc\"\n- },\n- \"hits\": [\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.8734272,\n- \"object_id\": \"ac6e75cfc5c5b20c1ac9b79f283c67e8c2f0b24f\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.6956471,\n- \"object_id\": 
\"17807adadb9c2c449cc4837bb26c1c20f8ed89bf\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.6906533,\n- \"object_id\": \"233fe7b59bb29534ff2ce58e6f7ac20d255ba9f7\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.1869588,\n- \"object_id\": \"77547d252ba8e4cd2f413f499f6ac9f1aa8e7fef\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.95091164,\n- \"object_id\": \"3fc39c98b3296c145d7da2bd96e99464c12ce55b\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.85171604,\n- \"object_id\": \"7e6e1a49e96fa141efb34e0980571151f7b144b2\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.778896,\n- \"object_id\": \"86699c235a0c950cb8e0ba908fc8dd8faf715b58\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.76340187,\n- \"object_id\": \"b7a8609bdbe0a60b5b1eb6b3b7e708b06435d217\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.73292685,\n- \"object_id\": \"327465a9f71878f30db527d8a9672a1821668de5\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.72893363,\n- \"object_id\": \"73baee365e56903b90eb0aba279735211df2daee\"\n- }\n- ],\n- \"n_total_hits\": 95,\n- \"query_time_ms\": 2\n- }\n-}\n\\ No newline at end of file\ndiff --git a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/search_3.json b/tests/ocd_frontend/test_data/ori_test_usage_logging_index/search_3.json\ndeleted file mode 100644\n--- a/tests/ocd_frontend/test_data/ori_test_usage_logging_index/search_3.json\n+++ /dev/null\n@@ -1,76 +0,0 @@\n-{\n- \"processed_at\": \"2015-01-18T18:52:23.376672\",\n- \"user_properties\": {\n- \"ip\": \"10.0.2.2\",\n- \"referer\": null,\n- \"user_agent\": \"curl/7.37.1\"\n- },\n- \"created_at\": \"2015-01-18T18:52:23.373598\",\n- \"event_properties\": {\n- \"source_id\": null,\n- \"query\": {\n- \"sort\": \"_score\",\n- \"include_fields\": [],\n- \"n_from\": 0,\n- \"facets\": {},\n- \"n_size\": 10,\n- \"filters\": [],\n- \"query\": \"de\",\n- \"order\": \"desc\"\n- },\n- \"hits\": [\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.8734272,\n- \"object_id\": \"ac6e75cfc5c5b20c1ac9b79f283c67e8c2f0b24f\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.6956471,\n- \"object_id\": \"17807adadb9c2c449cc4837bb26c1c20f8ed89bf\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.6906533,\n- \"object_id\": \"233fe7b59bb29534ff2ce58e6f7ac20d255ba9f7\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 1.1869588,\n- \"object_id\": \"77547d252ba8e4cd2f413f499f6ac9f1aa8e7fef\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.95091164,\n- \"object_id\": \"3fc39c98b3296c145d7da2bd96e99464c12ce55b\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.85171604,\n- \"object_id\": \"7e6e1a49e96fa141efb34e0980571151f7b144b2\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.778896,\n- \"object_id\": \"86699c235a0c950cb8e0ba908fc8dd8faf715b58\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.76340187,\n- \"object_id\": \"b7a8609bdbe0a60b5b1eb6b3b7e708b06435d217\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.73292685,\n- \"object_id\": \"327465a9f71878f30db527d8a9672a1821668de5\"\n- },\n- {\n- \"source_id\": \"rijksmuseum\",\n- \"score\": 0.72893363,\n- \"object_id\": \"73baee365e56903b90eb0aba279735211df2daee\"\n- }\n- ],\n- \"n_total_hits\": 95,\n- \"query_time_ms\": 3\n- }\n-}\n\\ No newline at end of file\n", "problem_statement": "", "hints_text": "", "created_at": "2019-08-09T09:00:47Z"}