#!/usr/bin/env python3

import csv
import json
import logging
import pdb
import re
import sys

from glob import glob
from hashlib import md5
from multiprocessing import Pool, cpu_count
from os import path, unlink
from pathlib import Path
from uuid import UUID, uuid4, uuid5

from rdflib.term import URIRef, Literal, BNode

from lakesuperior import env
env.setup()

from lakesuperior.api import admin as admin_api
from lakesuperior.api import resource as rsrc_api
from lakesuperior.dictionaries.namespaces import ns_collection
from lakesuperior.model.rdf.graph import Graph as LSGraph

sys.path.append(path.dirname(path.dirname(__file__)))

from grayspread.namespaces import nsc

from migrate.image import image_to_ptiff

DATA_DIR = path.join(path.dirname(path.dirname(__file__)), 'data')
PTIFF_DIR = f'{DATA_DIR}/documents/StillImage/'

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

nsc.update(ns_collection)


def get_uuid(type_name, uid):
    """Return a deterministic repository UID for a legacy record.

    The MD5 of the type name seeds a namespace UUID; the record ID is then
    hashed into it with UUIDv5, so the same (type, id) pair always maps to
    the same repository path.

    :param str type_name: Resource type label, e.g. ``'Document'``.
    :param uid: Legacy numeric or string identifier.

    :rtype: str
    :return: ``'/<uuid5>'`` path fragment.
    """
    ns = UUID(md5(type_name.encode()).hexdigest())
    return f'/{uuid5(ns, str(uid))}'


def iter_csv(type_name):
    """Yield each row of ``<DATA_DIR>/<type_name>.csv`` as a dict.

    Column names are taken from the CSV header row (``csv.DictReader``).

    :param str type_name: Base name of the CSV file, without extension.

    :rtype: Iterator[dict]
    """
    # The previous try/except around the yield could only catch exceptions
    # thrown *into* the generator (which never happens here) and dropped
    # into pdb — debugging leftovers, removed.
    with open(f'{DATA_DIR}/{type_name}.csv', newline='') as fh:
        yield from csv.DictReader(fh)

def ingest(uid, triples):
    """Create or replace a repository resource inside a write transaction.

    :param str uid: Repository UID (path) of the resource.
    :param set triples: Set of RDF triples describing the resource.

    :raises Exception: Re-raises whatever ``create_or_replace`` raises,
        after logging it.
    """
    with env.app_globals.rdf_store.txn_ctx(True):
        try:
            rsrc_api.create_or_replace(uid, graph=triples)
        except Exception:
            # A pdb.set_trace() here would hang unattended (or pooled)
            # migration runs; log with traceback and propagate instead.
            logger.exception(f'Error ingesting resource {uid}.')
            raise


def cleanup():
    """Wipe and re-initialize both repository store layouts."""
    for layout in (env.app_globals.rdfly, env.app_globals.nonrdfly):
        layout.bootstrap()


def cleanup_ptiff():
    """Delete every previously generated file in the pyramidal TIFF folder."""
    for fname in glob(f'{PTIFF_DIR}/*'):
        unlink(fname)

# # # BEGIN INGESTION # # #

# # # TYPES # # #

def ingest_types():
    """Ingest vocabulary type terms from ``types.csv``.

    Unlike other resources, types get a human-readable path
    (``/type/<id>``) because they are part of a controlled vocabulary.
    """
    for row in iter_csv('types'):
        subj = URIRef('')
        triples = {
            (subj, nsc['rdf'].type, nsc['lii'].Type),
            (subj, nsc['owl'].sameAs, nsc['lii'][row['id']]),
            (subj, nsc['skos'].prefLabel, Literal(row['label'], 'en-US')),
            (subj, nsc['skos'].scopeNote,
                Literal(row['description'], 'en-US')),
        }
        ingest(f'/type/{row["id"]}', triples)


# # # DOCUMENTS # # #

def convert_images():
    """Convert every raw source image to a pyramidal TIFF.

    Walks ``<DATA_DIR>/raw/`` recursively; for each file the destination
    name is derived by swapping the extension for ``.tif`` and the result
    is written under ``PTIFF_DIR``. Files whose destination already exists
    are skipped, so an interrupted conversion can be resumed cheaply.
    """
    src_dir = f'{DATA_DIR}/raw/'

    for fpath in Path(src_dir).rglob('*.*'):
        if not fpath.is_file():
            continue
        dest_fname = re.sub(
                r'\.[a-zA-Z0-9]+$', '.tif', path.basename(fpath))
        destf = PTIFF_DIR + dest_fname
        if path.isfile(destf):
            # Fixed typo in log message ('xeisting') and name the file.
            logger.info(f'Skipping existing file {destf}.')
            continue
        with open(fpath, 'rb') as fh:
            logger.info(f'Saving converted image to {destf}')
            image_to_ptiff(
                    fh, convert=True, auto_flatten=True, out=destf)


def _ingest_doc(row):
    """Ingest a single document row: metadata graph plus binary payload.

    Defined at module level because ``multiprocessing.Pool`` must pickle
    the mapped callable — a function nested inside ``ingest_docs`` cannot
    be pickled and makes ``pool.map`` raise ``PicklingError``.

    Rows whose ``status`` column is not ``1`` are skipped.

    :param dict row: One CSV row from ``documents.csv``.
    """
    if int(row['status']) != 1:
        return

    uid = get_uuid('Document', row['id'])
    fpath = f'{DATA_DIR}/documents/{row["type"]}/{row["fname"]}'
    with open(fpath, 'rb') as fh:
        triples = {
            (URIRef(''), nsc['lii'].legacyUid, Literal(
                    row['id'], datatype=nsc['xsd'].integer)),
            (URIRef(''), nsc['rdf'].type, nsc['lii'].Document),
            (URIRef(''), nsc['rdf'].type, nsc['dctype'][row['type']]),
            (URIRef(''), nsc['skos'].prefLabel, Literal(
                    row['fname'], 'en-US')),
            (URIRef(''), nsc['lii'].localUid, Literal(row['local_id'])),
        }

        with env.app_globals.rdf_store.txn_ctx(True):
            logger.info(f'Ingesting {row["fname"]}')
            rsrc_api.create_or_replace(
                    uid, graph=triples, stream=fh,
                    mimetype=row['mimetype'],
                    disposition=f'attachment; filename={row["fname"]}')


def ingest_docs():
    """Ingest all rows of ``documents.csv`` in parallel.

    The work is mixed I/O- and CPU-bound, hence a pool of twice the CPU
    count.
    """
    with Pool(processes=cpu_count() * 2) as pool:
        pool.map(_ingest_doc, iter_csv('documents'))
        pool.close()
        pool.join()


# # # AGGREGATIONS # # #

def ingest_aggregations():
    """Ingest ORE aggregations and their ordered proxies.

    Each row of ``aggregations.csv`` yields one ``ore:Aggregation`` plus
    one ``ore:Proxy`` per aggregated document; the proxies are chained
    into a singly-linked list via ``nav:first`` / ``nav:next``.

    The aggregation UID is derived deterministically with
    :func:`get_uuid` so that other resources (e.g. ideas' ``related_media``
    column) can reference aggregations by legacy ID.

    NOTE: an earlier leftover duplicate of this loop, which re-ingested
    every aggregation under a random ``uuid4`` UID on each run, has been
    removed.
    """
    for row in iter_csv('aggregations'):
        # Aggregation resource.
        a_uid = get_uuid('Aggregation', row['id'])
        a_trp = {
            (URIRef(''), nsc['rdf'].type, nsc['ore'].Aggregation),
            (URIRef(''), nsc['skos'].prefLabel, Literal(row['label'])),
        }

        # Aggregated resources & proxies.
        _ids = json.loads(row['aggregates'])
        proxies = []
        for i, _id in enumerate(_ids):
            ar_uid = get_uuid('Document', _id)
            p_uid = a_uid + ar_uid
            p_trp = {
                (URIRef(''), nsc['rdf'].type, nsc['ore'].Proxy),
                (URIRef(''), nsc['ore'].proxyIn, nsc['fcres'][a_uid]),
                (URIRef(''), nsc['ore'].proxyFor, nsc['fcres'][ar_uid]),
            }

            a_trp.add((URIRef(''), nsc['ore'].aggregates, nsc['fcres'][ar_uid]))

            # Insert in reverse order to maintain referential integrity:
            # each proxy is ingested after the one it points to.
            proxies.insert(0, (p_uid, p_trp))

            # Singly-linked list: after the insert above, the previous
            # proxy sits at index 1 and gets a nav:next to the current one.
            if i == 0:
                a_trp.add((URIRef(''), nsc['nav']['first'], nsc['fcres'][p_uid]))
            else:
                proxies[1][1].add(
                        (URIRef(''), nsc['nav']['next'], nsc['fcres'][p_uid]))

        for p_uid, p_trp in proxies:
            ingest(p_uid, p_trp)

        ingest(a_uid, a_trp)


# # # PLACES # # #

def ingest_places():
    """Ingest place resources from ``places.csv``.

    Each place is typed as both ``lii:Place`` and ``dcterms:Location`` and
    linked to its Getty TGN URI via ``owl:sameAs``.
    """
    for row in iter_csv('places'):
        uid = get_uuid('Place', row['id'])
        triples = {
            (URIRef(''), nsc['lii'].legacyUid, Literal(row['id'], datatype=nsc['xsd'].integer)),
            (URIRef(''), nsc['skos'].prefLabel, Literal(row['name'], 'en-US')),
            (URIRef(''), nsc['owl'].sameAs, URIRef(row['tgn_uri'])),
            (URIRef(''), nsc['rdf'].type, nsc['lii'].Place),
            (URIRef(''), nsc['rdf'].type, nsc['dcterms'].Location),
        }
        if row['description']:
            # Fixed: the description is free text, so it must be a Literal
            # with a language tag — URIRef's second positional argument is
            # a base IRI, not a language. All sibling ingest functions use
            # Literal here.
            triples.add(
                (URIRef(''), nsc['skos'].scopeNote, Literal(row['description'], 'en-US'))
            )

        if row['icon']:
            _uid = get_uuid('Document', row['icon'])
            triples.add((URIRef(''), nsc['lii'].hasPreferredRepresentation, nsc['fcres'][_uid]))

        ingest(uid, triples)


# # # AGENTS # # #

def ingest_agents():
    """Ingest agent resources from ``agents.csv``.

    Agents are typed as both ``lii:Agent`` and ``dcterms:Agent``; a ULAN
    URI, description and icon are attached when present.
    """
    for row in iter_csv('agents'):
        uid = get_uuid('Agent', row['id'])
        subj = URIRef('')
        triples = {
            (subj, nsc['lii'].legacyUid,
                Literal(row['id'], datatype=nsc['xsd'].integer)),
            (subj, nsc['skos'].prefLabel, Literal(row['name'], 'en-US')),
            (subj, nsc['rdf'].type, nsc['lii'].Agent),
            (subj, nsc['rdf'].type, nsc['dcterms'].Agent),
        }
        if row['description']:
            triples.add((subj, nsc['skos'].scopeNote,
                Literal(row['description'], 'en-US')))
        if row['ulan_uri']:
            triples.add((subj, nsc['owl'].sameAs, URIRef(row['ulan_uri'])))
        if row['icon']:
            icon_uid = get_uuid('Document', row['icon'])
            triples.add((subj, nsc['lii'].hasPreferredRepresentation,
                nsc['fcres'][icon_uid]))

        ingest(uid, triples)


# # # MATERIALS # # #

def ingest_materials():
    """Ingest material resources from ``materials.csv``."""
    for row in iter_csv('materials'):
        uid = get_uuid('Material', row['id'])
        subj = URIRef('')
        triples = {
            (subj, nsc['lii'].legacyUid,
                Literal(row['id'], datatype=nsc['xsd'].integer)),
            (subj, nsc['skos'].prefLabel, Literal(row['name'], 'en-US')),
            (subj, nsc['rdf'].type, nsc['lii'].Material),
        }
        if row['description']:
            triples.add((subj, nsc['skos'].scopeNote,
                Literal(row['description'], 'en-US')))
        if row['icon']:
            icon_uid = get_uuid('Document', row['icon'])
            triples.add((subj, nsc['lii'].hasPreferredRepresentation,
                nsc['fcres'][icon_uid]))

        ingest(uid, triples)


# # # ARTIFACTS # # #

def ingest_artifacts():
    """Ingest artifact resources from ``artifacts.csv``.

    Besides the base description, an artifact may carry materials,
    measurements (as blank nodes), an inscription, representations, a
    temporal coverage and an icon.
    """
    for row in iter_csv('artifacts'):
        uid = get_uuid('Artifact', row['id'])
        triples = {
            (URIRef(''), nsc['lii'].legacyUid, Literal(row['id'], datatype=nsc['xsd'].integer)),
            (URIRef(''), nsc['skos'].prefLabel, Literal(row['title_eng'], 'en-US')),
            (URIRef(''), nsc['rdf'].type, nsc['lii'].Artifact),
        }
        if row['description_eng']:
            triples.add(
                (URIRef(''), nsc['skos'].scopeNote, Literal(row['description_eng'], 'en-US'))
            )

        if row['material']:
            _ids = json.loads(row['material'])
            for _id in _ids:
                _uid = get_uuid('Material', _id)
                triples.add((URIRef(''), nsc['lii'].isMadeOf, nsc['fcres'][_uid]))

        if row['dimensions']:
            logger.info(f'Dimension string: {row["dimensions"]}')
            dims = json.loads(row['dimensions'])
            for dname, ddata in dims.items():
                # One blank node per measurement, carrying unit, label and
                # whichever of w/h/d/di values are present.
                dnode = BNode(str(uuid4()))
                unit = ddata.get('unit', 'cm')
                triples |= {
                    (dnode, nsc['rdf']['type'], nsc['lii']['Measurement']),
                    # Fixed: the unit (e.g. 'cm') and the measurement label
                    # are strings, not xsd:integer values.
                    (dnode, nsc['lii'].unit, Literal(unit)),
                    (dnode, nsc['skos'].prefLabel, Literal(dname, 'en-US')),
                }
                if 'w' in ddata:
                    triples.add((
                        dnode, nsc['lii'].width,
                        Literal(ddata['w'], datatype=nsc['xsd'].integer))
                    )
                if 'h' in ddata:
                    triples.add((
                        dnode, nsc['lii'].height,
                        Literal(ddata['h'], datatype=nsc['xsd'].integer))
                    )
                if 'd' in ddata:
                    triples.add((
                        dnode, nsc['lii'].depth,
                        Literal(ddata['d'], datatype=nsc['xsd'].integer))
                    )
                if 'di' in ddata:
                    triples.add((
                        dnode, nsc['lii'].diameter,
                        Literal(ddata['di'], datatype=nsc['xsd'].integer))
                    )
                triples.add((URIRef(''), nsc['lii'].measurement, dnode))

        if row['inscription']:
            triples.add(
                (URIRef(''), nsc['lii'].inscription, Literal(row['inscription']))
            )

        if row['represented_by']:
            _ids = json.loads(row['represented_by'])
            for _id in _ids:
                _uid = get_uuid('Document', _id)
                triples.add((URIRef(''), nsc['lii'].hasRepresentation, nsc['fcres'][_uid]))

        if row['year']:
            triples.add(
                (URIRef(''), nsc['dcterms'].temporal, Literal(row['year']))
            )

        if row['icon']:
            _uid = get_uuid('Document', row['icon'])
            triples.add((URIRef(''), nsc['lii'].hasPreferredRepresentation, nsc['fcres'][_uid]))

        ingest(uid, triples)


# # # ACTIONS # # #

def ingest_actions():
    """Ingest action resources from ``actions.csv``.

    The JSON-array reference columns all follow the same pattern, so they
    are processed from a (column, type, predicate) table.
    """
    ref_specs = (
        ('spatial', 'Place', nsc['dcterms'].spatial),
        ('produced', 'Artifact', nsc['lii'].produced),
        ('contributor', 'Agent', nsc['dcterms'].contributor),
        ('represented_by', 'Document', nsc['lii'].hasRepresentation),
        ('employed', 'Artifact', nsc['lii'].employed),
    )
    for row in iter_csv('actions'):
        uid = get_uuid('Action', row['id'])
        subj = URIRef('')
        triples = {
            (subj, nsc['lii'].legacyUid,
                Literal(row['id'], datatype=nsc['xsd'].integer)),
            (subj, nsc['skos'].prefLabel, Literal(row['title_eng'], 'en-US')),
            (subj, nsc['rdf'].type, nsc['lii'].Action),
        }
        if row['description_eng']:
            triples.add((subj, nsc['skos'].scopeNote,
                Literal(row['description_eng'], 'en-US')))
        if row['temporal']:
            triples.add((subj, nsc['dcterms'].temporal,
                Literal(row['temporal'])))

        for col, type_name, pred in ref_specs:
            if row[col]:
                for _id in json.loads(row[col]):
                    triples.add(
                            (subj, pred, nsc['fcres'][get_uuid(type_name, _id)]))

        if row['icon']:
            triples.add((subj, nsc['lii'].hasPreferredRepresentation,
                nsc['fcres'][get_uuid('Document', row['icon'])]))

        ingest(uid, triples)

# # # IDEAS # # #

def ingest_ideas():
    """Ingest idea resources from ``ideas.csv``.

    JSON-array reference columns are handled from a
    (column, type, predicate) table.
    """
    ref_specs = (
        ('engendered', 'Action', nsc['lii'].engendered),
        ('produced', 'Artifact', nsc['lii'].produced),
        ('related_media', 'Aggregation', nsc['lii'].hasRelatedMedium),
    )
    for row in iter_csv('ideas'):
        uid = get_uuid('Idea', row['id'])
        logger.info(f'UID: {uid}')
        subj = URIRef('')
        triples = {
            (subj, nsc['lii'].legacyUid,
                Literal(row['id'], datatype=nsc['xsd'].integer)),
            (subj, nsc['skos'].prefLabel, Literal(row['title_eng'], 'en-US')),
            (subj, nsc['rdf'].type, nsc['lii'].Idea),
        }
        if row['content_eng']:
            triples.add((subj, nsc['skos'].scopeNote,
                Literal(row['content_eng'], 'en-US')))

        for col, type_name, pred in ref_specs:
            if row[col]:
                for _id in json.loads(row[col]):
                    triples.add(
                            (subj, pred, nsc['fcres'][get_uuid(type_name, _id)]))

        if row['year']:
            triples.add((subj, nsc['dcterms'].temporal, Literal(row['year'])))
        if row['icon']:
            triples.add((subj, nsc['lii'].hasPreferredRepresentation,
                nsc['fcres'][get_uuid('Document', row['icon'])]))

        ingest(uid, triples)


def main():
    """Run the full migration pipeline: clean up derivatives and the repo,
    convert images, then ingest every resource type in dependency order."""
    cleanup_ptiff()
    convert_images()
    cleanup()
    ingest_types()
    ingest_docs()
    ingest_aggregations()
    ingest_places()
    ingest_agents()
    ingest_materials()
    ingest_artifacts()
    ingest_actions()
    ingest_ideas()


if __name__ == '__main__':
    main()
