# -*- coding: utf-8 -*-
"""
deploy to neo4j by uploading the neo4j database to an FTP server

Usually we specify following spark configurations:

1. in cluster mode:
spark.master=yarn-cluster

2. make the driver memory and cpu cores larger and configure the proper cpu_limit and mem_limit according to this
spark.driver.memory=4g
spark.driver.cores=2

3. attempt only once; for a shell job like this, there is no need for multiple attempts
spark.yarn.maxAppAttempts=1

And we have some external dependencies:

1. neo4j-import binary zip, it should be specified in neo4j_import_zip config parameter
2. we need hdfs dfs commands to get csv files from the hdfs file system
3. we need awk program to remove duplicate headers
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

import os
import glob
import shutil
import zipfile

from typing import AnyStr, Optional, List, Tuple

from subprocess import call
from ftplib import FTP

from kgpipeline.job import KgJob, JobConfigError
from kgpipeline.sparkutil import HDFS, Path

import logging

logger = logging.getLogger(__name__)


def upload_file(ftp, file_path):
    """Upload a single local file to the current FTP working directory.

    :param ftp: a connected and logged-in ftplib.FTP instance
    :param file_path: local path of the file; only its basename is used remotely
    """
    file_name = os.path.basename(file_path)
    handle = open(file_path, "rb")
    try:
        ftp.storbinary("stor {0}".format(file_name), handle)
    finally:
        handle.close()


def directory_exists(ftp, dir_name):
    """Return True if `dir_name` is a subdirectory of the FTP working directory.

    Parses the output of the FTP LIST command: entries whose line starts
    with "d" are directories, and the last whitespace-separated token of
    each line is the entry name.
    """
    entries = []
    ftp.retrlines("list", entries.append)
    subdirs = set()
    for entry in entries:
        if entry.startswith("d"):
            subdirs.add(entry.split()[-1])
    return dir_name in subdirs


def upload_files(host, port, username, password, directory, destination):
    """
    Upload all neostore* files from a local directory to the FTP server.

    Files are first placed in a `graph.db.tmp` directory under `destination`,
    which is renamed to `graph.db.new` only after every file is uploaded, so
    a consumer never observes a half-uploaded database.

    :param host: FTP server host
    :param port: FTP server port
    :param username: FTP login user
    :param password: FTP login password
    :param directory: local directory containing the neostore* files
    :param destination: target directory on the FTP server
    :raises Exception: re-raises whatever the FTP interaction raised
    """
    ftp = FTP()
    try:
        ftp.connect(host, port)
        ftp.login(username, password)

        ftp.cwd(destination)

        if not directory_exists(ftp, "graph.db.tmp"):
            ftp.mkd("graph.db.tmp")
        ftp.cwd("graph.db.tmp")

        files = glob.glob(os.path.join(directory, "neostore*"))

        for file_path in files:
            logger.info("uploading {} ...".format(file_path))
            upload_file(ftp, file_path)

        ftp.cwd("..")
        ftp.rename("graph.db.tmp", "graph.db.new")
        ftp.quit()
    except Exception as e:
        logger.error("error uploading to ftp: {}".format(e))
        # BUG FIX: the control connection used to leak on failure; close it
        # best-effort, then re-raise with the original traceback (bare
        # `raise` instead of `raise e`)
        try:
            ftp.close()
        except Exception:
            pass
        raise


def call_neo4j_import(neo4j_import_path, neo4j_cpu, neo4j_mem,
                      delimiter=",", link_exists=True):  # type: (AnyStr, int, int, Optional[AnyStr], bool) -> bool
    """
    Run the neo4j-import command to build graph.db from entity.csv
    (and optionally link.csv) in the current working directory.

    :param neo4j_import_path: the directory where neo4j-import command exists
    :param neo4j_cpu: number of CPUs to use
    :param neo4j_mem: number of memory to use (in GB)
    :param delimiter: delimiter to use
    :param link_exists: if there is a relationship file
    :return: True if succeeded
    """
    command = [
        "bash",
        neo4j_import_path,
        "--delimiter=" + delimiter,
        "--multiline-fields=true",
        "--legacy-style-quoting=false",
        "--max-memory={0}g".format(neo4j_mem),
        "--processors={0}".format(neo4j_cpu),
        "--into", "graph.db",
        "--nodes", "entity.csv",
    ]
    if link_exists:
        command += ["--relationships", "link.csv"]

    logger.info("execute: {}".format(" ".join(command)))

    return call(command) == 0


def call_neo4j_index(neo4j_index_path, graph_db_path,
                     indexes):  # type: (AnyStr, AnyStr, List[Tuple[AnyStr,AnyStr]]) -> bool
    """
    call neo4j-index command
    :param neo4j_index_path: the directory where neo4j-index command exists
    :param graph_db_path: where graph database exists
    :param indexes: list of label and property pairs
    :return: True if succeeded
    """
    command = ["bash", neo4j_index_path, graph_db_path]
    # each (label, property) pair becomes two positional arguments
    for label, prop in indexes:
        command.append(label)
        command.append(prop)

    # BUG FIX: the placeholder was missing ("execute: ".format(...)),
    # so the command line was never actually logged
    logger.info("execute: {}".format(" ".join(command)))

    return 0 == call(command)


AWK_PROGRAM = r'NR==1 {header=$0; print} NR>1 && $0 != header'


def get_hdfs_csv(src_path, dest_path):
    """
    Download a (possibly multi-part) CSV from HDFS into one local file,
    removing the duplicate headers introduced by merging the part files.

    :param src_path: source path in HDFS
    :param dest_path: local destination path
    :raises Exception: if the hdfs or awk command exits with non-zero status
    """
    tmp_path = dest_path + ".tmp"
    # BUG FIX: the exit codes used to be silently ignored, so a missing HDFS
    # path surfaced much later as a broken csv; fail fast instead
    if call(["hdfs", "dfs", "-getmerge", src_path, tmp_path]) != 0:
        raise Exception("hdfs dfs -getmerge failed for {0}".format(src_path))
    try:
        with open(tmp_path, "r") as i, open(dest_path, "w") as o:
            if call(["awk", AWK_PROGRAM], stdin=i, stdout=o) != 0:
                raise Exception("awk header de-duplication failed for {0}".format(tmp_path))
    finally:
        # remove the intermediate merge file even when awk fails
        os.remove(tmp_path)


class KgNeo4jJob(KgJob):
    """
    Job that builds a neo4j database from csv files on HDFS and deploys it
    by uploading the resulting graph.db directory to an FTP server.
    """

    def process(self, inputs):
        # ftp connection info
        host = self.config.get("host")
        if not host:
            raise JobConfigError("Input parameter `host` is missing!")

        port = self.config.get("port", 21)

        username = self.config.get("username")
        if not username:
            raise JobConfigError("Input parameter `username` is missing!")

        password = self.config.get("password")
        if not password:
            raise JobConfigError("Input parameter `password` is missing!")

        # neo4j zip package containing the neo4j-import command
        neo4j_import_zip = self.config.get("neo4j_import_zip")
        if not neo4j_import_zip:
            raise JobConfigError("Input parameter `neo4j_import_zip` is missing!")

        # the entity.csv and link.csv path in hdfs
        entity_path = self.config.get("entity_path")
        if not entity_path:
            raise JobConfigError("Input parameter `entity_path` is missing!")

        entity_path = entity_path.replace("$DATE", self.config["date"])

        # link_path is optional: a graph may consist of nodes only
        link_path = self.config.get("link_path")

        if link_path:
            link_path = link_path.replace("$DATE", self.config["date"])

        # the destination folder in ftp server for this graph
        destination = self.config.get("destination")
        if not destination:
            raise JobConfigError("Input parameter `destination` is missing!")

        # cpu and memory limits according to spark.driver.cores and spark.driver.memory
        cpu_limit = self.config.get("cpu_limit", 1)
        mem_limit = self.config.get("mem_limit", 1)
        delimiter = self.config.get("delimiter", ",")

        indexes = self.config.get("indexes", [])

        logger.info("start neo4j deployment job...")

        # current working directory (os.getcwdu: this module targets python 2)
        current_dir = os.getcwdu()
        # BUG FIX: the placeholder was missing ("current directory is: ".format(...)),
        # so the directory was never actually logged
        logger.info("current directory is: {}".format(current_dir))

        # download and unzip neo4j-import
        HDFS.copyToLocalFile(Path(neo4j_import_zip), Path("neo4j-import.zip"))

        with zipfile.ZipFile("neo4j-import.zip") as zip_ref:
            zip_ref.extractall("neo4j-import")

        neo4j_import_command = os.path.join(current_dir, "neo4j-import", "bin", "neo4j-import")
        if not os.path.exists(neo4j_import_command):
            raise Exception("neo4j-import command is not correctly extracted!")

        neo4j_index_command = os.path.join(current_dir, "neo4j-import", "bin", "neo4j-index")
        if not os.path.exists(neo4j_index_command):
            raise Exception("neo4j-index command is not correctly extracted!")

        # download entity.csv and link.csv
        get_hdfs_csv(entity_path, "entity.csv")
        if link_path:
            get_hdfs_csv(link_path, "link.csv")

        logger.info("download complete!")

        logger.info("entity header:")
        call(["head", "-n", "1", "entity.csv"])

        # BUG FIX: only show the link header when link.csv was downloaded;
        # previously `head` ran unconditionally and errored without a link file
        if link_path:
            logger.info("link header:")
            call(["head", "-n", "1", "link.csv"])

        # run neo4j-import to generate graph.db
        if not call_neo4j_import(neo4j_import_command, cpu_limit, mem_limit, delimiter, bool(link_path)):
            raise Exception("Error executing neo4j-import command!")

        # create index if it exists
        if indexes:
            if not call_neo4j_index(neo4j_index_command, "graph.db", indexes):
                raise Exception("Error executing neo4j-index command!")

        # upload graph.db to ftp server
        upload_files(host, port, username, password, "graph.db", destination)

        # clean up
        os.remove("entity.csv")
        # BUG FIX: use the same truthiness check as the download above — an
        # empty-string link_path never created link.csv, so `is not None`
        # would attempt to remove a file that does not exist
        if link_path:
            os.remove("link.csv")
        os.remove("neo4j-import.zip")
        shutil.rmtree("neo4j-import")
        shutil.rmtree("graph.db")

        logger.info("neo4j deployment completed.")

        return []


KgJob.register("neo4j", KgNeo4jJob)
