#!/usr/bin/python3
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-

import os
import re
import sys
import glob
import gzip
import time
import shutil
import psycopg
import subprocess
import lxml.html
import urllib.error
import urllib.request
import mirrors.plugin


class Main:
    """Mirror plugin that downloads omdb data snapshot dumps and (eventually)
    imports them into a PostgreSQL database.

    NOTE(review): the database-import step is currently disabled (its call in
    _fullUpdate is commented out) and its implementation references names that
    are never defined in this file -- see __importDatabase.
    """

    def __init__(self, sock):
        # True when the plugin runs in "init" mode (first full sync).
        self.bInit = (mirrors.plugin.params["run-mode"] == "init")
        # Directory containing the PostgreSQL unix-domain socket file.
        self.dbSockDir = os.path.dirname(mirrors.plugin.params["storage-postgresql"]["unix-socket-file"])
        # Port number taken from the socket file name suffix; assumes the
        # ".s.PGSQL.<port>" naming convention -- TODO confirm against the daemon.
        self.dbPort = mirrors.plugin.params["storage-postgresql"]["unix-socket-file"].split(".")[-1]
        # Directory used to stage the downloaded dump files.
        self.dbFilesDir = mirrors.plugin.params["storage-postgresql"]["files-directory"]
        # API socket for reporting progress/errors back to the mirrors daemon.
        self.sock = sock
        # Indentation-aware console logger.
        self.p = InfoPrinter()
        # Progress percentage placeholder (currently never updated).
        self.progress = 0

    def run(self):
        """Entry point: full update in init mode; incremental updates are not
        implemented yet, so non-init runs are a no-op."""
        if self.bInit:
            # full update
            self._fullUpdate()
        else:
            # incremental update
            # NOTE(review): intentionally a no-op for now; the intended
            # try-incremental-then-fall-back-to-full logic is kept below.
            pass
            # try:
            #     self._incrementalUpdate()
            # except Exception:
            #     self._fullUpdate()

    def _fullUpdate(self):
        """Full refresh: download the snapshot dumps.

        NOTE(review): the import and cleanup steps are disabled, so this
        currently only downloads the files into self.dbFilesDir.
        """
        self.__downloadSnapshots()                          # download dump files
        # self.__importDatabase()                             # import database
        # Util.forceClearDir(self.dbFilesDir)                 # delete downloaded files

    def _incrementalUpdate(self):
        # FIXME: not implemented; always raises so callers fall back to a
        # full update (see the commented-out logic in run()).
        raise Exception("abc")

    def __downloadSnapshots(self):
        """Scrape the omdb download help page and fetch every linked
        ``*.csv.bz2`` snapshot file into self.dbFilesDir."""
        url = "https://www.omdb.org/en/us/content/Help:DataDownload"

        self.p.print("Downloading from \"%s\"..." % (url))
        self.p.incIndent()
        try:
            resp = urllib.request.urlopen(url, timeout=60)
            root = lxml.html.parse(resp)
            # Every list-item link whose text looks like a csv.bz2 dump file.
            for aTag in root.xpath(".//ul/li/a"):
                m = re.fullmatch(r".*\.csv\.bz2", aTag.text)
                if m is not None:
                    self.p.print("Downloading \"%s\"..." % (aTag.get("href")))
                    # NOTE(review): assumes href is an absolute URL -- TODO confirm.
                    self.__downloadFile(aTag.get("href"), os.path.join(self.dbFilesDir, aTag.text))
        finally:
            self.p.decIndent()

    def __importDatabase(self):
        """Import the downloaded dumps into PostgreSQL via a temporary
        database that is atomically renamed over the live one on success.

        NOTE(review): this method is dead code (its only call site in
        _fullUpdate is commented out) and is NOT runnable as written:
        * cursor ``c`` is used after its ``with`` block has closed it (the
          "SET search_path" statement below);
        * ``sqlalchemy``, ``db_uri``, ``DB_TRANSFORM``, ``title_soundex`` and
          ``name_soundexes`` are never defined or imported in this file; the
          middle section is adapted from cinemagoer's s32cinemagoer.py, which
          processes IMDb ``*.tsv.gz`` files, not the omdb ``*.csv.bz2`` files
          downloaded by __downloadSnapshots;
        * database names are interpolated with ``%`` rather than quoted as
          SQL identifiers (safe only because they are hard-coded constants).
        """
        dbName = "omdb"
        tmpDbName = "_omdb"
        superDbName = "postgres"

        self.p.print("Importing data...")
        self.p.incIndent()
        try:
            # create temporary database
            self.p.print("Creating database...")
            with psycopg.connect(host=self.dbSockDir, port=self.dbPort, dbname=superDbName, autocommit=True) as db:
                with db.cursor() as c:
                    c.execute("DROP DATABASE IF EXISTS %s WITH (FORCE);" % (tmpDbName))
                    c.execute("CREATE DATABASE %s;" % (tmpDbName))

            # create schema
            with psycopg.connect(host=self.dbSockDir, port=self.dbPort, dbname=tmpDbName, autocommit=True) as db:
                with db.cursor() as c:
                    # create omdb schemas
                    self.p.print("Creating schemas...")
                    c.execute("CREATE SCHEMA omdb;")

            # import data
            # NOTE(review): BUG -- ``c`` and its connection were closed by the
            # ``with`` blocks above; this statement would fail if executed.
            c.execute("SET search_path = omdb;")                     # make tables be in omdb schema

            # from https://github.com/cinemagoer/cinemagoer/tree/master/bin/s32cinemagoer.py
            # NOTE(review): ``sqlalchemy`` is not imported and ``db_uri`` is
            # undefined; this whole section is unadapted copy-paste.
            with sqlalchemy.create_engine(db_uri, encoding='utf-8', echo=False) as engine:
                metadata = sqlalchemy.MetaData()
                metadata.bind = engine
                with engine.connect() as connection:
                    for fullfn in glob.glob(os.path.join(self.dbFilesDir, '*.tsv.gz')):
                        with gzip.GzipFile(fullfn, 'rb') as gz_file:
                            # First line of each dump is the tab-separated header row.
                            headers = gz_file.readline().decode('utf-8').strip().split('\t')

                            # build table by headers
                            table = None
                            if True:
                                table_name = os.path.basename(fullfn).replace('.tsv.gz', '').replace('.', '_')
                                table_map = DB_TRANSFORM.get(table_name) or {}
                                columns = []
                                all_headers = set(headers)
                                all_headers.update(table_map.keys())
                                for header in all_headers:
                                    col_info = table_map.get(header) or {}
                                    col_type = col_info.get('type') or sqlalchemy.UnicodeText
                                    if 'length' in col_info and col_type is sqlalchemy.String:
                                        col_type = sqlalchemy.String(length=col_info['length'])
                                    col_args = {
                                        'name': header,
                                        'type_': col_type,
                                        'index': col_info.get('index', False)
                                    }
                                    col_obj = sqlalchemy.Column(**col_args)
                                    columns.append(col_obj)
                                table = sqlalchemy.Table(table_name, metadata, *columns)

                            # drop table
                            # NOTE(review): bare except deliberately ignores a
                            # failing drop (table may not exist yet).
                            try:
                                table.drop()
                            except:
                                pass

                            insert = table.insert()
                            metadata.create_all(tables=[table])

                            # Per-column value converters declared in DB_TRANSFORM.
                            data_transf = {}
                            for column, conf in DB_TRANSFORM.get(table.name, {}).items():
                                if 'transform' in conf:
                                    data_transf[column] = conf['transform']

                            # Insert rows in batches of BLOCK_SIZE to bound memory use.
                            BLOCK_SIZE = 10000
                            data = []
                            for line in gz_file:
                                s_line = line.decode('utf-8').strip().split('\t')
                                if len(s_line) != len(headers):
                                    continue
                                # \N is the dump format's NULL marker.
                                info = dict(zip(headers, [x if x != r'\N' else None for x in s_line]))
                                for key, tranf in data_transf.items():
                                    if key not in info:
                                        continue
                                    info[key] = tranf(info[key])
                                if table.name == 'title_basics':
                                    info['t_soundex'] = title_soundex(info['primaryTitle'])
                                elif table.name == 'title_akas':
                                    info['t_soundex'] = title_soundex(info['title'])
                                elif table.name == 'name_basics':
                                    info['ns_soundex'], info['sn_soundex'], info['s_soundex'] = name_soundexes(info['primaryName'])
                                data.append(info)
                                if len(data) >= BLOCK_SIZE:
                                    connection.execute(insert, data)
                                    data = []
                            if len(data) > 0:
                                connection.execute(insert, data)
                                data = []

            # rename to formal database
            with psycopg.connect(host=self.dbSockDir, port=self.dbPort, dbname=superDbName, autocommit=True) as db:
                with db.cursor() as c:
                    c.execute("DROP DATABASE IF EXISTS %s WITH (FORCE);" % (dbName))
                    c.execute("ALTER DATABASE %s RENAME TO %s;" % (tmpDbName, dbName))
        finally:
            self.p.decIndent()

    def __downloadFile(self, url, localFile):
        """Download *url* to *localFile* via wget, retrying every 60 seconds
        on transient failures; re-raises immediately on a server error."""
        while True:
            try:
                subprocess.check_call(["wget", "--quiet", "--no-check-certificate", "-O", localFile, url])      # always re-download
                break
            except subprocess.CalledProcessError as e:
                if e.returncode == 8:       # wget exit status 8: server issued an error response (e.g. 404)
                    raise
                self.p.print("download failed and try again: %s" % str(e))
                time.sleep(60)

class InfoPrinter:
    """Console printer that prefixes each line with one tab per nesting level."""

    def __init__(self):
        # Current nesting depth, in tab stops.
        self.indent = 0

    def incIndent(self):
        """Go one level deeper."""
        self.indent += 1

    def decIndent(self):
        """Come back up one level; depth must never drop below zero."""
        assert self.indent > 0
        self.indent -= 1

    def print(self, s):
        """Write *s* to stdout, indented to the current depth."""
        print("\t" * self.indent + s)


class Util:
    """Filesystem helpers for unconditional cleanup."""

    @staticmethod
    def forceDelete(path):
        """Remove *path* whatever it is -- regular file, symlink (the link
        itself, never its target), directory tree, or special file such as a
        device node. Silently does nothing if *path* does not exist."""
        if os.path.isdir(path) and not os.path.islink(path):
            # A real directory (not a symlink to one): remove recursively.
            shutil.rmtree(path)
        elif os.path.lexists(path):
            # Anything else that exists, including broken symlinks.
            os.remove(path)

    @staticmethod
    def forceClearDir(path):
        """Empty the directory *path*, keeping the directory itself."""
        for entry in os.listdir(path):
            Util.forceDelete(os.path.join(path, entry))


###############################################################################

if __name__ == "__main__":
    # Run under the mirrors-plugin API client and report the outcome back to
    # the daemon over the socket: 100% on success, the exception info on failure.
    with mirrors.plugin.ApiClient() as sock:
        try:
            Main(sock).run()
            sock.progress_changed(100)
        except Exception:
            # "error_occured" [sic] is the external API's method name; do not
            # "fix" the spelling here. Re-raise so the process exits non-zero.
            sock.error_occured(sys.exc_info())
            raise

# https://github.com/dlwhittenbury/MySQL_omdb_Project
# Each movie is available as XML via URLs of the form http://www.omdb.org/movie/[movie_id]/embed_data
# https://github.com/omdb-API/omdbApiLib
