#!/usr/bin/python3
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-

import os
import re
import sys
import glob
import gzip
import time
import shutil
import psycopg
import subprocess
import sqlalchemy
import sqlalchemy.pool
import lxml.html
import urllib.error
import urllib.request
import imdb.parser.s3.utils
import mirrors.plugin


class Main:
    """Mirror-plugin worker: downloads the IMDb dataset dumps and imports
    them into a PostgreSQL database, replacing the live database atomically."""

    def __init__(self, sock):
        # strftime/strptime date format (not used by the code visible here)
        self.DATE_FMT = "%Y-%m-%d"

        # run-mode "init" requests a full (re-)import; anything else is incremental
        self.bInit = (mirrors.plugin.params["run-mode"] == "init")
        # PostgreSQL is reached via its unix socket; the port number is the
        # suffix of the socket file name (".s.PGSQL.<port>")
        self.dbSockDir = os.path.dirname(mirrors.plugin.params["storage-postgresql"]["unix-socket-file"])
        self.dbPort = int(mirrors.plugin.params["storage-postgresql"]["unix-socket-file"].split(".")[-1])
        self.dbFilesDir = mirrors.plugin.params["storage-postgresql"]["files-directory"]
        self.sock = sock
        self.p = InfoPrinter()
        self.progress = 0

    def run(self):
        """Do a full update in init mode; otherwise attempt an incremental
        update and fall back to a full update if it fails."""
        if self.bInit:
            # full update
            self._fullUpdate()
        else:
            # incremental update
            try:
                self._incrementalUpdate()
            except Exception as e:
                # incremental update is best-effort: report the failure
                # instead of swallowing it silently, then do a full re-import
                self.p.print("incremental update failed (%s), falling back to full update" % (e))
                self._fullUpdate()

    def _fullUpdate(self):
        self.__downloadSnapshots()                          # download dump files
        self.__importDatabase()                             # import database
        Util.forceClearDir(self.dbFilesDir)                 # delete downloaded files

    def _incrementalUpdate(self):
        # FIXME: not implemented yet; run() catches this and falls back to a full update
        raise Exception("abc")

    def __downloadSnapshots(self):
        """Scrape the dataset index page and download every *.tsv.gz dump."""
        url = "https://datasets.imdbws.com"

        self.p.print("Downloading from \"%s\"..." % (url))
        self.p.incIndent()
        try:
            with urllib.request.urlopen(url, timeout=60) as resp:
                root = lxml.html.parse(resp)
            for aTag in root.xpath(".//a"):
                # BUGFIX: pattern used to end in "gz*" (i.e. "g" + zero or more
                # "z"), which also matched names like "x.tsv.g"; anchor ".tsv.gz".
                # aTag.text may be None for anchors without text content.
                m = re.fullmatch(r".*\.tsv\.gz", aTag.text or "")
                if m is not None:
                    self.p.print("Downloading \"%s\"..." % (aTag.get("href")))
                    self.__downloadFile(aTag.get("href"), os.path.join(self.dbFilesDir, aTag.text))
        finally:
            self.p.decIndent()

    def __importDatabase(self):
        """Import all downloaded *.tsv.gz dumps into a temporary database,
        then rename it over the live one so readers never see a half-import."""
        dbName = "imdb"
        tmpDbName = "_imdb"
        superDbName = "postgres"

        self.p.print("Importing data...")
        self.p.incIndent()
        try:
            # (re)create the temporary database; the interpolated identifiers
            # are trusted local constants, not external input
            self.p.print("Creating database...")
            with psycopg.connect(host=self.dbSockDir, port=self.dbPort, dbname=superDbName, autocommit=True) as db:
                with db.cursor() as c:
                    c.execute("DROP DATABASE IF EXISTS %s WITH (FORCE);" % (tmpDbName))
                    c.execute("CREATE DATABASE %s;" % (tmpDbName))

            # import data
            # from https://github.com/cinemagoer/cinemagoer/tree/master/bin/s32cinemagoer.py
            # NOTE(review): the URL selects the psycopg2cffi driver while this
            # file imports psycopg (v3) -- confirm that driver is installed
            dbUrl = "postgresql+psycopg2cffi://@/%s?host=%s&port=%d" % (tmpDbName, self.dbSockDir, self.dbPort)
            engine = sqlalchemy.create_engine(dbUrl, echo=False, poolclass=sqlalchemy.pool.NullPool)
            with engine.connect() as c:
                # NOTE(review): schema "imdb" is never created (the CREATE
                # SCHEMA step was removed); confirm the schema exists in the
                # freshly created database or table.create() will fail
                metadata = sqlalchemy.MetaData(schema="imdb")
                inspector = sqlalchemy.inspect(c)
                for fullfn in glob.glob(os.path.join(self.dbFilesDir, '*.tsv.gz')):
                    fn = os.path.basename(fullfn)
                    self.p.print("Importing %s..." % (fn))
                    with gzip.GzipFile(fullfn, 'rb') as gz_file:
                        # the first line of every dump is the tab-separated header row
                        headers = gz_file.readline().decode('utf-8').strip().split('\t')

                        table = self.__buildTable(fn, headers, metadata)

                        # per-column value converters from cinemagoer's transform map
                        data_transf = {}
                        for column, conf in imdb.parser.s3.utils.DB_TRANSFORM.get(table.name, {}).items():
                            if 'transform' in conf:
                                data_transf[column] = conf['transform']

                        # drop any stale table, then create it afresh
                        if inspector.has_table(table.name):
                            table.drop(bind=c)
                        table.create(bind=c)

                        # insert rows in batches of 10000
                        data = []
                        for line in gz_file:
                            s_line = line.decode('utf-8').strip().split('\t')
                            if len(s_line) != len(headers):
                                continue                            # skip malformed rows
                            # r'\N' is the dumps' notation for NULL
                            info = dict(zip(headers, [x if x != r'\N' else None for x in s_line]))
                            for key, tranf in data_transf.items():
                                if key not in info:
                                    continue
                                info[key] = tranf(info[key])
                            # fill in the soundex helper columns cinemagoer expects
                            if table.name == 'title_basics':
                                info['t_soundex'] = imdb.parser.s3.utils.title_soundex(info['primaryTitle'])
                            elif table.name == 'title_akas':
                                info['t_soundex'] = imdb.parser.s3.utils.title_soundex(info['title'])
                            elif table.name == 'name_basics':
                                info['ns_soundex'], info['sn_soundex'], info['s_soundex'] = imdb.parser.s3.utils.name_soundexes(info['primaryName'])
                            data.append(info)
                            if len(data) >= 10000:
                                c.execute(table.insert(), data)
                                data = []
                        if len(data) > 0:
                            c.execute(table.insert(), data)
                            data = []

            # rename to formal database
            with psycopg.connect(host=self.dbSockDir, port=self.dbPort, dbname=superDbName, autocommit=True) as db:
                with db.cursor() as c:
                    c.execute("DROP DATABASE IF EXISTS %s WITH (FORCE);" % (dbName))
                    c.execute("ALTER DATABASE %s RENAME TO %s;" % (tmpDbName, dbName))
        finally:
            self.p.decIndent()

    @staticmethod
    def __buildTable(fn, headers, metadata):
        """Build the SQLAlchemy Table for dump file *fn* from its header row.

        Column types/indexes come from cinemagoer's DB_TRANSFORM map; columns
        not listed there default to UnicodeText. The map may also declare
        extra columns (e.g. soundex columns) absent from the dump itself.
        """
        table_name = fn.replace('.tsv.gz', '').replace('.', '_')
        table_map = imdb.parser.s3.utils.DB_TRANSFORM.get(table_name) or {}
        all_headers = set(headers)
        all_headers.update(table_map.keys())
        columns = []
        for header in all_headers:
            col_info = table_map.get(header) or {}
            col_type = col_info.get('type') or sqlalchemy.UnicodeText
            if 'length' in col_info and col_type is sqlalchemy.String:
                col_type = sqlalchemy.String(length=col_info['length'])
            columns.append(sqlalchemy.Column(name=header, type_=col_type, index=col_info.get('index', False)))
        return sqlalchemy.Table(table_name, metadata, *columns)

    def __downloadFile(self, url, localFile):
        """Download *url* to *localFile* with wget, retrying every 60s.

        wget exit code 8 means the server issued an error response (e.g. 404)
        and is not retried; any other failure is logged and retried forever.
        """
        while True:
            try:
                subprocess.check_call(["wget", "--quiet", "--no-check-certificate", "-O", localFile, url])      # always re-download
                break
            except subprocess.CalledProcessError as e:
                if e.returncode == 8:       # not found
                    raise
                self.p.print("download failed and try again: %s" % str(e))
                time.sleep(60)


class InfoPrinter:
    """Console logger that prefixes each message with tab indentation."""

    def __init__(self):
        # current nesting depth; one tab is emitted per level
        self.indent = 0

    def incIndent(self):
        """Go one nesting level deeper."""
        self.indent += 1

    def decIndent(self):
        """Return to the previous nesting level; must not drop below zero."""
        assert self.indent > 0
        self.indent -= 1

    def print(self, s):
        """Print *s* indented by the current nesting depth."""
        print("\t" * self.indent + s)


class Util:
    """Filesystem helpers for unconditional deletion."""

    @staticmethod
    def forceDelete(path):
        """Delete *path* whatever it is; missing paths are silently ignored.

        A symlink to a directory is removed as a link (its target is kept),
        which is why real directories are detected with isdir AND not-islink.
        Device nodes and other special files are unlinked like regular files.
        """
        if os.path.isdir(path) and not os.path.islink(path):
            shutil.rmtree(path)
        elif os.path.lexists(path):
            os.remove(path)
        # else: nothing at *path*, nothing to do

    @staticmethod
    def forceClearDir(path):
        """Delete every entry inside directory *path*, leaving it empty."""
        for entry in os.listdir(path):
            Util.forceDelete(os.path.join(path, entry))


###############################################################################

if __name__ == "__main__":
    # Run the mirror update under the plugin API socket: report success as
    # 100% progress, forward any failure to the server, then re-raise so the
    # process still exits non-zero.
    with mirrors.plugin.ApiClient() as sock:
        try:
            Main(sock).run()
            sock.progress_changed(100)
        except Exception:
            sock.error_occured(sys.exc_info())      # NOTE: "occured" is the API's spelling
            raise

# https://github.com/dlwhittenbury/MySQL_IMDb_Project
# https://developers.themoviedb.org/3/getting-started/daily-file-exports
# https://github.com/IMDb-API/IMDbApiLib
