import os

from pyspark.sql.functions import lower

from Modules.mod_spark import Spark
from Modules.mod_psql import PSQLDB
from Modules.mod_logging import Logging
from Task_2.Task_2_2.Modules.create_mirror import Mirrors
from Task_1.Task_1_1.modules.module_insert_data import ImportFromDB


def get_list_files(path_delta) -> dict:
    """Map each immediate subdirectory (an "id") of *path_delta* to its CSV file.

    Returns {subdir_name: {'name_table': <file name without '.csv'>,
                           'path': <subdir_path>/<file name>}}.
    If a subdirectory holds several files, the last one listed wins, so there
    is exactly one entry per id (this preserves the original contract).

    The original implementation walked the whole tree once per subdirectory
    and paired two independent walk results by index, which mis-mapped (or
    raised IndexError) as soon as a subdirectory was nested. Here only the
    first level is walked, once per directory.
    """
    # First walk tuple describes path_delta itself: its immediate subdirs.
    top, sub_ids, _ = next(os.walk(path_delta))
    path_files: dict = {}
    for sub_id in sub_ids:
        sub_path = os.path.join(top, sub_id)
        # First tuple of this walk holds the files directly inside sub_path.
        _, _, file_names = next(os.walk(sub_path))
        for name in file_names:
            # Last file wins — one entry per id, as before.
            path_files[sub_id] = {'name_table': name.replace('.csv', ''),
                                  'path': f'{sub_path}/{name}'}
    return path_files


def update_mirror(name_table: str, data: dict):
    """Update one row of mirror.<name_table> with the non-key values in *data*.

    Primary-key columns (as reported by ImportFromDB) are excluded from the
    SET list and used in the WHERE clause so only the matching row changes.

    Fixes over the original:
    - `if i < len(headers)` was always true, so the `else` branch was dead
      and the SET list always ended with a trailing comma;
    - the parenthesised `set ( ... )` form is invalid PostgreSQL syntax;
    - values were interpolated unquoted (inconsistent with insert_mirror);
    - the fetched primary keys were never used, so every row was updated.
    """
    con = PSQLDB().connection()
    primary_keys: list = ImportFromDB('mirror').get_keys()[f"mirror.{name_table}"]

    def sql_literal(value) -> str:
        # Render None as SQL null, everything else as a quoted literal
        # (same convention as insert_mirror).
        return 'null' if value is None else f"'{value}'"

    headers: list = [c for c in data.keys() if c.lower() not in primary_keys]
    set_db: str = ', '.join(f"{c.lower()} = {sql_literal(data.get(c))}" for c in headers)
    where_db: str = ' and '.join(f"{c.lower()} = {sql_literal(data.get(c))}"
                                 for c in data.keys() if c.lower() in primary_keys)
    # NOTE(review): values are string-interpolated, not parameterized — SQL
    # injection risk if the CSV data is untrusted; PSQLDB.insert would need a
    # params argument to fix this properly.
    sql: str = f"""update mirror.{name_table} set {set_db}"""
    if where_db:
        sql += f""" where {where_db}"""
    PSQLDB().insert(connect=con, sql=sql)
    PSQLDB().exit_db(con)


def insert_mirror(name_table: str, data: dict):
    """Insert *data* as a single row into mirror.<name_table>.

    Keys become the column list; None values are rendered as SQL null,
    everything else as a quoted literal.
    """
    connection = PSQLDB().connection()
    columns: list = list(data.keys())
    rendered: list = ['null' if value is None else f"'{value}'" for value in data.values()]
    sql: str = f"""insert into mirror.{name_table} ({', '.join(columns)}) values ({', '.join(rendered)})"""
    PSQLDB().insert(connect=connection, sql=sql)
    PSQLDB().exit_db(connection)


class Delta:
    def __init__(self):
        self.session = Spark().create_session('DELTA')

    def get_headers_delta(self, path: str) -> dict:
        header: dict = {}
        read = self.session.read.options(header=True, delimiter=';').csv(path)
        data: list = [row.asDict() for row in read.collect()]
        types: list = [i[1] for i in read.dtypes]
        headers: list = [i[0] for i in read.dtypes]
        for i in range(len(read.dtypes)):
            header[headers[i]] = types[i]
        return header

    def get_data_delta(self, path) -> list:
        read = self.session.read.options(header=True, delimiter=';').csv(path)
        data: list = [row.asDict() for row in read.collect()]
        return data

    def insert_unique_values(self, path: str, option: str = 'add_csv'):
        files: dict = get_list_files(path)
        for ids, val in files.items():
            match option:
                case 'all_mirror':
                    Mirrors(r'Task_2/Task_2_2/Query/create_mirros.sql').create_mirror()
                    postgre = self.session.read.format('jdbc'). \
                        option("url", f"jdbc:postgresql://{PSQLDB().HOST}:5432/{PSQLDB().DATABASE}"). \
                        option("dbtable", f"mirror.{val.get('name_table')}"). \
                        option("user", "postgres"). \
                        option("password", PSQLDB().PASSWORD). \
                        option("driver", "org.postgresql.Driver"). \
                        load()
                    read = self.session.read.options(header=True, delimiter=';').csv(val.get('path'))
                    lower_column: dict = {}
                    for h in read.dtypes:
                        lower_column[h[0].lower()] = h[0]
                    low_read = read.withColumns(lower_column)
                    low_postgre = postgre.withColumns(lower_column)
                    target_df = low_read.subtract(low_postgre)
                    if not [row.asDict() for row in target_df.collect()]:
                        for i in [d.asDict() for d in low_read.collect()]:
                            insert_mirror(val.get('name_table'), i)
                            Logging().set_log(PSQLDB().connection(), f"{ids}; table: {val.get('name_table')}; val: {i}",
                                              'inserted', 'MIRROR')
                    else:
                        for i in [d.asDict() for d in target_df.collect()]:
                            update_mirror(val.get('name_table'), i)
                            Logging().set_log(PSQLDB().connection(), f"{ids}; table: {val.get('name_table')}; val: {i}",
                                              'update', 'MIRROR')
                    dataframe = self.session.read.format('jdbc'). \
                        option("url", f"jdbc:postgresql://{PSQLDB().HOST}:5432/{PSQLDB().DATABASE}"). \
                        option("dbtable", f"mirror.{val.get('name_table')}"). \
                        option("user", "postgres"). \
                        option("password", PSQLDB().PASSWORD). \
                        option("driver", "org.postgresql.Driver"). \
                        load()
                    dataframe.toPandas().to_csv(
                        path_or_buf=f"{path}/mirror_{val.get('name_table')}.csv", sep=';')
                    Logging().write_log_to_file(f"inserted: {ids}; table: {val.get('name_table')}", 'info')
                case 'add_postgesql':
                    Mirrors(r'Task_2/Task_2_2/Query/create_mirros.sql').create_mirror()
                    postgre = self.session.read.format('jdbc'). \
                        option("url", f"jdbc:postgresql://{PSQLDB().HOST}:5432/{PSQLDB().DATABASE}"). \
                        option("dbtable", f"mirror.{val.get('name_table')}"). \
                        option("user", "postgres"). \
                        option("password", PSQLDB().PASSWORD). \
                        option("driver", "org.postgresql.Driver"). \
                        load()
                    read = self.session.read.options(header=True, delimiter=';').csv(val.get('path'))
                    lower_column: dict = {}
                    for h in read.dtypes:
                        lower_column[h[0].lower()] = h[0]
                    low_read = read.withColumns(lower_column)
                    low_postgre = postgre.withColumns(lower_column)
                    target_df = low_read.subtract(low_postgre)

                    if not [row.asDict() for row in target_df.collect()]:
                        for i in [d.asDict() for d in low_read.collect()]:
                            insert_mirror(val.get('name_table'), i)
                            Logging().set_log(PSQLDB().connection(), f"{ids}; table: {val.get('name_table')}; val: {i}",
                                              'inserted', 'MIRROR')
                    else:
                        for i in [d.asDict() for d in target_df.collect()]:
                            target_df.toPandas().to_csv(
                                path_or_buf=f"{val.get('path')}/intermediate_{val.get('name_table')}.csv", sep=';')
                            update_mirror(val.get('name_table'), i)
                            Logging().set_log(PSQLDB().connection(), f"{ids}; table: {val.get('name_table')}; val: {i}",
                                              'update', 'MIRROR')
                case 'add_csv':
                    name_mirror: str = f"mirror_{val.get('name_table')}.csv"
                    if name_mirror not in list(os.walk(path))[0][2]:
                        mirror_df = self.session.read.options(header=True, delimiter=';').csv(val.get('path'))
                        mirror_df. \
                            toPandas(). \
                            to_csv(path_or_buf=f"{path}/{name_mirror}", sep=";", index=False)
                        Logging().write_log_to_file(f"inserted: {ids}; table: {val.get('name_table')}", 'info')
                    else:
                        mirror_df = self.session.read.options(header=True, delimiter=';').csv(f"{path}/{name_mirror}")
                        read = self.session.read.options(header=True, delimiter=';').csv(val.get('path'))
                        target_df = read.subtract(mirror_df)
                        presence = mirror_df.intersect(read)
                        if [row.asDict() for row in target_df.collect()] == [] and [row.asDict() for row in presence.collect()] == []:
                            mirror_df.\
                                union(read).\
                                toPandas().\
                                to_csv(path_or_buf=f"{path}/{name_mirror}", sep=";", index=False)
                            Logging().write_log_to_file(f"inserted: {ids}; table: {val.get('name_table')}; val: "
                                                        f"{[row.asDict() for row in read.collect()]}", 'info')
                        if [row.asDict() for row in target_df.collect()] != [] or [row.asDict() for row in presence.collect()] != []:
                            if [row.asDict() for row in presence.collect()] == []:
                                mirror_df. \
                                    union(read). \
                                    toPandas(). \
                                    to_csv(path_or_buf=f"{path}/{name_mirror}", sep=";", index=False)
                                Logging().write_log_to_file(f"inserted: {ids}; table: {val.get('name_table')}; val: "
                                                            f"{[row.asDict() for row in read.collect()]}", 'info')
                            else:
                                presence. \
                                    union(target_df). \
                                    toPandas(). \
                                    to_csv(path_or_buf=f"{path}/{name_mirror}", sep=";", index=False)
                                Logging().write_log_to_file(f"inserted: {ids}; table: {val.get('name_table')}; val: "
                                                            f"{[row.asDict() for row in target_df.collect()]}", 'info')
                case _: ...
