import hashlib
import os
import re
import numpy as np
import json
import pandas as pd
import argparse


class DataIntersect(object):
    """Filter a CSV table by configurable rules and derive a hashed join key.

    Columns are renamed positionally to ``col_0`` .. ``col_{n-1}`` so that
    filter expressions can reference them by index.  ``run`` filters rows,
    concatenates the configured key columns into a single MD5-hashed
    pseudo-id column (``col_psi``) and returns the selected output columns.
    """

    def __init__(self, csv_path, config):
        """
        :param csv_path: path of the CSV file to load
        :param config: dict holding ``filter_rule``, ``filter_detail``,
            ``keys_col_idx``, ``output_col_idx`` and optionally ``intersect``
        """
        self.pd_data = pd.read_csv(csv_path)
        self.raw_headers = self.pd_data.columns
        # Positional names let rules address columns without knowing headers.
        self.pd_data.columns = [f'col_{i}' for i in range(len(self.raw_headers))]
        self.new_key_name = "col_psi"
        self.config = config

    @staticmethod
    def deal_str(col_idx, mode, val):
        """Build a ``DataFrame.query`` sub-expression for a string column.

        :param col_idx: positional column index (targets ``col_{col_idx}``)
        :param mode: one of "in", "not in", "not null", "null"
        :param val: candidate values, used only by "in" / "not in"
        :return: query expression string
        :raises ValueError: if ``mode`` is not recognized
        """
        if mode == "in":
            expr = f"col_{col_idx}.isin({val})"
        elif mode == "not in":
            expr = f"~col_{col_idx}.isin({val})"
        elif mode == "not null":
            # "not null" for strings means non-empty after stripping spaces.
            expr = f"col_{col_idx}.str.strip().str.len() > 0"
        elif mode == "null":
            expr = f"~(col_{col_idx}.str.strip().str.len() > 0)"
        else:
            raise ValueError(f"invalid mode: [{mode}]")
        return expr

    @staticmethod
    def deal_num(col_idx, mode, val):
        """Build a ``DataFrame.query`` sub-expression for a numeric column.

        :param col_idx: positional column index (targets ``col_{col_idx}``)
        :param mode: "in", "not in", a comparison operator (">", ">=", "<",
            "<=", "=", "!="), "not null", "null", "inf", "not inf",
            "invalid", or "valid"
        :param val: comparison value or list of candidates, depending on mode
        :return: query expression string
        :raises ValueError: if ``mode`` is not recognized
        """
        # Values beyond +/- 2**256 are treated as infinite/overflowed.
        num_inf = 2 ** 256
        if mode == "in":
            expr = f"col_{col_idx}.isin({val})"
        elif mode == "not in":
            expr = f"~col_{col_idx}.isin({val})"
        elif mode in (">", ">=", "<", "<=", "=", "!="):
            expr = f"col_{col_idx} {mode} {val}"
        elif mode == "not null":
            expr = f"~(col_{col_idx}.isnull())"
        elif mode == "null":
            expr = f"col_{col_idx}.isnull()"
        elif mode == "inf":
            expr = f"(col_{col_idx}>{num_inf}) | (col_{col_idx}<{-num_inf})"
        elif mode == "not inf":
            expr = f"~((col_{col_idx}>{num_inf}) | (col_{col_idx}<{-num_inf}))"
        elif mode == "invalid":
            # Invalid means infinite or missing.
            expr = f"(col_{col_idx}>{num_inf}) | (col_{col_idx}<{-num_inf}) | (col_{col_idx}.isnull())"
        elif mode == "valid":
            expr = f"~((col_{col_idx}>{num_inf}) | (col_{col_idx}<{-num_inf}) | (col_{col_idx}.isnull()))"
        else:
            raise ValueError(f"invalid mode: [{mode}]")
        return expr

    @staticmethod
    def check_rule(expr):
        """Validate a boolean rule expression before substitution.

        Checks that every ``|`` / ``&`` has an operand on both sides and
        that parentheses are balanced.

        :param expr: raw rule expression, e.g. ``"(r1|r2)&r3"``
        :return: True if the expression is structurally valid
        """
        paren_only = re.sub('[^()]', '', expr)
        compact = expr.strip().replace("\n", '').replace("\t", '').replace(" ", '')
        for match in re.finditer(r'[|&]', compact):
            idx = match.start()
            # Bug fix: an operator as the LAST character previously slipped
            # past the old "idx == len(...)" test (match.start() can never
            # equal the length) and crashed with IndexError on
            # compact[idx + 1]; compare against len - 1 to reject it instead.
            if idx == 0 or idx == len(compact) - 1:
                return False
            if compact[idx - 1] in ["(", "|", "&"]:
                return False
            if compact[idx + 1] in [")", "|", "&"]:
                return False
        # Balanced parentheses cancel out pair by pair; any residue is bad.
        while "()" in paren_only:
            paren_only = paren_only.replace("()", "")
        return paren_only == ''

    def parse_rules(self):
        """Expand rule names in ``filter_rule`` into a pandas query string.

        Rule names are first replaced by their SHA-256 digests so that a
        rule name which is a substring of another name (or of a generated
        expression) cannot corrupt later replacements.

        :return: a query expression consumable by ``DataFrame.query``
        :raises ValueError: if the rule expression or a rule type is invalid
        """
        rule_expr = self.config["filter_rule"]
        if not self.check_rule(rule_expr):
            raise ValueError("invalid rule expression")
        # Pass 1: swap every referenced rule name for a unique digest.
        for r_name, r in self.config["filter_detail"].items():
            if r_name not in rule_expr:
                continue
            r_name_sha = hashlib.sha256(r_name.encode('utf-8')).hexdigest()
            rule_expr = rule_expr.replace(r_name, f"({r_name_sha})")

        # Pass 2: swap each digest for the concrete column expression.
        for r_name, r in self.config["filter_detail"].items():
            r_name_sha = hashlib.sha256(r_name.encode('utf-8')).hexdigest()
            if r_name_sha not in rule_expr:
                continue

            if r['type'] in ("int", "float"):
                expr = self.deal_num(r['col_idx'], r['mode'], r['val'])
            elif r['type'] == "string":
                expr = self.deal_str(r['col_idx'], r['mode'], r['val'])
            else:
                raise ValueError(f"invalid type: [{r['type']}]")
            rule_expr = rule_expr.replace(r_name_sha, f"({expr})")
        return rule_expr

    def filter(self):
        """Drop every row that does not satisfy the configured filter rule."""
        rule_expr = self.parse_rules()

        self.pd_data = self.pd_data.query(rule_expr)

    def intersect(self, drop_nan=True, drop_duplicates=True, ignore_case=True):
        """Build the hashed join-key column ``col_psi`` from the key columns.

        Key columns listed in ``config["intersect"]`` are first shifted by a
        constant ("add"/"sub") into temporary columns; all key columns are
        then concatenated and MD5-hashed into a single integer id.

        :param drop_nan: drop rows whose key hashed to NaN (all keys missing)
        :param drop_duplicates: keep only the first row per key value
        :param ignore_case: lower-case the concatenated key before hashing
        :raises ValueError: on an unknown intersect mode
        """
        tmp_header = []
        keys_header = []

        for col_idx in self.config["keys_col_idx"]:

            if col_idx not in self.config["intersect"].keys():
                keys_header.append(f"col_{col_idx}")
                continue

            col_intersect = self.config["intersect"][col_idx]
            # Temporary column name derived from the transform spec.
            col_hash = hashlib.md5(str(col_intersect).encode('utf-8')).hexdigest()

            # Lambdas are applied immediately by .apply, so capturing the
            # loop variable col_intersect here is safe (no late binding).
            if col_intersect["mode"] == "add":
                self.pd_data[col_hash] = self.pd_data[f"col_{col_idx}"].apply(
                    lambda x: x + int(col_intersect["val"]))

            elif col_intersect["mode"] == "sub":
                self.pd_data[col_hash] = self.pd_data[f"col_{col_idx}"].apply(
                    lambda x: x - int(col_intersect["val"]))
            else:
                raise ValueError("invalid mode")

            tmp_header.append(col_hash)
            keys_header.append(col_hash)

        def concat_field(row):
            # Numbers go through float() so 1 and 1.0 hash identically.
            val = '_'.join([str(float(row[i])) if type(row[i]) in [int, float] else str(row[i]) for i in keys_header])
            # Every key missing -> NaN so the row can be dropped later.
            if val == "_".join(["nan" for _ in range(len(keys_header))]):
                return np.nan
            if ignore_case:
                hash_val = hashlib.md5(str(val.lower()).encode('utf-8')).hexdigest()
            else:
                hash_val = hashlib.md5(str(val).encode('utf-8')).hexdigest()
            return int(hash_val, 16)

        new_id = self.pd_data.apply(concat_field, axis=1)

        # Drop helper columns and restore the original headers before
        # prepending the generated key column.
        self.pd_data = self.pd_data.drop(tmp_header, axis=1)

        self.pd_data.columns = self.raw_headers

        self.pd_data.insert(0, self.new_key_name, new_id)

        if drop_duplicates:
            self.pd_data = self.pd_data.drop_duplicates(subset=[self.new_key_name], keep='first')

        if drop_nan:
            self.pd_data = self.pd_data.dropna(subset=[self.new_key_name])

    def run(self, drop_nan=True, drop_duplicates=True, ignore_case=True):
        """Execute filtering and key building, then return selected columns.

        :return: DataFrame of ``col_psi`` plus the configured output columns
        """
        if self.config["filter_rule"] is not None:
            self.filter()

        if "intersect" not in self.config.keys():
            self.config["intersect"] = {}
        self.intersect(drop_nan, drop_duplicates, ignore_case)

        return self.export(self.config["output_col_idx"])

    def export(self, col_names):
        """Select the key column plus the requested original columns.

        :param col_names: iterable of positional indices into the raw headers
        :return: DataFrame of ``col_psi`` followed by those columns
        """
        cols = [self.raw_headers[i] for i in col_names]
        psi_data = self.pd_data[[self.new_key_name, *cols]]
        return psi_data


def start_intersect(file_dir, intersect_params, export_dir, drop_nan=True, drop_duplicates=True, ignore_case=True):
    """Run the intersection pipeline over several CSV files and export the join.

    Each input file is filtered and keyed by its own config, its columns are
    prefixed with the table number, and all tables are inner-joined on the
    generated ``col_psi`` key.  The merged result (key renamed to ``id``) is
    written to ``export_dir``.

    :param file_dir: list of input CSV paths
    :param intersect_params: per-table config dicts, aligned with file_dir
    :param export_dir: output CSV path
    :param drop_nan: passed through to DataIntersect.run
    :param drop_duplicates: passed through to DataIntersect.run
    :param ignore_case: passed through to DataIntersect.run
    :return: shape of the exported table
    """
    merged = None
    for table_id, file_path in enumerate(file_dir):
        worker = DataIntersect(file_path, intersect_params[table_id])
        table = worker.run(drop_nan, drop_duplicates, ignore_case)

        # Prefix every column except the join key with the table number.
        table.columns = [c if c == "col_psi" else f'T{table_id}_{c}' for c in table.columns]

        if merged is None:
            merged = table
        else:
            merged = merged.join(table.set_index("col_psi"), on='col_psi',
                                 rsuffix=f'table_{table_id}', how='inner')

    # Rename the key to "id"; any pre-existing "id" column is disambiguated
    # by its position so the final header has no collisions.
    final_cols = []
    for pos, col in enumerate(merged.columns):
        if col == "col_psi":
            final_cols.append("id")
        elif col == "id":
            final_cols.append(f"id_{pos}")
        else:
            final_cols.append(col)
    merged.columns = final_cols

    merged.to_csv(export_dir, index=None)

    return merged.shape


def drop_col(csv_path, limit=None):
    """Strip the 'id' column from a CSV file, rewriting it in place.

    :param csv_path: path of a CSV file containing an 'id' column
    :param limit: if given, keep only the first ``int(limit)`` rows
    :return: shape of the rewritten table
    """
    frame = pd.read_csv(csv_path)
    frame = frame.drop('id', axis=1)
    if limit is not None:
        frame = frame[:int(limit)]
    frame.to_csv(csv_path, index=None)

    return frame.shape


if __name__ == '__main__':
    # CLI entry point: mode 0 runs the full intersection pipeline from a
    # JSON config file; mode 1 strips the generated id column from a CSV.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('file', type=str, help='config path or csv path')
    arg_parser.add_argument("-m", "--mode", help="pick one mode, {0: start_intersect, 1: drop col_psi}",
                            required=True, type=int, choices=[0, 1])
    cli_args = arg_parser.parse_args()

    if not os.path.exists(cli_args.file):
        raise ValueError("File Not Found")

    if cli_args.mode != 0:
        drop_col(cli_args.file)
    else:
        with open(cli_args.file, 'r', encoding='utf8') as fr:
            data_params = json.load(fr)
        start_intersect(data_params["file_dir"], data_params["params"], data_params["export_dir"])
