import argparse
import csv
import os.path
import time
from datetime import datetime
from typing import Union

import utils


def column_match(rule: str, key_word: str, s: str) -> bool:
    """Return True when string *s* satisfies *rule* with respect to *key_word*.

    Supported rules: ``eq``/``neq`` (equality), ``contains``/``ncontains``
    (substring), ``empty``/``nempty`` (length), ``any`` (always matches).
    An unknown rule never matches.
    """
    checks = {
        "neq": lambda: s != key_word,
        "eq": lambda: s == key_word,
        "ncontains": lambda: key_word not in s,
        "contains": lambda: key_word in s,
        "nempty": lambda: len(s) > 0,
        "empty": lambda: len(s) == 0,
        "any": lambda: True,
    }
    check = checks.get(rule)
    return check() if check is not None else False


def column_action(s: str, action: str, action_val: str) -> str:
    """Apply *action* to *s* using *action_val* and return the result.

    Actions: ``replace`` (substitute the whole value), ``append`` (suffix),
    ``prepend`` (prefix). An unknown action leaves *s* unchanged.
    """
    if action == "replace":
        return action_val
    if action == "append":
        return s + action_val
    if action == "prepend":
        return action_val + s
    return s


def row_converter(mp: dict, converters: list[list[str]]) -> dict:
    """Apply every converter rule to the row *mp* in place and return it.

    Each converter is ``[header, rule, key_word, action, action_val]``:
    when the column value matches the rule, the action is applied to that
    column. A missing column is treated as the empty string.
    """
    for header, rule, key_word, action, action_val in converters:
        value = mp.get(header)
        if value is None:
            value = ''
        if column_match(rule, key_word, value):
            mp[header] = column_action(value, action, action_val)
    return mp


def row_filter(mp: dict, filters: list[list[str]]) -> bool:
    """Return True when the row *mp* satisfies every filter condition.

    Each filter is ``[header, rule, key_word]``; all filters must match
    (logical AND). A missing column is treated as the empty string.
    """
    def _value(header: str) -> str:
        v = mp.get(header)
        return '' if v is None else v

    return all(
        column_match(rule, key_word, _value(header))
        for header, rule, key_word in filters
    )


def process(idx: int, config: dict, headers: list[str], values: list[str]) -> list | None:
    """Transform one CSV row into an output row, or return None when filtered out.

    :param idx:     1-based sequence number emitted as the first output column
    :param config:  preprocess configuration; keys used here: ``keep_headers``,
                    ``date_format_headers``, ``filters``, ``converters``
    :param headers: column names of the input CSV
    :param values:  cell values of the current row
    :return: ``[idx, col1, col2, ...]`` restricted to ``keep_headers`` with all
             spaces removed, or ``None`` when the row fails the filters
    """
    # Columns to keep in the output
    keep_headers = config.get('keep_headers', [])
    # Columns whose values must be normalized to YYYY-MM-DD
    date_format_headers = config.get('date_format_headers', [])
    # Row-level filter conditions and per-column converter rules
    filters: list[list[str]] = config.get('filters', [])
    converters: list[list[str]] = config.get('converters', [])
    mp: dict = utils.str_list_2_map(headers, values)
    if not row_filter(mp, filters):
        return None
    # Drop every column that is not explicitly kept
    for k in list(mp.keys()):
        if k not in keep_headers:
            mp.pop(k)
    # Normalize date columns; accepts both '/' and '-' as separators
    for h in date_format_headers:
        s = mp.get(h)
        if s:
            mp[h] = datetime.strptime(s.replace('/', '-'), '%Y-%m-%d').strftime('%Y-%m-%d')
    mp = row_converter(mp, converters)
    # Assemble the output row: sequence number first, then the kept columns
    # (missing columns become '' and embedded spaces are stripped).
    res: list = [idx]
    for k in keep_headers:
        res.append(mp.get(k, '').replace(' ', ''))
    return res


def pre_process(input_csv_dir: str, output_csv: str, pre_process_json: str):
    """
    Preprocess every ``.csv`` file in a directory into one merged output CSV.

    Rows are filtered/converted per the JSON config and written in batches.

    :param input_csv_dir:       directory holding the raw input CSV files
    :param output_csv:          path of the merged output CSV file
    :param pre_process_json:    path of the JSON preprocess configuration
    :return: None
    """
    config: dict = utils.load_json(pre_process_json)
    if not os.path.exists(input_csv_dir):
        utils.eprint(f"目录:{input_csv_dir} 不存在")
        return
    if not os.path.isdir(input_csv_dir):
        utils.eprint(f"目录:{input_csv_dir} 不是目录")
        return

    idx = 0
    datas = []
    # Remove a stale output file so each run starts clean.
    if os.path.exists(output_csv):
        os.remove(output_csv)
        utils.eprint(f"{output_csv} 文件删除")
        # NOTE(review): presumably waits for the OS to release the file handle
        # before reopening the same path — confirm whether the pause is needed.
        time.sleep(3)
    with open(output_csv, mode='w', newline='', encoding=config.get('output-encoding')) as outfile:
        writer = csv.writer(outfile)
        # Optionally emit a header row: sequence-number column + kept columns.
        if config.get('write_headers', 0) == 1:
            hs = ["序号"]
            utils.add_list(hs, list(config.get('keep_headers', [])))
            writer.writerow(hs)
        for filename in os.listdir(input_csv_dir):
            if filename.endswith('.csv'):
                # Build the full path of the input file
                csv_path = os.path.join(input_csv_dir, filename)
                with open(csv_path, mode='r', newline='', encoding=config.get('input-encoding')) as infile:
                    reader = csv.reader(infile)
                    headers = next(reader)  # first row of each file is its header
                    for values in reader:
                        l = process(idx + 1, config, headers, values)
                        # Skip rows rejected by the configured filters.
                        if l is None or len(l) <= 0:
                            continue
                        datas.append(l)
                        # Flush to disk in batches to bound memory usage.
                        if len(datas) >= config.get('batch', 50):
                            writer.writerows(datas)
                            datas = []
                            utils.eprint(f"write, idx:{idx + 1}, time:{utils.now_str()}")
                        # idx counts only the rows that were kept.
                        idx += 1
        # Flush the final partial batch.
        if len(datas) > 0:
            writer.writerows(datas)
            utils.eprint(f"write, idx:{idx}, time:{utils.now_str()}")
    utils.eprint("处理完毕")


if __name__ == '__main__':
    # CLI entry point: parse arguments, prepare the output directory, run.
    parser = argparse.ArgumentParser(description='数据预处理命令解析器.')
    parser.add_argument('-input_csv_dir', type=str, help='原始数据目录', default='../input_datas')
    parser.add_argument('-output_csv_dir', type=str, help='输出数据局目录', default='../scripts/data')
    parser.add_argument('-output_csv', type=str, help='输出文件名', default='data.csv')
    parser.add_argument('-pre_process_json', type=str, help='预处理配置文件', default='../config/preprocess.json')
    args = parser.parse_args()
    utils.mkdir(args.output_csv_dir)
    out_path: str = f'{args.output_csv_dir}{os.sep}{args.output_csv}'
    pre_process(args.input_csv_dir, out_path, args.pre_process_json)
