import sys
import os
import traceback
import logging
import argparse
import json
import re
from collections import defaultdict
import numpy as np
from tqdm import tqdm
import random
import utils

# Default cap for the number of schema cell values; overridden in __main__ by
# the third value of the "-m/--max-len" CLI option.
# NOTE(review): preprocess() hard-codes a sample of at most 3 rows and never
# reads this global — confirm whether it is still meant to be used.
g_max_values = 60


def preprocess(lst_data, dct_cells):
    """Serialize each example's DB schema (and sampled rows) for model training.

    Args:
        lst_data (list[dict]): dataset entries; each must carry "question",
            "query" and "db_id" keys.
        dct_cells (dict): db_id -> {"tables": {key: {"table_name", "header",
            "type", "cell"}}} — shape inferred from usage here; confirm
            against utils.load_dataset.

    Returns:
        list[dict]: one record per input entry:
        {
            "question": "XXX",
            "query": "SQL",
            "db_id": "",
            "schema": "create table city (\n    City_ID number primary key,'
                                     '\n    Official_Name text,\n    Status text,\n    Area_km_2 number,'
                                     '\n    Population number,\n    Census_Ranking text\n)\ncreate table farm (\n    '
                                     'Farm_ID number primary key,\n    Year number,\n    Total_Horses number,'
                                     '\n    Working_Horses number,\n    Total_Cattle number,\n    Oxen number,'
                                     '\n    Bulls number,\n    Cows number,\n    Pigs number,\n    Sheep_and_Goats '
                                     'number\n)\ncreate table farm_competition (\n    Competition_ID number primary '
                                     'key,\n    Year number,\n    Theme text,\n    Host_city_ID number references '
                                     'city(City_ID),\n    Hosts text\n)\ncreate table competition_record (\n    '
                                     'Competition_ID number primary key references farm_competition(Competition_ID),'
                                     '\n    Farm_ID number references farm(Farm_ID),\n    Rank number\n)",
            "schema_values": "<schema text plus up to 3 sampled insert-into lines per table>"
        }
    """
    out_list = []
    for entry in tqdm(lst_data):
        db_schema = dct_cells[entry["db_id"]]
        _schema = ""
        _schema_v = ""
        for vv in db_schema['tables'].values():
            # Join columns with ",\n" so the LAST column carries no trailing
            # comma — the original appended "," after every column, which
            # contradicted the documented output format (and is invalid DDL).
            cols = ",\n".join(
                f"{col} {_type}" for col, _type in zip(vv['header'], vv['type'])
            )
            table_ddl = f"create table {vv['table_name']} (\n{cols}\n)\n"
            _schema += table_ddl
            _schema_v += table_ddl
            # Sample up to 3 example rows per table for the values-augmented
            # schema. NOTE: random.sample makes the output non-deterministic
            # unless the caller seeds random.
            top_k = min(3, len(vv['cell']))
            for _cell in random.sample(vv['cell'], top_k):
                # BUG FIX: the original had a stray "f" inside the braces
                # (`values ( f{', '.join(_cell)} )`), which emitted a literal
                # "f" before the first value in every generated insert line.
                _schema_v += f"insert into {vv['table_name']} values ( {', '.join(_cell)} );\n"

        out_list.append({
            "question": entry["question"],
            "query": entry["query"],
            "db_id": entry["db_id"],
            "schema": _schema,
            "schema_values": _schema_v
        })

    return out_list


if __name__ == '__main__':
    # CLI entry point: load the dataset + cell values, serialize every
    # example's schema, and dump the result as a JSON list.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-d', '--data-path', required=True, help='dataset')
    # NOTE(review): --flag is parsed but currently unused (the is_train branch
    # was disabled); kept for CLI compatibility.
    arg_parser.add_argument('-f', '--flag', default='false', help='train data or not')
    arg_parser.add_argument('-t', '--table-path', required=True, help='table dataset')
    arg_parser.add_argument('-c', '--cell-path', required=True, help='cell dataset')
    arg_parser.add_argument('-m', '--max-len', required=False, nargs=3, type=int, default=(12, 40, g_max_values),
                            help='max len of table and column. like "-m 20 60 100"')
    arg_parser.add_argument('-o', '--output', required=True, help='output data')
    args = arg_parser.parse_args()

    logging.basicConfig(level=logging.DEBUG,
                        format='%(levelname)s: %(asctime)s %(filename)s'
                               ' [%(funcName)s:%(lineno)d][%(process)d] %(message)s',
                        datefmt='%m-%d %H:%M:%S',
                        filename=None,
                        filemode='a')

    # Override the module-level default with the CLI value.
    g_max_values = args.max_len[2]

    # Load the dataset; args.max_len[0] is the table max-len limit.
    lst_data, tables, dct_cells = utils.load_dataset(args.data_path, args.table_path, args.cell_path, args.max_len[0])

    process_result = preprocess(lst_data, dct_cells)
    # BUG FIX: open the output with an explicit UTF-8 encoding. With
    # ensure_ascii=False json.dump writes raw non-ASCII characters, which
    # raises UnicodeEncodeError (or mojibakes the file) on platforms whose
    # locale default encoding is not UTF-8.
    with open(args.output, 'w', encoding='utf-8') as ofs:
        json.dump(process_result, ofs, indent=2, ensure_ascii=False)
