import os
import sys

# Repository root: two directory levels above this file; used to resolve
# data paths independently of the current working directory.
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import json
import random
import time
from tqdm import tqdm
import sqlite3
import argparse

def dict_factory(cursor, row):
    """sqlite3 row factory mapping each result row to a ``{column: value}`` dict.

    Keys are the column names from ``cursor.description`` (element 0 of each
    7-tuple); values are the corresponding entries of ``row``.
    Intended for use as ``conn.row_factory = dict_factory``.
    """
    # Idiomatic dict comprehension over zip instead of index-based loop.
    return {col[0]: value for col, value in zip(cursor.description, row)}


def main_args():
    """Parse and return the command-line options for prompt construction runs."""
    parser = argparse.ArgumentParser()
    # Data / model selection.
    parser.add_argument('--dataset', default='spider', type=str, help='dataset name')
    parser.add_argument('--gpt', default='gpt-3.5-turbo', type=str, help='GPT model')
    parser.add_argument('--seed', default=42, type=int, help='random seed')
    parser.add_argument('--batch_size', default=64, type=int, help='batch size for preprocessing')
    # Demonstration clustering / selection.
    parser.add_argument('--cluster_method', default='random', type=str,
                        choices=['kmeans', 'agglomerative', 'random'], help='clustering method')
    parser.add_argument('--cluster_num', default=2, type=int, help='number of clusters')
    parser.add_argument('--dynamic_num', default=2, type=int, help='number of dynamic shots')
    parser.add_argument('--encoding', default='question', type=str, choices=['question', 'query'],
                        help='according to question or query encoding')
    # Reasoning strategies (chain/tree of thought, self-reflection).
    parser.add_argument('--cot', action='store_true', help='use chain of thought')
    parser.add_argument('--tot_k', default=3, type=int, help='k for tree of thought')
    parser.add_argument('--tot_b', default=1, type=int, help='b for tree of thought')
    parser.add_argument('--tot_t', default=1.5, type=float, help='temperature for tree of thought')
    parser.add_argument('--reflection', action='store_true', help='use self-reflection')
    parser.add_argument('--ref_shot', action='store_true', help='few-shot for self-reflection')
    parser.add_argument('--oracle', action='store_true', help='given queries in the dev dataset')
    parser.add_argument('--two_phase', action='store_true', help='use two phase method')
    parser.add_argument('--hard_and_extra', action='store_true', help='only test hard and extra hard examples')
    # Schema prompt formatting.
    parser.add_argument('--content', default=3, type=int, help='number of database records')
    parser.add_argument('--api_doc', action='store_true', help='write schema according to api doc')
    parser.add_argument('--pf', default='eoc', type=str, choices=['no', 'eoc', 'eot'],
                        help='format of primary and foreign keys')
    return parser.parse_args()


class PromptMaker:
    """Builds per-database schema prompt strings from a Spider-style tables.json.

    After construction, ``self.db_prompts[db_id]`` is a list of
    ``args.content + 1`` strings: index ``c_num`` is the schema prompt with up
    to ``c_num`` example rows per table (index 0 contains no example rows).
    Schema format is controlled by ``args.api_doc`` (compact "# table(cols)"
    lines vs. CREATE TABLE DDL) and ``args.pf`` (where primary/foreign keys
    are written: 'no', 'eoc' = end of column, 'eot' = end of table).
    """

    def __init__(self, args):
        with open(os.path.join(root, 'data', args.dataset, 'tables.json'), 'r', encoding='utf-8') as file:
            dbs = json.load(file)
        self.db_prompts = {}
        for db in dbs:
            db_id = db['db_id']
            tabs = db['table_names_original']
            # cols is Spider's flat list of [table_index, column_name] pairs.
            cols = db['column_names_original']
            self.db_prompts[db_id] = [''] * (args.content + 1)
            for c_num in range(args.content + 1):
                for i in range(len(tabs)):
                    if args.api_doc:
                        # Compact api-doc style: "# table(col1, col2, ...)".
                        self.db_prompts[db_id][
                            c_num] += f"# {tabs[i]}({', '.join([col[1] for col in cols if col[0] == i])})\n"
                    else:
                        # DDL style: one CREATE TABLE statement per table.
                        self.db_prompts[db_id][c_num] += f'create table {tabs[i]} (\n'
                        for j in range(len(cols)):
                            if cols[j][0] == i:
                                self.db_prompts[db_id][c_num] += f"    {cols[j][1]} {db['column_types'][j]}"
                                if args.pf == 'eoc':
                                    # Keys annotated inline on each column line.
                                    if j in db['primary_keys']:
                                        self.db_prompts[db_id][c_num] += ' primary key'
                                    for fk in db['foreign_keys']:
                                        if fk[0] == j:
                                            self.db_prompts[db_id][
                                                c_num] += f' references {tabs[cols[fk[1]][0]]}({cols[fk[1]][1]})'
                                self.db_prompts[db_id][c_num] += ',\n'
                        if args.pf == 'eot':
                            # Keys as separate clauses after the column list.
                            pks = [cols[pk][1] for pk in db['primary_keys'] if cols[pk][0] == i]
                            if len(pks) > 0:
                                self.db_prompts[db_id][c_num] += f"    primary key ({', '.join(pks)}),\n"
                            for fk in db['foreign_keys']:
                                if cols[fk[0]][0] == i:
                                    self.db_prompts[db_id][
                                        c_num] += f'    foreign key ({cols[fk[0]][1]}) references {tabs[cols[fk[1]][0]]}({cols[fk[1]][1]}),\n'
                        # Strip the trailing ',\n' and close the statement.
                        self.db_prompts[db_id][c_num] = self.db_prompts[db_id][c_num][:-2] + '\n)\n'
                    db_path = os.path.join(root, 'data', args.dataset, 'database', db_id, db_id + '.sqlite')
                    if c_num > 0 and os.path.exists(db_path):
                        # BUGFIX: the connection was previously never closed,
                        # leaking one handle per table per c_num value.
                        # NOTE(review): the table name is interpolated into SQL;
                        # it comes from the local tables.json, presumed trusted.
                        conn = sqlite3.connect(db_path)
                        try:
                            conn.row_factory = dict_factory
                            cursor = conn.cursor()
                            db_contents = cursor.execute(f'SELECT * FROM {tabs[i]} LIMIT {c_num}').fetchall()
                        finally:
                            conn.close()
                        self.db_prompts[db_id][c_num] += '/*\n'
                        self.db_prompts[db_id][
                            c_num] += f"{len(db_contents)} example row{'s' if len(db_contents) > 1 else ''} from table {tabs[i]}:\n"
                        self.db_prompts[db_id][c_num] += '\t'.join([col[1] for col in cols if col[0] == i]) + '\n'
                        for record in db_contents:
                            self.db_prompts[db_id][c_num] += '\t'.join(
                                [str(record[col[1]]) for col in cols if col[0] == i]) + '\n'
                        self.db_prompts[db_id][c_num] += '*/\n'
                if args.api_doc and args.pf != 'no':
                    # api-doc mode lists all keys once, after every table.
                    self.db_prompts[db_id][
                        c_num] += f"# primary keys = [{', '.join([tabs[cols[pk][0]] + '.' + cols[pk][1] for pk in db['primary_keys']])}]\n"
                    self.db_prompts[db_id][
                        c_num] += f"# foreign keys = [{', '.join([tabs[cols[fk[0]][0]] + '.' + cols[fk[0]][1] + ' = ' + tabs[cols[fk[1]][0]] + '.' + cols[fk[1]][1] for fk in db['foreign_keys']])}]\n"
                # Drop the final trailing newline of the finished prompt.
                self.db_prompts[db_id][c_num] = self.db_prompts[db_id][c_num][:-1]


# NOTE(review): these statements execute at import time as well as when run
# as a script — merely importing this module parses sys.argv and builds every
# database prompt. Consider moving them under the __main__ guard below
# (callers elsewhere may rely on the module-level names; verify first).
args = main_args()
prompt_maker = PromptMaker(args=args)
# Prints only the default object repr (e.g. "<PromptMaker object at 0x...>");
# presumably a debugging leftover.
print(prompt_maker)

if __name__ == "__main__":

    sql_list = []
    # 通过cot prompt进行生成 train_spider
    with open("../data/spider/train_spider.json") as train_sample, \
            open("../data/train_data/spider_train.json", 'w') as post_train_sample:

        query_list = json.load(train_sample)
        for item in tqdm(query_list):
            sql_list.append({
                "db_id": item['db_id'],
                "question": item['question'],
                "schema": f"""{prompt_maker.db_prompts[item['db_id']][0]}""",
                "query": item['query'],
                "schema_values": f"""{prompt_maker.db_prompts[item['db_id']][2]}"""
            })
        print(len(sql_list))
        p_sql_list = json.dumps(sql_list, ensure_ascii=False, indent=2)
        post_train_sample.write(p_sql_list)
        post_train_sample.close()
