"""
merge the pre-process datasets  => instruction-tuning data format
"""

import json
import random
import time
from tqdm import tqdm

# Chinese system-role prompts, sampled per example for the Chinese datasets
# (dusql, xuetong). Roughly: "You are a SQL expert; given the schema and
# question, write the correct SQL without explanation."
ROLE_SQL_zh = [
    "你是一个SQL编写专家, 请根据给定的schema、question编写正确的SQL，不做解释。",
    "你是一个数据库专家, 请你根据schema和question编写正确的查询SQL，不做解释。"
]

# English system-role prompts, sampled per example for the spider datasets.
ROLE_SQL = [
    "Given the database schema, you need to translate the question into the SQL query without explanation.",
    "You are an sqlite SQL programmer. Given the database schema, question, complete sqlite SQL query with no explanation."
]

if __name__ == "__main__":
    s_t = time.time()
    train_list = [
        "../data/train_data/spider_train.json",
        "../data/train_data/xuetong.json",
        "../data/train_data/dusql_train.json"
    ]

    dev_list = [
        "../data/train_data/spider_dev.json",
        "../data/train_data/dusql_dev.json"
    ]

    output_file_t = "../data/train_data/copilot1.05_train.json"
    output_file_d = "../data/train_data/copilot1.0_dev.json"

    copilot_train, copilot_dev = [], []

    for train_f in train_list:
        with open(train_f) as train_sample:
            train_data = json.load(train_sample)
            for idx, _item in tqdm(enumerate(train_data)):
                if "spider" in train_sample:
                    _question = random.choice(ROLE_SQL)
                else:
                    _question = random.choice(ROLE_SQL_zh)
                copilot_train.append({
                    "id": f"{_item['db_id']}_{str(idx)}",
                    "conversations": [
                        {
                            "from": "user",
                            "value": f"{_question}\nDatabase schema:\n{_item['schema']}\nQuestion:{_item['question']}\nSQL:"
                        },
                        {
                            "from": "assistant",
                            "value": _item["query"]
                        }
                    ]
                })

    with open(output_file_t, 'w') as post_train_sample:
        copilot_train = json.dumps(copilot_train, ensure_ascii=False, indent=2)
        post_train_sample.write(copilot_train)
        post_train_sample.close()

    for dev_f in dev_list:
        with open(dev_f) as dev_sample:
            dev_data = json.load(dev_sample)
            for idx, _item in tqdm(enumerate(dev_data)):
                if "spider" in dev_sample:
                    _question = random.choice(ROLE_SQL)
                else:
                    _question = random.choice(ROLE_SQL_zh)
                copilot_dev.append({
                    "id": f"{_item['db_id']}_{str(idx)}",
                    "conversations": [
                        {
                            "from": "user",
                            "value": f"{_question}\nDatabase schema:\n{_item['schema']}\nQuestion:{_item['question']}\nSQL:"
                        },
                        {
                            "from": "assistant",
                            "value": _item["query"]
                        }
                    ]
                })

    with open(output_file_d, 'w') as post_dev_sample:
        copilot_dev = json.dumps(copilot_dev, ensure_ascii=False, indent=2)
        post_dev_sample.write(copilot_dev)
        post_dev_sample.close()

    e_t = time.time()
    print(f"time cost {e_t - s_t}s")
