import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig, PreTrainedTokenizer
# from qwen_generation_utils import make_context, decode_tokens, get_stop_words_ids
import transformers
from dataclasses import dataclass, field
from typing import Dict, Optional, List, Tuple
from peft import PeftModelForCausalLM
import json
import random
from tqdm import tqdm
import os
from qwen_generation_utils import make_context, decode_tokens, get_stop_words_ids


@dataclass
class ModelArguments:
    """Which checkpoint to load and whether LoRA weights go on top of it."""

    # HF hub id or local path of the base causal-LM checkpoint.
    model_name_or_path: Optional[str] = "Qwen/Qwen-7B"
    # When True, the base model is wrapped with PeftModelForCausalLM at startup.
    use_lora: Optional[bool] = False


@dataclass
class DataArguments:
    """Input data locations and the directory predictions are written to.

    Fields default to None until supplied on the command line, so they are
    annotated Optional[str] (the originals said ``str`` with a ``None``
    default, which misstates the type).
    """

    data_path: Optional[str] = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    eval_data_path: Optional[str] = field(
        default=None, metadata={"help": "Path to the evaluation data."}
    )
    # Unused in this inference script; kept for CLI compatibility with training.
    lazy_preprocess: bool = False
    output_dir: Optional[str] = field(
        default=None, metadata={"help": "Path to the predicting results."}
    )


@dataclass
class LoraArguments:
    """Adapter hyper-parameters; only ``lora_weight_path`` is read by this script."""

    lora_r: int = 64
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    # Qwen projection-module names the adapters attach to. The bound ``copy``
    # method serves as the factory, giving each instance a fresh list.
    lora_target_modules: List[str] = field(
        default_factory=["c_attn", "c_proj", "w1", "w2"].copy
    )
    lora_weight_path: str = ""  # location of trained adapter weights
    lora_bias: str = "none"
    q_lora: bool = False


# Parse command-line flags into the three dataclasses defined above.
parser = transformers.HfArgumentParser(
    (ModelArguments, DataArguments, LoraArguments)
)
(
    model_args,
    data_args,
    lora_args,
) = parser.parse_args_into_dataclasses()

# To generate attention masks automatically, it is necessary to assign distinct
# token_ids to pad_token and eos_token, and set pad_token_id in the generation_config.

# Set RoPE scaling factor
config = transformers.AutoConfig.from_pretrained(
    model_args.model_name_or_path,
    trust_remote_code=True
)
# Load the tokenizer. padding_side='left' so batched prompts are left-padded,
# which decoder-only generation requires (new tokens follow the prompt directly).
tokenizer = AutoTokenizer.from_pretrained(
    model_args.model_name_or_path,
    model_max_length=800,
    padding_side='left',
    trust_remote_code=True
)
# Qwen ships no dedicated pad token; reuse the end-of-document token id.
tokenizer.pad_token_id = tokenizer.eod_id

model = AutoModelForCausalLM.from_pretrained(
    model_args.model_name_or_path,
    config=config,
    # pad_token_id=tokenizer.pad_token_id,
    device_map="auto",
    trust_remote_code=True
).eval()

# Optionally stack LoRA adapter weights on the base model for inference.
if model_args.use_lora:
    model = PeftModelForCausalLM.from_pretrained(model, lora_args.lora_weight_path, device_map="auto",
                                                 torch_dtype=torch.float16)
    model.to(dtype=torch.float16)

# Reload the generation config with pad_token_id set, so attention masks can be
# derived automatically during batched generate().
model.generation_config = GenerationConfig.from_pretrained(model_args.model_name_or_path,
                                                           pad_token_id=tokenizer.pad_token_id)

# start generate SQL
# System prompts: Chinese variants for DuSQL, English variants for Spider.
ROLE_SQL_zh = [
    "你是一个SQL编写专家, 请根据给定的schema、question编写正确的SQL，不做解释。",
    "你是一个数据库专家, 请你根据schema和question编写正确的查询SQL，不做解释。"
]

ROLE_SQL = [
    "Given the database schema, you need to translate the question into the SQL query without explanation.",
    "You are an sqlite SQL programmer. Given the database schema, question, complete sqlite SQL query with no explanation."
]

# Dev-set files to run inference over; "spider" in the path selects the English prompts.
dev_list = [
    "./data/train_data/spider_dev.json",
    "./data/train_data/dusql_dev.json"
]


def single_infer():
    """Generate SQL predictions one example at a time via ``model.chat``.

    For every dev file in ``dev_list``, prompts the model per example and
    writes tab-separated ``<sql>\\t<db_id>`` lines into
    ``data_args.output_dir`` (one output file per benchmark).
    """
    out_dusql = 'pred_dusql0.sql'
    out_spider = 'pred_spider0.sql'
    spider_res, dusql_res = [], []

    for dev_f in dev_list:
        # Spider is English, DuSQL is Chinese — pick a matching system prompt
        # (one random choice per file, as before).
        if "spider" in dev_f:
            is_spider = True
            _role = random.choice(ROLE_SQL)
        else:
            is_spider = False
            _role = random.choice(ROLE_SQL_zh)

        # Explicit utf-8: the DuSQL data contains Chinese text.
        with open(dev_f, encoding="utf-8") as dev_sample:
            dev_data = json.load(dev_sample)

        for idx, _item in tqdm(enumerate(dev_data), total=len(dev_data)):
            response, _ = model.chat(
                tokenizer,
                f"{_role}\nDatabase schema:\n{_item['schema']}\nQuestion:{_item['question']}\nSQL:",
                history=None)
            print(response)
            if is_spider:
                spider_res.append((response, _item['db_id']))
            else:
                dusql_res.append((response, _item['db_id']))

    print(f"spider: {len(spider_res)}")
    print(f"dusql: {len(dusql_res)}")

    _dump_predictions(os.path.join(data_args.output_dir, out_dusql), dusql_res)
    _dump_predictions(os.path.join(data_args.output_dir, out_spider), spider_res)


def _dump_predictions(path, rows):
    """Write (sql, db_id) pairs to *path* as tab-separated lines."""
    # The with-block closes the file; no manual close() needed.
    with open(path, "w", encoding="utf-8") as g:
        for sql, db_id in rows:
            g.write(f"{sql}\t{db_id}\n")


def batch_infer(batch_size=48):
    """Generate SQL predictions in left-padded batches of *batch_size*.

    Produces the same output files as ``single_infer`` but drives batched
    ``model.generate`` with ``make_context``/``decode_tokens`` for prompt
    construction and de-tokenisation.
    """
    out_dusql = 'pred_dusql0.sql'
    out_spider = 'pred_spider0.sql'
    spider_res, dusql_res = [], []

    for dev_f in dev_list:
        if "spider" in dev_f:
            is_spider = True
            _role = random.choice(ROLE_SQL)
        else:
            is_spider = False
            _role = random.choice(ROLE_SQL_zh)

        # Explicit utf-8: the DuSQL data contains Chinese text.
        with open(dev_f, encoding="utf-8") as dev_sample:
            dev_data = json.load(dev_sample)

        # Ceil division. The old `int(len / bs) + 1` produced an empty trailing
        # batch whenever len(dev_data) was an exact multiple of batch_size,
        # which would feed an empty list to the tokenizer.
        total = (len(dev_data) + batch_size - 1) // batch_size

        for idx in tqdm(range(total)):
            # Slicing clamps the end index automatically, so no manual bound check.
            ele_input = dev_data[batch_size * idx: batch_size * (idx + 1)]
            batch_raw_text = []

            for _item in ele_input:
                raw_text, _ = make_context(
                    tokenizer,
                    f"{_role}\nDatabase schema:\n{_item['schema']}\nQuestion:{_item['question']}\nSQL:",
                    system="You are a SQL specialist.",
                    max_window_size=model.generation_config.max_window_size,
                    chat_format=model.generation_config.chat_format,
                )
                batch_raw_text.append(raw_text)

            batch_input_ids = tokenizer(batch_raw_text, padding='longest')
            batch_input_ids = torch.LongTensor(batch_input_ids['input_ids']).to(model.device)
            batch_out_ids = model.generate(
                batch_input_ids,
                return_dict_in_generate=False,
                generation_config=model.generation_config
            )
            # Count of left-pad tokens per row; skipped before decoding.
            padding_lens = [batch_input_ids[i].eq(tokenizer.pad_token_id).sum().item()
                            for i in range(batch_input_ids.size(0))]

            batch_response = [
                (decode_tokens(
                    batch_out_ids[i][padding_lens[i]:],
                    tokenizer,
                    raw_text_len=len(batch_raw_text[i]),
                    context_length=(batch_input_ids[i].size(0) - padding_lens[i]),
                    chat_format="chatml",
                    verbose=False,
                    errors='replace'
                ), _item['db_id']) for i, _item in enumerate(ele_input)
            ]
            if is_spider:
                spider_res += batch_response
            else:
                dusql_res += batch_response
            print(batch_response)

    print(f"spider: {len(spider_res)}")
    print(f"dusql: {len(dusql_res)}")

    # Write once, after ALL dev files are processed (matching single_infer).
    # The old code re-opened both files in "w" mode inside the per-file loop,
    # truncating the dusql file to empty after the spider pass and rewriting
    # everything on each iteration.
    with open(os.path.join(data_args.output_dir, out_dusql), "w", encoding="utf-8") as g:
        for sql, db_id in dusql_res:
            g.write(f"{sql}\t{db_id}\n")

    with open(os.path.join(data_args.output_dir, out_spider), "w", encoding="utf-8") as g:
        for sql, db_id in spider_res:
            g.write(f"{sql}\t{db_id}\n")


# Guard the entry point so importing this module does not kick off inference.
if __name__ == "__main__":
    # batch_infer()
    single_infer()