import aiohttp
from tqdm import tqdm
import asyncio
from argparse import ArgumentParser
import evaluate
import os
import json
from datasets import load_dataset

from config import evaluate_dir, data_path, seed
from rag_util import search_similar


def naive_prompt(example: dict) -> str:
    """Return the raw prompt text for one example.

    Prefers the "input" field and falls back to "instruction".  The
    `or`-chain also guards against keys that are present but hold None
    or "", which the previous `len()` check would have crashed on.
    """
    return example.get("input") or example.get("instruction") or ""


EMBD_MODEL_NAME, EMBD_URL = None, None


def rag_prompt(example: dict) -> str:
    """Build a retrieval-augmented prompt: top-3 similar statute snippets
    followed by the original question.

    Requires the module-level EMBD_MODEL_NAME / EMBD_URL to be set (done in
    generate_responses_async) before the first call.
    """
    base_prompt = naive_prompt(example)
    # `assert` is stripped under `python -O`; validate explicitly instead.
    if EMBD_MODEL_NAME is None or EMBD_URL is None:
        raise RuntimeError(
            "EMBD_MODEL_NAME and EMBD_URL must be configured before rag_prompt is called"
        )
    results = search_similar(
        base_prompt, top_k=3, embedding_url=EMBD_URL, model_name=EMBD_MODEL_NAME
    )
    # One "[file] content" paragraph per retrieved snippet.
    content = "\n\n".join(f"[{r['file']}] {r['content']}" for r in results)
    prompt = f"""你是一名专业的中国法律助手。以下是与问题相关的法律条文：
{content}
请根据这些条文回答下列问题：
{base_prompt}
"""
    return prompt


async def async_query(
    session: aiohttp.ClientSession, url: str, model_name: str, prompt: str
) -> str:
    """POST one chat-completion request and return the stripped reply text.

    Best-effort: on any request/timeout/parse failure a warning is printed
    and "" is returned, so one bad sample does not abort the whole batch.
    """
    payload = {
        "model": model_name,
        "messages": [
            {"role": "system", "content": "You are a helpful Legal Assistant."},
            {"role": "user", "content": prompt},
        ],
        "temperature": 0.7,
        "top_p": 0.9,
        "max_tokens": 768,
        "stream": False,
        "chat_template_kwargs": {"enable_thinking": False},
    }

    try:
        # ClientTimeout is the supported form; a bare int timeout is
        # deprecated in aiohttp.
        async with session.post(
            url, json=payload, timeout=aiohttp.ClientTimeout(total=60)
        ) as response:
            response.raise_for_status()
            data = await response.json()
            return data["choices"][0]["message"]["content"].strip()
    # Narrowed from bare `except Exception`: connection/HTTP errors,
    # timeouts, and a malformed response body are the expected failures.
    except (aiohttp.ClientError, asyncio.TimeoutError, KeyError, IndexError, TypeError) as e:
        print(f"⚠️ Request failed: {prompt[:30]}... | Error: {e}")
        return ""


async def request_one_example(session, prompt_func, example, url, model_name):
    """Render the prompt for one example, query the model, and return a copy
    of the example with the model's answer under the "prediction" key."""
    rendered = prompt_func(example)
    answer = await async_query(session, url, model_name, rendered)
    return {**example, "prediction": answer.strip()}


async def generate_responses_async(args, dataset, max_concurrency=50):
    """Query the model for every example in `dataset` concurrently.

    At most `max_concurrency` requests are in flight at once.  Results are
    collected in completion order, so the output order may differ from the
    input order.  Returns a list of example dicts, each augmented with a
    "prediction" key.
    """
    if args.rag:
        prompt_func = rag_prompt
        # rag_prompt reads these module-level settings at call time.
        global EMBD_MODEL_NAME, EMBD_URL
        EMBD_MODEL_NAME = args.embd_model_name
        EMBD_URL = args.embd_url
    else:
        prompt_func = naive_prompt

    semaphore = asyncio.Semaphore(max_concurrency)
    results = []  # was initialized twice; once is enough
    total = len(dataset)
    async with aiohttp.ClientSession() as session:

        async def sem_task(example):
            async with semaphore:
                return await request_one_example(
                    session, prompt_func, example, args.url, args.model_name
                )

        tasks = [sem_task(ex) for ex in dataset]
        for future in tqdm(
            asyncio.as_completed(tasks), total=total, desc="Generating (async)"
        ):
            results.append(await future)
    return results


def generate_responses(args, dataset, max_concurrency=50):
    """Blocking wrapper that drives generate_responses_async to completion."""
    coroutine = generate_responses_async(args, dataset, max_concurrency)
    return asyncio.run(coroutine)


def evaluate_with_metrics(generations: list, references: list):
    """Score predictions against references; returns (sacreBLEU, ROUGE) dicts."""
    bleu_metric = evaluate.load("sacrebleu")
    rouge_metric = evaluate.load("rouge")

    # sacreBLEU expects a list of reference lists per prediction.
    wrapped_refs = [[ref] for ref in references]
    bleu_result = bleu_metric.compute(predictions=generations, references=wrapped_refs)
    rouge_result = rouge_metric.compute(predictions=generations, references=references)
    return bleu_result, rouge_result


def load_test_dataset(test_ratio):
    """Load the JSON dataset and return the held-out test split.

    `test_ratio` is a percentage (0-100) of the data reserved for testing;
    the split is made reproducible via the project-wide seed.
    """
    full_dataset = load_dataset("json", data_files=data_path)["train"]
    splits = full_dataset.train_test_split(test_size=test_ratio / 100, seed=seed)
    test_split = splits["test"]
    print(f"Test samples: {len(test_split)}")
    return test_split


def generate_results(args, res_filename):
    """Run generation over the test split and dump predictions to
    evaluate_dir/res_filename as pretty-printed JSON."""
    dataset = load_test_dataset(args.test_ratio)
    predictions = generate_responses(args, dataset)
    output_path = os.path.join(evaluate_dir, res_filename)
    with open(output_path, "w", encoding="utf-8") as fh:
        json.dump(predictions, fh, ensure_ascii=False, indent=4)
    print(f"✅ Predictions saved to {res_filename}")


def main():
    """CLI entry point: generate predictions, or (with --eval) score a
    previously generated prediction file with BLEU and ROUGE."""
    args = get_args()
    # File names encode the model and whether RAG was used, so separate runs
    # never clobber each other.
    suffix = "_rag" if args.rag else ""
    score_filename = f"{args.model_name}{suffix}.txt"
    res_filename = f"{args.model_name}{suffix}_predictions.json"
    os.makedirs(evaluate_dir, exist_ok=True)

    if not args.eval:
        generate_results(args, res_filename)
        return

    res_path = os.path.join(evaluate_dir, res_filename)
    # `assert` is stripped under `python -O`; raise explicitly instead.
    if not os.path.exists(res_path):
        raise FileNotFoundError("请先生成预测结果")
    with open(res_path, "r", encoding="utf-8") as f:
        results = json.load(f)
    gens = [r["prediction"] for r in results]
    refs = [r["output"] for r in results]
    bleu_scores, rouge_scores = evaluate_with_metrics(gens, refs)

    print("==== Evaluation Results ====")
    print(f"BLEU: {bleu_scores['score']:.4f}")
    print(f"ROUGE-L: {rouge_scores['rougeL']:.4f}")
    score_path = os.path.join(evaluate_dir, score_filename)
    with open(score_path, "w", encoding="utf-8") as f:
        f.write("==== Evaluation Results ====\n")
        f.write(f"BLEU: {bleu_scores['score']:.4f}\n")
        f.write(f"ROUGE-L: {rouge_scores['rougeL']:.4f}\n")


def get_args():
    """Parse command-line options for generation and evaluation runs."""
    parser = ArgumentParser()
    # Chat-completion backend.
    parser.add_argument("--model_name", type=str, default="chat")
    parser.add_argument(
        "--url", type=str, default="http://0.0.0.0:10003/v1/chat/completions"
    )
    # Embedding backend (only used together with --rag).
    parser.add_argument("--embd_model_name", type=str, default="chat")
    parser.add_argument(
        "--embd_url", type=str, default="http://0.0.0.0:10001/v1/embeddings"
    )
    # Mode switches: retrieval augmentation and score-only mode.
    parser.add_argument("--rag", action="store_true", default=False)
    parser.add_argument("--eval", action="store_true", default=False)
    parser.add_argument(
        "--test_ratio",
        type=float,
        default=10.0,
        help="percentage of data to use for testing",
    )
    return parser.parse_args()


if __name__ == "__main__":
    main()
