import json
import re
import os
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
import torch
from typing import List, Dict, Tuple
from beir.retrieval.search.lexical.elastic_search import ElasticSearch
from tqdm import tqdm
import math
import argparse
# ---- CLI configuration ----
parser = argparse.ArgumentParser(description='Run')

parser.add_argument('--cudan', type=int, default=0)
parser.add_argument('--output_file', type=str, default="output")
parser.add_argument('--temperature', type=float, default=0.7)
# BUGFIX: --max_rounds is read later as args.max_rounds but was never declared,
# which raised AttributeError at runtime.
parser.add_argument('--max_rounds', type=int, default=3)

parser.add_argument('--model_path', type=str, default="<PATH_TO_SAVE>/GRIPRL_LLaMa/global_step_xxx/actor/huggingface")
parser.add_argument('--output_name', type=str, default="GRIPRL_step_xxx")
parser.add_argument('--test_file', type=str, default="WebQ_sample_10")

args = parser.parse_args()
OUTPUT_FILE = f"{args.output_file}/{args.output_name}.jsonl"
INPUT_FILE = f"test_data/formatted/{args.test_file}.jsonl"
# FIX: make sure the output directory exists before OUTPUT_FILE is opened
# in append mode later on.
os.makedirs(args.output_file, exist_ok=True)

# Device selection: requested CUDA device when available, otherwise CPU.
device = torch.device(f"cuda:{args.cudan}" if torch.cuda.is_available() else "cpu")

# Retrieval module — the Elasticsearch connection is created lazily and reused.
es_instance = None
def init_es() -> Tuple[ElasticSearch, str, str, str]:
    """Return a cached (client, index_name, title_field, body_field) tuple.

    The connection is built on the first call and reused afterwards.
    BUGFIX: the original annotation claimed ``-> ElasticSearch`` even though
    the function has always returned a 4-tuple; the annotation now matches.

    Returns:
        Tuple of (ElasticSearch client, index name, title field, body field).
    """
    global es_instance
    if es_instance is None:
        config = {
            'hostname': 'localhost',
            'index_name': 'wiki',
            'keys': {'title': 'title', 'body': 'txt'},
            'timeout': 100,
            'retry_on_timeout': True,
            'number_of_shards': 'default',
            'maxsize': 24,
            'language': 'english',
        }
        es_instance = ElasticSearch(config), config['index_name'], config['keys']['title'], config['keys']['body']
    return es_instance

def ret(question: str) -> str:
    """Retrieve the top-3 passages for *question*, newline-joined.

    Best-effort: any failure (connection error, empty hits, missing docs)
    yields an empty string rather than raising.
    """
    try:
        es, index_name, _title_field, body_field = init_es()
        # Strip punctuation for the lexical query; fall back to the raw
        # question if nothing survives the cleanup.
        query = re.sub(r'[^\w\s]', ' ', question).strip() or question
        results = es.lexical_multisearch(texts=[query], top_hits=3)
        if not results or 'hits' not in results[0]:
            return ""
        hits = results[0]["hits"]
        hit_ids = [doc_id for doc_id, _ in hits]
        fetched = es.es.mget(body={"ids": hit_ids}, index=index_name)["docs"]
        sources = {doc["_id"]: doc.get("_source", {}) for doc in fetched}
        texts = []
        for doc_id, _ in hits:
            src = sources.get(doc_id)
            if src:
                texts.append(src.get(body_field, "").strip())
        return '\n'.join(texts)
    except Exception:
        return ""

# Model / tokenizer paths.
base_model_path = args.model_path

# Tokenizer: LLaMa checkpoints reuse the public Meta-Llama-3-8B tokenizer;
# any other checkpoint supplies its own.
tkn_base_model_path = (
    "meta-llama/Meta-Llama-3-8B" if "LLaMa" in base_model_path else base_model_path
)
tokenizer = AutoTokenizer.from_pretrained(
    tkn_base_model_path, use_fast=False, trust_remote_code=True
)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "left"  # left padding for batched causal generation

# Base model in bfloat16, moved to the chosen device, inference mode.
model = AutoModelForCausalLM.from_pretrained(
    base_model_path,
    torch_dtype=torch.bfloat16,
)
model.to(device)
model.eval()

def generate_responses(
    questions: List[str], ref_list: List[str] = None, ret_txt_list: List[str] = None,
    max_length: int = 5120, instructions_override: bool = False
) -> List[str]:
    """Generate one model response per question in a single batched call.

    Args:
        questions: the original user queries.
        ref_list: previous intermediary answers, one per question ("" if none).
        ret_txt_list: retrieved reference texts, one per question ("" if none).
        max_length: tokenizer truncation limit for the prompt.
        instructions_override: when True, seed the generation with "[ANSWER]"
            to force a final answer (used on the last round).

    Returns:
        List of response strings. A response missing both [ANSWER] and
        [INTERMEDIARY] tags is prefixed with "[ANSWER]" so downstream
        parsing always finds a tag.
    """
    instruction = """
Given the question and previous answers, as well as the following retrieved text, please provide the answer. If you are very confident on your answer, you can provide you answer follow by [ANSWER], and end with [SOLVED]. If you need more external knowledge, you should generate the temp answer follow by [INTERMEDIARY], and end with [RETRIEVE]. Besides, you need to generate the new query based on the original query and current temp answer.

For example:
- Case 1:
Output: [ANSWER] Complete answer [SOLVED]

- Case 2:
Output: [INTERMEDIARY] Partial answer [RETRIEVE] New Query.

The followings are the question you need to solve:
- Original Query:
    {question}

Here is some retrieved relevant information along with some previous responses.
- Intermediary:
    {intermediary}

- Reference Text:
    {reference}
""".strip()
    batch_ref = ref_list if ref_list else [""] * len(questions)
    batch_ret = ret_txt_list if ret_txt_list else [""] * len(questions)
    prompts = []
    for q, ref, rt in zip(questions, batch_ref, batch_ret):
        messages = [{"role": "user", "content": instruction.format(question=q, intermediary=ref, reference=rt)}]
        if instructions_override:
            prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + "[ANSWER]"
        else:
            prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        prompts.append(prompt)

    inputs = tokenizer(prompts, return_tensors="pt", padding=True, truncation=True, max_length=max_length).to(device)
    try:
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=1024,
                # BUGFIX: honour the parsed --temperature CLI flag; it was
                # previously hard-coded to 0.7 and silently ignored.
                temperature=args.temperature,
                top_p=0.9,
                do_sample=True,
                stop_strings=["<|endoftext|>", "<|end_of_text|>"],
                tokenizer=tokenizer,
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )
        # BUGFIX: strip the prompt by token count rather than by decoded-string
        # length; decoding is not guaranteed to be length-preserving, so
        # character-based slicing could mis-cut the response.
        prompt_token_len = inputs['input_ids'].shape[1]
        all_outputs = []
        for out in outputs:
            response = tokenizer.decode(out[prompt_token_len:], skip_special_tokens=False)
            response = response.replace("<|endoftext|>", "").replace("<|end_of_text|>", "").strip()
            if "[ANSWER]" not in response and "[INTERMEDIARY]" not in response:
                all_outputs.append("[ANSWER]" + response)
            else:
                all_outputs.append(response)
        return all_outputs
    finally:
        # Free the batch tensors eagerly to keep GPU memory stable.
        del inputs
        if 'outputs' in locals():
            del outputs
        torch.cuda.empty_cache()

class QuestionState:
    """Mutable per-question state tracked across reasoning rounds."""

    def __init__(self, question: str, index: int):
        self.question = question  # original user question
        self.index = index        # position within the batch
        # Text fields start empty: latest intermediary ref, latest retrieved
        # context, and the final answer once solved.
        self.ref = ""
        self.ret_txt = ""
        self.final_answer = ""
        self.round = 0              # rounds processed so far
        self.is_completed = False   # True once a [SOLVED] answer arrives
        self.completed_round = -1   # round it completed on (-1 = not yet)
        # Per-round histories.
        self.answers = []     # extracted answers
        self.new_querys = []  # follow-up retrieval queries
        self.raw_res = []     # raw model responses

def extract_answer_from_response(response: str) -> str:
    """Pull the answer text out of a model response.

    Preference order: the [ANSWER]...[SOLVED] span, then the
    [INTERMEDIARY]...[RETRIEVE] span, then the whole response stripped.
    """
    for pattern in (r"\[ANSWER\](.*?)\[SOLVED\]",
                    r"\[INTERMEDIARY\](.*?)\[RETRIEVE\]"):
        match = re.search(pattern, response, re.DOTALL)
        if match:
            return match.group(1).strip()
    return response.strip()

def extract_retriv_from_response(response: str) -> str:
    """Return the follow-up query after the [RETRIEVE] tag, or "" if absent.

    (The regex alone decides: if the literal tag is present the pattern
    always matches, so no separate membership check is needed.)
    """
    match = re.search(r"\[RETRIEVE\](.*)", response, re.DOTALL)
    return match.group(1).strip() if match else ""

def process_batch_round(batch_states: List[QuestionState], current_round: int, max_round_n: int) -> List[QuestionState]:
    """Run one inference round over a batch, mutating each QuestionState in place.

    Already-completed states are skipped. On the last round
    (current_round == max_round_n) generation is forced to produce a final
    answer via instructions_override. Returns the same batch_states list.
    """
    active_states = [state for state in batch_states if not state.is_completed]
    if not active_states:
        return batch_states

    questions = [state.question for state in active_states]
    refs = [state.ref for state in active_states]
    ret_txts = [state.ret_txt for state in active_states]
    
    # Choose the prompt variant by round: force a final answer on the last one.
    if current_round == max_round_n:
        responses = generate_responses(questions, refs, ret_txts, instructions_override=True)
    else:
        responses = generate_responses(questions, refs, ret_txts)
    retrieve_queries = []
    retrieve_indices = []
    
    for i, (state, response) in enumerate(zip(active_states, responses)):
        # Record this round's extracted answer, follow-up query, and raw output.
        current_answer = extract_answer_from_response(response)
        current_retriv = extract_retriv_from_response(response)
        state.answers.append(current_answer)
        state.new_querys.append(current_retriv)
        state.raw_res.append(response)
        
        # Final answer: mark the state completed and skip further updates.
        if response.startswith("[ANSWER]") and "[SOLVED]" in response:
            answer_match = re.search(r"\[ANSWER\](.*?)\[SOLVED\]", response, re.DOTALL)
            if answer_match:
                state.final_answer = answer_match.group(1).strip()
                state.is_completed = True
                state.completed_round = current_round  # record completing round
                continue

        # Retrieval request: queue the new query for the batched lookup below.
        if "[RETRIEVE]" in response:
            # The [INTERMEDIARY]...[RETRIEVE] span becomes next round's ref.
            intermediary_match = re.search(r"\[INTERMEDIARY\](.*?)\[RETRIEVE\]", response, re.DOTALL)
            if intermediary_match:
                state.ref = "[INTERMEDIARY]" + intermediary_match.group(1) + "[RETRIEVE]"
            else:
                state.ref = response

            # Everything after [RETRIEVE] is the retrieval query.
            retrieve_match = re.search(r"\[RETRIEVE\](.*)", response, re.DOTALL)
            if retrieve_match:
                new_query = retrieve_match.group(1).strip()
                if new_query:
                    retrieve_queries.append(new_query)
                    retrieve_indices.append(i)
        else:
            # Neither final nor a retrieval request: carry the response as ref
            # and clear the stale retrieved context.
            state.ref = response
            state.ret_txt = ""

        state.round += 1
    
    # Batched retrieval with per-query error handling (failures yield "").
    if retrieve_queries:
        for query, idx in zip(retrieve_queries, retrieve_indices):
            try:
                ret_result = ret(query)
                active_states[idx].ret_txt = ret_result
            except Exception as e:
                print(f"Error retrieving for query '{query}': {str(e)}")
                active_states[idx].ret_txt = ""

    return batch_states


def process_questions(
    input_questions: List[str], output_file, max_rounds: int = 3, batch_size: int = 16
) -> List[dict]:
    """Answer every question with up to *max_rounds* of retrieve-and-generate.

    Records are appended to *output_file* as JSONL while processing, and the
    complete list of records is also returned. Per-round completion
    statistics are printed when done.

    Args:
        input_questions: the questions to answer.
        output_file: path of the JSONL file opened in append mode.
        max_rounds: maximum reasoning rounds per question.
        batch_size: questions processed per model batch.

    Returns:
        List of result dicts (one per question).
    """
    results = []

    # Count how many questions finish in each round (index 0 == round 1).
    round_completion_counts = [0] * max_rounds

    num_batches = math.ceil(len(input_questions) / batch_size)

    # Open the output file once for the whole run.
    with open(output_file, 'a', encoding='utf-8') as fout:
        for batch_idx in tqdm(range(0, len(input_questions), batch_size),
                            desc="Processing batches", unit="batch", total=num_batches):

            batch_end = min(batch_idx + batch_size, len(input_questions))
            batch_questions = input_questions[batch_idx:batch_end]

            batch_states = [QuestionState(q, i) for i, q in enumerate(batch_questions)]

            # Multi-round inference; stop early once the whole batch is solved.
            for round_idx in range(max_rounds):
                current_round = round_idx + 1  # 1-based round numbering
                batch_states = process_batch_round(batch_states, current_round, max_rounds)
                if all(state.is_completed for state in batch_states):
                    break

            # Fall back to the last intermediary answer for unfinished questions.
            for state in batch_states:
                if not state.is_completed:
                    if "[INTERMEDIARY]" in state.ref and "[RETRIEVE]" in state.ref:
                        intermediary_match = re.search(r"\[INTERMEDIARY\](.*?)\[RETRIEVE\]", state.ref, re.DOTALL)
                        if intermediary_match:
                            final_answer = intermediary_match.group(1).strip()
                        else:
                            final_answer = state.ref
                    else:
                        final_answer = state.ref

                    if final_answer not in state.answers:
                        state.answers.append(final_answer)

                    state.final_answer = final_answer
                    state.completed_round = max_rounds + 1  # past the round limit

            # Tally completions for this batch.
            for state in batch_states:
                if 1 <= state.completed_round <= max_rounds:
                    round_completion_counts[state.completed_round - 1] += 1

            # Write this batch's records. (FIX: removed the dead `batch_results`
            # list that was built but never read; the flush condition is
            # loop-invariant per batch, so flush once after the loop.)
            for state in batch_states:
                record = {
                    "Question": state.question,
                    "Output": state.raw_res,
                    "Intermediate Answer": state.answers,
                    "Retrieved Context": state.new_querys,
                }
                results.append(record)
                fout.write(json.dumps(record, ensure_ascii=False) + "\n")
            if (batch_idx // batch_size) % 4 == 0 or batch_end == len(input_questions):
                fout.flush()

    # Print per-round completion statistics.
    print("\n" + "="*50)
    print("各轮回答完成统计：")
    print("="*50)
    total_completed = sum(round_completion_counts)
    for i, count in enumerate(round_completion_counts):
        round_num = i + 1
        percentage = (count / len(input_questions)) * 100 if len(input_questions) > 0 else 0
        print(f"第{round_num}轮完成答案数量: {count:>6} ({percentage:>5.1f}%)")

    unfinished_count = len(input_questions) - total_completed
    unfinished_percentage = (unfinished_count / len(input_questions)) * 100 if len(input_questions) > 0 else 0
    print(f"超过{max_rounds}轮未完成: {unfinished_count:>6} ({unfinished_percentage:>5.1f}%)")
    print("-" * 50)
    print(f"总问题数量: {len(input_questions):>6}")
    print(f"总完成数量: {total_completed:>6}")
    print("="*50)

    return results

# Script entry point.
def main(input_file: str, output_file: str, batch_size: int = 16):
    """Load questions from *input_file*, run multi-round inference, append
    JSONL results to *output_file*.

    Args:
        input_file: JSONL file with one {"question": ...} object per line.
        output_file: JSONL path to append result records to.
        batch_size: questions per model batch.
    """
    with open(input_file, 'r', encoding='utf-8') as fin:
        input_questions = [json.loads(line).get("question", "") for line in fin]

    print(f"Total questions: {len(input_questions)}")
    print(f"Batch size: {batch_size}")
    print(f"Estimated batches: {math.ceil(len(input_questions) / batch_size)}")

    # Smoke-test the Elasticsearch connection; retrieval degrades to "" on failure.
    try:
        test_result = ret("test query")
        print(f"Elasticsearch connection test: {'SUCCESS' if test_result is not None else 'FAILED'}")
    except Exception as e:
        print(f"Elasticsearch connection test FAILED: {str(e)}")
        print("Continuing with empty retrieval results...")

    # BUGFIX: the parser did not declare --max_rounds, so args.max_rounds
    # raised AttributeError; fall back to 3 when the attribute is absent.
    max_rounds = getattr(args, "max_rounds", 3)
    process_questions(input_questions, output_file, max_rounds=max_rounds, batch_size=batch_size)
    
# Guard the entry point so importing this module does not kick off a full run.
# A modest batch_size keeps memory usage stable.
if __name__ == "__main__":
    main(INPUT_FILE, OUTPUT_FILE, batch_size=8)