# run_experiment1.py
import abc
import argparse
import json
import logging
import time
import uuid
from typing import List, Dict, Tuple
import argparse
import concurrent.futures
import re  # <-- 新增
import tiktoken  # <-- 新增
import os  # <-- 新增

import requests
from openai import OpenAI
from tqdm import tqdm

# --- 1. 配置 ---
# Fail fast with a clear message if the local config module is missing.
try:
    from config import (
        AGGREGATION_MODEL_API_BASE, AGGREGATION_MODEL_API_KEY, AGGREGATION_MODEL_NAME, EMBEDDING_MODEL_NAME
)
except ImportError:
    print("错误: 无法从 config.py 导入配置。请确保该文件存在且路径正确。")
    # Fix: `exit()` is an interactive helper from the `site` module and may be
    # absent (e.g. under `python -S` or when frozen); also exit nonzero on error.
    raise SystemExit(1)

MEMTREE_API_BASE_URL = "http://127.0.0.1:8060"
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

clients = {}
MAX_INPUT_SIZE = 512
MAX_NEW_TOKENS = 512
MAX_TOKENS_PER_CHUNK = 135  # 句子/块 的token上限


def initialize_clients():
    """Initialize the MemTree OpenAI client for the current worker process.

    Idempotent: safe to call repeatedly; the client is constructed at most
    once per process (each ProcessPoolExecutor worker calls this on entry).
    """
    global clients
    if "memtree" in clients:
        return
    clients["memtree"] = OpenAI(
        base_url=AGGREGATION_MODEL_API_BASE,
        api_key=AGGREGATION_MODEL_API_KEY,
    )


def get_client() -> OpenAI:
    """Return this process's client instance (None if initialize_clients was never called)."""
    client = clients.get("memtree")
    return client


# --- 2. 核心逻辑实现 ---
class BaseMethod(abc.ABC):
    """Abstract base class for all experiment methods."""

    def __init__(self, session_id: str, **kwargs):
        # Unique identifier linking this run to a server-side memory session.
        self.session_id = session_id
        self.method_name = "base"

    @abc.abstractmethod
    def get_answer(self, dialog_history: List[str], query: str) -> Tuple[str, str]:
        """Return (answer, retrieved_context) for `query` given `dialog_history`.

        NOTE(review): annotation corrected from List[Dict] to List[str] — the
        concrete subclass and `process_dialogue` treat `dialog_history` as a
        flat list of alternating user/assistant strings; confirm against data.
        """
        pass


class MemTreeMethod(BaseMethod):
    """Method: MemTree.

    Pushes the dialogue history into a MemTree service session, retrieves the
    top-k relevant context for the query, and answers with the aggregation LLM.
    Supports conditional chunking of turns via the --split_text flag.
    """

    def __init__(self, session_id: str, **kwargs):
        super().__init__(session_id)
        self.method_name = "memtree"
        self.k = kwargs.get('k', 3)  # number of nodes retrieved per query
        self.split_text = kwargs.get('split_text', False)  # enable token-bounded chunking

        if self.split_text:
            # The tokenizer is only needed (and only set) when chunking is enabled.
            try:
                self.tokenizer = tiktoken.get_encoding("cl100k_base")
            except Exception:
                logger.warning("cl100k_base 编码器加载失败，回退到 gpt2 编码器。")
                self.tokenizer = tiktoken.get_encoding("gpt2")

    def _get_token_count(self, text: str) -> int:
        """Return the number of tokens in `text` under the active encoding."""
        return len(self.tokenizer.encode(text))

    def _split_long_sentence(self, text: str) -> List[str]:
        """Hard-split `text` into decoded chunks of at most MAX_TOKENS_PER_CHUNK tokens."""
        tokens = self.tokenizer.encode(text)
        return [
            self.tokenizer.decode(tokens[i:i + MAX_TOKENS_PER_CHUNK])
            for i in range(0, len(tokens), MAX_TOKENS_PER_CHUNK)
        ]

    @staticmethod
    def _iter_turn_texts(dialog_history: List[str]):
        """Yield one formatted string per (user, assistant) turn pair.

        `dialog_history` is a flat list of alternating user/assistant strings;
        a trailing user turn without a reply gets an empty assistant part.
        (Helper extracted from two previously-duplicated loops in get_answer.)
        """
        for i in range(0, len(dialog_history), 2):
            user_turn = dialog_history[i]
            assistant_turn = dialog_history[i + 1] if i + 1 < len(dialog_history) else ''
            yield f"用户: {user_turn}\n助手: {assistant_turn}"

    def get_answer(self, dialog_history: List[str], query: str) -> Tuple[str, str]:
        """Ingest the history into MemTree, then answer `query` from retrieved context.

        Returns (answer, retrieved_context). When self.split_text is set, each
        turn is capped at MAX_TOKENS_PER_CHUNK tokens (over-long turns are
        hard-split); otherwise each full turn is one memory entry.
        """
        texts_for_memtree: List[str] = []

        if self.split_text:
            # Mode A: enforce the per-chunk token cap on each turn.
            for turn_text in self._iter_turn_texts(dialog_history):
                turn_text = turn_text.strip()
                if not turn_text:
                    continue
                if self._get_token_count(turn_text) <= MAX_TOKENS_PER_CHUNK:
                    texts_for_memtree.append(turn_text)
                else:
                    texts_for_memtree.extend(self._split_long_sentence(turn_text))
        else:
            # Mode B: original behavior — one full (unstripped) turn per entry.
            for turn_text in self._iter_turn_texts(dialog_history):
                if turn_text.strip():
                    texts_for_memtree.append(turn_text)

        if texts_for_memtree:
            self._batch_update_memtree(texts_for_memtree)

        context = self._retrieve_from_memtree(query)
        prompt = f"请仅根据所提供的上下文回答问题。\n\n上下文：\n{context if context else 'No history available.'}\n\n问题：{query}\n\n答案："
        answer = self._call_llm(prompt, AGGREGATION_MODEL_NAME)
        return answer, context

    def _batch_update_memtree(self, texts_to_update: List[str]):
        """POST a batch of texts to this session's MemTree; failures are logged, not raised."""
        url = f"{MEMTREE_API_BASE_URL}/v1/memory/sessions/{self.session_id}/batch_update"
        try:
            # Long timeout: server-side tree updates can be slow for large batches.
            requests.post(url, json={"texts": texts_to_update}, timeout=1800)
        except requests.exceptions.RequestException as e:
            logger.error(f"MemTree batch update failed for session {self.session_id}: {e}")

    def _retrieve_from_memtree(self, user_input: str) -> str:
        """Return the top-k retrieved context for `user_input`, or "" on any HTTP failure."""
        url = f"{MEMTREE_API_BASE_URL}/v1/memory/sessions/{self.session_id}/retrieve"
        try:
            response = requests.post(url, json={"input": user_input, "k": self.k}, timeout=120)
            response.raise_for_status()
            return response.json().get('retrieved_context', "")
        except requests.exceptions.RequestException as e:
            logger.error(f"MemTree retrieve failed for session {self.session_id}: {e}")
            return ""

    def _call_llm(self, prompt: str, model: str) -> str:
        """Send `prompt` to `model` via the aggregation API; return the stripped reply."""
        client = get_client()
        if client is None:
            # Fix: previously this raised AttributeError on `None.chat` if the
            # caller never ran initialize_clients() in this process.
            initialize_clients()
            client = get_client()
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=MAX_NEW_TOKENS,
        )
        return response.choices[0].message.content.strip()


# --- 3. 并行处理函数 (修改) ---
def process_dialogue(dialogue_tuple: Tuple[Dict, int, bool]) -> Dict:
    """Worker entry point: run one medical dialogue through MemTree.

    `dialogue_tuple` is (dialogue_case, k_value, split_text) as packed by main().
    Returns a result record with the generated answer and retrieved context.
    """
    dialogue_case, k_value, split_text = dialogue_tuple
    initialize_clients()

    # Fresh session per dialogue so memories never leak across cases.
    method = MemTreeMethod(str(uuid.uuid4()), k=k_value, split_text=split_text)

    history = dialogue_case['history']
    query = dialogue_case['current_q']
    conv_id = dialogue_case['id']

    answer, context = method.get_answer(history, query)

    return {
        "conversation_id": conv_id,
        "session_id": method.session_id,
        "user_input": query,
        "model_response": answer,
        "history": history,
        "context": context,
    }


# --- 4. 主执行流程 (修改) ---
def main():
    """Parse CLI args, run every dialogue through MemTree in parallel, save JSON results."""
    # --- 4.1. Argument parser ---
    parser = argparse.ArgumentParser(description="Run MemTree evaluation on medical data.")
    parser.add_argument('--input_file', type=str, default="medical_data_v3_eval.json",
                        help="Path to the input dataset.")
    parser.add_argument('--output_dir', type=str, default="output", help="Directory to save results.")
    parser.add_argument('--k_value', type=int, default=3, help="K value for MemTree retrieval.")
    parser.add_argument('--workers', type=int, default=2, help="Number of parallel workers.")
    parser.add_argument('--num_test', type=int, default=100, help="Number of dialogues to test (set to -1 for all).")
    parser.add_argument('--split_text', action='store_true',
                        # Fix: the old help text said "<= 100 tokens" but the
                        # actual cap is MAX_TOKENS_PER_CHUNK (135).
                        help="Set this flag to split text into chunks bounded by MAX_TOKENS_PER_CHUNK tokens.")

    args = parser.parse_args()

    # --- 4.2. Resolved settings ---
    DATASET_FILEPATH = args.input_file
    K_VALUE = args.k_value
    WORKERS = args.workers
    NUM_TEST = args.num_test
    SPLIT_TEXT = args.split_text
    OUTPUT_DIR = args.output_dir

    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # Derive the output filename from the input name and run settings.
    split_suffix = "split" if SPLIT_TEXT else "fullturn"
    # Fix: str.replace('.json', '') removed '.json' anywhere in the name
    # (e.g. 'a.json_v2' -> 'a_v2'); splitext strips only the final extension.
    base_input_name = os.path.splitext(os.path.basename(DATASET_FILEPATH))[0]
    OUTPUT_FILEPATH = os.path.join(OUTPUT_DIR, f"{base_input_name}_memtree_k{K_VALUE}_{AGGREGATION_MODEL_NAME}_{EMBEDDING_MODEL_NAME}_{split_suffix}.json")

    start_time = time.time()
    logger.info(f"开始生成任务: k={K_VALUE}, workers={WORKERS}, split_text={SPLIT_TEXT}")
    logger.info(f"输入文件: {DATASET_FILEPATH}")
    logger.info(f"输出文件: {OUTPUT_FILEPATH}")
    logger.info(f"确保 MemTree 服务正在 {MEMTREE_API_BASE_URL} 运行。")

    with open(DATASET_FILEPATH, 'r', encoding='utf-8') as f:
        dialogues = json.load(f)
        if NUM_TEST > 0:
            dialogues = dialogues[:NUM_TEST]

    # One task tuple per dialogue, matching process_dialogue's expected shape.
    tasks = [(dialogue, K_VALUE, SPLIT_TEXT) for dialogue in dialogues]
    results = []

    with concurrent.futures.ProcessPoolExecutor(max_workers=WORKERS) as executor:
        with tqdm(total=len(tasks), desc=f"使用 {WORKERS} 个进程处理") as pbar:
            future_to_task = {executor.submit(process_dialogue, task): task for task in tasks}
            for future in concurrent.futures.as_completed(future_to_task):
                try:
                    result = future.result()
                    if result:
                        results.append(result)
                except Exception as e:
                    # A single failed dialogue should not abort the whole run.
                    logger.error(f"处理任务时发生错误: {e}")
                pbar.update(1)

    with open(OUTPUT_FILEPATH, 'w', encoding='utf-8') as f_out:
        json.dump(results, f_out, ensure_ascii=False, indent=2)

    total_time = time.time() - start_time
    logger.info(f"为 {len(results)} 个对话生成完毕。")
    logger.info(f"总耗时: {total_time:.2f} 秒。")
    if results:
        logger.info(f"平均每个对话耗时: {total_time / len(results):.2f} 秒。")
    logger.info(f"结果已保存至 {OUTPUT_FILEPATH}")


if __name__ == "__main__":
    main()