try:
    # Try relative imports first (when run as a package module)
    from .ctx_manager import ContextManager
    from .es_manager import EnvStateManager
    from .base_llm import ConcurrentLLM
except ImportError:
    # Relative import failed: fall back to absolute imports (file run directly)
    import sys
    import os
    # Add the project root directory to sys.path
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
    from ragen.llm_agent.ctx_manager import ContextManager
    from ragen.llm_agent.es_manager import EnvStateManager
    from ragen.llm_agent.base_llm import ConcurrentLLM

from vllm import LLM, SamplingParams
from verl.single_controller.ray.base import RayWorkerGroup
from transformers import AutoTokenizer, AutoModelForCausalLM
from verl import DataProto
import hydra
import os
from typing import List, Dict
from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto
import time

# ========================= Text preprocessing utilities =========================
import json
import re


def _find_balanced_segments(text: str, start_ch: str, end_ch: str):
    """在文本中查找由成对括号包裹的所有平衡片段，返回(start_idx, end_idx, segment_str)列表。
    仅按字符深度匹配，不保证JSON有效性。
    """
    segments = []
    depth = 0
    start_idx = None
    for i, ch in enumerate(text):
        if ch == start_ch:
            if depth == 0:
                start_idx = i
            depth += 1
        elif ch == end_ch and depth > 0:
            depth -= 1
            if depth == 0 and start_idx is not None:
                end_idx = i + 1
                segments.append((start_idx, end_idx, text[start_idx:end_idx]))
                start_idx = None
    return segments


def _extract_largest_json_segment(text: str):
    """Extract the largest ``json.loads``-parseable segment (object or array).

    Returns ``(start_idx, end_idx, json_text)`` with ``end_idx`` exclusive,
    or ``(None, None, None)`` if no parseable segment exists.  Ties on length
    resolve to the earliest object candidate, matching the original
    stable-sort behavior.
    """
    candidates = []
    # One pass per bracket pair instead of two copy-pasted loops.
    for start_ch, end_ch in (('{', '}'), ('[', ']')):
        for s_idx, e_idx, seg in _find_balanced_segments(text, start_ch, end_ch):
            try:
                json.loads(seg)
            except Exception:
                # Balanced but not valid JSON — skip it.
                continue
            candidates.append((s_idx, e_idx, seg))
    if not candidates:
        return None, None, None
    # max() with a length key replaces sort-descending + take-first; it also
    # returns the first candidate on ties, preserving the original behavior.
    return max(candidates, key=lambda c: c[1] - c[0])


def _remove_extra_tags_and_answer_blocks(text: str) -> str:
    """移除原文本中的多余标签与<answer>块：
    - 删除所有<answer>...</answer>整块内容（避免非JSON答案混入think）。
    - 删除孤立的<think>和</think>、<answer>和</answer>标签。
    - 归一化空白。
    """
    if not text:
        return ""
    # 移除所有<answer>...</answer>（大小写按小写处理）
    cleaned = re.sub(r"<answer>.*?</answer>", "", text, flags=re.DOTALL)
    # 去除残留的<think>与</think>标签，仅保留其中内容
    cleaned = re.sub(r"</?think>", "", cleaned)
    # 去除孤立的<answer>与</answer>（若存在）
    cleaned = re.sub(r"</?answer>", "", cleaned)
    # 归一化空白
    cleaned = re.sub(r"\s+", " ", cleaned).strip()
    return cleaned


def preprocess_text_into_think_answer(text: str) -> str:
    """Rearrange a raw reply into one ``<think>...</think><answer>...</answer>`` pair.

    The largest parseable JSON (object or array) found in *text* is kept
    verbatim as the <answer> body.  Everything around it — after stripping
    answer blocks and stray tags — becomes the <think> body.  When no
    parseable JSON exists the answer is empty and the cleaned original text
    becomes the think part.  Non-string or empty input yields empty tags.
    Exactly one <think>/<answer> pair is ever emitted.
    """
    if not isinstance(text, str) or not text:
        return "<think></think><answer></answer>"

    start, end, json_text = _extract_largest_json_segment(text)
    if start is None:
        # No usable JSON: the whole cleaned text goes into <think>.
        cleaned = _remove_extra_tags_and_answer_blocks(text)
        return f"<think>{cleaned}</think><answer></answer>"

    # Cut the JSON out; the remainder forms the think part.  The JSON itself
    # is kept exactly as written (no re-serialization).
    leftover = text[:start] + text[end:]
    cleaned = _remove_extra_tags_and_answer_blocks(leftover)
    return f"<think>{cleaned}</think><answer>{json_text}</answer>"


# ========================= Existing class definitions =========================
class VllmWrapperWg:  # NOTE: a developing class for eval and test
    """Thin wrapper around a local vLLM engine that exposes the verl
    worker-group generation interface."""

    def __init__(self, config, tokenizer):
        self.config = config
        self.tokenizer = tokenizer
        ro_config = config.actor_rollout_ref.rollout
        self.llm = LLM(
            config.actor_rollout_ref.model.path,
            enable_sleep_mode=True,
            tensor_parallel_size=ro_config.tensor_model_parallel_size,
            dtype=ro_config.dtype,
            enforce_eager=ro_config.enforce_eager,
            gpu_memory_utilization=ro_config.gpu_memory_utilization,
            disable_custom_all_reduce=True,
            # disable_mm_preprocessor_cache=True,
            skip_tokenizer_init=False,
            max_model_len=ro_config.max_model_len,
            disable_log_stats=ro_config.disable_log_stats,
            max_num_batched_tokens=ro_config.max_num_batched_tokens,
            enable_chunked_prefill=ro_config.enable_chunked_prefill,
            enable_prefix_caching=True,
            trust_remote_code=True,
        )
        print("LLM initialized")
        self.sampling_params = SamplingParams(
            max_tokens=ro_config.response_length,
            temperature=ro_config.val_kwargs.temperature,
            top_p=ro_config.val_kwargs.top_p,
            top_k=ro_config.val_kwargs.top_k,
            # min_p=0.1,
        )

    def generate_sequences(self, lm_inputs: DataProto):
        """Decode the tokenized prompts, generate with vLLM, and wrap the
        post-processed responses in a DataProto.  This aligns with the verl
        Worker Group interface.
        """
        # NOTE: free_cache_engine is not used in this wrapper; it only
        # applies to verl's own vllm integration.
        # cache_action = lm_inputs.meta_info.get('cache_action', None)
        raw_prompts = self.tokenizer.batch_decode(
            lm_inputs.batch['input_ids'], skip_special_tokens=False)
        prompts = [p.replace("<|endoftext|>", "") for p in raw_prompts]

        generations = self.llm.generate(prompts, sampling_params=self.sampling_params)
        responses = [g.outputs[0].text for g in generations]
        # Post-process: extract the largest JSON and rebuild <think>/<answer>.
        responses = [preprocess_text_into_think_answer(r) for r in responses]

        lm_outputs = DataProto()
        # Direct assignment is a bit hard-coded but bypasses the
        # __init__ checks in DataProto.
        lm_outputs.non_tensor_batch = {
            'response_texts': responses,
            'env_ids': lm_inputs.non_tensor_batch['env_ids'],
            'group_ids': lm_inputs.non_tensor_batch['group_ids'],
        }
        lm_outputs.meta_info = lm_inputs.meta_info
        return lm_outputs
	
class ApiCallingWrapperWg:
    """Wrapper class for API-based LLM calls that fits into the VERL framework"""

    def __init__(self, config, tokenizer):
        self.config = config
        self.tokenizer = tokenizer
        info = config.model_info[config.model_config.model_name]

        # Copy the base generation kwargs so the source config is never mutated.
        self.base_kwargs = dict(info.generation_kwargs)

        # Provider-specific extra_body options (e.g. Aliyun thinking mode).
        self.extra_body = {}
        if hasattr(info, 'extra_body') and info.extra_body:
            self.extra_body = dict(info.extra_body)
            print(f"检测到extra_body参数: {self.extra_body}")

        # API connection settings pulled from the model config.
        provider_name = info.provider_name
        model_name = info.model_name
        api_key = info.get('api_key', None)
        base_url = info.get('base_url', None)
        max_concurrency = config.model_config.max_concurrency

        self.llm = ConcurrentLLM(
            provider=provider_name,
            model_name=model_name,
            api_key=api_key,
            base_url=base_url,
            max_concurrency=max_concurrency
        )

        print(f'API-based LLM ({provider_name} - {model_name}) initialized')
        if base_url:
            print(f'Using custom base_url: {base_url}')
        if self.extra_body.get('enable_thinking', False):
            print(f'🧠 思考模式已启用! enable_thinking={self.extra_body["enable_thinking"]}')
            if 'thinking_budget' in self.extra_body:
                print(f'💭 思考预算: {self.extra_body["thinking_budget"]} tokens')

    def generate_sequences(self, lm_inputs: DataProto) -> DataProto:
        """Run batched API calls for the given chat messages and package the
        post-processed responses into a DataProto."""
        messages_list = lm_inputs.non_tensor_batch['messages_list'].tolist()

        # Merge base generation kwargs with the provider-specific extra_body
        # (extra_body wins on key collisions, as before).
        final_kwargs = {**self.base_kwargs, **self.extra_body}

        # Debug trace of the parameter merge.
        print(f'[DEBUG] 基础参数 (generation_kwargs): {self.base_kwargs}')
        print(f'[DEBUG] Extra Body参数: {self.extra_body}')
        print(f'[DEBUG] 合并后的最终参数: {final_kwargs}')

        results, failed_messages = self.llm.run_batch(
            messages_list=messages_list,
            **final_kwargs
        )
        assert not failed_messages, f"Failed to generate responses for the following messages: {failed_messages}"

        texts = [item["response"] for item in results]
        print(f'[DEBUG] 生成的回复数量: {len(texts)}')
        # With thinking mode enabled, preview the head of the first reply.
        if texts and self.extra_body.get('enable_thinking', False):
            preview = texts[0][:200] + "..." if len(texts[0]) > 200 else texts[0]
            print(f'[DEBUG] 思考模式回复预览: {preview}')

        # Post-process: extract the largest JSON and rebuild <think>/<answer>.
        texts = [preprocess_text_into_think_answer(t) for t in texts]

        lm_outputs = DataProto()
        # Direct assignment is a bit hard-coded but bypasses the
        # __init__ checks in DataProto.
        lm_outputs.non_tensor_batch = {
            'response_texts': texts,
            'env_ids': lm_inputs.non_tensor_batch['env_ids'],
            'group_ids': lm_inputs.non_tensor_batch['group_ids'],
        }
        lm_outputs.meta_info = lm_inputs.meta_info
        return lm_outputs

class LLMAgentProxy:
    """
    The proxy means the llm agent is trying to generate some rollout **at this time**, **at this model state**, **at this env state from the env config**
    """

    def __init__(self, config, actor_rollout_wg, tokenizer):
        self.config = config
        self.train_ctx_manager = ContextManager(config, tokenizer, mode="train")
        self.train_es_manager = EnvStateManager(config, mode="train")
        self.val_ctx_manager = ContextManager(config, tokenizer, mode="val")
        self.val_es_manager = EnvStateManager(config, mode="val")
        self.actor_wg = actor_rollout_wg
        self.tokenizer = tokenizer

    def generate_sequences(self, lm_inputs: DataProto):
        """Generate responses via the underlying worker group, with retries.

        Retries up to ``max_retries`` times with exponential backoff, then
        re-raises the last error (with its original traceback).
        """
        # TODO: add kv cache both for the vllm wrapper here and for verl vllm.
        max_retries = 3
        retry_delay = 1.0  # seconds

        for attempt in range(max_retries + 1):
            try:
                if isinstance(self.actor_wg, RayWorkerGroup):
                    # Ray worker groups need the batch padded to a multiple
                    # of world_size before dispatch.
                    padded_lm_inputs, pad_size = pad_dataproto_to_divisor(lm_inputs, self.actor_wg.world_size)
                    padded_lm_outputs = self.actor_wg.generate_sequences(padded_lm_inputs)
                    lm_outputs = unpad_dataproto(padded_lm_outputs, pad_size=pad_size)
                    lm_outputs.meta_info = lm_inputs.meta_info
                    lm_outputs.non_tensor_batch = lm_inputs.non_tensor_batch
                elif isinstance(self.actor_wg, (VllmWrapperWg, ApiCallingWrapperWg)):
                    lm_outputs = self.actor_wg.generate_sequences(lm_inputs)
                else:
                    raise ValueError(f"Unsupported actor worker type: {type(self.actor_wg)}")

                return lm_outputs
            except Exception as e:
                if attempt == max_retries:
                    print(f"LLMAgentProxy.generate_sequences failed after {max_retries} retries. Last error: {e}")
                    # Bare raise preserves the original traceback (fix over `raise e`).
                    raise
                print(f"LLMAgentProxy.generate_sequences attempt {attempt + 1} failed: {e}. Retrying in {retry_delay} seconds...")
                time.sleep(retry_delay)
                retry_delay *= 2  # exponential backoff

    def rollout(self, dataproto: DataProto, val=False):
        """Run a multi-turn rollout and return the formulated trajectories.

        Args:
            dataproto: carries meta_info (sampling flags etc.) for generation.
            val: use the validation managers instead of the training ones.
        """
        es_manager = self.val_es_manager if val else self.train_es_manager
        ctx_manager = self.val_ctx_manager if val else self.train_ctx_manager
        env_outputs = es_manager.reset()

        # Upper bound on the number of turns for this rollout.
        max_turn = self.config.agent_proxy.max_turn

        for i in range(max_turn):
            current_turn = i + 1  # turns are 1-based
            lm_inputs: DataProto = ctx_manager.get_lm_inputs(env_outputs, prepare_for_update=False, current_turn=current_turn)
            lm_inputs.meta_info = dataproto.meta_info  # TODO: setup vllm early stop when max length is reached. make sure this can be done
            lm_outputs: DataProto = self.generate_sequences(lm_inputs)

            env_inputs: List[Dict] = ctx_manager.get_env_inputs(lm_outputs)
            for env_input in env_inputs:
                env_input["turn_number"] = current_turn
            env_outputs: List[Dict] = es_manager.step(env_inputs)
            if len(env_outputs) == 0:  # all environments finished
                break

        rollout_states = es_manager.get_rollout_states()
        rollouts = ctx_manager.formulate_rollouts(rollout_states)
        # self.tokenizer.batch_decode(rollouts.batch['input_ids'], skip_special_tokens=False)  # inspect all trajectories
        return rollouts

# @hydra.main(version_base=None, config_path="../../config", config_name="base")
# def main(config):
# 	# detect config name from python -m ragen.llm_agent.agent_proxy --config_name frozen_lake
# 	os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
# 	os.environ["CUDA_VISIBLE_DEVICES"] = str(config.system.CUDA_VISIBLE_DEVICES)
# 	tokenizer = AutoTokenizer.from_pretrained(config.actor_rollout_ref.model.path)
# 	actor_wg = VllmWrapperWg(config, tokenizer)
# 	proxy = LLMAgentProxy(config, actor_wg, tokenizer)
# 	import time
# 	for _ in range(3):
# 		start_time = time.time()
# 		rollouts = proxy.rollout(DataProto(batch=None, non_tensor_batch=None, meta_info={'eos_token_id': 151645, 'pad_token_id': 151643, 'recompute_log_prob': False, 'do_sample':config.actor_rollout_ref.rollout.do_sample, 'validate': True}), val=True)
# 		end_time = time.time()
# 		print(f'rollout time: {end_time - start_time} seconds')
# 		# print rollout rewards from the rm_scores
# 		rm_scores = rollouts.batch["rm_scores"]
# 		metrics = rollouts.meta_info["metrics"]
# 		avg_reward = rm_scores.sum(-1).mean().item()
# 		print(f'rollout rewards: {avg_reward}')
# 		print(f'metrics:')
# 		for k, v in metrics.items():
# 			print(f'{k}: {v}')

@hydra.main(version_base=None, config_path="../../config", config_name="evaluate_api_llm")
def main(config):
    """Evaluate an API-backed LLM agent: run one validation rollout, then
    print rewards and metrics.  Saving conversations is deliberately left
    to the caller (e.g. run_agent.py).
    """
    # detect config name from python -m ragen.llm_agent.agent_proxy --config_name frozen_lake
    tokenizer = AutoTokenizer.from_pretrained(config.actor_rollout_ref.model.path)
    actor_wg = ApiCallingWrapperWg(config, tokenizer)
    proxy = LLMAgentProxy(config, actor_wg, tokenizer)

    # `time` is already imported at module level; the redundant local
    # `import time` was removed.
    start_time = time.time()
    rollouts = proxy.rollout(DataProto(batch=None, non_tensor_batch=None, meta_info={'eos_token_id': 151645, 'pad_token_id': 151643, 'recompute_log_prob': False, 'do_sample': False, 'validate': True}), val=True)
    print(f'[DEBUG] rollouts: {rollouts}')
    end_time = time.time()
    print(f'rollout time: {end_time - start_time} seconds')

    # Report rollout rewards from the rm_scores tensor.
    rm_scores = rollouts.batch["rm_scores"]
    metrics = rollouts.meta_info["metrics"]
    avg_reward = rm_scores.sum(-1).mean().item()
    print(f'rollout rewards: {avg_reward}')
    print('metrics:')  # plain string: the f-prefix had no placeholders
    for k, v in metrics.items():
        print(f'{k}: {v}')


if __name__ == "__main__":
    main()
