# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
import io
import json
import logging
import math
import os
import random
import re

import requests
from openai import OpenAI
from PIL import Image

import verl.utils.torch_functional as verl_F
from verl.utils.dataset.rl_dataset import RLHFDataset
from verl.utils.model import compute_position_id_with_mask

logger = logging.getLogger(__name__)


class CustomRLHFDataset(RLHFDataset):
    """RLHF dataset whose rows are stored with JSON-serialized columns.

    Each raw row keeps ``prompt``/``reward_model``/``extra_info`` as JSON
    strings and the image column as a JSON list whose ``bytes`` fields are
    Base64-encoded.  ``__getitem__`` deserializes the row, rewrites the chat
    prompt for a tool-calling agent, and tokenizes it for training.
    """

    # Columns stored as JSON strings that must be parsed back into objects.
    _JSON_COLUMNS = ("prompt", "reward_model", "extra_info")

    def _deserialize_row(self, raw_row_dict: dict) -> dict:
        """Return a copy of ``raw_row_dict`` with serialized columns decoded.

        JSON columns are parsed into Python objects (invalid JSON keeps the
        raw string).  The image column has each entry's Base64 ``bytes``
        payload decoded back to raw bytes; on any decode failure the column
        becomes ``None`` so ``__getitem__`` can report the bad item.
        """
        row_dict: dict = {}
        for key, value in raw_row_dict.items():
            if key in self._JSON_COLUMNS and isinstance(value, str):
                try:
                    row_dict[key] = json.loads(value)
                except json.JSONDecodeError:
                    # Not valid JSON -> keep the original string untouched.
                    row_dict[key] = value
            elif key == self.image_key and isinstance(value, str):
                try:
                    images = json.loads(value)
                    for img_info in images:
                        # Undo the Base64 encoding applied at serialization time.
                        img_info["bytes"] = base64.b64decode(img_info["bytes"])
                    row_dict[key] = images
                except (json.JSONDecodeError, TypeError, binascii.Error):
                    # Corrupt image payload: mark missing; the caller raises.
                    row_dict[key] = None
            else:
                # All other columns pass through unchanged.
                row_dict[key] = value
        return row_dict

    def _rewrite_prompt(self, row_dict: dict) -> None:
        """Replace ``row_dict[self.prompt_key]`` with a system+user chat.

        The user content is taken from the second message when the original
        prompt has exactly two turns, otherwise from the first (empty string
        when the prompt is absent or empty), and its answer-format
        instruction is rewritten for the tool-calling agent.
        """
        prompt_list = row_dict.get(self.prompt_key, [])
        if len(prompt_list) == 2:
            raw_user_content = prompt_list[1]["content"]
        elif prompt_list:
            raw_user_content = prompt_list[0]["content"]
        else:
            raw_user_content = ""
        # Swap the plain "boxed answer" instruction for the agentic format.
        user_prompt = re.sub(
            r"Put answer letter in the \\boxed\{\}",
            "Think first, call tools if needed, then answer. Format strictly as:  <think>...</think>  <tool_call>...</tool_call> (if tools needed,must after <think>, you can call it at most once in a turn)  <answer>...</answer> \n Put final answer letter in the \\boxed{}",
            raw_user_content,
        )
        # NOTE(review): the prompt text below contains typos ("fuctions",
        # missing space before "You can call").  Left byte-identical because
        # trained checkpoints may depend on the exact prompt bytes — confirm
        # before fixing.
        row_dict[self.prompt_key] = [
            {
                "role": "system",
                "content": (
                    "You are a helpful assistant. You can call functions to assist with the user query. "
                    "In each turn, you must think first, then decide whether to call a tool or directly answer. "
                    "Put your thinking process in <think>...</think> tags. "
                    "Important: You must call only one function at a time. After each function call, "
                    "wait for the execution result before making the next function call if needed."
                    "You can call fuctions at most 4 times in the whole conversation."
                ),
            },
            {
                "role": "user",
                "content": user_prompt,
            },
        ]

    def __getitem__(self, item):
        """Return one fully processed training sample.

        Deserializes the raw row, rewrites the prompt, applies the chat
        template, tokenizes (multimodal via ``self.processor`` when present,
        text-only via ``self.tokenizer`` otherwise), pads/truncates to
        ``self.max_prompt_length``, and attaches position ids plus
        bookkeeping fields (``raw_prompt_ids``, ``raw_prompt``,
        ``full_prompts``, ``index``, ``agent_name``).

        Raises:
            ValueError: when a processor is configured but the image column
                is missing or failed to deserialize.
            RuntimeError: when ``self.truncation == "error"`` and the prompt
                exceeds ``self.max_prompt_length``.
        """
        row_dict = self._deserialize_row(self.dataframe[item])
        self._rewrite_prompt(row_dict)

        messages = self._build_messages(row_dict)
        model_inputs = {}

        if self.processor is not None:
            raw_prompt = self.processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
            multi_modal_data = {}

            if row_dict.get(self.image_key) is not None:
                # Bytes were Base64-decoded in _deserialize_row, so PIL can open them.
                images = [Image.open(io.BytesIO(image["bytes"])) for image in row_dict.pop(self.image_key)]
                multi_modal_data["image"] = images
            else:
                raise ValueError(f"Image data missing or invalid for item {item}.")

            model_inputs = self.processor(text=[raw_prompt], images=images, return_tensors="pt")

            input_ids = model_inputs.pop("input_ids")
            attention_mask = model_inputs.pop("attention_mask")
            # Drop non-tensor metadata that would break downstream collation.
            model_inputs.pop("second_per_grid_ts", None)

            row_dict["multi_modal_data"] = multi_modal_data

            if self.return_multi_modal_inputs:
                row_dict["multi_modal_inputs"] = dict(model_inputs)
                # Defensive re-pop in case the key survived in the copy.
                row_dict["multi_modal_inputs"].pop("second_per_grid_ts", None)
        else:
            # Text-only models: plain tokenizer path.
            raw_prompt = self.tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
            model_inputs = self.tokenizer(raw_prompt, return_tensors="pt", add_special_tokens=False)
            input_ids = model_inputs.pop("input_ids")
            attention_mask = model_inputs.pop("attention_mask")

        input_ids, attention_mask = verl_F.postprocess_data(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_length=self.max_prompt_length,
            pad_token_id=self.tokenizer.pad_token_id,
            left_pad=True,
            truncation=self.truncation,
        )

        if self.processor is not None and "Qwen2VLImageProcessor" in self.processor.image_processor.__class__.__name__:
            # Qwen2-VL uses mrope position ids derived from image/video grids.
            from verl.models.transformers.qwen2_vl import get_rope_index

            position_ids = [
                get_rope_index(
                    self.processor,
                    input_ids=input_ids[0],
                    image_grid_thw=model_inputs.get("image_grid_thw"),
                    video_grid_thw=model_inputs.get("video_grid_thw"),
                    second_per_grid_ts=model_inputs.get("second_per_grid_ts"),
                    attention_mask=attention_mask[0],
                )
            ]
        else:
            position_ids = compute_position_id_with_mask(attention_mask)

        row_dict["input_ids"] = input_ids[0]
        row_dict["attention_mask"] = attention_mask[0]
        row_dict["position_ids"] = position_ids[0]

        # Keep an unpadded token view of the prompt (used by rollout engines),
        # clipped to max_prompt_length with the configured strategy.
        raw_prompt_ids = self.tokenizer.encode(raw_prompt, add_special_tokens=False)
        if len(raw_prompt_ids) > self.max_prompt_length:
            if self.truncation == "left":
                raw_prompt_ids = raw_prompt_ids[-self.max_prompt_length :]
            elif self.truncation == "right":
                raw_prompt_ids = raw_prompt_ids[: self.max_prompt_length]
            elif self.truncation == "middle":
                left_half = self.max_prompt_length // 2
                right_half = self.max_prompt_length - left_half
                raw_prompt_ids = raw_prompt_ids[:left_half] + raw_prompt_ids[-right_half:]
            elif self.truncation == "error":
                raise RuntimeError(f"Prompt length {len(raw_prompt_ids)} is longer than {self.max_prompt_length}.")

        row_dict["raw_prompt_ids"] = raw_prompt_ids
        row_dict["raw_prompt"] = messages
        row_dict["full_prompts"] = raw_prompt

        # extra_info may be a dict, a raw string (failed JSON parse), or
        # absent; only a dict can carry an index.
        extra_info = row_dict.get("extra_info")
        row_dict["index"] = extra_info.get("index", 0) if isinstance(extra_info, dict) else 0
        row_dict["agent_name"] = "tool_agent"
        return row_dict

