from janus.models import MultiModalityCausalLM, VLChatProcessor
import os
import torch
import uuid

def compress_sequence(seq, min_run_len=10):
    """Run-length compress a sequence for compact display.

    Runs of at least ``min_run_len`` identical elements are collapsed into a
    single ``"<count> * <value>"`` entry; shorter runs are emitted element by
    element. Every output entry is a string.

    Args:
        seq: sequence of (comparable) elements; may be empty.
        min_run_len: minimum run length that triggers collapsing.

    Returns:
        List of strings representing the compressed sequence.
    """
    if not seq:
        return []

    # First pass: collect (value, run_length) pairs.
    runs = []
    for item in seq:
        if runs and runs[-1][0] == item:
            runs[-1][1] += 1
        else:
            runs.append([item, 1])

    # Second pass: render each run, collapsing only the long ones.
    rendered = []
    for value, length in runs:
        if length >= min_run_len:
            rendered.append(f"{length} * {value}")
        else:
            rendered.extend([str(value)] * length)

    return rendered

def save_debug_info(inputs_embeds, attention_mask, input_ids=None, labels=None, tokenizer=None, save_dir="./experiments/debug_forward"):
    """Dump tensor statistics and (compressed) token sequences to a debug file.

    Creates a uniquely named text file under *save_dir* containing:
      - summary statistics of ``inputs_embeds`` (shape, mean/std, min/max),
      - the run-length-compressed attention mask, input ids and labels
        (first batch element only),
      - optionally the decoded token text when *tokenizer* is given.

    Args:
        inputs_embeds: embedding tensor; only summary stats are recorded.
        attention_mask: optional mask tensor; section skipped when ``None``.
        input_ids: optional token-id tensor; section skipped when ``None``.
        labels: optional label tensor; section skipped when ``None``.
        tokenizer: optional tokenizer with a ``decode`` method.
        save_dir: output directory (created if missing).
    """
    os.makedirs(save_dir, exist_ok=True)
    debug_id = str(uuid.uuid4())[:8]
    save_path = os.path.join(save_dir, f"forward_debug_{debug_id}.txt")

    def _write_compressed(f, values, per_line=16):
        # Run-length compress a flat list and print `per_line` entries per row.
        compressed = compress_sequence(values)
        for i in range(0, len(compressed), per_line):
            f.write(', '.join(compressed[i:i + per_line]) + "\n")
        f.write("\n")

    def _write_decoded(f, values, per_line=32):
        # Decode ids in fixed-size windows so long sequences stay readable.
        # NOTE(review): labels often contain ignore indices (e.g. -100);
        # decoding those may fail or yield garbage -- confirm upstream usage.
        for i in range(0, len(values), per_line):
            token_ids = values[i:i + per_line]
            decoded = tokenizer.decode(token_ids, skip_special_tokens=False)
            f.write(f"[{i:04d}-{i+len(token_ids)-1:04d}]: {decoded}\n")
        f.write("\n")

    with open(save_path, "w", encoding="utf-8") as f:
        # --- inputs_embeds summary ---
        f.write(f"inputs_embeds shape: {inputs_embeds.shape}\n")
        f.write(f"inputs_embeds mean: {inputs_embeds.mean().item():.6f}, std: {inputs_embeds.std().item():.6f}\n")
        f.write(f"inputs_embeds min: {inputs_embeds.min().item():.6f}, max: {inputs_embeds.max().item():.6f}\n\n")

        # --- attention_mask section ---
        if attention_mask is not None:
            f.write(f"attention_mask shape: {attention_mask.shape}\n")
            f.write(f"attention_mask sum (valid tokens): {attention_mask.sum().item()}\n")
            f.write(f"attention_mask (compressed):\n")
            _write_compressed(f, attention_mask[0].cpu().tolist())

        # --- input_ids section ---
        if input_ids is not None:
            f.write(f"==== input_ids (ids compressed) ====\n")
            input_ids_list = input_ids[0].cpu().tolist()
            _write_compressed(f, input_ids_list)
            if tokenizer is not None:
                f.write(f"==== input_ids (decoded) ====\n")
                _write_decoded(f, input_ids_list)

        # --- labels section ---
        if labels is not None:
            f.write(f"==== labels (ids compressed) ====\n")
            label_list = labels[0].cpu().tolist()
            _write_compressed(f, label_list)
            if tokenizer is not None:
                f.write(f"==== labels (decoded) ====\n")
                _write_decoded(f, label_list)

    print(f"[Debug] 保存了forward调试信息: {save_path}")
    
class EyeKnowner(MultiModalityCausalLM):
    """Multimodal causal LM whose forward routes through image-aware embeddings.

    When ``pixel_values`` is present, token ids and image features are fused
    into ``inputs_embeds`` via ``prepare_inputs_embeds`` and the language model
    is invoked with embeddings only; otherwise plain ``input_ids`` are used.
    """

    def forward(self, input_ids=None, attention_mask=None, pixel_values=None,
                images_seq_mask=None, images_emb_mask=None, labels=None, **kwargs):
        """Run the underlying language model, fusing image features first if any.

        Args:
            input_ids: token ids. On the multimodal path they are consumed by
                ``prepare_inputs_embeds`` and not passed to the language model.
            attention_mask: attention mask, forwarded unchanged.
            pixel_values: image tensor; its presence selects the multimodal path.
            images_seq_mask: mask locating image positions in the token sequence
                (consumed by ``prepare_inputs_embeds``).
            images_emb_mask: mask over image embeddings
                (consumed by ``prepare_inputs_embeds``).
            labels: forwarded to the language model for loss computation.
            **kwargs: extra arguments forwarded to the language model;
                collator-specific keys it does not accept are stripped below.

        Returns:
            The language model's output (loss/logits object).
        """
        # Drop dataset/collator leftovers the language model would reject.
        kwargs.pop('sft_format', None)
        kwargs.pop('loss_mask', None)

        inputs_embeds = None
        if pixel_values is not None:
            # Cast images to the model dtype (e.g. fp16/bf16) before encoding.
            pixel_values = pixel_values.to(dtype=self.dtype)
            inputs_embeds = self.prepare_inputs_embeds(
                input_ids=input_ids,
                pixel_values=pixel_values,
                images_seq_mask=images_seq_mask,
                images_emb_mask=images_emb_mask,
            )

        if inputs_embeds is not None:
            # Avoid passing both input_ids and inputs_embeds downstream --
            # supplying both typically conflicts in HF-style forward methods.
            kwargs.pop("inputs_embeds", None)
            return self.language_model(
                input_ids=None,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                labels=labels,
                **kwargs,
            )
        return self.language_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels,
            **kwargs,
        )
