import { z } from "zod/v4";

/**
 * Tool/function definition as it appears in ChatML messages.
 * Describes a callable tool the LLM may invoke; `parameters` is an
 * arbitrary JSON-schema-like object and is not validated further here.
 */
const ToolDefinitionSchema = z.object({
  name: z.string(),
  description: z.optional(z.string()),
  parameters: z.optional(z.record(z.string(), z.any())),
});

/**
 * A single tool/function invocation requested by the model.
 * `arguments` is kept as the raw JSON-encoded string; it is not parsed here.
 */
const ToolCallSchema = z.object({
  id: z.string(),
  name: z.string(),
  arguments: z.string(), // raw JSON string of call arguments
  type: z.optional(z.string()),
  index: z.optional(z.number()),
});

/**
 * Structured form of a Langfuse media reference, extracted from the magic
 * string format: @@@langfuseMedia:type=X|id=Y|source=Z@@@
 * `referenceString` preserves the original, unparsed magic string.
 */
export const ParsedMediaReferenceSchema = z.object({
  type: z.string(),
  id: z.string(),
  source: z.string(),
  referenceString: z.string(),
});
export type ParsedMediaReferenceType = z.infer<typeof ParsedMediaReferenceSchema>;

/**
 * Schema that parses Langfuse media reference magic strings into a
 * ParsedMediaReference object.
 * Format: @@@langfuseMedia:type=image/jpeg|id=<uuid>|source=base64@@@
 *
 * Invalid input is reported via ctx.addIssue, so `.parse()` throws a
 * ZodError and `.safeParse()` returns `{ success: false }` as usual.
 */
export const MediaReferenceStringSchema = z
  .string()
  .transform((str, ctx) => {
    // @@@langfuseMedia:type=image/jpeg|id=cc48838a-3da8-4ca4-a007-2cf8df930e69|source=base64@@@
    const magicStringPattern = /^@@@langfuseMedia:(.*)@@@$/;

    const match = str.match(magicStringPattern);
    if (!match) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        message: "Invalid langfuseMedia magic string format",
      });
      return z.NEVER;
    }

    const content = match[1];
    const parts = content.split("|").filter(Boolean);

    // Seed with the original string so downstream consumers can echo it back.
    const metadata: Record<string, string> = {
      referenceString: str,
    };

    for (const part of parts) {
      // Split on the FIRST "=" only, so values that themselves contain "="
      // (e.g. base64-style padding) are not silently truncated, which the
      // previous `part.split("=")` destructuring did.
      const separatorIndex = part.indexOf("=");
      if (separatorIndex > 0) {
        const key = part.slice(0, separatorIndex).trim();
        const value = part.slice(separatorIndex + 1).trim();
        metadata[key] = value;
      } else {
        // No "=" at all, or an empty key ("=value") — reject the pair.
        ctx.addIssue({
          code: z.ZodIssueCode.custom,
          message: `Invalid key-value pair: ${part}`,
        });
        return z.NEVER;
      }
    }
    return metadata;
  })
  .pipe(ParsedMediaReferenceSchema);

/**
 * OpenAI input audio content part for multimodal chat completions.
 * The audio payload must be a Langfuse media reference magic string.
 */
const OpenAIInputAudioContentPart = z.object({
  type: z.literal("input_audio"),
  input_audio: z.object({ data: MediaReferenceStringSchema }),
});

/**
 * OpenAI text content part.
 * Used in content arrays for text portions of messages.
 * Defined as per https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages
 *
 * Accepts the chat-completions value ("text") as well as the
 * responses-style variants ("input_text", "output_text").
 */
const OpenAITextContentPart = z.object({
  // z.enum is the idiomatic zod form for a closed set of string literals;
  // it accepts exactly the same values (and infers the same union type) as
  // the previous z.union of z.literal()s.
  type: z.enum(["text", "input_text", "output_text"]),
  text: z.string(),
});
export type OpenAITextContentPartType = z.infer<typeof OpenAITextContentPart>;

/**
 * URL for image content (http/https only).
 * NOTE(review): the regex only anchors the "http:"/"https:" scheme prefix;
 * the remainder of the URL is not validated.
 */
export const OpenAIUrlImageUrl = z.string().regex(/^https?:/);

/**
 * Base64-encoded image data URL.
 * Supported formats: png, jpeg, jpg, gif, webp.
 * Only the "data:image/<fmt>;base64," prefix is checked; the base64
 * payload itself is not validated.
 */
const OpenAIBase64ImageUrl = z
  .string()
  .regex(/^data:image\/(png|jpeg|jpg|gif|webp);base64,/);

/**
 * OpenAI image content part for vision-enabled models.
 * The url accepts an http(s) URL, a Langfuse media reference magic string,
 * or a base64 data URL. Union members are tried in order; the first match
 * wins, so the http(s) check runs before the media-reference transform.
 */
const OpenAIImageContentPart = z.object({
  type: z.literal("image_url"),
  image_url: z.object({
    url: z.union([
      OpenAIUrlImageUrl,
      MediaReferenceStringSchema,
      OpenAIBase64ImageUrl,
    ]),
    // Processing fidelity; defaults to "auto".
    // https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding
    detail: z.optional(z.enum(["low", "high", "auto"])),
  }),
});
export type OpenAIImageContentPartType = z.infer<typeof OpenAIImageContentPart>;

/**
 * Array of OpenAI content parts (text, image, audio).
 * Used when message content is structured with multiple parts.
 */
export const OpenAIContentParts = z.array(
  z.union([
    OpenAITextContentPart,
    OpenAIImageContentPart,
    OpenAIInputAudioContentPart,
  ]),
);

/**
 * Model-generated output audio.
 * `data` must be a Langfuse media reference magic string; `transcript`
 * carries the optional text transcription.
 */
const OpenAIOutputAudioSchema = z.object({
  data: MediaReferenceStringSchema,
  transcript: z.optional(z.string()),
});
export type OpenAIOutputAudioType = z.infer<typeof OpenAIOutputAudioSchema>;

/**
 * Message content field: either a plain string or an array of structured
 * content parts. Nullable so that content-less messages (e.g. a message
 * carrying only tool calls) still validate.
 */
export const OpenAIContentSchema = z.nullable(
  z.union([z.string(), OpenAIContentParts]),
);
export type OpenAIContentSchema = z.infer<typeof OpenAIContentSchema>;

/**
 * Type guard: true when `content` validates as an OpenAI text content part.
 */
export const isOpenAITextContentPart = (
  // `unknown` instead of `any`: callers can still pass anything, but the
  // guard no longer disables type checking at its call sites.
  content: unknown,
): content is z.infer<typeof OpenAITextContentPart> => {
  return OpenAITextContentPart.safeParse(content).success;
};

/**
 * Type guard: true when `content` validates as an OpenAI image content part.
 */
export const isOpenAIImageContentPart = (
  // `unknown` instead of `any`: callers can still pass anything, but the
  // guard no longer disables type checking at its call sites.
  content: unknown,
): content is z.infer<typeof OpenAIImageContentPart> => {
  return OpenAIImageContentPart.safeParse(content).success;
};

/**
 * Base ChatML message schema with full OpenAI API support.
 *
 * This schema validates messages from OpenAI's Chat Completions API and similar formats.
 * It includes support for:
 * - Text, image, and audio content (multimodal)
 * - Tool/function calling (definitions and invocations)
 * - Additional framework-specific metadata (unknown keys are kept, see `.loose()`)
 *
 * Used primarily for frontend rendering where full message structure is needed.
 * For simple content extraction, use SimpleChatMessageSchema instead.
 */
export const BaseChatMlMessageSchema = z
  .object({
    role: z.string().optional(),
    name: z.string().optional(),
    // NOTE(review): union members are tried in order, and z.string() /
    // z.array(z.any()) precede OpenAIContentSchema, so the last member looks
    // reachable only for `null` — which .nullish() already allows. Likely
    // redundant; kept for backward compatibility.
    content: z
      .union([
        z.record(z.string(), z.any()),
        z.string(),
        z.array(z.any()),
        OpenAIContentSchema,
      ])
      .nullish(),
    audio: OpenAIOutputAudioSchema.optional(),
    additional_kwargs: z.record(z.string(), z.any()).optional(),
    tools: z.array(ToolDefinitionSchema).optional(),
    tool_calls: z.array(ToolCallSchema).optional(),
    tool_call_id: z.string().optional(),
  })
  // `.loose()` keeps unknown keys (framework-specific metadata). It is the
  // zod v4 replacement for the deprecated `.passthrough()` and matches the
  // style already used by SimpleChatMessageSchema in this file.
  .loose();

/**
 * Minimal ChatML message schema for backend content extraction.
 *
 * Intentionally permissive and lightweight:
 * - Fast validation during compact representation extraction
 * - Accepts multiple chat formats (OpenAI, LangChain, Anthropic, etc.)
 * - Skips expensive validation of media, audio, and tool structures
 *
 * Only the minimal structure needed to extract displayable content is
 * checked: an optional role, and a content field that may be a string,
 * an array, or an object. Unknown extra keys are preserved.
 *
 * Use this for backend operations; for frontend rendering, use
 * BaseChatMlMessageSchema.
 */
const SimpleChatMessageSchema = z.looseObject({
  role: z.optional(z.string()),
  content: z
    .union([
      z.string(), // simple string content
      z.array(z.any()), // content-parts array (text/image/audio)
      z.record(z.string(), z.any()), // structured content object
    ])
    .nullish(),
});

/**
 * Non-empty array of minimal ChatML messages.
 * Used for backend extraction; rejects empty conversations.
 */
export const SimpleChatMlArraySchema = SimpleChatMessageSchema.array().min(1);
