package context

import (
	"context"
	"sync"

	"github.com/yaoapp/gou/plan"
	"github.com/yaoapp/gou/store"
	"github.com/yaoapp/yao/openapi/oauth/types"
	traceTypes "github.com/yaoapp/yao/trace/types"
)

// Accept identifies the response format the caller expects.
// Valid values are the Accept* constants below; see ValidAccepts.
type Accept string

// Referer identifies the origin of the request (API, process, agent, etc.).
// Valid values are the Referer* constants below; see ValidReferers.
// NOTE(review): Context.Referer and Stack.Referer are declared as plain
// string rather than this type — consider unifying in a future change.
type Referer string

// Client represents the client information extracted from the HTTP request.
// All fields are optional and omitted from JSON when empty.
type Client struct {
	Type      string `json:"type,omitempty"`       // Client type: web, android, ios, windows, macos, linux, agent, jssdk
	UserAgent string `json:"user_agent,omitempty"` // Original User-Agent header
	IP        string `json:"ip,omitempty"`         // Client IP address
}

// Valid Accept values (response formats). See ValidAccepts for the lookup set.
const (
	// AcceptStandard standard response format compatible with OpenAI API and general chat UIs (default)
	AcceptStandard = "standard"

	// AcceptWebCUI web-based CUI format with action request support for Yao Chat User Interface
	AcceptWebCUI = "cui-web"

	// AcceptNativeCUI native mobile/tablet CUI format with action request support
	AcceptNativeCUI = "cui-native"

	// AccepNativeCUI is a misspelled alias of AcceptNativeCUI, kept so existing
	// callers keep compiling.
	//
	// Deprecated: use AcceptNativeCUI instead.
	AccepNativeCUI = AcceptNativeCUI

	// AcceptDesktopCUI desktop CUI format with action request support
	AcceptDesktopCUI = "cui-desktop"
)

// ValidAccepts is the set of valid accept types, keyed by their string form
// for O(1) validation of incoming Accept values.
var ValidAccepts = map[string]bool{
	AcceptStandard:   true,
	AcceptWebCUI:     true,
	AccepNativeCUI:   true,
	AcceptDesktopCUI: true,
}

// Valid Referer values (request origins). See ValidReferers for the lookup set.
const (
	// RefererAPI request from HTTP API endpoint
	RefererAPI = "api"

	// RefererProcess request from Yao Process call
	RefererProcess = "process"

	// RefererMCP request from MCP (Model Context Protocol) server
	RefererMCP = "mcp"

	// RefererJSSDK request from JavaScript SDK
	RefererJSSDK = "jssdk"

	// RefererAgent request from agent-to-agent recursive call (assistant calling another assistant)
	RefererAgent = "agent"

	// RefererTool request from tool/function execution
	RefererTool = "tool"

	// RefererHook request from hook trigger (on_message, on_error, etc.)
	RefererHook = "hook"

	// RefererSchedule request from scheduled task or cron job
	RefererSchedule = "schedule"

	// RefererScript request from custom script execution
	RefererScript = "script"

	// RefererInternal request from internal system call
	RefererInternal = "internal"
)

// ValidReferers is the set of valid referer types, keyed by their string form
// for O(1) validation of incoming Referer values.
var ValidReferers = map[string]bool{
	RefererAPI:      true,
	RefererProcess:  true,
	RefererMCP:      true,
	RefererJSSDK:    true,
	RefererAgent:    true,
	RefererTool:     true,
	RefererHook:     true,
	RefererSchedule: true,
	RefererScript:   true,
	RefererInternal: true,
}

// Stack lifecycle statuses (see Stack.Status). A stack moves from pending to
// running, then to exactly one of completed, failed, or timeout.
const (
	// StackStatusPending stack is created but not started yet
	StackStatusPending = "pending"

	// StackStatusRunning stack is currently executing
	StackStatusRunning = "running"

	// StackStatusCompleted stack completed successfully
	StackStatusCompleted = "completed"

	// StackStatusFailed stack failed with error
	StackStatusFailed = "failed"

	// StackStatusTimeout stack execution timeout
	StackStatusTimeout = "timeout"
)

// ValidStackStatus is the set of valid stack status types, keyed by their
// string form for O(1) validation of Stack.Status values.
var ValidStackStatus = map[string]bool{
	StackStatusPending:   true,
	StackStatusRunning:   true,
	StackStatusCompleted: true,
	StackStatusFailed:    true,
	StackStatusTimeout:   true,
}

// Interrupt Types and Constants
// ===============================

// InterruptType represents the type of interrupt (how aggressively the
// in-flight operation is stopped before the interrupt is handled).
type InterruptType string

const (
	// InterruptGraceful waits for current step to complete before handling interrupt
	InterruptGraceful InterruptType = "graceful"

	// InterruptForce immediately cancels current operation and handles interrupt
	InterruptForce InterruptType = "force"
)

// InterruptAction represents the action to take after an interrupt has been
// handled (returned by the interrupted hook; see HookInterruptedResponse).
type InterruptAction string

const (
	// InterruptActionContinue appends new messages and continues execution
	InterruptActionContinue InterruptAction = "continue"

	// InterruptActionRestart restarts execution with only new messages
	InterruptActionRestart InterruptAction = "restart"

	// InterruptActionAbort terminates the request
	InterruptActionAbort InterruptAction = "abort"
)

// InterruptSignal represents an interrupt signal carrying new messages from
// the user. Signals are queued and processed by an InterruptController.
type InterruptSignal struct {
	Type      InterruptType          `json:"type"`               // Interrupt type: graceful or force
	Messages  []Message              `json:"messages"`           // User's new messages (can be multiple)
	Timestamp int64                  `json:"timestamp"`          // Interrupt timestamp in milliseconds
	Metadata  map[string]interface{} `json:"metadata,omitempty"` // Additional metadata
}

// InterruptHandler is the function signature for handling interrupts.
// A handler is registered in the InterruptController and called when an
// interrupt signal is received.
//
// Parameters:
//   - ctx: The context being interrupted
//   - signal: The interrupt signal (contains Type and Messages)
//
// Returns:
//   - error: Error if interrupt handling failed
type InterruptHandler func(ctx *Context, signal *InterruptSignal) error

// InterruptController manages interrupt handling for a context.
// All interrupt-related fields are encapsulated in this type.
//
// NOTE(review): this struct embeds a sync.RWMutex, so values must never be
// copied — always pass *InterruptController.
type InterruptController struct {
	queue           chan *InterruptSignal `json:"-"` // Queue to receive interrupt signals
	current         *InterruptSignal      `json:"-"` // Current interrupt being processed
	pending         []*InterruptSignal    `json:"-"` // Pending interrupts in queue
	mutex           sync.RWMutex          `json:"-"` // Protects current and pending
	ctx             context.Context       `json:"-"` // Interrupt control context (independent from HTTP context)
	cancel          context.CancelFunc    `json:"-"` // Cancel function for force interrupt
	listenerStarted bool                  `json:"-"` // Whether listener goroutine is started
	handler         InterruptHandler      `json:"-"` // Handler to process interrupt signals
	contextID       string                `json:"-"` // Context ID to retrieve the parent context
}

// Context is the per-request execution context. It embeds context.Context
// for cancellation/deadline propagation and carries request identity,
// authorization, call arguments, and CUI/client information.
//
// NOTE(review): embedding context.Context in a struct is discouraged by Go
// convention; presumably it is kept here for the lifetime of a single
// request — confirm before refactoring.
type Context struct {

	// Context
	context.Context
	ID     string             `json:"id"` // Context ID for external interrupt identification
	Space  plan.Space         `json:"-"`  // Shared data space, it will be used to share data between the request and the call
	Cache  store.Store        `json:"-"`  // Cache store, it will be used to store the message cache, default is "__yao.agent.cache"
	Stack  *Stack             `json:"-"`  // Stack, current active stack of the request
	Stacks map[string]*Stack  `json:"-"`  // Stacks, all stacks in this request (for trace logging)
	Writer Writer             `json:"-"`  // Writer, it will be used to write response data to the client
	trace  traceTypes.Manager `json:"-"`  // Trace manager, lazy initialized on first access

	// Interrupt control (all interrupt-related logic is encapsulated in InterruptController)
	Interrupt *InterruptController `json:"-"` // Interrupt controller for handling user interrupts during streaming

	// Authorized information
	Authorized  *types.AuthorizedInfo `json:"authorized,omitempty"`   // Authorized information
	ChatID      string                `json:"chat_id,omitempty"`      // Chat ID, use to select chat
	AssistantID string                `json:"assistant_id,omitempty"` // Assistant ID, use to select assistant
	Sid         string                `json:"sid" yaml:"-"`           // Session ID (Deprecated: use Authorized instead)
	Connector   string                `json:"connector,omitempty"`    // Connector, use to select the connector of the LLM Model, Default is Assistant.Connector
	Search      *bool                 `json:"search,omitempty"`       // Search mode, default is true (pointer so "unset" is distinguishable from false)

	// Arguments for call
	Args       []interface{} `json:"args,omitempty"`        // Arguments for call, it will be used to pass data to the call
	Retry      bool          `json:"retry,omitempty"`       // Retry mode
	RetryTimes uint8         `json:"retry_times,omitempty"` // Retry times

	// Locale information
	Locale string `json:"locale,omitempty"` // Locale
	Theme  string `json:"theme,omitempty"`  // Theme

	// Request information
	Client  Client `json:"client,omitempty"`  // Client information from HTTP request
	Referer string `json:"referer,omitempty"` // Request source: api, process, mcp, jssdk, agent, tool, hook, schedule, script, internal (see Referer* constants)
	Accept  Accept `json:"accept,omitempty"`  // Response format: standard, cui-web, cui-native, cui-desktop (see Accept* constants)

	// CUI Context information
	Route    string                 `json:"route,omitempty"`    // The route of the request, it will be used to identify the route of the request
	Metadata map[string]interface{} `json:"metadata,omitempty"` // The metadata of the request, it will be used to pass data to the page

	Silent bool `json:"silent,omitempty"` // Silent mode (Deprecated: use Referer instead)
}

// Stack represents the call stack node for tracing agent-to-agent calls.
// Uses a flat structure (parent/path references by ID) to avoid circular
// references and memory overhead.
type Stack struct {
	// Identity
	ID      string `json:"id"`       // Unique stack node ID, used to identify this specific call
	TraceID string `json:"trace_id"` // Shared trace ID for entire call tree, inherited from root

	// Call context
	AssistantID string `json:"assistant_id"`      // Assistant handling this call
	Referer     string `json:"referer,omitempty"` // Call source: api, agent, tool, process, etc. (see Referer* constants)
	Depth       int    `json:"depth"`             // Call depth in the tree (0=root)

	// Relationships
	ParentID string   `json:"parent_id,omitempty"` // Parent stack ID (empty for root call)
	Path     []string `json:"path"`                // Full path from root: [root_id, parent_id, ..., this_id]

	// Tracking
	CreatedAt   int64  `json:"created_at"`             // Unix timestamp in milliseconds
	CompletedAt *int64 `json:"completed_at,omitempty"` // Unix timestamp when completed (nil if ongoing)
	Status      string `json:"status"`                 // Status: pending, running, completed, failed, timeout (see StackStatus* constants)
	Error       string `json:"error,omitempty"`        // Error message if failed

	// Metrics
	DurationMs *int64 `json:"duration_ms,omitempty"` // Duration in milliseconds (calculated when completed)
}

// Response aggregates the per-hook responses of a request.
// 100% compatible with the OpenAI API. Each field is nil when the
// corresponding hook did not run or produced no result.
type Response struct {
	Create     *HookCreateResponse   `json:"create,omitempty"`
	MCP        *ResponseHookMCP      `json:"mcp,omitempty"`
	Done       *ResponseHookDone     `json:"done,omitempty"`
	Failback   *ResponseHookFailback `json:"failback,omitempty"`
	Completion *CompletionResponse   `json:"completion,omitempty"`
}

// HookCreateResponse is the response of the create hook. It lets the hook
// supply messages, tune generation parameters, and override selected Context
// fields before the completion request is issued.
type HookCreateResponse struct {

	// Messages to be sent to the assistant
	Messages []Message `json:"messages,omitempty"`

	// Audio configuration (for models that support audio output)
	Audio *AudioConfig `json:"audio,omitempty"`

	// Generation parameters (pointers so "unset" is distinguishable from zero)
	Temperature         *float64 `json:"temperature,omitempty"`
	MaxTokens           *int     `json:"max_tokens,omitempty"`
	MaxCompletionTokens *int     `json:"max_completion_tokens,omitempty"`

	// Context adjustments - allow hook to modify context fields
	AssistantID string                 `json:"assistant_id,omitempty"` // Override assistant ID
	Connector   string                 `json:"connector,omitempty"`    // Override connector
	Locale      string                 `json:"locale,omitempty"`       // Override locale
	Theme       string                 `json:"theme,omitempty"`        // Override theme
	Route       string                 `json:"route,omitempty"`        // Override route
	Metadata    map[string]interface{} `json:"metadata,omitempty"`     // Override or merge metadata
}

// ResponseHookDone is the response of the done hook.
// Currently carries no data; presumably reserved for future extension — confirm.
type ResponseHookDone struct{}

// ResponseHookMCP is the response of the mcp hook.
// Currently carries no data; presumably reserved for future extension — confirm.
type ResponseHookMCP struct{}

// ResponseHookFailback is the response of the failback hook.
// Currently carries no data; presumably reserved for future extension — confirm.
type ResponseHookFailback struct{}

// HookInterruptedResponse is the response of the interrupted hook. It tells
// the runtime how to proceed after a user interrupt (see InterruptAction) and
// may override selected Context fields for the next execution.
type HookInterruptedResponse struct {
	// Action to take after interrupt is handled
	Action InterruptAction `json:"action"` // continue, restart, or abort

	// Messages to use for next execution (if action is continue or restart)
	Messages []Message `json:"messages,omitempty"`

	// Context adjustments - allow hook to modify context fields
	AssistantID string                 `json:"assistant_id,omitempty"` // Override assistant ID
	Connector   string                 `json:"connector,omitempty"`    // Override connector
	Locale      string                 `json:"locale,omitempty"`       // Override locale
	Theme       string                 `json:"theme,omitempty"`        // Override theme
	Route       string                 `json:"route,omitempty"`        // Override route
	Metadata    map[string]interface{} `json:"metadata,omitempty"`     // Override or merge metadata

	// Notice to send to client
	Notice string `json:"notice,omitempty"` // Message to display to user (e.g., "Processing your new question...")
}

// Message Structure ( OpenAI Chat Completion Input Message Structure, https://platform.openai.com/docs/api-reference/chat/create#chat/create-messages )
// ===============================

// MessageRole represents the role of a message author, matching the OpenAI
// chat completion message roles.
type MessageRole string

// Message role constants
const (
	RoleDeveloper MessageRole = "developer" // Developer-provided instructions (o1 models and newer)
	RoleSystem    MessageRole = "system"    // System instructions
	RoleUser      MessageRole = "user"      // User messages
	RoleAssistant MessageRole = "assistant" // Assistant responses
	RoleTool      MessageRole = "tool"      // Tool responses
)

// Message represents a message in the conversation, compatible with OpenAI's
// chat completion API. Supports message types: developer, system, user,
// assistant, and tool.
type Message struct {
	// Common fields for all message types
	Role    MessageRole `json:"role"`              // Required: message author role
	Content interface{} `json:"content,omitempty"` // string or array of ContentPart; Required for most types, optional for assistant with tool_calls
	Name    *string     `json:"name,omitempty"`    // Optional: participant name to differentiate between participants of the same role

	// Tool message specific fields
	ToolCallID *string `json:"tool_call_id,omitempty"` // Required for tool messages: tool call that this message is responding to

	// Assistant message specific fields
	ToolCalls []ToolCall `json:"tool_calls,omitempty"` // Optional for assistant: tool calls generated by the model
	Refusal   *string    `json:"refusal,omitempty"`    // Optional for assistant: refusal message (null when not refusing)
}

// ContentPartType represents the type of a content part in a multimodal
// message (see ContentPart).
type ContentPartType string

// Content part type constants
const (
	ContentText       ContentPartType = "text"        // Text content
	ContentImageURL   ContentPartType = "image_url"   // Image URL content (Vision)
	ContentInputAudio ContentPartType = "input_audio" // Input audio content (Audio)
)

// ContentPart represents a part of the message content (for multimodal
// messages). Used when Message.Content is an array instead of a simple
// string. Exactly one of Text, ImageURL, or InputAudio is expected to be set,
// matching Type.
type ContentPart struct {
	Type       ContentPartType `json:"type"`                  // Required: content part type
	Text       string          `json:"text,omitempty"`        // For type="text": the text content
	ImageURL   *ImageURL       `json:"image_url,omitempty"`   // For type="image_url": the image URL
	InputAudio *InputAudio     `json:"input_audio,omitempty"` // For type="input_audio": the input audio data
}

// ImageDetailLevel represents the detail level for image processing
// (OpenAI Vision "detail" parameter).
type ImageDetailLevel string

// Image detail level constants
const (
	DetailAuto ImageDetailLevel = "auto" // Let the model decide
	DetailLow  ImageDetailLevel = "low"  // Low detail (faster, cheaper)
	DetailHigh ImageDetailLevel = "high" // High detail (slower, more expensive)
)

// ImageURL represents an image reference in the message content.
type ImageURL struct {
	URL    string           `json:"url"`              // Required: URL of the image or base64 encoded image data
	Detail ImageDetailLevel `json:"detail,omitempty"` // Optional: how the model processes the image (defaults to auto when empty)
}

// InputAudio represents input audio data in the message content.
type InputAudio struct {
	Data   string `json:"data"`   // Required: Base64 encoded audio data
	Format string `json:"format"` // Required: Audio format (e.g., "wav", "mp3")
}

// ToolCallType represents the type of tool call (see ToolCall.Type).
type ToolCallType string

// Tool call type constants
const (
	ToolTypeFunction ToolCallType = "function" // Function call (currently the only type)
)

// ToolCall represents a tool call generated by the model (for assistant
// messages), mirroring the OpenAI tool_calls structure.
type ToolCall struct {
	ID       string       `json:"id"`       // Required: unique identifier for the tool call
	Type     ToolCallType `json:"type"`     // Required: type of tool call, currently only "function"
	Function Function     `json:"function"` // Required: function call details
}

// Function represents a function call with name and arguments.
type Function struct {
	Name      string `json:"name"`                // Required: name of the function to call
	Arguments string `json:"arguments,omitempty"` // Optional: arguments to pass to the function, as a JSON string (may be invalid JSON; validate before use)
}

// Completion Request Structure ( OpenAI Chat Completion Request, https://platform.openai.com/docs/api-reference/chat/create )
// ===============================

// CompletionRequest represents a chat completion request compatible with
// OpenAI's API, extended with CUI context fields (Route, Metadata).
type CompletionRequest struct {
	// Required fields
	Model    string    `json:"model"`    // Required: ID of the model to use
	Messages []Message `json:"messages"` // Required: list of messages comprising the conversation so far

	// Audio configuration (for models that support audio output)
	Audio *AudioConfig `json:"audio,omitempty"` // Optional: audio output configuration

	// Generation parameters (pointers so "unset" is distinguishable from zero)
	Temperature         *float64 `json:"temperature,omitempty"`           // Optional: sampling temperature (0-2), defaults to 1
	MaxTokens           *int     `json:"max_tokens,omitempty"`            // Optional: maximum number of tokens to generate (deprecated, use max_completion_tokens)
	MaxCompletionTokens *int     `json:"max_completion_tokens,omitempty"` // Optional: maximum number of tokens that can be generated in the completion

	// Streaming configuration
	Stream        *bool          `json:"stream,omitempty"`         // Optional: if true, stream partial message deltas
	StreamOptions *StreamOptions `json:"stream_options,omitempty"` // Optional: options for streaming response

	// CUI Context information
	Route    string                 `json:"route,omitempty"`    // Optional: route of the request for CUI context
	Metadata map[string]interface{} `json:"metadata,omitempty"` // Optional: metadata to pass to the page for CUI context
}

// AudioConfig represents the audio output configuration for models that
// support audio output.
type AudioConfig struct {
	Voice  string `json:"voice"`  // Required: voice to use for audio output (e.g., "alloy", "echo", "fable", "onyx", "nova", "shimmer")
	Format string `json:"format"` // Required: audio output format (e.g., "wav", "mp3", "flac", "opus", "pcm16")
}

// StreamOptions represents options for streaming responses.
type StreamOptions struct {
	IncludeUsage bool `json:"include_usage,omitempty"` // If true, include usage statistics in the final chunk
}
