package providers

import (
	"context"

	"xagent/internal/streaming"
)

// LLMProvider defines the interface that all LLM providers must implement
// LLMProvider defines the interface that all LLM providers must implement.
// Implementations are expected to honor context cancellation on all
// network-bound calls (every method takes a ctx as its first parameter).
type LLMProvider interface {
	// Complete generates a single-shot completion for the given prompt.
	// options may carry model, token-limit, and sampling settings; whether a
	// nil options falls back to defaults is implementation-defined — TODO confirm.
	Complete(ctx context.Context, prompt string, options *CompletionOptions) (string, error)

	// Chat conducts a (non-streaming) conversation with the model and
	// returns the assistant's full reply as a single string.
	Chat(ctx context.Context, messages []Message, options *ChatOptions) (string, error)

	// StreamChat conducts a streaming conversation with the model. Events
	// are delivered on the returned receive-only channel; which side closes
	// the channel and how errors are surfaced is implementation-defined —
	// NOTE(review): confirm against concrete providers.
	StreamChat(ctx context.Context, messages []Message, options *ChatOptions) (<-chan streaming.StreamEvent, error)

	// Embed generates an embedding vector for the given text.
	Embed(ctx context.Context, text string) ([]float32, error)

	// StreamChatWithHandler conducts a streaming conversation, invoking the
	// supplied handler for stream events instead of exposing a channel.
	StreamChatWithHandler(ctx context.Context, messages []Message, options *ChatOptions, handler streaming.StreamHandler) error

	// GetConfig returns the provider configuration.
	GetConfig() *ProviderConfig

	// GetName returns the name of the provider.
	GetName() string
}

// Message represents a chat message
// Message represents a single chat message exchanged with the model.
type Message struct {
	Role    string `json:"role"`    // originating party: "system", "user", or "assistant"
	Content string `json:"content"` // the message text
}

// StreamResponse represents a chunk of streaming response
// StreamResponse represents one chunk of a streaming response.
// NOTE(review): not referenced by the LLMProvider interface in this file
// (which uses streaming.StreamEvent) — presumably consumed elsewhere; verify.
type StreamResponse struct {
	Content string // text delta for this chunk
	Done    bool   // true on the final chunk of the stream
	Error   error  // non-nil if the stream terminated with an error
}

// CompletionOptions contains options for completion requests
// CompletionOptions contains tunable parameters for completion requests.
// Zero values are passed through as-is; use NewDefaultCompletionOptions
// for sensible defaults.
type CompletionOptions struct {
	Model           string   // model identifier to use for the completion
	MaxTokens       int      // maximum number of tokens to generate
	Temperature     float32  // controls randomness (0.0-2.0); higher = more random
	TopP            float32  // nucleus-sampling probability mass cutoff
	PresencePenalty float32  // penalizes tokens already present in the output
	Stop            []string // sequences at which generation stops
}

// ChatOptions contains options for chat requests
// ChatOptions contains tunable parameters for chat requests.
// Field-for-field identical to CompletionOptions; kept as a distinct type
// so the two request kinds can diverge independently.
type ChatOptions struct {
	Model           string   // model identifier to use for the chat
	MaxTokens       int      // maximum number of tokens to generate
	Temperature     float32  // controls randomness (0.0-2.0); higher = more random
	TopP            float32  // nucleus-sampling probability mass cutoff
	PresencePenalty float32  // penalizes tokens already present in the output
	Stop            []string // sequences at which generation stops
}

// ProviderConfig contains configuration for LLM providers
// ProviderConfig contains the connection and authentication configuration
// shared by all LLM providers.
type ProviderConfig struct {
	Type    string                 // provider type (e.g., "openai", "anthropic")
	Model   string                 // default model to use when a request specifies none
	APIKey  string                 // API key for authentication
	BaseURL string                 // base URL for API requests (override for proxies/self-hosted)
	Headers map[string]string      // additional HTTP headers to include in requests
	Options map[string]interface{} // provider-specific options not covered by the fields above
}

// NewDefaultCompletionOptions creates default completion options
func NewDefaultCompletionOptions() *CompletionOptions {
	return &CompletionOptions{
		Model:           "gpt-3.5-turbo",
		MaxTokens:       2000,
		Temperature:     0.7,
		TopP:            1.0,
		PresencePenalty: 0.0,
		Stop:            []string{},
	}
}

// NewDefaultChatOptions creates default chat options
func NewDefaultChatOptions() *ChatOptions {
	return &ChatOptions{
		Model:           "gpt-3.5-turbo",
		MaxTokens:       2000,
		Temperature:     0.7,
		TopP:            1.0,
		PresencePenalty: 0.0,
		Stop:            []string{},
	}
}
