package providers

import (
	"context"
	"xagent/internal/streaming"
)

// MockLLM provides a mock implementation of LLMProvider for testing.
// Canned responses are keyed by exact prompt string; any prompt without
// a registered response falls back to a fixed default (see Complete).
type MockLLM struct {
	// responses maps a prompt string to the canned completion returned
	// by Complete. Populated via SetResponse.
	responses map[string]string
}

// NewMockLLM creates a new mock LLM provider with an empty response table.
func NewMockLLM() *MockLLM {
	m := &MockLLM{}
	m.responses = make(map[string]string)
	return m
}

// SetResponse sets a mock response for a given prompt.
// It lazily initializes the response table so a zero-value MockLLM
// (constructed without NewMockLLM) does not panic on a nil-map write.
func (m *MockLLM) SetResponse(prompt, response string) {
	if m.responses == nil {
		m.responses = make(map[string]string)
	}
	m.responses[prompt] = response
}

// Complete implements LLMProvider. It returns the canned response
// registered for the exact prompt, or a fixed default when none exists.
func (m *MockLLM) Complete(ctx context.Context, prompt string, options *CompletionOptions) (string, error) {
	response, found := m.responses[prompt]
	if !found {
		response = "Mock response"
	}
	return response, nil
}

// Chat implements LLMProvider. The messages and options are ignored;
// a fixed canned reply is always returned.
func (m *MockLLM) Chat(ctx context.Context, messages []Message, options *ChatOptions) (string, error) {
	const reply = "Mock chat response"
	return reply, nil
}

// StreamChat implements LLMProvider. It returns a channel that yields a
// single terminal response and is then closed.
//
// The channel is buffered (capacity 1) so the producing goroutine can
// deposit its one event and exit even if the caller never receives —
// with an unbuffered channel an abandoned stream would leak the goroutine.
func (m *MockLLM) StreamChat(ctx context.Context, messages []Message, options *ChatOptions) (<-chan StreamResponse, error) {
	ch := make(chan StreamResponse, 1)
	go func() {
		defer close(ch)
		ch <- StreamResponse{
			Content: "Mock streaming response",
			Done:    true,
		}
	}()
	return ch, nil
}

// Embed implements LLMProvider. It ignores the input text and returns a
// fixed three-dimensional embedding vector.
func (m *MockLLM) Embed(ctx context.Context, text string) ([]float32, error) {
	vector := []float32{0.1, 0.2, 0.3}
	return vector, nil
}

// StreamChatWithHandler implements LLMProvider. It delivers a single
// token event to the handler and marks it as the terminal event.
//
// Done is true so that handlers which wait for stream completion
// terminate — consistent with StreamChat, whose single StreamResponse
// also carries Done: true. (Previously Done was false and no completion
// event ever followed, so such handlers would hang.)
func (m *MockLLM) StreamChatWithHandler(ctx context.Context, messages []Message, options *ChatOptions, handler streaming.StreamHandler) error {
	return handler.OnEvent(&streaming.StreamEvent{
		Type:    "token",
		Content: "Mock streaming response",
		Done:    true,
	})
}

// GetConfig implements LLMProvider. It reports a static configuration
// identifying this provider as the mock implementation.
func (m *MockLLM) GetConfig() *ProviderConfig {
	cfg := ProviderConfig{
		Type:  "mock",
		Model: "mock-model",
	}
	return &cfg
}
