package llm

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/google/uuid"
	"github.com/sirupsen/logrus"

	"xagent/internal/knowledge"
)

// ModelProvider represents an LLM provider
type ModelProvider string

// Supported provider identifiers. The values are stable strings suitable
// for serialization in configuration files.
const (
	ProviderOpenAI    ModelProvider = "openai"    // OpenAI API
	ProviderAnthropic ModelProvider = "anthropic" // Anthropic API
	ProviderLocal     ModelProvider = "local"     // locally hosted model
)

// ModelConfig represents configuration for an LLM model
type ModelConfig struct {
	Provider    ModelProvider // backend serving this model
	Model       string        // provider-specific model identifier
	MaxTokens   int           // default token budget; applied when a request leaves MaxTokens at 0
	Temperature float64       // default sampling temperature; applied when a request leaves it at 0
	TopP        float64       // default nucleus-sampling cutoff; applied when a request leaves it at 0
	ApiKey      string        // NOTE(review): Go convention is APIKey; renaming would break callers
	BaseURL     string        // provider endpoint override; not consumed by the visible code
	Timeout     time.Duration // per-request timeout; not consumed by the visible code
	RetryCount  int           // retry attempts; not consumed by the visible code
	RetryDelay  time.Duration // delay between retries; not consumed by the visible code
}

// CompletionRequest represents a completion request
type CompletionRequest struct {
	Prompt      string   // input text; may be augmented with retrieved knowledge before sending
	MaxTokens   int      // 0 means "use the model config default"
	Temperature float64  // 0 means "use the model config default"
	TopP        float64  // 0 means "use the model config default"
	Stop        []string // stop sequences; not consumed by the visible code
	Stream      bool     // set to true by Manager.Stream before dispatch
}

// CompletionResponse represents a completion response
type CompletionResponse struct {
	ID        string     // unique response identifier (UUID)
	Text      string     // generated completion text
	Usage     TokenUsage // token accounting for this completion
	CreatedAt time.Time  // when the response was produced
	Error     error      // set only on streamed error responses; nil otherwise
}

// TokenUsage represents token usage information
type TokenUsage struct {
	PromptTokens     int // tokens consumed by the prompt
	CompletionTokens int // tokens produced in the completion
	TotalTokens      int // PromptTokens + CompletionTokens
}

// Manager handles LLM interactions and caching
type Manager struct {
	configs   map[string]*ModelConfig // registered model configurations, keyed by model name
	cache     *Cache                  // prompt → response cache
	knowledge *knowledge.RAGEngine    // optional RAG engine for prompt enhancement; may be nil
	metrics   *Metrics                // usage counters, guarded by its own mutex
	logger    *logrus.Entry           // component-scoped logger
	mutex     sync.RWMutex            // guards configs and knowledge
}

// Cache represents a response cache
type Cache struct {
	entries map[string]*CacheEntry // keyed by prompt text
	mutex   sync.RWMutex           // guards entries and the mutable fields of each entry
}

// CacheEntry represents a cached response
type CacheEntry struct {
	Response   *CompletionResponse // the cached completion
	ExpiresAt  time.Time           // entry is considered dead after this instant (1h TTL at insert)
	AccessedAt time.Time           // last lookup time; updated on every cache hit
	HitCount   int                 // starts at 1 on insert, incremented per hit
}

// Metrics tracks LLM usage metrics
type Metrics struct {
	RequestCount   int64         // completions generated (cache hits are not counted here)
	TokensUsed     int64         // cumulative total tokens across completions
	CacheHits      int64         // lookups served from cache
	CacheMisses    int64         // lookups that fell through to generation
	AverageLatency time.Duration // recency-weighted blend, not a true mean — see updateMetrics
	ErrorCount     int64         // failed generation attempts
	mutex          sync.RWMutex  // guards all counters above
}

// NewManager constructs a Manager with an empty model registry, a fresh
// response cache, zeroed metrics, and the supplied RAG engine (may be nil,
// in which case prompts are sent unenhanced).
func NewManager(knowledge *knowledge.RAGEngine) *Manager {
	responseCache := &Cache{entries: make(map[string]*CacheEntry)}

	return &Manager{
		configs:   map[string]*ModelConfig{},
		cache:     responseCache,
		knowledge: knowledge,
		metrics:   &Metrics{},
		logger:    logrus.WithField("component", "llm_manager"),
	}
}

// AddModel registers config under name. It returns an error if a model with
// that name is already registered; existing entries are never overwritten.
func (m *Manager) AddModel(name string, config *ModelConfig) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if _, taken := m.configs[name]; taken {
		return fmt.Errorf("model %s already exists", name)
	}
	m.configs[name] = config

	return nil
}

// Complete generates a completion using the specified model.
//
// Responses are cached under the caller's ORIGINAL prompt. The previous
// implementation enhanced request.Prompt in place and then cached under the
// enhanced text, so the store key never matched the lookup key and enhanced
// prompts could never hit the cache; it also clobbered the caller's request.
// Both are fixed by capturing the key up front and enhancing a local copy.
func (m *Manager) Complete(ctx context.Context, modelName string, request *CompletionRequest) (*CompletionResponse, error) {
	startTime := time.Now()
	defer func() {
		m.updateMetrics(startTime)
	}()

	// Cache key is the caller-supplied prompt, captured before enhancement
	// so lookups and stores agree on the same key.
	cacheKey := request.Prompt

	if cached := m.checkCache(cacheKey); cached != nil {
		m.metrics.mutex.Lock()
		m.metrics.CacheHits++
		m.metrics.mutex.Unlock()
		return cached.Response, nil
	}

	m.metrics.mutex.Lock()
	m.metrics.CacheMisses++
	m.metrics.mutex.Unlock()

	// Resolve the model configuration.
	config, err := m.getModelConfig(modelName)
	if err != nil {
		return nil, err
	}

	// Work on a shallow copy so the caller's request is not mutated.
	// NOTE(review): m.knowledge is read here without m.mutex while
	// SetKnowledgeBase writes it under the lock — preexisting race, out of
	// scope for this fix.
	req := *request
	if m.knowledge != nil {
		enhancedPrompt, err := m.enhancePrompt(ctx, req.Prompt)
		if err != nil {
			// Enhancement is best-effort: fall back to the raw prompt.
			m.logger.WithError(err).Warn("Failed to enhance prompt with knowledge")
		} else {
			req.Prompt = enhancedPrompt
		}
	}

	response, err := m.generateCompletion(ctx, config, &req)
	if err != nil {
		m.metrics.mutex.Lock()
		m.metrics.ErrorCount++
		m.metrics.mutex.Unlock()
		return nil, err
	}

	// Store under the original prompt so the next identical call hits.
	m.cacheResponse(cacheKey, response)

	return response, nil
}

// getModelConfig looks up the configuration registered under name while
// holding the registry read lock.
func (m *Manager) getModelConfig(name string) (*ModelConfig, error) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	if config, ok := m.configs[name]; ok {
		return config, nil
	}
	return nil, fmt.Errorf("model %s not found", name)
}

// enhancePrompt appends up to three relevant knowledge snippets to prompt.
// On search failure it returns the original prompt alongside the error so
// the caller can fall back gracefully.
func (m *Manager) enhancePrompt(ctx context.Context, prompt string) (string, error) {
	hits, err := m.knowledge.Search(ctx, prompt, 3)
	if err != nil {
		return prompt, err
	}

	out := prompt
	for _, hit := range hits {
		out += "\n\nRelevant context:\n" + hit.Document.Content
	}
	return out, nil
}

// generateCompletion produces a completion via the configured provider,
// filling any zero-valued request knob from the model config defaults.
//
// NOTE: this is still a stub — it returns a fixed sample response and
// fixed token counts instead of calling a real provider API.
func (m *Manager) generateCompletion(ctx context.Context, config *ModelConfig, request *CompletionRequest) (*CompletionResponse, error) {
	// Apply config defaults for unset parameters. This mutates the
	// caller-supplied request in place.
	if request.MaxTokens == 0 {
		request.MaxTokens = config.MaxTokens
	}
	if request.Temperature == 0 {
		request.Temperature = config.Temperature
	}
	if request.TopP == 0 {
		request.TopP = config.TopP
	}

	// TODO: Implement actual provider-specific completion generation.
	usage := TokenUsage{
		PromptTokens:     100,
		CompletionTokens: 50,
		TotalTokens:      150,
	}
	response := &CompletionResponse{
		ID:        uuid.NewString(),
		Text:      "Sample response",
		CreatedAt: time.Now(),
		Usage:     usage,
	}

	// Account for the work under the metrics lock.
	m.metrics.mutex.Lock()
	m.metrics.RequestCount++
	m.metrics.TokensUsed += int64(usage.TotalTokens)
	m.metrics.mutex.Unlock()

	return response, nil
}

// checkCache returns the live cache entry for prompt, or nil when the entry
// is absent or expired.
//
// A full write lock is required: hit bookkeeping mutates the entry, and the
// previous implementation did those writes under RLock, racing with any
// concurrent lookup of the same prompt (detectable with -race).
func (m *Manager) checkCache(prompt string) *CacheEntry {
	m.cache.mutex.Lock()
	defer m.cache.mutex.Unlock()

	entry, exists := m.cache.entries[prompt]
	if !exists || time.Now().After(entry.ExpiresAt) {
		return nil
	}

	// Record the hit; these writes are why a read lock is insufficient.
	entry.AccessedAt = time.Now()
	entry.HitCount++
	return entry
}

// cacheResponse stores response under prompt with a one-hour TTL, replacing
// any existing entry for the same prompt.
func (m *Manager) cacheResponse(prompt string, response *CompletionResponse) {
	now := time.Now()
	entry := &CacheEntry{
		Response:   response,
		ExpiresAt:  now.Add(time.Hour),
		AccessedAt: now,
		HitCount:   1,
	}

	m.cache.mutex.Lock()
	m.cache.entries[prompt] = entry
	m.cache.mutex.Unlock()
}

// updateMetrics updates performance metrics
//
// NOTE(review): (previous + latest) / 2 is NOT a true running average — it
// is a recency-weighted blend where the latest sample always carries 50% of
// the weight. A correct mean needs a sample counter on Metrics; consider
// adding one (or renaming the field) in a follow-up.
func (m *Manager) updateMetrics(startTime time.Time) {
	duration := time.Since(startTime)

	m.metrics.mutex.Lock()
	defer m.metrics.mutex.Unlock()

	m.metrics.AverageLatency = (m.metrics.AverageLatency + duration) / 2
}

// GetMetrics returns a point-in-time snapshot of the usage counters. The
// returned value is a copy (with its own zero-valued mutex), so callers may
// read it freely without locking.
func (m *Manager) GetMetrics() *Metrics {
	m.metrics.mutex.RLock()
	defer m.metrics.mutex.RUnlock()

	snapshot := Metrics{
		RequestCount:   m.metrics.RequestCount,
		TokensUsed:     m.metrics.TokensUsed,
		CacheHits:      m.metrics.CacheHits,
		CacheMisses:    m.metrics.CacheMisses,
		AverageLatency: m.metrics.AverageLatency,
		ErrorCount:     m.metrics.ErrorCount,
	}
	return &snapshot
}

// CleanCache drops every expired entry from the response cache. Deleting
// from a map while ranging over it is safe in Go.
func (m *Manager) CleanCache() {
	m.cache.mutex.Lock()
	defer m.cache.mutex.Unlock()

	now := time.Now()
	for key, entry := range m.cache.entries {
		if entry.ExpiresAt.Before(now) {
			delete(m.cache.entries, key)
		}
	}
}

// GetCacheStats reports cache size plus total and mean per-entry hit counts:
// "total_entries" (int), "total_hits" (int), "average_hits" (float64).
func (m *Manager) GetCacheStats() map[string]interface{} {
	m.cache.mutex.RLock()
	defer m.cache.mutex.RUnlock()

	totalHits := 0
	for _, entry := range m.cache.entries {
		totalHits += entry.HitCount
	}

	count := len(m.cache.entries)
	average := 0.0
	if count > 0 {
		average = float64(totalHits) / float64(count)
	}

	return map[string]interface{}{
		"total_entries": count,
		"total_hits":    totalHits,
		"average_hits":  average,
	}
}

// Stream runs a completion asynchronously and delivers the result — or a
// CompletionResponse carrying the error — on the returned channel, which is
// closed when the work finishes.
//
// Every send selects on ctx.Done(): the previous implementation sent on an
// unbuffered channel unconditionally, so a caller that stopped receiving
// (or whose context was cancelled) leaked the goroutine forever. The
// one-slot buffer additionally lets the single result land without a
// rendezvous.
//
// NOTE: provider-side streaming is not implemented yet; the whole
// completion arrives as a single message.
func (m *Manager) Stream(ctx context.Context, modelName string, request *CompletionRequest) (<-chan *CompletionResponse, error) {
	responseChan := make(chan *CompletionResponse, 1)

	go func() {
		defer close(responseChan)

		// send delivers r unless the caller's context is cancelled first,
		// guaranteeing this goroutine always terminates.
		send := func(r *CompletionResponse) {
			select {
			case responseChan <- r:
			case <-ctx.Done():
			}
		}

		modelConfig, err := m.getModelConfig(modelName)
		if err != nil {
			send(&CompletionResponse{Error: err})
			return
		}

		// Mark the request as streaming for the (future) provider call.
		request.Stream = true

		response, err := m.generateCompletion(ctx, modelConfig, request)
		if err != nil {
			send(&CompletionResponse{Error: err})
			return
		}

		send(response)
	}()

	return responseChan, nil
}

// SetKnowledgeBase replaces the RAG engine used for prompt enhancement.
// Passing nil disables enhancement.
func (m *Manager) SetKnowledgeBase(kb *knowledge.RAGEngine) {
	m.mutex.Lock()
	m.knowledge = kb
	m.mutex.Unlock()
}

// GetSupportedModels lists the names of every registered model. Order is
// unspecified because it follows map iteration.
func (m *Manager) GetSupportedModels() []string {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	names := make([]string, 0, len(m.configs))
	for name := range m.configs {
		names = append(names, name)
	}
	return names
}

// GetModelConfig returns the configuration registered under name, or an
// error when no such model exists. Exported wrapper around getModelConfig.
func (m *Manager) GetModelConfig(name string) (*ModelConfig, error) {
	config, err := m.getModelConfig(name)
	return config, err
}
