package service

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"k8s-service/models"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/gin-gonic/gin"
)

// ModelConfigCache is an in-memory, TTL-based cache of model
// configurations, keyed first by model type and then by model name.
// The zero value of lastTime marks the cache as never filled.
type ModelConfigCache struct {
	data     map[string]map[string]*models.ModelConfig // [modelType][modelName]modelConfig
	lastTime time.Time                                 // time of the last full cache rebuild
	mu       sync.RWMutex                              // guards data and lastTime
	cacheTTL time.Duration                             // maximum age before the cache counts as stale
}

// ProxyService forwards OpenAI-style requests (chat completion,
// embedding, rerank) to the upstream endpoint configured for the
// requested model.
type ProxyService struct {
	modelConfigService *ModelConfigService // source of truth for model configs (DB-backed)
	configCache        *ModelConfigCache   // TTL cache sitting in front of modelConfigService
}

// NewProxyService constructs a ProxyService backed by the given model
// configuration service, with an initially empty config cache that
// expires five minutes after each rebuild.
func NewProxyService(modelConfigService *ModelConfigService) *ProxyService {
	cache := &ModelConfigCache{
		data:     make(map[string]map[string]*models.ModelConfig),
		cacheTTL: 5 * time.Minute,
	}
	return &ProxyService{
		modelConfigService: modelConfigService,
		configCache:        cache,
	}
}

// ChatCompletionRequest mirrors the OpenAI chat-completion request body.
type ChatCompletionRequest struct {
	Model       string    `json:"model" binding:"required"`    // target model name
	Messages    []Message `json:"messages" binding:"required"` // conversation history
	Temperature *float64  `json:"temperature,omitempty"`       // optional sampling temperature
	MaxTokens   *int      `json:"max_tokens,omitempty"`        // optional completion length cap
	Stream      *bool     `json:"stream,omitempty"`            // when set and true, the proxy relays an SSE stream
}

// Message is a single entry of a chat conversation.
type Message struct {
	Role    string `json:"role" binding:"required"`    // speaker role
	Content string `json:"content" binding:"required"` // message text
}

// EmbeddingRequest mirrors the OpenAI embeddings request body.
type EmbeddingRequest struct {
	Input interface{} `json:"input" binding:"required"` // a string or an array of strings (validated by validateEmbeddingInput)
	Model string      `json:"model" binding:"required"` // target embedding model name
}

// RerankerRequest is the request body for document reranking.
type RerankerRequest struct {
	Model     string   `json:"model" binding:"required"`     // target reranker model name
	Query     string   `json:"query" binding:"required"`     // query to rank the documents against
	Documents []string `json:"documents" binding:"required"` // candidate documents; must be non-empty
}

// ProcessChatCompletion resolves the model configuration for a chat
// completion request. It returns an error when the model is unknown,
// disabled, or the underlying lookup fails.
func (s *ProxyService) ProcessChatCompletion(req *ChatCompletionRequest) (*models.ModelConfig, error) {
	modelConfig, err := s.findModelConfigByName(req.Model, "llm")
	if err != nil {
		// Wrap the underlying error so a DB/lookup failure stays
		// distinguishable (via errors.Is/As) from an unknown model.
		return nil, fmt.Errorf("model '%s' not found or not enabled: %w", req.Model, err)
	}

	return modelConfig, nil
}

// ProcessEmbedding validates the input of an embedding request and
// resolves the configuration of the requested embedding model.
func (s *ProxyService) ProcessEmbedding(req *EmbeddingRequest) (*models.ModelConfig, error) {
	// Reject malformed input before doing any model lookup.
	if err := s.validateEmbeddingInput(req.Input); err != nil {
		return nil, fmt.Errorf("invalid input format: %w", err)
	}

	modelConfig, err := s.findModelConfigByName(req.Model, "embedding")
	if err != nil {
		// Wrap the underlying error so a DB/lookup failure stays
		// distinguishable (via errors.Is/As) from an unknown model.
		return nil, fmt.Errorf("embedding model '%s' not found or not enabled: %w", req.Model, err)
	}

	return modelConfig, nil
}

// ProcessRerank validates a rerank request and resolves the
// configuration of the requested reranker model.
func (s *ProxyService) ProcessRerank(req *RerankerRequest) (*models.ModelConfig, error) {
	// An empty candidate list can never be reranked.
	if len(req.Documents) == 0 {
		return nil, fmt.Errorf("documents array cannot be empty")
	}

	modelConfig, err := s.findModelConfigByName(req.Model, "reranker")
	if err != nil {
		// Wrap the underlying error so a DB/lookup failure stays
		// distinguishable (via errors.Is/As) from an unknown model.
		return nil, fmt.Errorf("reranker model '%s' not found or not enabled: %w", req.Model, err)
	}

	return modelConfig, nil
}

// ProxyRequest forwards requestBody as JSON to the upstream endpoint
// described by modelConfig and relays the upstream response (status,
// headers, body) back to the client. Streaming (SSE) responses are
// flushed chunk by chunk; everything else is copied through whole.
func (s *ProxyService) ProxyRequest(c *gin.Context, modelConfig *models.ModelConfig, requestBody interface{}, timeout time.Duration) error {
	// Serialize the request body.
	bodyBytes, err := json.Marshal(requestBody)
	if err != nil {
		return fmt.Errorf("failed to marshal request: %v", err)
	}

	// Bind the upstream request to the client's context so it is
	// canceled as soon as the client disconnects.
	httpReq, err := http.NewRequestWithContext(c.Request.Context(), "POST", modelConfig.ModelPath, bytes.NewBuffer(bodyBytes))
	if err != nil {
		return fmt.Errorf("failed to create request: %v", err)
	}

	httpReq.Header.Set("Content-Type", "application/json")

	// Merge any extra headers configured for this model. Malformed
	// header JSON is ignored on purpose (best-effort, as before).
	if modelConfig.RequestHeaders != "" {
		var headers map[string]string
		if err := json.Unmarshal([]byte(modelConfig.RequestHeaders), &headers); err == nil {
			for key, value := range headers {
				httpReq.Header.Set(key, value)
			}
		}
	}

	// NOTE(review): client.Timeout bounds the entire exchange including
	// reading a streamed body, so long-lived streams need a generous
	// timeout from the caller.
	client := &http.Client{
		Timeout: timeout,
	}

	resp, err := client.Do(httpReq)
	if err != nil {
		return fmt.Errorf("failed to send request: %v", err)
	}
	defer resp.Body.Close()

	// A response is treated as streaming when the caller asked for a
	// stream or the upstream replies with Server-Sent Events.
	isStream := false
	if chatReq, ok := requestBody.(*ChatCompletionRequest); ok {
		isStream = chatReq.Stream != nil && *chatReq.Stream
	}
	if strings.Contains(resp.Header.Get("Content-Type"), "text/event-stream") {
		isStream = true
	}

	// Mirror upstream headers and status before writing the body.
	for key, values := range resp.Header {
		for _, value := range values {
			c.Header(key, value)
		}
	}
	c.Status(resp.StatusCode)

	if isStream {
		// Streaming response: relay the data flow as it arrives.
		return s.streamResponse(c, resp)
	}
	// Non-streaming response: copy the body through.
	return s.regularResponse(c, resp)
}

// findModelConfigByName resolves a model configuration by name and type,
// consulting the TTL cache first and falling back to the database.
//
// A config matches when its ModelName equals modelName, or — when
// ModelName is unset — when its VendorEN equals modelName.
func (s *ProxyService) findModelConfigByName(modelName, modelType string) (*models.ModelConfig, error) {
	// Fast path: serve from the cache while it is fresh.
	if config := s.getCachedModelConfig(modelType, modelName); config != nil {
		return config, nil
	}

	// Cache miss or expired: reload every enabled config from the DB.
	configs, err := s.modelConfigService.GetEnabledModelConfigs()
	if err != nil {
		return nil, fmt.Errorf("failed to get model configs: %w", err)
	}

	s.updateConfigCache(configs)

	// Iterate by index so returned pointers reference the slice elements
	// themselves (consistent with what updateConfigCache stores), not a
	// copy made by a range variable.
	for i := range configs {
		config := &configs[i]
		if config.ModelType != modelType {
			continue
		}

		if config.ModelName != nil {
			if *config.ModelName == modelName {
				return config, nil
			}
			continue
		}

		// No explicit ModelName: fall back to matching on VendorEN.
		if config.VendorEN == modelName {
			return config, nil
		}
	}

	return nil, fmt.Errorf("model not found: %s", modelName)
}

// getCachedModelConfig returns the cached configuration for the
// (modelType, modelName) pair, or nil when the cache is stale or holds
// no matching entry.
func (s *ProxyService) getCachedModelConfig(modelType, modelName string) *models.ModelConfig {
	s.configCache.mu.RLock()
	defer s.configCache.mu.RUnlock()

	// A stale cache is treated exactly like a miss.
	if time.Since(s.configCache.lastTime) > s.configCache.cacheTTL {
		return nil
	}

	// Indexing a nil/absent inner map safely yields nil.
	byName := s.configCache.data[modelType]
	return byName[modelName]
}

// updateConfigCache rebuilds the cache from the given config list and
// stamps it with the current time.
func (s *ProxyService) updateConfigCache(configs []models.ModelConfig) {
	s.configCache.mu.Lock()
	defer s.configCache.mu.Unlock()

	// Build a fresh index and swap it in, discarding the old one.
	fresh := make(map[string]map[string]*models.ModelConfig)
	for i := range configs {
		cfg := &configs[i]

		byName, ok := fresh[cfg.ModelType]
		if !ok {
			byName = make(map[string]*models.ModelConfig)
			fresh[cfg.ModelType] = byName
		}

		// Prefer the explicit ModelName as the cache key; fall back to
		// VendorEN when no ModelName is set.
		key := cfg.VendorEN
		if cfg.ModelName != nil {
			key = *cfg.ModelName
		}
		byName[key] = cfg
	}

	s.configCache.data = fresh
	s.configCache.lastTime = time.Now()
}

// InvalidateCache drops every cached model configuration so the next
// lookup reloads from the database. Call this whenever model configs
// are created, updated, or deleted.
func (s *ProxyService) InvalidateCache() {
	s.configCache.mu.Lock()
	defer s.configCache.mu.Unlock()

	// Reset both the data and the timestamp so the next read is a miss
	// regardless of the TTL.
	s.configCache.data = map[string]map[string]*models.ModelConfig{}
	s.configCache.lastTime = time.Time{}
}

// validateEmbeddingInput checks that input is either a non-empty string
// or a non-empty array whose elements are all non-empty strings. Both
// []string and the []interface{} form produced by JSON decoding are
// accepted; any other shape is rejected.
func (s *ProxyService) validateEmbeddingInput(input interface{}) error {
	switch v := input.(type) {
	case string:
		if len(v) == 0 {
			return fmt.Errorf("input string cannot be empty")
		}
	case []interface{}:
		// JSON arrays decode to []interface{}; each element must be a
		// non-empty string.
		if len(v) == 0 {
			return fmt.Errorf("input array cannot be empty")
		}
		for i, item := range v {
			str, ok := item.(string)
			if !ok || str == "" {
				return fmt.Errorf("input array item at index %d must be a non-empty string", i)
			}
		}
	case []string:
		if len(v) == 0 {
			return fmt.Errorf("input array cannot be empty")
		}
		for i, str := range v {
			if len(str) == 0 {
				return fmt.Errorf("input array item at index %d cannot be empty", i)
			}
		}
	default:
		return fmt.Errorf("input must be a string or an array of strings")
	}
	return nil
}

// streamResponse relays an upstream SSE stream to the client, flushing
// after every chunk so data is delivered as soon as it arrives.
func (s *ProxyService) streamResponse(c *gin.Context, resp *http.Response) error {
	// Standard SSE response headers.
	c.Header("Content-Type", "text/event-stream")
	c.Header("Cache-Control", "no-cache")
	c.Header("Connection", "keep-alive")
	c.Header("Access-Control-Allow-Origin", "*")

	// Flushing support is mandatory for streaming.
	flusher, ok := c.Writer.(http.Flusher)
	if !ok {
		return fmt.Errorf("streaming not supported")
	}

	chunk := make([]byte, 1024)
	for {
		n, readErr := resp.Body.Read(chunk)
		if n > 0 {
			// Push whatever arrived straight to the client.
			if _, err := c.Writer.Write(chunk[:n]); err != nil {
				return fmt.Errorf("failed to write response: %v", err)
			}
			flusher.Flush()
		}

		switch readErr {
		case nil:
			// Keep reading.
		case io.EOF:
			// Upstream finished the stream normally.
			return nil
		default:
			return fmt.Errorf("failed to read response: %v", readErr)
		}
	}
}

// regularResponse copies a non-streaming upstream body through to the
// client.
func (s *ProxyService) regularResponse(c *gin.Context, resp *http.Response) error {
	// io.Copy streams the body in constant memory instead of buffering
	// the whole payload with io.ReadAll before writing it out.
	if _, err := io.Copy(c.Writer, resp.Body); err != nil {
		return fmt.Errorf("failed to proxy response: %v", err)
	}
	return nil
}
