package embedding

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"sync"
	"time"
	"unicode/utf8"

	"github.com/go-redis/redis/v8"
	"go-file-perception-model/internal/logger"
	"go.uber.org/zap"
)

// BGEModel is a client for a BGE embedding service exposed over HTTP.
// The model itself runs in a separate Python service; this type only
// handles request building, batching, and optional Redis caching.
type BGEModel struct {
	modelPath  string            // model path; managed by the Python service, kept for GetModelInfo
	batchSize  int               // maximum number of texts per request in BatchEmbedding
	maxTokens  int               // maximum text length (in bytes) enforced by cleanText
	httpClient *http.Client      // HTTP client with timeout from BGEServiceConfig.Timeout
	redis      *redis.Client     // optional embedding cache; nil disables caching entirely
	cacheMutex sync.Mutex        // serializes getFromCache/setToCache
	config     *BGEServiceConfig // host/port/API key of the BGE service
}

// BGEServiceConfig holds connection settings for the BGE embedding service.
type BGEServiceConfig struct {
	Host    string // service hostname (no scheme; requests always use http://)
	Port    int    // service port
	APIKey  string // bearer token sent in the Authorization header
	Timeout int    // HTTP client timeout, in seconds
}

// EmbeddingRequest is the JSON request body for the BGE service's
// /v1/embeddings endpoint.
type EmbeddingRequest struct {
	Texts []string `json:"texts"`
}

// EmbeddingResponse is the JSON response body from the BGE service:
// one embedding vector per requested text, in request order.
type EmbeddingResponse struct {
	Embeddings [][]float32 `json:"embeddings"`
}

// NewBGEModel creates a BGE embedding model client for the HTTP service
// described by bgeConfig. It returns an error if bgeConfig is nil.
//
// Caching is disabled: this project does not use Redis, so the redis
// client is left nil and all cache lookups are skipped.
func NewBGEModel(bgeConfig *BGEServiceConfig) (*BGEModel, error) {
	if bgeConfig == nil {
		return nil, fmt.Errorf("bge service config is nil")
	}

	// HTTP client with the configured per-request timeout (seconds).
	httpClient := &http.Client{
		Timeout: time.Duration(bgeConfig.Timeout) * time.Second,
	}

	return &BGEModel{
		modelPath:  "",  // model path is managed by the Python service
		batchSize:  32,  // default batch size
		maxTokens:  512, // default maximum token (byte) count
		httpClient: httpClient,
		redis:      nil, // caching disabled: project does not use Redis
		config:     bgeConfig,
	}, nil
}

// Embedding returns the embedding vector for a single text.
//
// The text is normalized via cleanText first; an error is returned if
// nothing remains after cleaning. When a redis client is configured the
// result is served from / written to the cache, otherwise the BGE HTTP
// service is called directly.
func (m *BGEModel) Embedding(ctx context.Context, text string) ([]float32, error) {
	// Normalize whitespace and cap length before hitting cache/API.
	text = m.cleanText(text)
	if text == "" {
		return nil, fmt.Errorf("text is empty after cleaning")
	}

	// Cache lookup; a cache error simply falls through to the API call.
	if m.redis != nil {
		if cached, err := m.getFromCache(ctx, text); err == nil {
			return cached, nil
		}
	}

	requestBytes, err := json.Marshal(EmbeddingRequest{Texts: []string{text}})
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}

	url := fmt.Sprintf("http://%s:%d/v1/embeddings", m.config.Host, m.config.Port)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(requestBytes))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+m.config.APIKey)

	resp, err := m.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("failed to create embedding: status code: %d, body: %s", resp.StatusCode, string(body))
	}

	var response EmbeddingResponse
	if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	if len(response.Embeddings) == 0 {
		return nil, fmt.Errorf("no embedding data returned")
	}

	vector := response.Embeddings[0]

	// Best-effort cache write; failures are logged, not fatal
	// (matches the logging convention used in BatchEmbedding).
	if m.redis != nil {
		if err := m.setToCache(ctx, text, vector); err != nil {
			logger.Warn("Failed to cache embedding", zap.Error(err))
		}
	}

	return vector, nil
}

// BatchEmbedding returns embedding vectors for a slice of texts.
//
// Texts are cleaned first (texts that become empty are dropped), then
// processed in batches of m.batchSize. When redis is configured, cached
// vectors are reused and only the misses are sent to the BGE service.
// The returned slice is ordered to match the cleaned input texts.
func (m *BGEModel) BatchEmbedding(ctx context.Context, texts []string) ([][]float32, error) {
	// Clean and drop empty texts.
	cleanTexts := make([]string, 0, len(texts))
	for _, text := range texts {
		if cleaned := m.cleanText(text); cleaned != "" {
			cleanTexts = append(cleanTexts, cleaned)
		}
	}
	if len(cleanTexts) == 0 {
		return nil, fmt.Errorf("no valid texts after cleaning")
	}

	allEmbeddings := make([][]float32, 0, len(cleanTexts))
	for i := 0; i < len(cleanTexts); i += m.batchSize {
		end := i + m.batchSize
		if end > len(cleanTexts) {
			end = len(cleanTexts)
		}
		batch := cleanTexts[i:end]

		// Positional result slice for this batch: index j holds the
		// embedding for batch[j], whether it came from cache or API.
		// (The previous append-then-overwrite scheme could drop cached
		// vectors and misorder results when hits and misses were mixed.)
		batchEmbeddings := make([][]float32, len(batch))
		uncachedTexts := make([]string, 0, len(batch))
		uncachedIndices := make([]int, 0, len(batch))

		for j, text := range batch {
			if m.redis != nil {
				if cached, err := m.getFromCache(ctx, text); err == nil {
					batchEmbeddings[j] = cached
					continue
				}
			}
			uncachedTexts = append(uncachedTexts, text)
			uncachedIndices = append(uncachedIndices, j)
		}

		if len(uncachedTexts) > 0 {
			embeddings, err := m.requestEmbeddings(ctx, uncachedTexts)
			if err != nil {
				return nil, err
			}
			if len(embeddings) != len(uncachedTexts) {
				return nil, fmt.Errorf("embedding count mismatch: requested %d, got %d", len(uncachedTexts), len(embeddings))
			}
			for j, embedding := range embeddings {
				batchEmbeddings[uncachedIndices[j]] = embedding

				// Best-effort cache write; failures are logged, not fatal.
				if m.redis != nil {
					if err := m.setToCache(ctx, uncachedTexts[j], embedding); err != nil {
						logger.Warn("Failed to cache embedding", zap.Error(err))
					}
				}
			}
		}

		allEmbeddings = append(allEmbeddings, batchEmbeddings...)
	}

	return allEmbeddings, nil
}

// requestEmbeddings performs one call to the BGE HTTP service for the
// given texts and returns one embedding per text, in request order.
// Extracted so the response body is closed at the end of each call
// rather than deferred across loop iterations in BatchEmbedding (which
// leaked bodies until the whole function returned).
func (m *BGEModel) requestEmbeddings(ctx context.Context, texts []string) ([][]float32, error) {
	requestBytes, err := json.Marshal(EmbeddingRequest{Texts: texts})
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}

	url := fmt.Sprintf("http://%s:%d/v1/embeddings", m.config.Host, m.config.Port)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(requestBytes))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+m.config.APIKey)

	resp, err := m.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("failed to create batch embeddings: status code: %d, body: %s", resp.StatusCode, string(body))
	}

	var response EmbeddingResponse
	if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	return response.Embeddings, nil
}

// cleanText normalizes text before embedding: trims surrounding
// whitespace, collapses internal whitespace runs to single spaces, and
// truncates to at most m.maxTokens bytes without splitting a UTF-8 rune.
//
// NOTE(review): the limit is applied in bytes, not tokens — for CJK
// text this is far fewer characters than "maxTokens" suggests; confirm
// against the service's actual token limit.
func (m *BGEModel) cleanText(text string) string {
	// Fields splits on any whitespace and drops empties, so this both
	// trims and collapses in one pass.
	text = strings.Join(strings.Fields(text), " ")

	if len(text) > m.maxTokens {
		// Back up to a rune boundary so multi-byte characters are not
		// cut in half (a plain byte slice could emit invalid UTF-8).
		cut := m.maxTokens
		for cut > 0 && !utf8.RuneStart(text[cut]) {
			cut--
		}
		text = strings.TrimSpace(text[:cut])
	}

	return text
}

// getFromCache looks up the embedding for text in redis.
//
// Vectors are stored as JSON-encoded []float32 under "embedding:"+text
// (see setToCache). Returns an error on cache miss, redis failure, or a
// corrupt cached value; callers treat any error as a miss.
func (m *BGEModel) getFromCache(ctx context.Context, text string) ([]float32, error) {
	m.cacheMutex.Lock()
	defer m.cacheMutex.Unlock()

	data, err := m.redis.Get(ctx, "embedding:"+text).Result()
	if err != nil {
		return nil, err
	}

	var vector []float32
	if err := json.Unmarshal([]byte(data), &vector); err != nil {
		return nil, fmt.Errorf("failed to decode cached embedding: %w", err)
	}
	return vector, nil
}

// setToCache stores the embedding vector for text in redis as a
// JSON-encoded []float32 under "embedding:"+text, mirroring the format
// read by getFromCache. A 24h TTL bounds cache growth; embeddings for
// identical text are stable, so expiry only costs a recomputation.
func (m *BGEModel) setToCache(ctx context.Context, text string, vector []float32) error {
	m.cacheMutex.Lock()
	defer m.cacheMutex.Unlock()

	data, err := json.Marshal(vector)
	if err != nil {
		return fmt.Errorf("failed to encode embedding for cache: %w", err)
	}

	return m.redis.Set(ctx, "embedding:"+text, data, 24*time.Hour).Err()
}

// Close releases resources held by the model. Currently only the
// optional redis client needs closing; a nil client is a no-op.
func (m *BGEModel) Close() error {
	if m.redis == nil {
		return nil
	}
	return m.redis.Close()
}

// GetModelInfo reports the model's static configuration as a map,
// suitable for logging or status endpoints.
func (m *BGEModel) GetModelInfo() map[string]interface{} {
	info := make(map[string]interface{}, 3)
	info["model_path"] = m.modelPath
	info["batch_size"] = m.batchSize
	info["max_tokens"] = m.maxTokens
	return info
}