package providers

import (
	"context"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/sashabaranov/go-openai"
	"go.uber.org/zap"

	"yanxue_ai_go/pkg/common/ai"
	"yanxue_ai_go/pkg/common/logger"
)

// OpenAIModel implements the ai.AIModel interface on top of the OpenAI API
// using the sashabaranov/go-openai client.
type OpenAIModel struct {
	client    *openai.Client         // underlying OpenAI HTTP client; rebuilt by UpdateConfig when credentials change
	modelID   string                 // provider-scoped identifier, e.g. "openai-gpt-4"
	modelInfo ai.ModelInfo           // static metadata built once at construction
	config    map[string]interface{} // raw settings ("api_key", "base_url", "model"); merged into by UpdateConfig
}

// OpenAIConfig holds the settings needed to construct an OpenAIModel.
type OpenAIConfig struct {
	APIKey  string `json:"api_key"`  // OpenAI API key
	BaseURL string `json:"base_url"` // optional endpoint override; empty means the default OpenAI URL
	Model   string `json:"model"`    // model name, e.g. "gpt-4" or "gpt-3.5-turbo"
}

// NewOpenAIModel creates an OpenAI-backed model instance from the given
// configuration. A non-empty BaseURL overrides the default OpenAI endpoint
// (useful for proxies or API-compatible services).
func NewOpenAIModel(config OpenAIConfig) (ai.AIModel, error) {
	// Build the OpenAI client, honoring a custom endpoint when provided.
	clientConfig := openai.DefaultConfig(config.APIKey)
	if config.BaseURL != "" {
		clientConfig.BaseURL = config.BaseURL
	}
	client := openai.NewClientWithConfig(clientConfig)

	// Use a single timestamp so CreatedAt and UpdatedAt are identical at
	// construction time (calling time.Now() twice can yield two instants).
	now := time.Now()

	// Static model metadata; token limits and prices come from the lookup
	// tables at the bottom of this file.
	modelInfo := ai.ModelInfo{
		ID:          fmt.Sprintf("openai-%s", config.Model),
		Provider:    ai.ProviderOpenAI,
		Type:        ai.TypeChat,
		Name:        config.Model,
		Description: fmt.Sprintf("OpenAI %s model", config.Model),
		MaxTokens:   getModelMaxTokens(config.Model),
		InputPrice:  getModelInputPrice(config.Model),
		OutputPrice: getModelOutputPrice(config.Model),
		Features:    []string{"chat", "streaming", "tools", "vision"},
		CreatedAt:   now,
		UpdatedAt:   now,
		Metadata: map[string]interface{}{
			"base_url": config.BaseURL,
			"model":    config.Model,
		},
	}

	// Keep a mutable copy of the raw settings; UpdateConfig merges into it.
	configMap := map[string]interface{}{
		"api_key":  config.APIKey,
		"base_url": config.BaseURL,
		"model":    config.Model,
	}

	return &OpenAIModel{
		client:    client,
		modelID:   modelInfo.ID,
		modelInfo: modelInfo,
		config:    configMap,
	}, nil
}

// GetModelInfo returns the static metadata describing this model.
func (m *OpenAIModel) GetModelInfo() ai.ModelInfo {
	return m.modelInfo
}

// GetProvider returns the provider identifier (always OpenAI).
func (m *OpenAIModel) GetProvider() ai.ModelProvider {
	return ai.ProviderOpenAI
}

// GetType returns the model type (always chat).
func (m *OpenAIModel) GetType() ai.ModelType {
	return ai.TypeChat
}

// Health verifies connectivity to the OpenAI endpoint by issuing a cheap
// model-listing request; any error means the endpoint or credentials are
// not usable.
func (m *OpenAIModel) Health(ctx context.Context) error {
	if _, err := m.client.ListModels(ctx); err != nil {
		return fmt.Errorf("openai health check failed: %w", err)
	}
	return nil
}

// Chat performs a single (non-streaming) chat completion request.
//
// Optional request fields (temperature, max tokens, top_p, penalties, stop
// sequences, tools) are forwarded only when the caller actually set them,
// so the OpenAI defaults apply otherwise.
func (m *OpenAIModel) Chat(ctx context.Context, req ai.ChatRequest) (*ai.ChatResponse, error) {
	// Map the provider-agnostic messages onto the OpenAI wire format.
	chatMessages := make([]openai.ChatCompletionMessage, 0, len(req.Messages))
	for _, msg := range req.Messages {
		chatMessages = append(chatMessages, openai.ChatCompletionMessage{
			Role:    msg.Role,
			Content: msg.Content,
		})
	}

	chatReq := openai.ChatCompletionRequest{
		Model:    m.getModelName(req.Model),
		Messages: chatMessages,
	}

	// Copy over only the options the caller explicitly provided.
	if req.Temperature != nil {
		chatReq.Temperature = *req.Temperature
	}
	if req.MaxTokens != nil {
		chatReq.MaxTokens = *req.MaxTokens
	}
	if req.TopP != nil {
		chatReq.TopP = *req.TopP
	}
	if req.FrequencyPenalty != nil {
		chatReq.FrequencyPenalty = *req.FrequencyPenalty
	}
	if req.PresencePenalty != nil {
		chatReq.PresencePenalty = *req.PresencePenalty
	}
	if len(req.Stop) > 0 {
		chatReq.Stop = req.Stop
	}

	// Translate tool/function definitions, if any.
	if len(req.Tools) > 0 {
		toolDefs := make([]openai.Tool, 0, len(req.Tools))
		for _, tool := range req.Tools {
			toolDefs = append(toolDefs, openai.Tool{
				Type: openai.ToolType(tool.Type),
				Function: &openai.FunctionDefinition{
					Name:        tool.Function.Name,
					Description: tool.Function.Description,
					Parameters:  tool.Function.Parameters,
				},
			})
		}
		chatReq.Tools = toolDefs
	}

	resp, err := m.client.CreateChatCompletion(ctx, chatReq)
	if err != nil {
		logger.Logger.Error("OpenAI chat completion failed",
			zap.Error(err),
			zap.String("model", m.modelInfo.Name))
		return nil, fmt.Errorf("openai chat completion failed: %w", err)
	}

	// Map the OpenAI response back into the provider-agnostic shape.
	outChoices := make([]ai.Choice, 0, len(resp.Choices))
	for _, c := range resp.Choices {
		outChoices = append(outChoices, ai.Choice{
			Index: c.Index,
			Message: ai.ChatMessage{
				Role:    c.Message.Role,
				Content: c.Message.Content,
			},
			FinishReason: c.FinishReason,
		})
	}

	return &ai.ChatResponse{
		ID:      resp.ID,
		Object:  resp.Object,
		Created: resp.Created,
		Model:   resp.Model,
		Choices: outChoices,
		Usage: ai.Usage{
			PromptTokens:     resp.Usage.PromptTokens,
			CompletionTokens: resp.Usage.CompletionTokens,
			TotalTokens:      resp.Usage.TotalTokens,
		},
		SystemFingerprint: resp.SystemFingerprint,
	}, nil
}

// ChatStream performs a streaming chat completion request. It returns a
// channel of incremental responses; the channel is closed when the stream
// ends (normally or on error) or when ctx is canceled. Stream errors are
// logged rather than surfaced, matching the previous behavior.
func (m *OpenAIModel) ChatStream(ctx context.Context, req ai.ChatRequest) (<-chan ai.ChatResponse, error) {
	// Map the provider-agnostic messages onto the OpenAI wire format.
	messages := make([]openai.ChatCompletionMessage, len(req.Messages))
	for i, msg := range req.Messages {
		messages[i] = openai.ChatCompletionMessage{
			Role:    msg.Role,
			Content: msg.Content,
		}
	}

	openaiReq := openai.ChatCompletionRequest{
		Model:    m.getModelName(req.Model),
		Messages: messages,
		Stream:   true,
	}

	// Forward the same optional parameters Chat supports so streaming and
	// non-streaming requests behave consistently.
	if req.Temperature != nil {
		openaiReq.Temperature = *req.Temperature
	}
	if req.MaxTokens != nil {
		openaiReq.MaxTokens = *req.MaxTokens
	}
	if req.TopP != nil {
		openaiReq.TopP = *req.TopP
	}
	if req.FrequencyPenalty != nil {
		openaiReq.FrequencyPenalty = *req.FrequencyPenalty
	}
	if req.PresencePenalty != nil {
		openaiReq.PresencePenalty = *req.PresencePenalty
	}
	if len(req.Stop) > 0 {
		openaiReq.Stop = req.Stop
	}

	stream, err := m.client.CreateChatCompletionStream(ctx, openaiReq)
	if err != nil {
		return nil, fmt.Errorf("failed to create chat completion stream: %w", err)
	}

	// Small buffer so the producer goroutine is not lock-stepped with the
	// consumer.
	responseChan := make(chan ai.ChatResponse, 10)

	// Pump stream chunks into the channel until EOF, error, or cancellation.
	go func() {
		defer close(responseChan)
		defer stream.Close()

		for {
			response, err := stream.Recv()
			if err != nil {
				// io.EOF signals normal end of stream; use errors.Is
				// rather than a fragile string comparison so wrapped
				// EOFs are recognized too.
				if errors.Is(err, io.EOF) {
					return
				}
				logger.Logger.Error("Stream receive error", zap.Error(err))
				return
			}

			// Convert the delta chunk into the provider-agnostic shape.
			choices := make([]ai.Choice, len(response.Choices))
			for i, choice := range response.Choices {
				choices[i] = ai.Choice{
					Index: choice.Index,
					Delta: &ai.ChatMessage{
						Role:    choice.Delta.Role,
						Content: choice.Delta.Content,
					},
					FinishReason: choice.FinishReason,
				}
			}

			aiResponse := ai.ChatResponse{
				ID:      response.ID,
				Object:  response.Object,
				Created: response.Created,
				Model:   response.Model,
				Choices: choices,
			}

			// Deliver the chunk, or bail out if the caller canceled.
			select {
			case responseChan <- aiResponse:
			case <-ctx.Done():
				return
			}
		}
	}()

	return responseChan, nil
}

// Embedding computes vector embeddings for the given input via the OpenAI
// embeddings API. Optional fields (encoding format, dimensions, user) are
// forwarded only when the caller set them.
func (m *OpenAIModel) Embedding(ctx context.Context, req ai.EmbeddingRequest) (*ai.EmbeddingResponse, error) {
	embReq := openai.EmbeddingRequest{
		Input: req.Input,
		Model: openai.EmbeddingModel(m.getModelName(req.Model)),
	}

	// Copy over only the options the caller explicitly provided.
	if req.EncodingFormat != "" {
		embReq.EncodingFormat = openai.EmbeddingEncodingFormat(req.EncodingFormat)
	}
	if req.Dimensions != nil {
		embReq.Dimensions = *req.Dimensions
	}
	if req.User != "" {
		embReq.User = req.User
	}

	resp, err := m.client.CreateEmbeddings(ctx, embReq)
	if err != nil {
		return nil, fmt.Errorf("openai embedding failed: %w", err)
	}

	// Map each embedding back into the provider-agnostic shape.
	vectors := make([]ai.Embedding, 0, len(resp.Data))
	for _, item := range resp.Data {
		vectors = append(vectors, ai.Embedding{
			Object:    item.Object,
			Index:     item.Index,
			Embedding: item.Embedding,
		})
	}

	return &ai.EmbeddingResponse{
		Object: resp.Object,
		Data:   vectors,
		Model:  resp.Model,
		Usage: ai.Usage{
			PromptTokens: resp.Usage.PromptTokens,
			TotalTokens:  resp.Usage.TotalTokens,
		},
	}, nil
}

// CallTool is part of the ai.AIModel interface but is intentionally
// unsupported for OpenAI: tool/function calling must flow through the chat
// completion API (see Chat), so this always returns an error.
func (m *OpenAIModel) CallTool(ctx context.Context, toolName string, params map[string]interface{}) (interface{}, error) {
	return nil, fmt.Errorf("tool calling should be handled through chat completion")
}

// UpdateConfig merges the given settings into the model's configuration.
// If either "api_key" or "base_url" is present in the update, the OpenAI
// client is rebuilt from the merged configuration so subsequent requests
// use the new credentials/endpoint.
//
// Note: this method is not safe for concurrent use with in-flight requests;
// callers are expected to serialize configuration changes.
func (m *OpenAIModel) UpdateConfig(config map[string]interface{}) error {
	// Merge the incoming settings into the stored configuration.
	for key, value := range config {
		m.config[key] = value
	}

	// Rebuild the client when either connection-relevant setting changed.
	// (Previously only an "api_key" update triggered a rebuild, so a
	// base_url-only change silently kept the stale client.)
	_, apiKeyChanged := config["api_key"]
	_, baseURLChanged := config["base_url"]
	if apiKeyChanged || baseURLChanged {
		apiKey, _ := m.config["api_key"].(string)
		clientConfig := openai.DefaultConfig(apiKey)
		if baseURL, ok := m.config["base_url"].(string); ok && baseURL != "" {
			clientConfig.BaseURL = baseURL
		}
		m.client = openai.NewClientWithConfig(clientConfig)
	}

	return nil
}

// GetConfig returns a shallow copy of the current configuration so callers
// cannot mutate the model's internal state through the returned map.
func (m *OpenAIModel) GetConfig() map[string]interface{} {
	snapshot := make(map[string]interface{}, len(m.config))
	for key, value := range m.config {
		snapshot[key] = value
	}
	return snapshot
}

// getModelName resolves the model to use for a request: the per-request
// model wins, then the configured model, then a safe default.
func (m *OpenAIModel) getModelName(requestModel string) string {
	if requestModel != "" {
		return requestModel
	}
	if configured, ok := m.config["model"].(string); ok {
		return configured
	}
	return "gpt-3.5-turbo"
}

// getModelMaxTokens returns the context window size (in tokens) for a known
// OpenAI model name, defaulting to 4096 for unrecognized models.
func getModelMaxTokens(model string) int {
	switch model {
	case "gpt-4-turbo", "gpt-4-turbo-preview":
		return 128000
	case "gpt-4-32k":
		return 32768
	case "gpt-3.5-turbo-16k":
		return 16384
	case "gpt-4":
		return 8192
	default:
		// Covers "gpt-3.5-turbo" and any unknown model.
		return 4096
	}
}

// getModelInputPrice returns the input (prompt) price in USD per 1K tokens
// for a known OpenAI model name, defaulting to the gpt-3.5-turbo rate.
func getModelInputPrice(model string) float64 {
	switch model {
	case "gpt-4-32k":
		return 0.06
	case "gpt-4":
		return 0.03
	case "gpt-4-turbo", "gpt-4-turbo-preview":
		return 0.01
	case "gpt-3.5-turbo-16k":
		return 0.003
	default:
		// Covers "gpt-3.5-turbo" and any unknown model.
		return 0.0015
	}
}

// getModelOutputPrice returns the output (completion) price in USD per 1K
// tokens for a known OpenAI model name, defaulting to the gpt-3.5-turbo rate.
func getModelOutputPrice(model string) float64 {
	switch model {
	case "gpt-4-32k":
		return 0.12
	case "gpt-4":
		return 0.06
	case "gpt-4-turbo", "gpt-4-turbo-preview":
		return 0.03
	case "gpt-3.5-turbo-16k":
		return 0.004
	default:
		// Covers "gpt-3.5-turbo" and any unknown model.
		return 0.002
	}
}
