package openai

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"shyxy-model-agent/core/config"

	"shyxy-model-agent/core/http/middleware"
	"shyxy-model-agent/pkg/model"

	"github.com/gofiber/fiber/v2"

	"shyxy-model-agent/core/schema"
	pb "shyxy-model-agent/pkg/grpc/proto"

	"time"

	"github.com/google/uuid"
	"github.com/rs/zerolog/log"
)

// OpenAI API "object" discriminator values used in responses.
const (
	// ChatCompletionObject marks a complete (non-streaming) chat response.
	ChatCompletionObject      string = "chat.completion"
	// ChatCompletionChunkObject marks a single SSE chunk of a streaming response.
	ChatCompletionChunkObject string = "chat.completion.chunk"
)

// ChatEndpoint returns a fiber handler implementing an OpenAI-compatible
// chat completions endpoint. It forwards the parsed request (placed in the
// context by the request middleware) to a gRPC model backend and supports
// both streaming (SSE "data: ...\n\n" chunks terminated by "data: [DONE]")
// and non-streaming JSON responses.
func ChatEndpoint(dbLoader *config.DBLoader, modelLoader *model.ModelLoader) func(c *fiber.Ctx) error {
	return func(c *fiber.Ctx) error {
		// get request body from the context
		input, ok := c.Locals(middleware.CONTEXT_LOCALS_KEY_DEF_MODEL_REQUEST).(*schema.OpenAIRequest)
		if !ok || input.Model == "" {
			return fiber.ErrBadRequest
		}

		// Extract messages once and reuse for both the predict call and logging
		// (the original called extractMessages twice).
		messages := extractMessages(input)
		predictOptions := &pb.PredictOptions{
			Messages: messages,
		}
		for _, message := range messages {
			log.Info().Msgf("message:%v,role:%v,content:%v", message, message.Role, message.Content)
		}

		grpcClient, err := modelLoader.Load(dbLoader,
			model.WithModelID(input.Model),
		)
		if err != nil {
			return fiber.NewError(fiber.StatusInternalServerError, "failed to get gRPC client: "+err.Error())
		}

		// Per-request identifiers. These MUST be local to the handler:
		// previously they were captured from the enclosing closure and shared
		// by all concurrent requests (data race, mixed-up IDs and timestamps).
		id := uuid.New().String()
		created := int(time.Now().Unix())

		if input.Stream {
			c.Set("Content-Type", "text/event-stream")
			c.Set("Cache-Control", "no-cache")
			c.Set("Connection", "keep-alive")
			c.Set("X-Correlation-ID", id)

			// Full generated text for this request, used to estimate usage in
			// the final chunk. Writes in the gRPC callback are ordered before
			// the post-loop read below via the channel send/close.
			var textContentToReturn string

			// Channel carrying streamed chunks from the gRPC consumer to the
			// SSE body writer.
			responses := make(chan schema.OpenAIResponse)

			// Consume the gRPC stream; convert each reply into an OpenAI
			// chat.completion.chunk and hand it to the body writer.
			go func() {
				defer close(responses)

				err := grpcClient.PredictStream(context.Background(), predictOptions, func(reply *pb.Reply) {
					content := string(reply.Message)
					textContentToReturn += content

					responses <- schema.OpenAIResponse{
						ID:      id, // same ID for every chunk of this completion
						Object:  ChatCompletionChunkObject,
						Created: created,
						Model:   input.Model,
						Choices: []schema.Choice{
							{
								Index: 0,
								Delta: &schema.Message{
									Content: &content,
									Role:    "assistant",
								},
							},
						},
						// Intermediate chunks carry no usage info; only the
						// final chunk does.
					}
				})
				if err != nil {
					// Previously swallowed; surface stream failures in the log.
					log.Error().Err(err).Msg("PredictStream failed")
				}
			}()

			c.Context().SetBodyStreamWriter(func(w *bufio.Writer) {
				toolsCalled := false

				// Forward chunks as SSE events: "data: <json>\n\n".
				for ev := range responses {
					if len(ev.Choices) > 0 && ev.Choices[0].Delta != nil && len(ev.Choices[0].Delta.ToolCalls) > 0 {
						toolsCalled = true
					}

					chunkData, err := json.Marshal(ev)
					if err != nil {
						log.Error().Err(err).Msg("Failed to marshal stream chunk")
						continue
					}

					if _, err := w.WriteString(fmt.Sprintf("data: %s\n\n", chunkData)); err != nil {
						// Client went away: cancel the upstream prediction and
						// stop writing.
						log.Error().Err(err).Msg("Failed to write stream chunk")
						input.Cancel()
						break
					}
					w.Flush()
				}

				// Pick the finish reason OpenAI clients expect.
				finishReason := "stop"
				if toolsCalled {
					if len(input.Tools) > 0 {
						finishReason = "tool_calls"
					} else {
						finishReason = "function_call"
					}
				}

				// Final chunk: empty delta, finish reason and usage estimate.
				finalResponse := &schema.OpenAIResponse{
					ID:      id, // same ID as all previous chunks
					Created: created,
					Model:   input.Model,
					Object:  ChatCompletionChunkObject,
					Choices: []schema.Choice{
						{
							Index:        0,
							FinishReason: finishReason,
							Delta:        &schema.Message{}, // last chunk's delta is an empty object
						},
					},
					// NOTE(review): rough token estimate (len/2, fixed prompt
					// cost) — replace with real tokenizer counts if available.
					Usage: schema.OpenAIUsage{
						PromptTokens:     5,
						CompletionTokens: len(textContentToReturn) / 2,
						TotalTokens:      5 + len(textContentToReturn)/2,
					},
				}

				if finalData, err := json.Marshal(finalResponse); err != nil {
					log.Error().Err(err).Msg("Failed to marshal final chunk")
				} else if _, err := w.WriteString(fmt.Sprintf("data: %s\n\n", finalData)); err != nil {
					log.Error().Err(err).Msg("Failed to write final chunk")
				}

				// Terminate the SSE stream.
				if _, err := w.WriteString("data: [DONE]\n\n"); err != nil {
					log.Error().Err(err).Msg("Failed to write [DONE]")
				}

				w.Flush()
			})

			// The stream writer now owns the response body. Returning here is
			// essential: previously execution fell through to the blocking
			// Predict call below and attempted a second, conflicting response.
			return nil
		}

		// Non-streaming path: single blocking prediction.
		reply, err := grpcClient.Predict(context.Background(), predictOptions)
		if err != nil {
			return fiber.NewError(fiber.StatusInternalServerError, "failed to call vLLM: "+err.Error())
		}

		// Build the OpenAI-compatible completion response.
		content := reply.Message
		response := schema.OpenAIResponse{
			ID:      id,
			Created: created,
			Model:   input.Model,
			Object:  ChatCompletionObject,
			Choices: []schema.Choice{
				{
					Message: &schema.Message{ // full Message, not Delta, for non-streaming
						Role:    "assistant",
						Content: &content,
					},
					Index:        0,
					FinishReason: "stop",
				},
			},
			// NOTE(review): same rough token estimate as the streaming path.
			Usage: schema.OpenAIUsage{
				PromptTokens:     5,
				CompletionTokens: len(content) / 2,
				TotalTokens:      5 + len(content)/2,
			},
		}
		return c.JSON(response)
	}
}

// extractMessages converts the messages of an OpenAIRequest into the gRPC
// message representation. String content is passed through; multi-part
// ([]interface{}) content has every "text" part concatenated (previously
// only the first part was kept, silently dropping the rest); any other
// content type is stringified with fmt.
func extractMessages(req *schema.OpenAIRequest) []*pb.Message {
	// Pre-size: output has exactly one entry per input message.
	messages := make([]*pb.Message, 0, len(req.Messages))
	for _, msg := range req.Messages {
		var content string
		switch c := msg.Content.(type) {
		case string:
			content = c
		case []interface{}:
			// Multi-part content (e.g. [{"type":"text","text":"..."}, ...]):
			// collect the text of every part, not only the first.
			for _, part := range c {
				if m, ok := part.(map[string]interface{}); ok {
					if text, ok := m["text"].(string); ok {
						content += text
					}
				}
			}
		default:
			content = fmt.Sprintf("%v", c)
		}
		messages = append(messages, &pb.Message{
			Role:    msg.Role,
			Content: content,
		})
	}
	return messages
}
