package openai

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"github.com/gofiber/fiber/v2"
	"github.com/valyala/fasthttp"
	"zatta-gateway/http/middleware"
	"zatta-gateway/http/schema"
	"zatta-gateway/http/services"
	"zatta-gateway/pkg/db"
	pb "zatta-gateway/pkg/grpc/proto"

	"github.com/google/uuid"
	"github.com/rs/zerolog/log"
	"time"
)

// OpenAI-compatible "object" field values, echoed back in responses so
// clients can distinguish blocking completions from streamed chunks.
const (
	ChatCompletionObject      string = "chat.completion"       // non-streaming response
	ChatCompletionChunkObject string = "chat.completion.chunk" // one SSE chunk of a streamed response
)

// ChatEndpoint returns a Fiber handler implementing an OpenAI-compatible
// chat-completions endpoint backed by a gRPC model worker.
//
// The request body is expected to have been decoded by upstream middleware
// and stored in the context locals under CONTEXT_LOCALS_KEY_DEF_MODEL_REQUEST.
// Both blocking and streaming (SSE) responses are supported, selected by
// input.Stream. Errors are reported as standard Fiber HTTP errors.
func ChatEndpoint(clientManager *db.ClientManager) func(c *fiber.Ctx) error {
	return func(c *fiber.Ctx) error {
		// Get the parsed request body from the context locals.
		input, ok := c.Locals(middleware.CONTEXT_LOCALS_KEY_DEF_MODEL_REQUEST).(*schema.OpenAIRequest)
		if !ok || input.Model == "" {
			return fiber.ErrBadRequest
		}
		predictOptions := &pb.PredictOptions{
			Messages: extractMessages(input),
		}
		// Resolve the requested model name; exactly one match is required.
		model, err := services.ListModels(clientManager, input.Model)
		if err != nil {
			return fiber.ErrInternalServerError
		}
		if len(model) == 0 {
			return fiber.ErrNotFound
		}
		if len(model) > 1 {
			return fiber.ErrInternalServerError
		}
		grpcClient := model[0].GRPC(clientManager)
		// NOTE(review): context.Background() is used because the request
		// context does not outlive the handler once streaming starts —
		// consider a timeout-bearing context here.
		_, err = grpcClient.LoadModel(context.Background(), &pb.ModelOptions{Model: input.Model})
		if err != nil {
			return fiber.NewError(fiber.StatusInternalServerError, "failed to load model: "+err.Error())
		}
		id := uuid.New().String()
		created := int(time.Now().Unix())
		if !input.Stream {
			// Blocking (non-streaming) completion.
			reply, err := grpcClient.Predict(context.Background(), predictOptions)
			if err != nil {
				return fiber.NewError(fiber.StatusInternalServerError, "failed to call vLLM: "+err.Error())
			}

			// Build an OpenAI-compatible response.
			content := reply.Message
			response := schema.OpenAIResponse{
				ID:      id, // keep the ID format consistent with streaming
				Created: created,
				Model:   input.Model,
				Object:  ChatCompletionObject,
				Choices: []schema.Choice{
					{
						Message: &schema.Message{ // Message (not Delta) for non-streaming
							Role:    "assistant",
							Content: &content,
						},
						Index:        0,
						FinishReason: "stop",
					},
				},
				// NOTE(review): token counts are rough byte-length estimates,
				// not real tokenizer output — replace once the backend
				// reports usage.
				Usage: schema.OpenAIUsage{
					PromptTokens:     5,
					CompletionTokens: len(content) / 2,
					TotalTokens:      5 + len(content)/2,
				},
			}
			return c.JSON(response)
		}
		// Streaming response: standard SSE headers.
		c.Set("Content-Type", "text/event-stream")
		c.Set("Cache-Control", "no-cache")
		c.Set("Connection", "keep-alive")
		c.Set("X-Correlation-ID", id)

		// Unbuffered channel carrying chunks from producer to consumer.
		responses := make(chan schema.OpenAIResponse)
		// Accumulates the full generated text. Written only by the producer
		// goroutine; read by the consumer only after the channel is closed,
		// so the close establishes the required happens-before edge.
		textContentToReturn := ""

		// Producer: forward gRPC stream replies into the channel.
		go func() {
			defer close(responses)

			_ = grpcClient.PredictStream(context.Background(), predictOptions, func(reply *pb.Reply) {
				content := string(reply.Message)
				textContentToReturn += content

				chunk := &schema.OpenAIResponse{
					ID:      id, // same ID for every chunk
					Object:  ChatCompletionChunkObject,
					Created: created,
					Model:   input.Model,
					Choices: []schema.Choice{
						{
							Index: 0,
							Delta: &schema.Message{
								Content: &content,
								Role:    "assistant",
							},
						},
					},
					// Streaming chunks carry no usage info; only the final
					// chunk does.
				}

				responses <- *chunk
			})
		}()

		// Consumer: relay chunks to the client as SSE events.
		c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {
			toolsCalled := false
			writeFailed := false

			// Keep draining the channel even after a write failure so the
			// producer goroutine never blocks forever on send (the previous
			// early `break` leaked the goroutine and raced on
			// textContentToReturn).
			for ev := range responses {
				if writeFailed {
					continue // drain only; client connection is gone
				}
				if len(ev.Choices) > 0 && ev.Choices[0].Delta != nil && len(ev.Choices[0].Delta.ToolCalls) > 0 {
					toolsCalled = true
				}

				chunkData, err := json.Marshal(ev)
				if err != nil {
					// Skip the malformed chunk instead of emitting empty data.
					log.Error().Err(err).Msg("Failed to marshal stream chunk")
					continue
				}

				// SSE framing: "data: <json>\n\n".
				if _, err := w.WriteString(fmt.Sprintf("data: %s\n\n", chunkData)); err != nil {
					log.Error().Err(err).Msg("Failed to write stream chunk")
					input.Cancel()
					writeFailed = true
					continue
				}
				w.Flush()
			}

			if writeFailed {
				// Connection is dead; the final chunk and [DONE] cannot be
				// delivered.
				return
			}

			// Pick the finish reason for the terminating chunk.
			finishReason := "stop"
			if toolsCalled {
				if len(input.Tools) > 0 {
					finishReason = "tool_calls"
				} else {
					finishReason = "function_call"
				}
			}

			// Final chunk: empty delta, finish reason, and usage info.
			finalResponse := &schema.OpenAIResponse{
				ID:      id,
				Created: created,
				Model:   input.Model,
				Object:  ChatCompletionChunkObject,
				Choices: []schema.Choice{
					{
						Index:        0,
						FinishReason: finishReason,
						Delta:        &schema.Message{}, // last chunk's delta is an empty object
					},
				},
				// NOTE(review): estimated token counts — see the blocking
				// path above.
				Usage: schema.OpenAIUsage{
					PromptTokens:     5,
					CompletionTokens: len(textContentToReturn) / 2,
					TotalTokens:      5 + len(textContentToReturn)/2,
				},
			}

			if finalData, err := json.Marshal(finalResponse); err != nil {
				log.Error().Err(err).Msg("Failed to marshal final chunk")
			} else if _, err := w.WriteString(fmt.Sprintf("data: %s\n\n", finalData)); err != nil {
				log.Error().Err(err).Msg("Failed to write final chunk")
			}

			// Terminate the stream per the OpenAI SSE protocol.
			if _, err := w.WriteString("data: [DONE]\n\n"); err != nil {
				log.Error().Err(err).Msg("Failed to write [DONE]")
			}

			w.Flush()
		}))

		return nil
	}
}

// extractMessages converts the messages of an OpenAIRequest into gRPC
// pb.Message values. Plain string content is passed through unchanged;
// multi-part (array) content has the "text" of every part concatenated;
// any other content type is formatted with fmt.Sprintf("%v", ...).
func extractMessages(req *schema.OpenAIRequest) []*pb.Message {
	messages := make([]*pb.Message, 0, len(req.Messages))

	for _, msg := range req.Messages {
		var content string
		switch c := msg.Content.(type) {
		case string:
			content = c
		case []interface{}:
			// OpenAI "content parts": collect the text of every part.
			// Comma-ok assertions avoid a panic when a part is not a map
			// (the previous code asserted c[0] unconditionally, so a
			// malformed request could crash the handler).
			for _, part := range c {
				if m, ok := part.(map[string]interface{}); ok {
					if text, ok := m["text"].(string); ok {
						content += text
					}
				}
			}
		default:
			content = fmt.Sprintf("%v", c)
		}
		messages = append(messages, &pb.Message{
			Role:    msg.Role,
			Content: content,
		})
	}
	return messages
}
