package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"shyxy-net/common/llm"
	"shyxy-net/common/llm/openai"
	"shyxy-net/common/llm/qwen"
	pb "shyxy-net/pkg/grpc/proto"
	"time"
)

// NewQWenRequest builds an HTTP POST request for the Qwen chat completion
// API. It converts the gRPC predict options into Qwen's request schema,
// enabling incremental message-formatted output when stream is true, and
// attaches the Bearer API key. The returned request is bound to ctx.
func NewQWenRequest(ctx context.Context, baseUrl string, model string, apiKey string, req *pb.PredictOptions, stream bool) (*http.Request, error) {
	// Pre-size: one llm.Message per incoming proto message.
	messages := make([]llm.Message, 0, len(req.Messages))
	for _, msg := range req.Messages {
		messages = append(messages, llm.Message{
			Content: msg.Content,
			Role:    msg.Role,
		})
	}

	// Ensure there is at least one valid message before calling upstream.
	if len(messages) == 0 {
		return nil, fmt.Errorf("messages cannot be empty")
	}

	// Both modes share the same model/input; only streaming needs extra
	// parameters, so build the common part once instead of duplicating it.
	request := qwen.Request{
		Model: model,
		Input: qwen.Input{
			Messages: messages,
		},
	}
	if stream {
		request.Parameters = qwen.Parameters{
			ResultFormat:      "message",
			IncrementalOutput: true,
		}
	}

	body, err := json.Marshal(request)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	urlPath, err := url.JoinPath(baseUrl, llm.QwenChatSuffix)
	if err != nil {
		return nil, fmt.Errorf("failed to join path: %w", err)
	}
	reqHTTP, err := http.NewRequestWithContext(ctx, http.MethodPost, urlPath, bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	reqHTTP.Header.Set("Content-Type", "application/json")
	reqHTTP.Header.Set("Authorization", "Bearer "+apiKey)
	return reqHTTP, nil
}

// convertQWenToOpenAI 将Qwen响应转换为OpenAI格式
// convertQWenToOpenAI converts a non-streaming Qwen response into the
// OpenAI-compatible chat.completion format. The Qwen request ID is reused
// as the completion ID and token usage is mapped one-to-one.
func convertQWenToOpenAI(resp qwen.Response, model string) openai.ChatCompletion {
	var openAIResp openai.ChatCompletion

	openAIResp.ID = resp.RequestID
	openAIResp.Object = llm.ChatCompletion
	openAIResp.Created = time.Now().Unix()
	openAIResp.Model = model

	// Build choices. Guard against an empty choices slice so a malformed
	// upstream response cannot trigger an index-out-of-range panic; in that
	// case the result simply carries no choices.
	if len(resp.Output.Choices) > 0 {
		choice := struct {
			Index        int         `json:"index"`
			Message      llm.Message `json:"message"`
			FinishReason string      `json:"finish_reason"`
		}{
			Index: 0,
			Message: llm.Message{
				Role:    "assistant",
				Content: resp.Output.Choices[0].Message.Content,
			},
			FinishReason: resp.Output.Choices[0].FinishReason,
		}
		openAIResp.Choices = append(openAIResp.Choices, choice)
	}

	// Map usage: Qwen input/output tokens correspond to OpenAI
	// prompt/completion tokens.
	openAIResp.Usage.PromptTokens = resp.Usage.InputTokens
	openAIResp.Usage.CompletionTokens = resp.Usage.OutputTokens
	openAIResp.Usage.TotalTokens = resp.Usage.TotalTokens

	return openAIResp
}

// convertQWenStreamToOpenAI 将Qwen流式响应转换为OpenAI流式格式
func convertQWenStreamToOpenAI(resp qwen.Response, model, deltaContent, finishReason, requestID string) openai.ChatCompletionChunk {
	var openAIResp openai.ChatCompletionChunk

	openAIResp.ID = requestID
	openAIResp.Object = llm.ChatCompletionChunk
	openAIResp.Created = time.Now().Unix()
	openAIResp.Model = model

	// 构造choices
	choice := struct {
		Index        int         `json:"index"`
		Delta        llm.Message `json:"delta"`
		FinishReason string      `json:"finish_reason,omitempty"`
	}{
		Index: 0,
		Delta: llm.Message{
			Role:    "assistant",
			Content: deltaContent,
		},
	}

	// 只有在结束时才设置finish_reason
	if finishReason != "null" && finishReason != "" {
		choice.FinishReason = finishReason
	}

	openAIResp.Choices = append(openAIResp.Choices, choice)

	return openAIResp
}
