package main

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"shyxy-model-agent/common/schema/qwen"
	"shyxy-model-agent/core/schema"
	"shyxy-model-agent/pkg/grpc/base"
	pb "shyxy-model-agent/pkg/grpc/proto"
	"strings"
	"time"

	"github.com/rs/zerolog/log"
)

// newRequest builds an HTTP POST request for the Qwen text-generation
// endpoint from gRPC predict options.
//
// When stream is true the request enables incremental output (the server
// sends deltas rather than the accumulated text) with a "json" result
// format; otherwise the server defaults apply.
//
// Returns an error when the message list is empty, or when marshaling /
// URL joining / request construction fails.
func newRequest(ctx context.Context, baseUrl string, model string, apiKey string, req *pb.PredictOptions, stream bool) (*http.Request, error) {
	messages := make([]qwen.Message, 0, len(req.Messages))
	for _, msg := range req.Messages {
		messages = append(messages, qwen.Message{
			Role:    msg.Role,
			Content: msg.Content,
		})
	}
	if len(messages) == 0 {
		return nil, fmt.Errorf("messages cannot be empty")
	}
	// Base request is identical for both modes; only streaming needs
	// extra parameters.
	request := qwen.Request{
		Model: model,
		Input: qwen.Input{
			Messages: messages,
		},
	}
	if stream {
		request.Parameters = qwen.Parameters{
			IncrementalOutput: true,
			ResultFormat:      "json",
		}
	}
	body, err := json.Marshal(request)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	urlPath, err := url.JoinPath(baseUrl, qwen.ServicesAigcTextGenerationGeneration)
	if err != nil {
		return nil, fmt.Errorf("failed to join path: %w", err)
	}
	reqHTTP, err := http.NewRequestWithContext(ctx, http.MethodPost, urlPath, bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	reqHTTP.Header.Set("Content-Type", "application/json")
	reqHTTP.Header.Set("Authorization", "Bearer "+apiKey)
	return reqHTTP, nil
}

// convertQwenToOpenAI maps a Qwen API response onto the OpenAI
// chat-completion response shape used by downstream clients.
//
// For stream=true the object is "chat.completion.chunk" and the finish
// reason is only set when it is a real terminal value (Qwen emits "" or
// "null" for intermediate chunks); for stream=false it is "chat.completion"
// and the finish reason is copied through as-is.
func convertQwenToOpenAI(resp qwen.Response, model string, stream bool) schema.OpenAIResponse {
	openAIResp := schema.OpenAIResponse{
		ID:      resp.RequestID,
		Created: int(time.Now().Unix()),
		Model:   model,
	}
	if stream {
		openAIResp.Object = "chat.completion.chunk"
	} else {
		openAIResp.Object = "chat.completion"
	}
	// Guard against responses with no choices (e.g. server-side errors);
	// indexing Choices[0] unconditionally would panic here.
	if len(resp.Output.Choices) == 0 {
		return openAIResp
	}

	finishReason := resp.Output.Choices[0].FinishReason
	choice := schema.Choice{
		Index: 0,
		Delta: &schema.Message{Role: "assistant", Content: resp.Output.Choices[0].Message},
	}
	if stream {
		// Intermediate stream chunks carry "" or "null"; only surface a
		// genuine terminal reason.
		if finishReason != "" && finishReason != "null" {
			choice.FinishReason = finishReason
		}
	} else {
		choice.FinishReason = finishReason
	}
	openAIResp.Choices = append(openAIResp.Choices, choice)
	return openAIResp
}

// LLM is a gRPC backend implementation that proxies predict requests to a
// remote Qwen-compatible HTTP API. It embeds base.SingleThread, so requests
// are presumably serialized by the embedded type — confirm against base.
type LLM struct {
	base.SingleThread

	baseUrl string // root URL of the Qwen API server
	model   string // model name sent in each request
	apiKey  string // bearer token for the Authorization header
}

// Predict performs a single (non-streaming) completion against the Qwen
// server and returns the response converted to OpenAI chat-completion
// format, serialized as a JSON string.
//
// The call is bounded by a 30-second timeout. A non-200 status or any
// transport/decoding failure is returned as an error.
func (l *LLM) Predict(opts *pb.PredictOptions) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	reqHTTP, err := newRequest(ctx, l.baseUrl, l.model, l.apiKey, opts, false)
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	client := http.DefaultClient
	resp, err := client.Do(reqHTTP)
	if err != nil {
		// This adapter talks to the Qwen API, not vLLM — name it correctly.
		return "", fmt.Errorf("failed to call qwen server: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		bodyBytes, _ := io.ReadAll(resp.Body)
		return "", fmt.Errorf("qwen Server returned status code %d: %s", resp.StatusCode, bodyBytes)
	}

	// Read the full response body (non-streaming responses are small).
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response body: %w", err)
	}
	log.Info().Str("model", l.model).Bytes("response_body", bodyBytes).Msg("Qwen API response received") //debug
	var qwenResp qwen.Response
	if err := json.Unmarshal(bodyBytes, &qwenResp); err != nil {
		return "", fmt.Errorf("failed to unmarshal Qwen response: %w", err)
	}
	log.Info().Str("model", l.model).Interface("qwen_response", qwenResp).Msg("Parsed Qwen response") //debug

	// Convert to the OpenAI response shape expected by callers.
	openAIResp := convertQwenToOpenAI(qwenResp, l.model, false)
	log.Info().Str("model", l.model).Interface("openai_response", openAIResp).Msg("Converted to OpenAI format") //debug

	openAIBytes, err := json.Marshal(openAIResp)
	if err != nil {
		return "", fmt.Errorf("failed to marshal OpenAI response: %w", err)
	}
	log.Info().Str("model", l.model).RawJSON("final_response", openAIBytes).Msg("Final response to be returned") //debug
	return string(openAIBytes), nil
}
// PredictStream performs a streaming completion against the Qwen server,
// sending each incremental text delta to the results channel as a raw
// string (compatible with the rhua-chatgpt-web frontend). The channel is
// always closed before returning.
//
// The whole stream is bounded by a 90-second timeout. A non-200 status or
// a scanner error is returned; malformed SSE data lines are skipped.
func (l *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
	defer close(results)
	ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
	defer cancel()
	reqHTTP, err := newRequest(ctx, l.baseUrl, l.model, l.apiKey, opts, true)
	if err != nil {
		return err
	}
	reqHTTP.Header.Set("Accept", "text/event-stream")
	client := http.DefaultClient
	resp, err := client.Do(reqHTTP)
	if err != nil {
		// This adapter talks to the Qwen API, not vLLM — name it correctly.
		return fmt.Errorf("failed to call qwen server: %w", err)
	}
	defer resp.Body.Close()
	// Without this check, an error response (e.g. 401/429 JSON body) would
	// be silently scanned as SSE and the method would return nil.
	if resp.StatusCode != http.StatusOK {
		bodyBytes, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("qwen Server returned status code %d: %s", resp.StatusCode, bodyBytes)
	}
	log.Info().Str("model", l.model).Msg("Starting to process stream response") //debug

	// Process the SSE stream line by line. Grow the scanner buffer past the
	// 64KB default so a single long data line cannot abort the stream.
	scanner := bufio.NewScanner(resp.Body)
	scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
	var requestID string
	for scanner.Scan() {
		line := scanner.Text()

		// Skip blank keep-alive lines and SSE comment lines.
		if line == "" || strings.HasPrefix(line, ":") {
			continue
		}

		if !strings.HasPrefix(line, "data:") {
			continue
		}
		jsonData := strings.TrimSpace(strings.TrimPrefix(line, "data:"))

		log.Debug().Str("model", l.model).Str("raw_data", jsonData).Msg("Received stream data") //debug

		if jsonData == "[DONE]" {
			results <- "data: [DONE]\n\n"
			break
		}

		var qwenStreamResp qwen.Response
		if err := json.Unmarshal([]byte(jsonData), &qwenStreamResp); err != nil {
			// Best-effort: skip malformed chunks rather than aborting the stream.
			continue
		}

		if requestID == "" {
			requestID = qwenStreamResp.RequestID
		}

		var deltaContent string
		var finishReason string
		if len(qwenStreamResp.Output.Choices) > 0 {
			deltaContent = qwenStreamResp.Output.Choices[0].Message.Content
			finishReason = qwenStreamResp.Output.Choices[0].FinishReason
			log.Info().Msgf("deltaContent:%s,finishReason:%s", deltaContent, finishReason)
		}
		// Send the raw delta text directly (rhua-chatgpt-web compatible).
		// OpenAI-envelope framing for open-WebUI was intentionally removed.
		if deltaContent != "" {
			results <- deltaContent
		}
		if finishReason == "stop" {
			break
		}
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error reading stream: %w", err)
	}

	log.Info().Str("model", l.model).Msg("Finished processing stream response") //debug

	return nil
}

// Load configures the backend from the process-level flags, rejecting the
// request when the requested model (case-insensitively) does not match the
// model this process was started for.
func (l *LLM) Load(opts *pb.ModelOptions) error {
	if !strings.EqualFold(opts.Model, *modelName) {
		return fmt.Errorf("model name mismatch: expected %s, got %s", *modelName, opts.Model)
	}
	l.model = *modelName
	l.apiKey = *apiKey
	l.baseUrl = *baseUrl
	return nil
}
