package main

// GRPC Falcon server

// Note: this is started internally by LocalAI and a server is allocated for each model

import (
	"bufio"
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"github.com/rs/zerolog/log"
	"golang.org/x/net/context"
	"io"
	"net/http"
	"net/url"
	"shyxy-net/common/llm"
	"shyxy-net/pkg/grpc"
	"shyxy-net/pkg/grpc/base"
	pb "shyxy-net/pkg/grpc/proto"
	"strings"
	"time"
)

// addr is the TCP port this gRPC backend listens on (LocalAI allocates
// one backend process per model and passes the port via -addr).
var addr = flag.String("addr", "50000", "grpc server address")

// Backend model identifiers. The "/model-" prefix is prepended to the
// model name received in ModelOptions (see Load).
const (
	Qwen3FinanceModel = "/model-qwen3-finance-model"
	FinanceLLM        = "/model-finance-LLM"
)

// baseURLMap maps a backend model identifier to the base URL of the
// local vLLM OpenAI-compatible HTTP server that serves it.
var baseURLMap = map[string]string{
	Qwen3FinanceModel: "http://localhost:50001/v1",
	FinanceLLM:        "http://localhost:50002/v1",
}

// LLM is the gRPC backend implementation that forwards predict requests
// to a local vLLM HTTP server. base.SingleThread serializes requests so
// only one inference runs at a time.
type LLM struct {
	base.SingleThread
	model   string // backend model identifier, e.g. "/model-qwen3-finance-model"
	baseUrl string // base URL of the vLLM server for this model (from baseURLMap)
}

// vLLMRequest is the JSON body for the OpenAI-compatible
// /chat/completions endpoint exposed by vLLM.
type vLLMRequest struct {
	Model       string        `json:"model"`
	Messages    []llm.Message `json:"messages"` // chat messages are used uniformly instead of a raw prompt
	MaxTokens   int           `json:"max_tokens,omitempty"`
	Temperature float64       `json:"temperature,omitempty"`
	TopP        float64       `json:"top_p,omitempty"`
	N           int           `json:"n,omitempty"`
	Stream      bool          `json:"stream,omitempty"`
}

func newVLLMRequest(ctx context.Context, baseUrl, model string, req *pb.PredictOptions, stream bool) (*http.Request, error) {
	var messages []llm.Message
	for _, msg := range req.Messages {
		messages = append(messages, llm.Message{
			Content: msg.Content,
			Role:    msg.Role,
		})
	}
	var maxTokens int
	// TODO 改成根据 model的配置文件自动装配
	if model == "/model-qwen3-finance-model" {
		maxTokens = 32 * 1024
	}
	if maxTokens == 0 {
		maxTokens = 1024
	}
	var temp, topP float64
	if req.Temperature > 0 {
		temp = float64(req.Temperature)
	} else {
		temp = 0.7
	}

	if req.TopP > 0 {
		topP = float64(req.TopP)
	} else {
		topP = 0.9
	}
	log.Printf("发送到 vLLM 的完整消息: %+v", messages)
	vllmReq := vLLMRequest{
		Model:       model,
		Messages:    messages,
		MaxTokens:   maxTokens,
		Temperature: temp,
		TopP:        topP,
		Stream:      stream,
	}
	body, err := json.Marshal(vllmReq)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	apiURL := "chat/completions"
	urlPath, err := url.JoinPath(baseUrl, apiURL)
	if err != nil {
		return nil, fmt.Errorf("failed to join path: %w", err)
	}
	reqHTTP, err := http.NewRequestWithContext(ctx, "POST", urlPath, bytes.NewBuffer(body))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	reqHTTP.Header.Set("Content-Type", "application/json")

	return reqHTTP, nil
}

// Predict 调用本地 vLLM 服务进行推理
func (l *LLM) Predict(opts *pb.PredictOptions) (string, error) {

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	reqHTTP, err := newVLLMRequest(ctx, l.baseUrl, l.model, opts, false)
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	client := http.DefaultClient
	resp, err := client.Do(reqHTTP)
	if err != nil {
		return "", fmt.Errorf("failed to call vLLM: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		bodyBytes, _ := io.ReadAll(resp.Body)
		return "", fmt.Errorf("vLLM returned status code %d: %s", resp.StatusCode, bodyBytes)
	}

	// 解析响应
	var result struct {
		Choices []struct {
			Message struct {
				Role    string `json:"role"`
				Content string `json:"content"`
			} `json:"message"`
		} `json:"choices"`
	}

	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return "", fmt.Errorf("failed to decode response: %w", err)
	}

	if len(result.Choices) == 0 {
		return "", fmt.Errorf("no choices in response")
	}

	return result.Choices[0].Message.Content, nil
}
func (l *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {

	defer close(results)

	ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
	defer cancel()

	reqHTTP, err := newVLLMRequest(ctx, l.baseUrl, l.model, opts, true)
	if err != nil {
		return err
	}

	client := http.DefaultClient
	resp, err := client.Do(reqHTTP)
	if err != nil {
		return fmt.Errorf("failed to call vLLM: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		bodyBytes, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("vLLM returned status code %d: %s", resp.StatusCode, bodyBytes)
	}

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}
		jsonStr := strings.TrimPrefix(line, "data:")
		jsonStr = strings.TrimSpace(jsonStr)
		if jsonStr == "[DONE]" {
			break
		}

		var chunk struct {
			Choices []struct {
				Delta struct {
					Content string `json:"content"`
				} `json:"delta"`
			} `json:"choices"`
		}

		if err := json.Unmarshal([]byte(jsonStr), &chunk); err != nil {
			log.Printf("Failed to unmarshal chunk: %v", err)
			continue
		}

		for _, choice := range chunk.Choices {
			content := choice.Delta.Content
			if content == "" {
				continue
			}
			select {
			case results <- content:
				log.Printf("gRPC sent: %q", content) // 调试
				// heartbeat <- struct{}{} // 可选：用于保活
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}

	return nil
}

// Load resolves the backend model identifier and its vLLM base URL from
// the configured baseURLMap.
//
// Bug fix: the original looked up the base URL but never stored it on
// the receiver, so l.baseUrl stayed empty and every subsequent
// Predict/PredictStream targeted an invalid URL. It is now assigned,
// and an unknown model returns early.
func (l *LLM) Load(opts *pb.ModelOptions) error {
	fmt.Println("grpc Loading...")
	//TODO: implement the load method
	//oiToken := "test_token"
	backendModelPrefix := "/model-"
	l.model = backendModelPrefix + opts.Model
	baseUrl, ok := baseURLMap[l.model]
	if !ok {
		return fmt.Errorf("model %s not found", opts.Model)
	}
	l.baseUrl = baseUrl
	fmt.Printf("model: %s\n", l.model)
	return nil
}

func main() {
	flag.Parse()
	if err := grpc.StartServer(":"+*addr, &LLM{}); err != nil {
		panic(err)
	}
}
