package main

// GRPC Falcon server

// Note: this is started internally by LocalAI and a server is allocated for each model

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"gateway-demo/pkg/grpc"
	"gateway-demo/pkg/grpc/base"
	pb "gateway-demo/pkg/grpc/proto"
	"github.com/joho/godotenv"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"strings"
	"time"
)

// Runtime configuration, populated once in init() from the environment
// (optionally loaded from a .env file).
var (
	grpcPort     string // TCP port the gRPC server listens on (GRPC_PORT)
	vLlmEndpoint string // base URL of the vLLM HTTP server (VLLM_ENDPOINT)
	defaultModel string // model name sent in every vLLM request (DEFAULT_MODEL)
)

// init loads configuration for the process. If any of the required
// variables is missing from the environment, it first tries to populate
// the environment from a local .env file (a load failure is only logged,
// since the variables may still resolve to their defaults below).
func init() {
	missing := false
	for _, key := range []string{"GRPC_PORT", "VLLM_ENDPOINT", "DEFAULT_MODEL"} {
		if os.Getenv(key) == "" {
			missing = true
			break
		}
	}
	if missing {
		if err := godotenv.Load(); err != nil {
			log.Println("Error loading .env file")
		}
	}
	grpcPort = getEnv("GRPC_PORT", "50051")
	vLlmEndpoint = getEnv("VLLM_ENDPOINT", "http://localhost:8000")
	defaultModel = getEnv("DEFAULT_MODEL", "gpt-3.5-turbo")
}
func getEnv(key, defaultValue string) string {
	if value, exists := os.LookupEnv(key); exists {
		return value
	}
	return defaultValue
}

// LLM is the gRPC backend implementation that proxies prediction requests
// to a vLLM server over its OpenAI-compatible HTTP API.
// base.SingleThread is embedded to supply the rest of the backend interface;
// NOTE(review): presumably it serializes requests to one at a time — confirm
// against the base package.
type LLM struct {
	base.SingleThread
}

// Request body types in the OpenAI-compatible chat-completion format.

// Message is a single chat turn sent to the model.
type Message struct {
	Role    string `json:"role"`    // e.g. "system", "user", "assistant"
	Content string `json:"content"` // the text of the turn
}

// vLLMRequest is the payload POSTed to vLLM's /v1/chat/completions endpoint.
type vLLMRequest struct {
	Model       string    `json:"model"`
	Messages    []Message `json:"messages"` // chat history; used instead of the legacy "prompt" field
	MaxTokens   int       `json:"max_tokens,omitempty"`
	Temperature float64   `json:"temperature,omitempty"`
	TopP        float64   `json:"top_p,omitempty"`
	N           int       `json:"n,omitempty"`
	Stream      bool      `json:"stream,omitempty"` // true selects server-sent-events streaming
}

// newVLLMRequest builds a POST request to vLLM's OpenAI-compatible
// chat-completions endpoint from the gRPC prediction options.
// The same endpoint serves both streaming and non-streaming calls;
// the mode is selected solely via the "stream" field of the JSON body.
// (The original code had an if/else that assigned the identical path in
// both branches — collapsed here.)
func newVLLMRequest(ctx context.Context, req *pb.PredictOptions, stream bool) (*http.Request, error) {
	// Pre-size so an empty request encodes as "messages": [] rather than
	// "messages": null (a nil slice marshals to JSON null).
	messages := make([]Message, 0, len(req.Messages))
	for _, msg := range req.Messages {
		messages = append(messages, Message{
			Role:    msg.Role,
			Content: msg.Content,
		})
	}
	vllmReq := vLLMRequest{
		Model:       defaultModel,
		Messages:    messages,
		MaxTokens:   512, // fixed cap; not exposed by the incoming options here
		Temperature: float64(req.Temperature),
		TopP:        float64(req.TopP),
		Stream:      stream,
	}
	body, err := json.Marshal(vllmReq)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	urlPath, err := url.JoinPath(vLlmEndpoint, "/v1/chat/completions")
	if err != nil {
		return nil, fmt.Errorf("failed to join path: %w", err)
	}
	reqHTTP, err := http.NewRequestWithContext(ctx, http.MethodPost, urlPath, bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	reqHTTP.Header.Set("Content-Type", "application/json")

	return reqHTTP, nil
}

// Predict 调用本地 vLLM 服务进行推理
func (l *LLM) Predict(req *pb.PredictOptions) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	reqHTTP, err := newVLLMRequest(ctx, req, false)
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	client := &http.Client{}
	resp, err := client.Do(reqHTTP)
	if err != nil {
		return "", fmt.Errorf("failed to call vLLM: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		bodyBytes, _ := io.ReadAll(resp.Body)
		return "", fmt.Errorf("vLLM returned status code %d: %s", resp.StatusCode, bodyBytes)
	}

	// 解析响应
	var result struct {
		Choices []struct {
			Text string `json:"text"`
		} `json:"choices"`
	}

	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return "", fmt.Errorf("failed to decode response: %w", err)
	}

	if len(result.Choices) == 0 {
		return "", fmt.Errorf("no choices in response")
	}

	return result.Choices[0].Text, nil
}
// PredictStream performs a streaming inference against vLLM and forwards
// each generated token/fragment to the results channel. The channel is
// always closed before returning (this function is the sole sender).
// The whole stream is bounded by a 30s timeout.
func (l *LLM) PredictStream(req *pb.PredictOptions, results chan string) error {
	defer close(results)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	reqHTTP, err := newVLLMRequest(ctx, req, true)
	if err != nil {
		return err
	}
	client := &http.Client{}
	resp, err := client.Do(reqHTTP)
	if err != nil {
		return fmt.Errorf("failed to call vLLM: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		bodyBytes, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("vLLM returned status code %d: %s", resp.StatusCode, bodyBytes)
	}

	// The body is a server-sent-events stream: payload lines look like
	// "data: {...json chunk...}" and the stream ends with "data: [DONE]".
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		// Skip blank separators and any non-data SSE lines (comments,
		// keep-alives); previously these fell through to json.Unmarshal
		// and produced a spurious error log per line.
		if !strings.HasPrefix(line, "data:") {
			continue
		}
		jsonStr := strings.TrimSpace(strings.TrimPrefix(line, "data:"))
		if jsonStr == "" {
			continue
		}
		if jsonStr == "[DONE]" {
			break
		}

		var chunk struct {
			Choices []struct {
				Delta struct {
					Content string `json:"content"`
				} `json:"delta"`
			} `json:"choices"`
		}

		if err := json.Unmarshal([]byte(jsonStr), &chunk); err != nil {
			log.Printf("Failed to unmarshal response line: %v", err)
			continue
		}

		for _, choice := range chunk.Choices {
			content := choice.Delta.Content
			if content == "" {
				continue
			}

			select {
			case results <- content:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}

	// Surface read errors to the caller instead of only logging them, so a
	// mid-stream disconnect is not mistaken for a clean end of stream.
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("failed to read stream: %w", err)
	}
	return nil
}

func main() {
	addr := fmt.Sprintf(":%s", grpcPort)
	log.Printf("Starting gRPC server at %s", addr)
	if err := grpc.StartServer(addr, &LLM{}); err != nil {
		panic(err)
	}
}
