package ai

import (
	"context"
	"errors"
	"io"
	"log"

	"gitee.com/CrazyMage/blog-service/global"
	"github.com/cloudwego/eino/components/model"
	"github.com/cloudwego/eino/schema"
)

// ChatModel wraps an eino tool-calling chat model together with the
// context used for all of its calls.
//
// NOTE(review): storing a context.Context in a struct is discouraged by Go
// convention (contexts should flow through call parameters) — consider
// passing ctx to BlockingChat/StreamingChat instead; verify against callers.
type ChatModel struct {
	ctx context.Context           // context applied to every Generate/Stream call
	llm model.ToolCallingChatModel // underlying LLM backend (Ollama; see NewChatModel)
}

// NewChatModel constructs a ChatModel backed by the Ollama chat model,
// retaining ctx for use in subsequent Generate/Stream calls.
func NewChatModel(ctx context.Context) *ChatModel {
	cm := &ChatModel{ctx: ctx}
	cm.llm = CreateOllamaChatModel(ctx)
	return cm
}

// BlockingChat performs a non-streaming (blocking) chat call and returns
// the complete response message.
//
// NOTE(review): Fatalf terminates the whole process on a transient LLM
// error — consider returning the error to the caller instead.
func (c *ChatModel) BlockingChat(in []*schema.Message) *schema.Message {
	msg, genErr := c.llm.Generate(c.ctx, in)
	if genErr != nil {
		global.Logger.Fatalf(c.ctx, "llm generate failed: %v", genErr)
	}
	return msg
}

// StreamingChat performs a streaming chat call and returns a stream reader
// that yields response chunks; the caller is responsible for closing it.
//
// NOTE(review): Fatalf terminates the whole process on a transient LLM
// error — consider returning the error to the caller instead.
func (c *ChatModel) StreamingChat(in []*schema.Message) *schema.StreamReader[*schema.Message] {
	result, err := c.llm.Stream(c.ctx, in)
	if err != nil {
		// Was "llm generate failed" — copy-paste from BlockingChat; this
		// path calls Stream, so log the accurate operation name.
		global.Logger.Fatalf(c.ctx, "llm stream failed: %v", err)
	}
	return result
}

// reportStream drains sr, logging each received message with its index,
// and closes the reader when done.
//
// NOTE(review): log.Fatalf on a recv error exits the process; consider
// returning the error if callers can handle it.
func reportStream(sr *schema.StreamReader[*schema.Message]) {
	defer sr.Close()
	i := 0
	for {
		message, err := sr.Recv()
		// Use errors.Is rather than ==: stream implementations may wrap
		// the io.EOF sentinel, and errors.Is matches through wrapping.
		if errors.Is(err, io.EOF) {
			return
		}
		if err != nil {
			log.Fatalf("recv failed: %v", err)
		}
		log.Printf("message[%d]: %+v\n", i, message)
		i++
	}
}
