package llm

import (
	"context"
	"fmt"
	openai "github.com/sashabaranov/go-openai"
)

// NewChat builds a Chat backed by the official OpenAI client,
// authenticated with apiKey and defaulting to the gpt-3.5-turbo model.
func NewChat(apiKey string) *Chat {
	c := &Chat{
		apiKey: apiKey,
		model:  openai.GPT3Dot5Turbo,
	}
	c.client = openai.NewClient(apiKey)
	return c
}

// Chat is a thin wrapper around the go-openai client that carries the
// connection settings (endpoint, API key) and per-request defaults
// (model, max tokens) used by its completion methods.
type Chat struct {
	client    *openai.Client // underlying OpenAI API client
	endpoint  string         // custom base URL; empty means the library default
	apiKey    string         // API key used to authenticate requests
	model     string         // model name sent with every request
	maxTokens int            // MaxTokens for requests; 0 lets the API decide
}

// Endpoint points the client at a custom base URL (e.g. a proxy or an
// Azure/compatible deployment). The underlying client is rebuilt so the
// new endpoint actually takes effect; previously the value was stored
// but never applied to the client created in NewChat.
func (r *Chat) Endpoint(endpoint string) {
	r.endpoint = endpoint
	cfg := openai.DefaultConfig(r.apiKey)
	if endpoint != "" {
		cfg.BaseURL = endpoint
	}
	r.client = openai.NewClientWithConfig(cfg)
}
// ApiKey swaps the API key used for authentication. The underlying
// client is rebuilt (preserving any custom endpoint) so the new key is
// actually used; previously the value was stored but the client kept
// authenticating with the key passed to NewChat.
func (r *Chat) ApiKey(apiKey string) {
	r.apiKey = apiKey
	cfg := openai.DefaultConfig(apiKey)
	if r.endpoint != "" {
		cfg.BaseURL = r.endpoint
	}
	r.client = openai.NewClientWithConfig(cfg)
}
// Model selects the model name sent with every request.
func (r *Chat) Model(model string) {
	r.model = model
}

// MaxTokens sets the MaxTokens limit attached to requests. Without this
// setter the maxTokens field could never be configured and was always 0
// (API default). A value of 0 keeps the API default.
func (r *Chat) MaxTokens(maxTokens int) {
	r.maxTokens = maxTokens
}

// ChatCompletion performs a blocking chat-completion request with the
// configured model and token limit.
//
// On failure the error is logged and nil is returned; callers must
// check for nil. (The signature cannot grow an error return without
// breaking existing callers.)
func (r *Chat) ChatCompletion(ctx context.Context, messages []openai.ChatCompletionMessage) *openai.ChatCompletionResponse {
	resp, err := r.client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model: r.model,
		// Include the configured limit for consistency with
		// ChatCompletionStream and Completion; 0 means API default.
		MaxTokens: r.maxTokens,
		Messages:  messages,
	})
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return nil
	}

	return &resp
}

// ChatCompletionStream opens a streaming chat-completion request using
// the configured model and token limit. The caller owns the returned
// stream and is responsible for closing it.
func (r *Chat) ChatCompletionStream(ctx context.Context, messages []openai.ChatCompletionMessage) (*openai.ChatCompletionStream, error) {
	return r.client.CreateChatCompletionStream(ctx, openai.ChatCompletionRequest{
		Model:     r.model,
		MaxTokens: r.maxTokens,
		Messages:  messages,
		Stream:    true,
	})
}
// Completion performs a legacy (non-chat) text-completion request for
// the given prompt, using the configured model and token limit.
func (r *Chat) Completion(ctx context.Context, message string) (openai.CompletionResponse, error) {
	return r.client.CreateCompletion(ctx, openai.CompletionRequest{
		Model:     r.model,
		MaxTokens: r.maxTokens,
		Prompt:    message,
	})
}
