package media

import (
	"context"
	"errors"
	"fmt"

	"github.com/sashabaranov/go-openai"
	"github.com/sashabaranov/go-openai/jsonschema"

	"beesoft.ink/application/media/app/model/media"
)

// Text is the package-level entry point for the text-generation
// operations defined on the text type.
var Text text

// text groups the OpenAI text/chat completion helpers; it carries no state.
type text struct{}

// Completion performs a legacy (non-chat) text completion against the
// OpenAI API using req.Prompt and returns the first generated choice.
//
// NOTE(review): the model (GPT3Ada) and MaxTokens (5) are hard-coded and
// look like leftover test values — confirm they are intentional.
func (r *text) Completion(ctx context.Context, req *media.TextRequest) (*media.TextResponse, error) {
	response := new(media.TextResponse)

	client := openai.NewClient(Org.Openai.Apikey)
	res, err := client.CreateCompletion(
		ctx, // honor the caller's cancellation/deadline (was context.Background())
		openai.CompletionRequest{
			Model:     openai.GPT3Ada,
			MaxTokens: 5,
			Prompt:    req.Prompt,
		},
	)
	if err != nil {
		return response, fmt.Errorf("completion: %w", err)
	}
	// Guard before indexing: the API can return an empty choice list.
	if len(res.Choices) == 0 {
		return response, errors.New("completion: no choices returned")
	}

	response.Content = res.Choices[0].Text

	return response, nil
}
// CompletionTool demonstrates a two-step chat completion with tool
// (function) calling: the model is offered a get_current_weather tool,
// its tool call is answered with a canned result, and a second
// completion turns that result into the final natural-language answer.
func (r *text) CompletionTool(ctx context.Context, req *media.TextRequest) (*media.TextResponse, error) {
	response := new(media.TextResponse)

	// JSON schema describing the tool's parameters.
	params := jsonschema.Definition{
		Type: jsonschema.Object,
		Properties: map[string]jsonschema.Definition{
			"location": {
				Type:        jsonschema.String,
				Description: "The city and state, e.g. San Francisco, CA",
			},
			"unit": {
				Type: jsonschema.String,
				Enum: []string{"celsius", "fahrenheit"},
			},
		},
		Required: []string{"location"},
	}
	f := openai.FunctionDefinition{
		Name:        "get_current_weather",
		Description: "Get the current weather in a given location",
		Parameters:  params,
	}
	t := openai.Tool{
		Type:     openai.ToolTypeFunction,
		Function: &f,
	}

	dialogue := []openai.ChatCompletionMessage{
		{
			Role:    openai.ChatMessageRoleUser,
			Content: req.Prompt, // e.g. "What is the weather in Boston today?"
		},
	}

	client := openai.NewClient(Org.Openai.Apikey)
	resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model:    openai.GPT4TurboPreview,
		Messages: dialogue,
		Tools:    []openai.Tool{t},
	})
	if err != nil || len(resp.Choices) != 1 {
		return response, fmt.Errorf("completion: err: %v, len(choices): %d", err, len(resp.Choices))
	}

	msg := resp.Choices[0].Message
	if len(msg.ToolCalls) != 1 {
		return response, fmt.Errorf("completion: expected 1 tool call, got %d", len(msg.ToolCalls))
	}

	// Feed the (stubbed) tool result back into the conversation,
	// referencing the tool call by ID as the API requires.
	dialogue = append(dialogue, msg, openai.ChatCompletionMessage{
		Role:       openai.ChatMessageRoleTool,
		Content:    "Sunny and 80 degrees.",
		Name:       msg.ToolCalls[0].Function.Name, // arguments are in msg.ToolCalls[0].Function.Arguments
		ToolCallID: msg.ToolCalls[0].ID,
	})

	// Second round-trip so the model can turn the tool result into a
	// final answer. Without it the returned Content would come from the
	// tool-call message, which is typically empty.
	resp, err = client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model:    openai.GPT4TurboPreview,
		Messages: dialogue,
		Tools:    []openai.Tool{t},
	})
	if err != nil || len(resp.Choices) != 1 {
		return response, fmt.Errorf("2nd completion: err: %v, len(choices): %d", err, len(resp.Choices))
	}

	response.Content = resp.Choices[0].Message.Content

	return response, nil
}

// ChatCompletion sends req.Prompt through the chat-completion API
// (GPT-3.5 Turbo) and returns the first choice's message content.
//
// NOTE(review): the same prompt is sent as both the system and the user
// message — confirm the system message shouldn't be a fixed instruction
// instead.
func (r *text) ChatCompletion(ctx context.Context, req *media.TextRequest) (*media.TextResponse, error) {
	response := new(media.TextResponse)

	client := openai.NewClient(Org.Openai.Apikey)
	res, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleSystem,
				Content: req.Prompt,
			},
			{
				Role:    openai.ChatMessageRoleUser,
				Content: req.Prompt,
			},
		},
	})
	if err != nil {
		return response, fmt.Errorf("chat completion: %w", err)
	}
	// Guard before indexing: the API can return an empty choice list.
	if len(res.Choices) == 0 {
		return response, errors.New("chat completion: no choices returned")
	}

	response.Content = res.Choices[0].Message.Content

	return response, nil
}
