package main

import (
	"errors"
	"flag"
	"fmt"
	"os"

	"shyxy-net/pkg/grpc"
	"shyxy-net/pkg/grpc/base"
	pb "shyxy-net/pkg/grpc/proto"
	"shyxy-net/pkg/llm/langchain"
)

// addr is the host:port the gRPC server listens on; overridable via -addr.
var addr = flag.String("addr", ":50051", "grpc server address")

// LLM is a gRPC model backend that proxies prediction requests to the
// OpenAI API through the langchain client. It embeds base.SingleThread,
// which presumably serializes request handling — confirm in the base package.
type LLM struct {
	base.SingleThread
	langchain *langchain.OpenAI // client constructed in Load; nil until then
	model     string            // model name captured from ModelOptions in Load
}

// Load initializes the OpenAI langchain client for the model named in opts,
// reading the API token from the OPENAI_API_TOKEN environment variable.
// It returns an error if the token is missing or client construction fails;
// on failure the receiver is left unmodified.
func (llm *LLM) Load(opts *pb.ModelOptions) error {
	token := os.Getenv("OPENAI_API_TOKEN")
	if token == "" {
		return errors.New("no openai api token provided")
	}
	client, err := langchain.NewOpenAI(opts.Model, token)
	if err != nil {
		return err
	}
	// Only mutate state once construction succeeded, so a failed Load
	// does not leave a half-initialized backend behind.
	llm.langchain = client
	llm.model = opts.Model
	return nil
}

// Predict runs a single synchronous completion for opts.Prompt against the
// model configured in Load and returns the completion text.
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
	predictOpts := []langchain.PredictOption{
		langchain.SetModel(llm.model),
		langchain.SetMaxTokens(int(opts.Tokens)),
		langchain.SetTemperature(float64(opts.Temperature)),
		langchain.SetStopWords(opts.StopPrompts),
	}

	result, err := llm.langchain.PredictOpenAI(opts.Prompt, predictOpts...)
	if err != nil {
		return "", err
	}

	return result.Completion, nil
}

// PredictStream runs the prediction in a background goroutine and delivers
// the completion text on results. The channel is always closed when the
// goroutine finishes, whether or not the prediction succeeded, so receivers
// never block forever.
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
	o := []langchain.PredictOption{
		langchain.SetModel(llm.model),
		langchain.SetMaxTokens(int(opts.Tokens)),
		langchain.SetTemperature(float64(opts.Temperature)),
		langchain.SetStopWords(opts.StopPrompts),
	}
	go func() {
		defer close(results)
		res, err := llm.langchain.PredictOpenAI(opts.Prompt, o...)
		if err != nil {
			fmt.Println("err:", err)
			// res may be nil on error; sending res.Completion here would
			// panic with a nil pointer dereference, so bail out early.
			return
		}
		results <- res.Completion
	}()
	return nil
}

func main() {
	flag.Parse()
	if err := grpc.StartServer(*addr, &LLM{}); err != nil {
		panic(err)
	}
}
