package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/ollama"
)

// main wires up the HTTP routes and starts the server on :8083.
func main() {
	router := gin.Default()

	// Simple liveness probe.
	router.GET("/ping", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{
			"message": "OK",
		})
	})

	// Streaming chat endpoint (Server-Sent Events).
	router.POST("/chat", chat)

	// Run blocks until the server stops; a non-nil error (e.g. the port
	// is already in use) was previously ignored and made failures silent.
	if err := router.Run(":8083"); err != nil {
		log.Fatalf("server exited: %v", err)
	}
}

// Prompt is the JSON request body accepted by the POST /chat endpoint.
type Prompt struct {
	Text string `json:"text"` // the user's prompt text sent to the model
}

// chat handles POST /chat: it decodes the prompt, starts generation in a
// background goroutine, and streams each model chunk to the client as an
// SSE "message" event until the generator closes the channel or the client
// disconnects.
func chat(c *gin.Context) {
	var prompt Prompt
	if err := c.BindJSON(&prompt); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	msgChan := make(chan string)

	go Generate(prompt.Text, msgChan)

	c.Stream(func(w io.Writer) bool {
		select {
		case msg, ok := <-msgChan:
			if !ok {
				// Generator closed the channel: end the stream.
				return false
			}
			fmt.Print(msg)
			c.SSEvent("message", msg)
			return true
		case <-c.Request.Context().Done():
			// Client disconnected: stop streaming.
			// BUG fixed: the original selected on c.Done(), which on a
			// *gin.Context returns a nil channel unless the engine's
			// ContextWithFallback is enabled — so disconnects were never
			// detected. The request context always reports cancellation.
			// NOTE(review): Generate may still block on its next channel
			// send after we stop reading; fixing that would require
			// plumbing this context into Generate.
			return false
		}
	})
}

// llaClient is the shared Ollama client used by Generate. It is assigned
// once in init and only read afterwards.
var llaClient *ollama.LLM

func init() {
	// Build the Ollama client for model "llama3:8b-instruct-fp16" served at
	// http://localhost:11434. (The original comments here claimed "gemma:2b"
	// and a remote 192.168.x address, which did not match the code.)
	url := ollama.WithServerURL("http://localhost:11434")
	lla, err := ollama.New(ollama.WithModel("llama3:8b-instruct-fp16"), url)
	if err != nil {
		// Without a client the /chat endpoint cannot work; abort startup.
		panic(err)
	}

	llaClient = lla

	// NOTE(review): ollama.New likely only constructs the client without
	// contacting the server, so this success message may be misleading —
	// verify against the langchaingo ollama package.
	fmt.Println("connect to ollama server successfully")
}

// Generate runs the LLM on prompt and pushes each streamed chunk into
// msgChan. It always closes msgChan when generation finishes — success or
// failure — so the consuming SSE stream in chat terminates.
func Generate(prompt string, msgChan chan string) {
	// BUG fixed: close was only reached on the success path. Deferring it
	// guarantees the consumer is released even when the model call fails.
	defer close(msgChan)

	// Stream each chunk to the channel; bail out if the call's context is
	// cancelled so we don't block forever on a send nobody reads.
	callOp := llms.WithStreamingFunc(func(ctx context.Context, chunk []byte) error {
		select {
		case msgChan <- string(chunk):
		case <-ctx.Done():
			return ctx.Err()
		}
		return nil
	})

	// BUG fixed: this used log.Fatalf, which terminated the entire server
	// process on any single failed model call. Log and return instead so
	// one bad request cannot take the service down.
	if _, err := llaClient.Call(context.Background(), prompt, callOp); err != nil {
		log.Printf("Call failed: %v", err)
	}
}

/***
Plan: build a website.
Backend:
go-zero, gin, gorm, mysql
Frontend:
vue, element-ui, vite
Deployment:
docker, k8s

Benchmark: kimi.ai
Advantages:
lightweight; no VPN, deployment, or configuration required
multiple model choices; no need to train your own model
***/
