package ollama

import (
	"context"
	"fmt"
	"gitee.com/mcp-office/agent/llm"
	"io"
	"net/http"
)

// https://github.com/ollama/ollama/blob/main/docs/api.md

// OllamaProvider is an llm.Provider implementation that talks to a local
// Ollama server (default base URL http://localhost:11434).
// The embedded llm.Provider supplies the BaseUrl and ApiKey fields used
// by ChatCompletion.
type OllamaProvider struct {
	llm.Provider
}

// ChatResponse mirrors the JSON body returned by Ollama's /api/chat
// endpoint (see https://github.com/ollama/ollama/blob/main/docs/api.md).
// All *Duration fields are nanoseconds per the Ollama API docs.
// NOTE(review): currently unused — ChatCompletion returns the raw body and
// the json.Unmarshal into this type is commented out below.
type ChatResponse struct {
	Model              string                    `json:"model"`
	CreatedAt          string                    `json:"created_at"`
	Message            llm.ChatCompletionMessage `json:"message"`
	DoneReason         string                    `json:"done_reason"`
	Done               bool                      `json:"done"`
	TotalDuration      int64                     `json:"total_duration"`
	LoadDuration       int64                     `json:"load_duration"`
	PromptEvalCount    int                       `json:"prompt_eval_count"`
	PromptEvalDuration int64                     `json:"prompt_eval_duration"`
	EvalCount          int                       `json:"eval_count"`
	EvalDuration       int64                     `json:"eval_duration"`
}

// NewOllamaProvider returns a provider pointed at the default local Ollama
// server (http://localhost:11434) with the given API key.
// Ollama itself does not require an API key; it is kept for interface
// symmetry with other providers.
func NewOllamaProvider(apiKey string) *OllamaProvider {
	// The local was previously named "qwen" (copy-paste from the QWen
	// provider); renamed for clarity. The stale doc comment naming
	// NewQWenProvider is fixed as well.
	var p OllamaProvider
	p.BaseUrl = "http://localhost:11434"
	p.ApiKey = apiKey
	return &p
}

// ChatCompletion POSTs req to Ollama's /api/chat endpoint and returns the
// raw JSON response body as a string. The caller is responsible for
// decoding it (e.g. into ChatResponse).
//
// It returns a non-nil error when the request cannot be built or sent,
// when the body cannot be read, or when the server responds with a
// non-200 status (previously a non-200 status silently returned "", nil).
func (p *OllamaProvider) ChatCompletion(ctx context.Context, req llm.ChatCompletionRequest) (string, error) {
	request, err := llm.NewRequestBuilder(p.ApiKey).
		SetMethod(http.MethodPost).
		SetBaseUrl(p.BaseUrl).
		SetPath("/api/chat").
		SetBody(req).
		Build(ctx)
	if err != nil {
		return "", err
	}
	resp, err := http.DefaultClient.Do(request)
	if err != nil {
		return "", err
	}
	// Always close the body; the original leaked the connection.
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("读取响应体失败: %w", err)
	}
	// Read the body before checking the status so the server's error
	// payload can be surfaced to the caller.
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("ollama chat request failed: status %d, body: %s", resp.StatusCode, body)
	}
	return string(body), nil
}
