package base

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"gitee.com/flycash/ai-gateway-demo/internal/domain"
	"gitee.com/flycash/ai-gateway-demo/internal/repository"
	"github.com/cbroglie/mustache"
	"github.com/google/uuid"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/ssestream"
	"io"
	"log"
	"net/http"
	"time"
)

// Service implements sync, async and streaming LLM invocation on top of an
// OpenAI-compatible endpoint, with prompt templates loaded from a repository
// and async task state persisted for later polling / callback delivery.
type Service struct {
	repo     repository.PromptConfigRepo // prompt templates, looked up by PromptKey
	taskRepo repository.AsyncTaskRepo    // persistence for async task state/results
	client   *openai.Client              // OpenAI-compatible chat-completion client
	model    string                      // model name sent with every request
}

// AsyncInvoke persists the request as a pending task, starts execution in a
// background goroutine, and immediately returns the task id so the caller can
// poll for (or be called back with) the final result.
func (a *Service) AsyncInvoke(ctx context.Context, req domain.AsyncRequest) (domain.AsyncResponse, error) {
	taskId := uuid.New().String()
	err := a.taskRepo.Save(ctx, domain.AsyncTask{
		TaskID: taskId,
		Req:    req,
	})
	if err != nil {
		return domain.AsyncResponse{}, err
	}
	// Execute in the background and deliver via the callback URL.
	// Use a goroutine-local error: the original assigned to the enclosing
	// `err` variable, racing with this function's return path.
	go func() {
		if gerr := a.asyncInvoke(taskId, req); gerr != nil {
			log.Printf("请求回调地址失败 %v", gerr)
		}
	}()
	return domain.AsyncResponse{
		TaskID: taskId,
		Status: domain.PendingStatus,
		// NOTE(review): this is "now", not an estimate — presumably a
		// placeholder; confirm intended semantics.
		EstimatedCompletionTime: time.Now().Unix(),
	}, nil
}

// toAsyncInvoke degrades a synchronous request into an async task: the
// request is persisted and only the task id is handed back to the caller.
// NOTE(review): no worker is started here — presumably a separate consumer
// picks up pending tasks; confirm.
func (a *Service) toAsyncInvoke(ctx context.Context, req domain.SyncRequest) (domain.SyncResponse, error) {
	taskId := uuid.New().String()
	task := domain.AsyncTask{
		TaskID: taskId,
		Req:    domain.AsyncRequest{Req: req},
	}
	if err := a.taskRepo.Save(ctx, task); err != nil {
		return domain.SyncResponse{}, err
	}
	return domain.SyncResponse{TaskId: taskId}, nil
}

// syncToAsyncInvoke executes a previously persisted sync-degraded task and
// records its terminal state (Status 1 on success, 2 on failure — confirm
// these codes against the domain package).
func (a *Service) syncToAsyncInvoke(ctx context.Context, taskId string, req domain.SyncRequest) error {
	resp, err := a.syncInvoke(ctx, req)
	// On failure, best-effort mark the task as failed before returning.
	defer func() {
		if err != nil {
			terr := a.taskRepo.Save(ctx, domain.AsyncTask{
				TaskID: taskId,
				Req:    domain.AsyncRequest{Req: req},
				Status: 2,
			})
			// Fixes: %w is not a valid log.Printf verb (use %v), and only
			// log when the save actually failed instead of unconditionally.
			if terr != nil {
				log.Printf("保存失败错误 %v", terr)
			}
		}
	}()
	if err != nil {
		return err
	}
	return a.taskRepo.Save(ctx, domain.AsyncTask{
		TaskID: taskId,
		Req:    domain.AsyncRequest{Req: req},
		Status: 1,
		Resp:   resp,
	})
}

// isLimited reports whether upstream middleware flagged this context as
// rate-limited via a boolean stored under the "RateLimited" key.
func (a *Service) isLimited(ctx context.Context) bool {
	limited, ok := ctx.Value("RateLimited").(bool)
	return ok && limited
}

// asyncInvoke runs an async task end to end: invoke the model, deliver the
// result to the callback URL, and persist the final task state. It uses a
// detached context so the task is not bound to the original request's
// lifetime, capped at 10 minutes.
func (a *Service) asyncInvoke(taskId string, req domain.AsyncRequest) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	resp, err := a.syncInvoke(ctx, req.Req)
	// On any failure (invoke or callback), best-effort mark the task failed.
	defer func() {
		if err != nil {
			terr := a.taskRepo.Save(ctx, domain.AsyncTask{
				TaskID: taskId,
				Req:    req,
				Status: 2,
			})
			// Fixes: %w is not a valid log.Printf verb (use %v), and only
			// log when the save actually failed instead of unconditionally.
			if terr != nil {
				log.Printf("保存失败错误 %v", terr)
			}
		}
	}()
	if err != nil {
		return err
	}
	err = a.callbackSend(req, resp)
	if err != nil {
		return err
	}
	// Persist the successful result so polling callers can fetch it.
	return a.taskRepo.Save(ctx, domain.AsyncTask{
		TaskID: taskId,
		Req:    req,
		Status: 1,
		Resp:   resp,
	})
}

// SyncInvoke answers the request synchronously, but degrades to the async
// path when the context has been flagged as rate-limited (in that case the
// response carries only a TaskId the caller can poll).
func (a *Service) SyncInvoke(ctx context.Context, req domain.SyncRequest) (domain.SyncResponse, error) {
	// Idiom fix: no else after a terminating branch; keep the happy path flat.
	if a.isLimited(ctx) {
		return a.toAsyncInvoke(ctx, req)
	}
	return a.syncInvoke(ctx, req)
}

// syncInvoke builds the chat messages (history + rendered prompt) and issues
// one blocking chat-completion call, mapping the SDK response onto the
// domain response type.
func (a *Service) syncInvoke(ctx context.Context, req domain.SyncRequest) (domain.SyncResponse, error) {
	msgs, err := a.buildMsgs(ctx, req)
	if err != nil {
		return domain.SyncResponse{}, err
	}
	chatCompletion, err := a.client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
		Messages: openai.F(msgs),
		Model:    openai.F(a.model),
	})
	if err != nil {
		return domain.SyncResponse{}, err
	}
	// Bug fix: guard the Choices access — indexing [0] on an empty slice
	// would panic if the provider returns no choices.
	if len(chatCompletion.Choices) == 0 {
		return domain.SyncResponse{}, fmt.Errorf("模型响应不包含任何结果, responseId: %s", chatCompletion.ID)
	}

	resp := domain.SyncResponse{
		ResponseId: chatCompletion.ID,
		Content:    chatCompletion.Choices[0].Message.Content,
		Usage: domain.SyncResponseUsage{
			PromptTokens:     int32(chatCompletion.Usage.PromptTokens),
			CompletionTokens: int32(chatCompletion.Usage.CompletionTokens),
			TotalTokens:      int32(chatCompletion.Usage.TotalTokens),
		},
		CreatedTime: chatCompletion.Created,
	}
	return resp, nil
}

// getPrompt loads the mustache template registered under req.PromptKey and
// renders it with the request's variables.
func (a *Service) getPrompt(ctx context.Context, req domain.SyncRequest) (string, error) {
	tpl, err := a.repo.FindPromptByKey(ctx, req.PromptKey)
	if err != nil {
		return "", err
	}
	return mustache.Render(tpl.Template, req.Variables)
}

// StreamInvoke starts a streaming chat completion and returns a channel of
// events (message deltas, a final usage/end event, or an error event). The
// channel is closed by the producer goroutine when the stream ends.
//
// NOTE(review): the goroutine runs on a detached Background context with its
// own 10-minute timeout, so cancelling the caller's ctx does NOT stop the
// stream — confirm this is intended. If the consumer stops draining eventCh,
// the goroutine can block forever once the 10-slot buffer fills.
func (a *Service) StreamInvoke(ctx context.Context, req domain.SyncRequest) (chan domain.StreamEvent, error) {
	// Buffered so slow consumers don't immediately stall the producer.
	eventCh := make(chan domain.StreamEvent, 10)
	msgs, err := a.buildMsgs(ctx, req)
	if err != nil {
		return nil, err
	}

	params := openai.ChatCompletionNewParams{
		Messages: openai.F(msgs),
		Model:    openai.F(a.model),
		// Ask the provider to append a final usage chunk to the stream.
		StreamOptions: openai.F(openai.ChatCompletionStreamOptionsParam{
			IncludeUsage: openai.F(true),
		}),
		Temperature: openai.F(float64(req.Model.Params.Temperature)),
		TopP:        openai.F(float64(req.Model.Params.TopP)),
		MaxTokens:   openai.F(int64(req.Model.Params.MaxTokens)),
	}
	go func() {
		// Detached context: the stream outlives the originating request.
		newCtx, cancel := context.WithTimeout(context.Background(), time.Minute*10)
		defer cancel()
		stream := a.client.Chat.Completions.NewStreaming(newCtx, params)
		a.recv(eventCh, stream)
	}()
	return eventCh, nil
}

// NewService constructs a Service that talks to an OpenAI-compatible endpoint
// at the given base URL, using the supplied repositories and default model.
func NewService(apikey, url string, repo repository.PromptConfigRepo, taskRepo repository.AsyncTaskRepo, model string) *Service {
	return &Service{
		client: openai.NewClient(
			option.WithBaseURL(url),
			option.WithAPIKey(apikey),
		),
		repo:     repo,
		taskRepo: taskRepo,
		model:    model,
	}
}

// Delta mirrors the delta payload of a streaming chunk, including the
// non-standard reasoning_content field emitted by some providers.
type Delta struct {
	Content          string `json:"content"`
	ReasoningContent string `json:"reasoning_content"`
}

// buildMsgs converts the request's chat history into SDK message params and
// appends the rendered prompt template as a final user message.
// NOTE(review): roles other than user/system are silently dropped — confirm
// that is intended.
func (a *Service) buildMsgs(ctx context.Context, req domain.SyncRequest) ([]openai.ChatCompletionMessageParamUnion, error) {
	msgs := make([]openai.ChatCompletionMessageParamUnion, 0, len(req.Messages)+1)
	for _, m := range req.Messages {
		switch m.Role {
		case domain.ChatMessageTypeUser:
			msgs = append(msgs, openai.UserMessage(m.Content))
		case domain.ChatMessageTypeSystem:
			msgs = append(msgs, openai.SystemMessage(m.Content))
		}
	}
	prompt, err := a.getPrompt(ctx, req)
	if err != nil {
		return nil, err
	}
	return append(msgs, openai.UserMessage(prompt)), nil
}

// recv drains a streaming completion: forwards each content delta as a
// message event, then emits one end event carrying the accumulated content
// and token usage. It always closes eventCh on return so consumers can range
// over the channel.
func (a *Service) recv(eventCh chan domain.StreamEvent,
	stream *ssestream.Stream[openai.ChatCompletionChunk]) {
	defer close(eventCh)
	// Accumulator rebuilds the full message and usage from the chunks.
	acc := openai.ChatCompletionAccumulator{}
	for stream.Next() {
		chunk := stream.Current()
		acc.AddChunk(chunk)
		// Consume the chunk only after handing it to the accumulator.
		if len(chunk.Choices) > 0 {
			// An empty FinishReason means the stream has not ended yet.
			if chunk.Choices[0].FinishReason == "" {
				// Decode the raw delta JSON ourselves to pick up the
				// provider-specific reasoning_content field.
				var delta Delta
				err := json.Unmarshal([]byte(chunk.Choices[0].Delta.JSON.RawJSON()), &delta)
				if err != nil {
					eventCh <- domain.StreamEvent{
						Type: domain.ErrorStreamEvent,
						Err:  err,
					}
					return
				}
				// Prefer Content over ReasoningContent when both are set.
				var content string
				if delta.ReasoningContent != "" {
					content = delta.ReasoningContent
				}
				if delta.Content != "" {
					content = delta.Content
				}
				eventCh <- domain.StreamEvent{
					Type:    domain.MessageStreamEvent,
					Content: content,
				}
			}
		}
	}
	// Surface a transport/stream error as a terminal error event.
	if stream.Err() != nil {
		eventCh <- domain.StreamEvent{
			Type: domain.ErrorStreamEvent,
			Err:  stream.Err(),
		}
		return
	}
	var ans string
	if len(acc.Choices) > 0 {
		ans = acc.Choices[0].Message.Content
	}
	// Terminal event: full accumulated content plus token usage (usage is
	// populated because StreamInvoke requests IncludeUsage).
	eventCh <- domain.StreamEvent{
		Type: domain.EndStreamEvent,
		Usage: domain.SyncResponseUsage{
			PromptTokens:     int32(acc.Usage.PromptTokens),
			CompletionTokens: int32(acc.Usage.CompletionTokens),
			TotalTokens:      int32(acc.Usage.TotalTokens),
		},
		Content: ans,
	}

}

// result is the minimal envelope the callback endpoint is expected to return.
type result struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
}

// callbackSend POSTs the completed response as JSON to the async request's
// callback URL and validates that the endpoint answered 200 with a parsable
// result envelope.
func (a *Service) callbackSend(asyncReq domain.AsyncRequest, params domain.SyncResponse) error {
	jsonData, err := json.Marshal(params)
	if err != nil {
		return fmt.Errorf("序列化数据失败 %w", err)
	}

	req, err := http.NewRequest(http.MethodPost, asyncReq.CallbackConfig.NotifyUrl, bytes.NewBuffer(jsonData))
	if err != nil {
		return fmt.Errorf("创建请求失败 %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	// Bug fix: give the client a timeout so a hung callback endpoint cannot
	// block the async worker indefinitely.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("发送请求失败 %w", err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		// Bug fix: the original wrapped a nil err here ("解析响应失败 %w"),
		// producing a useless error. Report the actual status and body.
		return fmt.Errorf("回调返回非 200 状态码: %d, 响应: %s", resp.StatusCode, string(body))
	}
	log.Printf("响应状态码: %d\n响应内容: %s\n", resp.StatusCode, string(body))
	var res result
	if err := json.Unmarshal(body, &res); err != nil {
		// Bug fix: this is deserialization, not serialization.
		return fmt.Errorf("反序列化响应失败 %w", err)
	}
	return nil
}
