package grpc

import (
	"context"
	"fmt"
	aiv1 "gitee.com/flycash/ai-gateway-demo/api/proto/gen/ai/gateway/v1"
	"gitee.com/flycash/ai-gateway-demo/internal/domain"
	"gitee.com/flycash/ai-gateway-demo/internal/service/llm"
	"github.com/ecodeclub/ekit/slice"
	status "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	statusv1 "google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/timestamppb"
	"time"
)

// LLMServiceServer adapts the gRPC transport layer to the business-layer
// llm.Service: every RPC maps protobuf messages to domain types, delegates,
// and maps the result back.
type LLMServiceServer struct {
	// Embedded for forward compatibility: new RPCs added to the proto
	// return codes.Unimplemented instead of breaking compilation.
	// (Original note: "正常我都会组合这个" — "I would normally always embed this".)
	aiv1.UnimplementedAIGatewayServer
	// svc is the business layer this server delegates all calls to.
	svc llm.Service
}

// SyncInvoke handles a unary LLM invocation: it converts the protobuf
// request to the domain model, calls the business layer synchronously, and
// converts the result back.
//
// On failure it returns a proper gRPC status error with a nil response.
// The original returned a populated SyncResponse alongside a non-nil
// error, but gRPC discards the response message whenever err != nil, so
// the embedded status.Status could never reach the client.
func (l *LLMServiceServer) SyncInvoke(ctx context.Context, request *aiv1.SyncRequest) (*aiv1.SyncResponse, error) {
	req := l.getReq(request)
	resp, err := l.svc.SyncInvoke(ctx, req)
	if err != nil {
		return nil, statusv1.Error(codes.Internal, err.Error())
	}
	return &aiv1.SyncResponse{
		ResponseId: resp.ResponseId,
		Content:    resp.Content,
		// Domain layer stores creation time as Unix seconds (int64).
		CreatedTime: timestamppb.New(time.Unix(resp.CreatedTime, 0)),
		Usage: &aiv1.SyncResponse_Usage{
			PromptTokens:     resp.Usage.PromptTokens,
			CompletionTokens: resp.Usage.CompletionTokens,
			TotalTokens:      resp.Usage.TotalTokens,
		},
	}, nil
}

// StreamInvoke handles a server-streaming LLM invocation. The business
// layer pushes incremental events on a channel and closes it when the
// stream completes; each event is forwarded to the client as a
// StreamResponse frame.
func (l *LLMServiceServer) StreamInvoke(request *aiv1.SyncRequest, g grpc.ServerStreamingServer[aiv1.StreamResponse]) error {
	req := l.getReq(request)

	evtChan, err := l.svc.StreamInvoke(g.Context(), req)
	if err != nil {
		return err
	}

	for {
		select {
		case evt, ok := <-evtChan:
			if !ok {
				// Channel closed by the producer: normal end of stream.
				return nil
			}
			if evt.Err != nil {
				// Best-effort in-band error frame, then terminate the RPC.
				// BUG FIX: the original called err.Error() here, but err is
				// provably nil at this point (checked right after
				// StreamInvoke) — a guaranteed nil-pointer panic. The event's
				// own error is what must be reported.
				_ = g.Send(l.getContent(evt))
				return statusv1.Error(codes.Internal, evt.Err.Error())
			}
			if sendErr := g.Send(l.getContent(evt)); sendErr != nil {
				return statusv1.Error(codes.Internal, fmt.Sprintf("发送流数据失败: %v", sendErr))
			}

		case <-g.Context().Done():
			// Client cancelled the RPC or its deadline expired.
			return statusv1.Error(codes.Canceled, "客户端请求取消")
		}
	}
}

// AsyncInvoke submits an asynchronous LLM task: it builds the domain
// request (including the optional callback configuration), hands it to the
// business layer, and returns the task handle to the client.
func (l *LLMServiceServer) AsyncInvoke(ctx context.Context, request *aiv1.AsyncRequest) (*aiv1.AsyncResponse, error) {
	domainReq := domain.AsyncRequest{
		Req: l.getReq(request.GetRequest()),
	}
	// Callback configuration is optional; leave the zero value when absent.
	if cb := request.GetCallback(); cb != nil {
		domainReq.CallbackConfig = domain.CallbackConfig{
			NotifyUrl:    cb.GetNotifyUrl(),
			RetryPolicy:  cb.GetRetryPolicy(),
			MessageQueue: cb.GetMessageQueue(),
		}
	}
	resp, err := l.svc.AsyncInvoke(ctx, domainReq)
	if err != nil {
		return nil, err
	}
	return &aiv1.AsyncResponse{
		TaskId:                  resp.TaskID,
		EstimatedCompletionTime: timestamppb.New(time.Unix(resp.EstimatedCompletionTime, 0)),
		Status:                  aiv1.AsyncTaskStatus(resp.Status),
	}, nil
}

// GetAsyncResult is not implemented yet. It returns codes.Unimplemented
// instead of panicking: a panic inside a handler tears down the request
// (or, without a recovery interceptor, the whole server), while
// Unimplemented is the conventional gRPC answer for a missing method.
func (l *LLMServiceServer) GetAsyncResult(ctx context.Context, request *aiv1.AsyncResultRequest) (*aiv1.AsyncResultResponse, error) {
	return nil, statusv1.Error(codes.Unimplemented, "GetAsyncResult is not implemented")
}

// BatchInvoke is not implemented yet. It returns codes.Unimplemented
// instead of panicking, consistent with standard gRPC behavior for
// missing methods and safe for the serving process.
func (l *LLMServiceServer) BatchInvoke(request *aiv1.BatchRequest, g grpc.ServerStreamingServer[aiv1.BatchResponse]) error {
	return statusv1.Error(codes.Unimplemented, "BatchInvoke is not implemented")
}

// NewLLMServiceServer wires a business-layer llm.Service into a gRPC
// transport adapter.
func NewLLMServiceServer(svc llm.Service) *LLMServiceServer {
	return &LLMServiceServer{svc: svc}
}

// toDomainModelSpec converts the protobuf ModelSpec into its domain
// representation: optional generation parameters plus exactly one of the
// routing oneof variants (provider / model / strategy).
//
// BUG FIX: the original guarded the switch with req.GetRouting() != nil,
// which made the default "auto" branch unreachable in the common "no
// routing set" case — a nil routing skipped the switch entirely and left
// RoutingType empty, despite the branch being commented as the default
// strategy ("默认策略"). Switching on req.GetRouting() directly lets the
// nil case fall into default and also removes the redundant second
// GetRouting() call.
func (l *LLMServiceServer) toDomainModelSpec(req *aiv1.ModelSpec) domain.ModelSpec {
	var spec domain.ModelSpec
	if params := req.GetParams(); params != nil {
		spec.Params = domain.ModelParams{
			Temperature: params.Temperature,
			TopP:        params.GetTopP(),
			MaxTokens:   params.MaxTokens,
			ExtraParams: params.ExtraParams,
		}
	}
	switch v := req.GetRouting().(type) {
	case *aiv1.ModelSpec_Provider:
		spec.Provider = v.Provider
		spec.RoutingType = "provider"
	case *aiv1.ModelSpec_Model:
		spec.ModelID = v.Model
		spec.RoutingType = "model"
	case *aiv1.ModelSpec_Strategy:
		spec.Strategy = domain.RoutingStrategy(v.Strategy)
		spec.RoutingType = "strategy"
	default:
		// Covers both an unset oneof (nil) and unknown future variants.
		spec.RoutingType = "auto" // default strategy
	}

	return spec
}

// getContent maps a domain stream event onto the StreamResponse oneof:
// an error event becomes an in-band google.rpc.Status, a message event
// becomes a content delta, and the end event carries the token usage.
// Unknown event types yield an empty frame.
func (l *LLMServiceServer) getContent(evt domain.StreamEvent) *aiv1.StreamResponse {
	// An error on the event takes precedence over its type.
	if evt.Err != nil {
		return &aiv1.StreamResponse{
			Content: &aiv1.StreamResponse_Error{
				Error: &status.Status{
					// BUG FIX: google.rpc.Status.code carries gRPC code
					// values; the original hard-coded 500 (an HTTP status),
					// which is not a valid gRPC code and was inconsistent
					// with SyncInvoke's use of codes.Internal.
					Code:    int32(codes.Internal),
					Message: evt.Err.Error(),
				},
			},
		}
	}
	switch evt.Type {
	case domain.MessageStreamEvent:
		// Incremental content chunk. (Field keyed explicitly: the original
		// used an unkeyed composite literal, which go vet rejects.)
		return &aiv1.StreamResponse{
			Content: &aiv1.StreamResponse_Delta{Delta: evt.Content},
		}
	case domain.EndStreamEvent:
		// Terminal event: report aggregate token usage for the stream.
		return &aiv1.StreamResponse{
			Content: &aiv1.StreamResponse_Usage{
				Usage: &aiv1.SyncResponse_Usage{
					PromptTokens:     evt.Usage.PromptTokens,
					CompletionTokens: evt.Usage.CompletionTokens,
					TotalTokens:      evt.Usage.TotalTokens,
				},
			},
		}
	}
	return &aiv1.StreamResponse{}
}

// getReq converts a protobuf SyncRequest into the domain request.
//
// BUG FIX: the original mixed direct field access (request.Messages,
// request.PromptKey, ...) with generated getters. Direct access panics on a
// nil request, and AsyncInvoke can pass a nil request here when the client
// omits AsyncRequest.Request (getReq(request.GetRequest())). Generated
// getters are nil-receiver-safe, so using them throughout makes this path
// return a zero-valued domain request instead of crashing the handler.
func (l *LLMServiceServer) getReq(request *aiv1.SyncRequest) domain.SyncRequest {
	return domain.SyncRequest{
		Messages: slice.Map(request.GetMessages(), func(idx int, src *aiv1.Message) domain.Msg {
			return domain.Msg{
				Role:    domain.RoleType(src.GetRole()),
				Content: src.GetContent(),
			}
		}),
		Model:     l.toDomainModelSpec(request.GetModel()),
		PromptKey: request.GetPromptKey(),
		Variables: request.GetVariables(),
		RequestId: request.GetRequestId(),
	}
}
