package ai

import (
	"app/pkg/log"
	"bufio"
	"bytes"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/go-resty/resty/v2"
	"go.uber.org/zap"
	"golang.org/x/net/http2"
)

// OpenaiTransport is the shared HTTP/2 transport used by every client built in this package.
// NOTE(review): InsecureSkipVerify disables TLS certificate verification, which permits
// man-in-the-middle interception — confirm this is intentional (e.g. trusted internal proxy).
var OpenaiTransport = &http2.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}

// Openai holds the connection settings shared by the Azure (OpenaiAuzre) and
// official (OpenaiOfficial) client implementations below.
type Openai struct {
	apikey  string // sent as "api-key" (Azure) or "Authorization: Bearer" (official)
	baseUrl string // service base URL, without the /chat/completions suffix
	model   string // model name (official) or deployment name (Azure)
}

// --------------------------------------------------------------------------------------------------------------------

// StreamResponse reads an OpenAI SSE ("data: ...") stream from body, forwards
// each content delta to the gin response writer as it arrives, and returns the
// complete concatenated reply text. The caller owns body and must close it.
func (ai Openai) StreamResponse(ctx *gin.Context, body io.ReadCloser) string {
	w := ctx.Writer
	var builder strings.Builder
	// Standard server-sent-events response headers.
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	w.WriteHeader(http.StatusOK)

	var reader = bufio.NewReader(body)
	var headerData = []byte("data: ")
	// The ticker paces output to at most one line per 25ms, producing a
	// typing effect on the client side.
	var ticker = time.NewTicker(25 * time.Millisecond)
	defer ticker.Stop()
loop:
	for {
		select {
		case <-ctx.Request.Context().Done():
			// Client disconnected; stop forwarding.
			break loop
		case <-ticker.C:
			rawLine, readErr := reader.ReadBytes('\n')
			if readErr != nil {
				// FIX: the original `continue`d on every read error, so an
				// upstream EOF (stream closed without a "[DONE]" sentinel)
				// spun on the ticker forever until the client disconnected.
				// Errors from the underlying body are terminal here.
				break loop
			}
			noSpaceLine := bytes.TrimSpace(rawLine)
			// Skip SSE keep-alive blank lines and non-"data:" fields.
			if !bytes.HasPrefix(noSpaceLine, headerData) {
				continue
			}
			noPrefixLine := bytes.TrimPrefix(noSpaceLine, headerData)
			// End-of-stream sentinel emitted by the OpenAI API.
			if string(noPrefixLine) == "[DONE]" {
				break loop
			}
			var occsr OpenaiChatCompletionStreamResponse
			// Malformed JSON or empty choices: skip this chunk.
			if err := json.Unmarshal(noPrefixLine, &occsr); err != nil || len(occsr.Choices) == 0 {
				continue
			}
			var content = occsr.Choices[0].Delta.Content
			if content != "" {
				_, _ = w.WriteString(content)
				w.(http.Flusher).Flush()
				builder.WriteString(content)
			}
		}
	}
	return builder.String()
}

// buildMessage assembles the outgoing conversation, bounding both the message
// count (ml) and the token budget (tl).
// TODO: use tiktoken to trim messages and pick a suitable model for the chat.
func (ai Openai) buildMessage(model string, messages []*ChatMessage, ml int, tl int) (m []ConversationMessage, t int) {
	// Stub: yields a nil slice and a zero token count until implemented.
	return nil, 0
}

// Client builds a fresh resty HTTP client configured for streaming requests:
// raw (unparsed) response bodies, a 60s overall timeout, the shared HTTP/2
// transport, and a single retry on timeouts or HTTP 429.
func (ai Openai) Client() *resty.Client {
	c := resty.New()
	c.SetTimeout(60 * time.Second)
	c.SetLogger(zap.NewNop().Sugar()) // silence resty's own logging
	c.SetDoNotParseResponse(true)     // callers consume RawBody (SSE stream) themselves
	c.SetTransport(OpenaiTransport)
	c.SetRetryCount(1)
	c.SetRetryWaitTime(3 * time.Second)
	c.AddRetryCondition(func(resp *resty.Response, respErr error) bool {
		// Retry once on transport timeouts or rate limiting.
		if respErr != nil {
			return strings.Contains(strings.ToLower(respErr.Error()), "timeout")
		}
		return resp.StatusCode() == http.StatusTooManyRequests
	})
	return c
}

// --------------------------------------------------------------------------------------------------------------------

// ConversationMessage is a single chat message in the OpenAI wire format.
type ConversationMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
	Name    string `json:"-"` // internal only; never serialized to the API
}

// OpenaiChatCompletionRequest represents a request structure for the chat completion API.
type OpenaiChatCompletionRequest struct {
	Model            string                `json:"model"`
	Messages         []ConversationMessage `json:"messages"`
	MaxTokens        int                   `json:"max_tokens,omitempty"`
	Temperature      float32               `json:"temperature,omitempty"`
	TopP             float32               `json:"top_p,omitempty"`
	N                int                   `json:"n,omitempty"`
	Stream           bool                  `json:"stream,omitempty"`
	Stop             []string              `json:"stop,omitempty"`
	PresencePenalty  float32               `json:"presence_penalty,omitempty"`
	FrequencyPenalty float32               `json:"frequency_penalty,omitempty"`
	LogitBias        map[string]int        `json:"logit_bias,omitempty"`
	User             string                `json:"user,omitempty"`
}

// OpenaiChatCompletionStreamResponse is one decoded SSE chunk of a streaming
// chat completion response.
type OpenaiChatCompletionStreamResponse struct {
	ID      string                             `json:"id"`
	Object  string                             `json:"object"`
	Created int64                              `json:"created"`
	Model   string                             `json:"model"`
	Choices []OpenaiChatCompletionStreamChoice `json:"choices"`
}

// OpenaiChatCompletionStreamChoice is a single choice within a streaming chunk.
type OpenaiChatCompletionStreamChoice struct {
	Index        int                                   `json:"index"`
	Delta        OpenaiChatCompletionStreamChoiceDelta `json:"delta"`
	FinishReason string                                `json:"finish_reason"`
}
// OpenaiChatCompletionStreamChoiceDelta carries the incremental content of a
// streamed choice; StreamResponse appends Delta.Content to the reply.
type OpenaiChatCompletionStreamChoiceDelta struct {
	Content string `json:"content,omitempty"`
	Role    string `json:"role,omitempty"`
}

// OpenaiAPIError mirrors the error envelope returned by the OpenAI API.
type OpenaiAPIError struct {
	Error struct {
		Code           any     `json:"code,omitempty"`
		Message        string  `json:"message"`
		Param          *string `json:"param,omitempty"`
		Type           string  `json:"type"`
		HTTPStatusCode int     `json:"-"` // populated by the caller, not the wire
	} `json:"error,omitempty"`
}

// --------------------------------------------------------------------------------------------------------------------

// OpenaiAuzre is the Azure OpenAI client variant (authenticates via the
// "api-key" header against a deployment URL).
// NOTE(review): the name misspells "Azure"; renaming would break callers, so
// it is kept as-is.
type OpenaiAuzre struct {
	Openai
}

// NewOpenaiAuzre builds an Azure OpenAI client for the given endpoint URL,
// deployment model, and API key.
// FIX: the original used positional initialization Openai{url, model, apikey}
// against fields declared as {apikey, baseUrl, model}, which assigned the URL
// as the API key, the model as the base URL, and the key as the model.
func NewOpenaiAuzre(url, model, apikey string) *OpenaiAuzre {
	return &OpenaiAuzre{Openai{apikey: apikey, baseUrl: url, model: model}}
}

// Chat sends a streaming chat-completion request to the Azure OpenAI
// deployment and proxies the SSE token stream back to the gin client.
// It returns the accumulated assistant reply, or a generic error when the
// transport fails or the API responds with a non-success status.
func (ai OpenaiAuzre) Chat(ctx *gin.Context, msg []*ChatMessage) (string, error) {
	// Total context budget; whatever the prompt does not use goes to max_tokens.
	var max = 4096
	message, l := ai.buildMessage(ai.model, msg, 10, 3500)
	var (
		err      error
		response *resty.Response
		body     = OpenaiChatCompletionRequest{
			MaxTokens: max - l,
			Model:     ai.model,
			Messages:  message,
			Stream:    true,
		}
	)
	// Build the request. FIX: only one Accept header — the original set it
	// twice and the second value ("text/event-stream") overwrote the first;
	// the stream response is server-sent events, not a plain JSON document.
	request := ai.Client().R().
		SetHeader("Content-Type", "application/json; charset=utf-8").
		SetHeader("Accept", "text/event-stream").
		SetHeader("Cache-Control", "no-cache").
		SetHeader("api-key", ai.apikey).
		SetBody(body)

	var enableTrace bool
	//enableTrace = true
	//https://infinigoopenai.openai.azure.com/openai/deployments/chatgpt35-0301/chat/completions?api-version=2023-05-15"
	url := fmt.Sprintf("%s/openai/deployments/%s%s?api-version=%s", ai.baseUrl, ai.model, "/chat/completions", "2023-05-15")
	if enableTrace {
		response, err = request.EnableTrace().Post(url)
	} else {
		response, err = request.Post(url)
	}
	if err != nil {
		// FIX: log the transport error before masking it with a generic message.
		log.Logger("app").Errorf(ctx, "请求接口错误: %v", err)
		return "", errors.New("服务异常")
	}
	// We must close the raw body ourselves: Client() sets SetDoNotParseResponse(true).
	var status = response.StatusCode()
	rawBody := response.RawBody()
	defer rawBody.Close()
	// Non-success status: read the error payload for the log, then fail.
	if status < 200 || status >= 400 {
		respBody, _ := io.ReadAll(rawBody)
		log.Logger("app").Errorf(ctx, "请求接口[%d]，错误信息: %s", status, respBody) // log the API error detail
		return "", errors.New("服务异常")
	}
	return ai.StreamResponse(ctx, rawBody), nil
}

// --------------------------------------------------------------------------------------------------------------------

// OpenaiOfficial is the client variant for the official OpenAI API
// (authenticates via "Authorization: Bearer <apikey>").
type OpenaiOfficial struct {
	Openai
}

// NewOpenaiOfficial builds a client for the official OpenAI API with the
// given base URL, model, and API key.
// FIX: the original used positional initialization Openai{url, model, apikey}
// against fields declared as {apikey, baseUrl, model}, which assigned the URL
// as the API key, the model as the base URL, and the key as the model.
func NewOpenaiOfficial(url, model, apikey string) *OpenaiOfficial {
	return &OpenaiOfficial{Openai{apikey: apikey, baseUrl: url, model: model}}
}

// Chat sends a streaming chat-completion request to the official OpenAI API
// and proxies the SSE token stream back to the gin client.
// It returns the accumulated assistant reply, or a generic error when the
// transport fails or the API responds with a non-success status.
func (ai OpenaiOfficial) Chat(ctx *gin.Context, msg []*ChatMessage) (string, error) {
	// Total context budget; whatever the prompt does not use goes to max_tokens.
	var max = 4096
	message, l := ai.buildMessage(ai.model, msg, 10, 3500)
	var (
		err      error
		response *resty.Response
		body     = OpenaiChatCompletionRequest{
			MaxTokens: max - l,
			Model:     ai.model,
			Messages:  message,
			Stream:    true,
		}
	)
	// Build the request. FIX: only one Accept header — the original set it
	// twice and the second value ("text/event-stream") overwrote the first;
	// the stream response is server-sent events, not a plain JSON document.
	request := ai.Client().R().
		SetHeader("Content-Type", "application/json; charset=utf-8").
		SetHeader("Accept", "text/event-stream").
		SetHeader("Cache-Control", "no-cache").
		SetHeader("Authorization", fmt.Sprintf("Bearer %s", ai.apikey)).
		SetBody(body)

	var enableTrace bool
	//enableTrace = true
	url := fmt.Sprintf("%s%s", ai.baseUrl, "/chat/completions")
	if enableTrace {
		response, err = request.EnableTrace().Post(url)
	} else {
		response, err = request.Post(url)
	}
	if err != nil {
		// FIX: log the transport error before masking it with a generic message.
		log.Logger("app").Errorf(ctx, "请求接口错误: %v", err)
		return "", errors.New("服务异常")
	}
	// We must close the raw body ourselves: Client() sets SetDoNotParseResponse(true).
	var status = response.StatusCode()
	rawBody := response.RawBody()
	defer rawBody.Close()
	// Non-success status: read the error payload for the log, then fail.
	if status < 200 || status >= 400 {
		respBody, _ := io.ReadAll(rawBody)
		log.Logger("app").Errorf(ctx, "请求接口[%d]，错误信息: %s", status, respBody) // log the API error detail
		return "", errors.New("服务异常")
	}
	return ai.StreamResponse(ctx, rawBody), nil
}
