package chat

import (
	"bufio"
	"encoding/json"
	"fmt"
	chatModel "gxcvu-ai/app/model/chat"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/gin-gonic/gin"
)

const (
	// api_key is the bearer token for the DashScope API.
	// NOTE(review): hard-coded secret committed to source — move this to an
	// environment variable or config store and rotate the key. Go naming
	// convention would also prefer MixedCaps (apiKey); left unchanged here
	// because the name is referenced elsewhere in the file.
	api_key = "sk-zA7xbusaoI"
)

// Tongyi is a stateless chat provider backed by Alibaba's Tongyi
// (DashScope) text-generation service; see NewChat.
type Tongyi struct {
}

// Input wraps the conversation history sent in a DashScope request body.
type Input struct {
	Messages []Message `json:"messages"`
}

// Message is one chat turn; Role is "system", "user" or "assistant"
// (see formatChatMessage).
type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}
// Parameter holds optional generation settings for the DashScope API.
// None of these are set by the visible callers, so the service defaults apply.
type Parameter struct {
	ResultFormat string  `json:"result_format,omitempty"` // response format: text/message
	Seed         int64   `json:"seed,omitempty"`          // random seed controlling generation randomness; unsigned 64-bit range, default 1234
	MaxTokens    int64   `json:"max_tokens,omitempty"`    // upper bound on generated tokens (a limit, not a target); qwen-turbo max/default 1500; qwen-max, qwen-max-1201, qwen-max-longcontext and qwen-plus max/default 2000
	TopP         float64 `json:"top_p,omitempty"`
	Temperature  float64 `json:"temperature,omitempty"`
	EnableSearch bool    `json:"enable_search,omitempty"` // whether to enable web search
}

// TongyiRequest is the request body for the DashScope text-generation
// endpoint.
//
// NOTE(review): the DashScope API appears to document this field as
// "parameters" (plural) — confirm the json tag against the current API
// reference. Also, `omitempty` has no effect on a non-pointer struct field,
// so an empty Parameter is still serialized.
type TongyiRequest struct {
	Model     string    `json:"model"`
	Input     Input     `json:"input"`
	Parameter Parameter `json:"parameter,omitempty"`
}

// Output carries the generated text and the reason generation stopped;
// FinishReason is "stop" once the stream is complete (checked in NewChat).
type Output struct {
	FinishReason string `json:"finish_reason,omitempty"`
	Text         string `json:"text,omitempty"`
}

// Usage reports token consumption for a request.
type Usage struct {
	OutputToken int `json:"output_tokens,omitempty"`
	InputToken  int `json:"input_tokens,omitempty"`
}

// TongyiReqponse is the response body of the DashScope generation API.
// NOTE(review): the name is a typo for "TongyiResponse"; it is exported, so
// renaming could break callers outside this file — left as-is.
type TongyiReqponse struct {
	Output    Output `json:"output,omitempty"`
	Usage     Usage  `json:"usage,omitempty"`
	RequestId string `json:"request_id,omitempty"`
}

// formatChatMessage converts stored chat history into the DashScope message
// format, prepending a fixed system prompt. Any role other than "user" is
// normalized to "assistant".
//
// Fixes: the original contained a no-op self-assignment (v.Role = "user"
// when v.Role was already "user") and mutated the range-loop copy; the
// result slice is now pre-sized to avoid repeated growth.
func formatChatMessage(data []chatModel.Message) []Message {
	chatList := make([]Message, 0, len(data)+1)
	chatList = append(chatList, Message{
		Role:    "system",
		Content: "You are a helpful assistant.",
	})
	for _, v := range data {
		role := "assistant"
		if v.Role == "user" {
			role = "user"
		}
		chatList = append(chatList, Message{
			Role:    role,
			Content: v.Content,
		})
	}
	return chatList
}

// NewChat sends the chat history to the DashScope text-generation endpoint
// with server-sent events enabled and relays each streamed chunk to the
// client as an SSE "result" event via gin.
//
// Errors before streaming starts abort the request with a JSON error body;
// malformed stream chunks are logged and skipped.
//
// Fixes vs. the original: marshal/request-construction errors are no longer
// discarded; the duplicate defer resp.Body.Close() is removed; non-2xx
// upstream responses are reported instead of being parsed as a stream;
// non-"data:" SSE lines (event:, id:, blank keep-alives) are skipped instead
// of producing spurious unmarshal errors; and a non-EOF read error now
// terminates the loop instead of spinning forever.
func (e Tongyi) NewChat(data []chatModel.Message, c *gin.Context) {
	url := "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"

	payload := TongyiRequest{
		Model: "qwen-max",
		Input: Input{
			Messages: formatChatMessage(data),
		},
	}

	postForm, err := json.Marshal(payload)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(string(postForm)))
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	req.Header.Add("Authorization", "Bearer "+api_key)
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("X-DashScope-SSE", "enable")

	// NOTE: http.DefaultClient has no timeout; a client-wide Timeout would
	// also cut off long-lived SSE streams, so cancellation should come from
	// the request context if needed.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Best effort: surface the upstream error text to the caller.
		body, _ := io.ReadAll(resp.Body)
		c.AbortWithStatusJSON(resp.StatusCode, gin.H{"error": string(body)})
		return
	}

	// Forwarding SSE requires a flushable response writer.
	if _, ok := c.Writer.(http.Flusher); !ok {
		http.Error(c.Writer, "Streaming unsupported!", http.StatusInternalServerError)
		return
	}

	reader := bufio.NewReader(resp.Body)
	for {
		line, readErr := reader.ReadString('\n')

		// SSE frames carry the JSON payload on "data:" lines; other fields
		// and blank keep-alive lines are ignored.
		trimmed := strings.TrimSpace(line)
		if strings.HasPrefix(trimmed, "data:") {
			chunk := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:"))
			var jsonData TongyiReqponse
			if err := json.Unmarshal([]byte(chunk), &jsonData); err != nil {
				fmt.Println("Error unmarshaling JSON:", err)
			} else {
				chatResp := ChatResponse{
					IsEnd:   jsonData.Output.FinishReason == "stop",
					Result:  jsonData.Output.Text,
					Id:      jsonData.RequestId,
					Object:  "assistant",
					Created: int(time.Now().Unix()),
					Model:   "tongyi",
				}
				c.SSEvent("result", chatResp)
				c.Writer.Flush()
			}
		}

		// EOF or any other read error ends the stream.
		if readErr != nil {
			break
		}
	}
}
