package v6

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	v4 "mindonmap/api/v4"
	"mindonmap/models"
	"mindonmap/pkgs/e"
	"mindonmap/pkgs/setting"
	"net/http"
	"strings"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/google/uuid"

	// "github.com/openai/openai-go"
	// "github.com/google/uuid"
	"github.com/sashabaranov/go-openai"
)

// DeepThoughtRequest is the JSON body accepted by DeepThoughtHandler.
// Question carries a base64-encoded JSON payload (see the Tips structure
// inside DeepThoughtHandler) describing the generation request.
type DeepThoughtRequest struct {
	Question string `json:"question" binding:"required"`
}

// CreateStatus is the JSON body accepted by AiCreateStatus, linking a
// finished generation task to the mind map created from it.
// NOTE(review): TaksId looks like a typo of TaskId; the JSON tag is already
// "task_id", but renaming the Go field would touch every reference — flagged
// here rather than fixed.
type CreateStatus struct {
	MindId string `json:"mind_id" binding:"required"`
	TaksId string `json:"task_id" binding:"required"`
}

// StreamResponse is a generic envelope for streamed responses.
// NOTE(review): not referenced anywhere in this file — confirm it is used
// elsewhere before removing.
type StreamResponse struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Data    string `json:"data"`
}

// MdMindmapAi is the GORM model recording one AI mind-map generation task.
type MdMindmapAi struct {
	ID         uint      `gorm:"primaryKey"`
	TaskId     string    `gorm:"uniqueIndex;not null"` // task ID (UUID issued per request)
	CreateTime time.Time `gorm:"not null"`             // time the task was created
	// Requester and the prompt that was submitted.
	UserId   int    `gorm:"not null"`
	Question string `gorm:"type:text"`
	// Token usage reported by the completion API.
	PromptTokens     int `gorm:"default:0"`
	CompletionTokens int `gorm:"default:0"`
	TotalTokens      int `gorm:"default:0"`
}
// DeepThoughtConfig holds tuning knobs for "deep thinking" mode.
// NOTE(review): not referenced anywhere in this file — confirm it is used
// elsewhere before removing.
type DeepThoughtConfig struct {
	EnableDeepThinking bool    // whether deep-thinking mode is enabled
	ThinkingDepth      int     // thinking depth level (1-10)
	Timeout            float64 // timeout in seconds
}
// StreamOptions describes optional per-stream parameters.
// NOTE(review): this type shadows openai.StreamOptions by name and is not
// referenced in this file (the handler uses the library's type directly) —
// confirm it is used elsewhere before removing.
type StreamOptions struct {
	IncludeUsage bool `json:"include_usage,omitempty"` // whether to report token usage
	MaxRetries   int  `json:"max_retries,omitempty"`   // maximum retry count
	Timeout      int  `json:"timeout,omitempty"`       // timeout in seconds
	// other optional parameters...
}

// DeepThoughtHandler streams an AI-generated mind map to the client over SSE.
//
// The request body carries a base64-encoded JSON payload ("tips"): the source
// text, an optional secondary tip, the target output language, a model
// selector and a root/sub-node flag. After authenticating the caller and
// checking entitlements, the handler optionally runs the input through the
// moderation endpoint, then proxies the chat-completion stream back as
// "data: ..." SSE chunks. Token usage — delivered in the final stream chunk
// because StreamOptions.IncludeUsage is set — is persisted to the AI log and
// deducted from the user's credit before the stream terminates.
func DeepThoughtHandler(c *gin.Context) {
	var req DeepThoughtRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"code": 400, "msg": "参数错误"})
		return
	}

	// Question is base64(JSON); decode and parse it into the tips structure.
	decodedBytes, err := base64.StdEncoding.DecodeString(req.Question)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"code": 400, "msg": "参数错误"})
		return
	}
	type Tips struct {
		Isroot int    `json:"isroot"` // 0/1: root node, 2: sub node
		Tip1   string `json:"tip1"`   // primary text (the long text to summarize)
		Tip2   string `json:"tip2"`   // secondary text for sub-node generation
		Lang   string `json:"lang"`   // target output language
		Model  int    `json:"model"`  // 1-based index into the configured model list
	}
	var tips Tips
	if err := json.Unmarshal(decodedBytes, &tips); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"code": 400, "msg": "解析失败"})
		return
	}

	apiKey := setting.ApiKey
	apiUrl := setting.ApiUrl
	apiType := setting.ApiType
	// TODO(review): model is hard-coded; selection via setting.ApiModel /
	// setting.ApiModelList was disabled — confirm which is intended.
	apiModel := "deepseek-r1-250120"

	// Build the prompt up-front so the AI log below records the real question.
	// The format's first %s is the output language and the second is the long
	// text, so the arguments are (tips.Lang, tips.Tip1); they were previously
	// passed in the opposite order.
	queryStr := fmt.Sprintf("请先总结以下长文本的核心内容、主要观点及关键信息，再基于总结生成思维导图。思维导图需符合：\n\t内容紧扣长文本核心，层次清晰，涵盖主要逻辑和关键细节\n\t仅使用 Markdown 的标题（# 层级）和无序列表（- 条目）两种语法，支持多层嵌套（体现内容间的包含 / 从属关系）\n\t回答只需返回思维导图内容，无需额外说明\n\n\t结果直接以 %s 国家语言输出；\n\t长文本：%s", tips.Lang, tips.Tip1)

	userStatus := 0 // 1 when the caller is a paying member
	if IsVip(c) {
		userStatus = 1
	}

	Userid, err := v4.GetUserID(c)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"code": e.ERROR_AUTH_CHECK_TOKEN_TIMEOUT,
			"msg":  e.GetMsg(e.ERROR_AUTH_CHECK_TOKEN_TIMEOUT),
			"data": make(map[string]interface{}),
		})
		return
	}

	// Entitlement check: the user must still have generation rights.
	success, code := Getinterests(c, Userid)
	if !success {
		c.JSON(http.StatusOK, gin.H{
			"code": code,
			"msg":  e.GetMsg(code),
			"data": make(map[string]interface{}),
		})
		return
	}

	if apiKey == "" {
		c.JSON(http.StatusInternalServerError, gin.H{"code": 500, "msg": "API密钥未设置"})
		return
	}

	startTime := time.Now().Unix()

	// Create the AI log row under a fresh task ID; later stages update the
	// same row via UpdateAiLog(taskId).
	taskId := uuid.New().String()
	models.CreateAiLog(map[string]interface{}{
		"taskId":   taskId,
		"user_id":  Userid,
		"question": queryStr,
	})

	config := openai.DefaultConfig(apiKey)
	config.BaseURL = apiUrl
	client := openai.NewClientWithConfig(config)

	// apiType "2": screen the input with the moderation endpoint first.
	if apiType == "2" {
		request := openai.ModerationRequest{
			Model: "omni-moderation-latest",
			Input: tips.Tip1 + tips.Tip2,
		}
		response, err := client.Moderations(context.Background(), request)
		if err != nil {
			// Moderation failure is logged but fails open: generation proceeds.
			// (Previously the literal "%v" was stored unformatted.)
			models.UpdateAiLog(map[string]interface{}{
				"status": 2,
				"error":  fmt.Sprintf("调用Moderation API失败: %v", err),
			}, taskId)
			fmt.Printf("调用Moderation API失败: %v", err)
		} else if len(response.Results) > 0 && response.Results[0].Flagged {
			// Flagged input: mark the log row and reject the request.
			models.UpdateAiLog(map[string]interface{}{"status": 2}, taskId)
			c.JSON(http.StatusOK, gin.H{
				"code": 502,
				"msg":  "敏感词违规",
				"data": "",
			})
			return
		}
	}

	reqs := openai.ChatCompletionRequest{
		Model: apiModel,
		Messages: []openai.ChatCompletionMessage{
			{Role: "user", Content: queryStr},
		},
		MaxTokens:   8000,
		Stream:      true,
		Temperature: 0.5, // lower creativity for faster, more focused output
		// IncludeUsage must be true so the final stream chunk reports token usage.
		StreamOptions: &openai.StreamOptions{
			IncludeUsage: true,
		},
	}

	stream, err := client.CreateChatCompletionStream(context.Background(), reqs)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"code": 400, "msg": "创建流错误"})
		return
	}
	defer stream.Close()

	// SSE response headers; the task ID is surfaced so the client can later
	// confirm creation via AiCreateStatus.
	c.Writer.Header().Set("Content-Type", "text/event-stream")
	c.Writer.Header().Set("Cache-Control", "no-cache")
	c.Writer.Header().Set("Connection", "keep-alive")
	c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
	c.Writer.Header().Set("Access-Control-Expose-Headers", "X-Token, Authorization")
	c.Writer.Header().Set("Authorization", taskId)
	c.Writer.Header().Set("X-Task-ID", taskId)
	c.Status(http.StatusOK)

	for {
		recv, err := stream.Recv()

		// The usage chunk (empty Choices) arrives just before EOF, so it is
		// handled ahead of the error check; on EOF/error recv.Usage is nil.
		if recv.Usage != nil {
			userLog := models.GetUserLog(Userid)
			score := userLog.AllCreadit
			// Credits are charged at 1 per 10 tokens, rounded up.
			cost := (recv.Usage.TotalTokens + 9) / 10
			afterCredit := score - cost
			if afterCredit < 0 {
				afterCredit = 0
			}
			models.UpdateAiLog(map[string]interface{}{
				"total_tokens":      cost,
				"completion_tokens": (recv.Usage.CompletionTokens + 9) / 10,
				"prompt_tokens":     (recv.Usage.PromptTokens + 9) / 10,
				// NOTE(review): "GTP4.0" looks like a typo of "GPT4.0" and
				// predates the model switch — kept for log-data consistency.
				"model_type":   "GTP4.0",
				"end_time":     time.Now().Unix(),
				"lang_time":    time.Now().Unix() - startTime,
				"front_credit": score,
				"after_credit": afterCredit,
				"status":       1,
				"user_status":  userStatus,
				"lang":         tips.Lang,
			}, taskId)
			ConsumptionRights(c, taskId, cost, Userid)
			models.DeleteCreadit(Userid, score-cost)
		}

		if err == io.EOF {
			fmt.Fprint(c.Writer, "data: [DONE]\n\n") // SSE end-of-stream marker
			c.Writer.(http.Flusher).Flush()
			return
		}
		if err != nil {
			fmt.Fprintf(c.Writer, "data: {\"error\":\"%s\"}\n\n", err.Error())
			c.Writer.(http.Flusher).Flush()
			return
		}

		if len(recv.Choices) == 0 {
			continue // e.g. the usage-only chunk
		}
		choice := recv.Choices[0]

		// Forward chunks that carry either reasoning or answer text.
		// (Previously the reasoning text was accumulated and then overwritten,
		// so reasoning-only chunks were silently dropped.)
		var sb strings.Builder
		if choice.Delta.ReasoningContent != "" {
			sb.WriteString(choice.Delta.ReasoningContent)
		}
		if choice.Delta.Content != "" {
			sb.WriteString(choice.Delta.Content)
		}

		if sb.Len() > 0 {
			jsonData, _ := json.Marshal(map[string]interface{}{
				"choices": recv.Choices,
			})
			fmt.Fprintf(c.Writer, "data: %s\n\n", jsonData)
			c.Writer.(http.Flusher).Flush() // push the chunk to the client immediately
		}
	}
}
// AiCreateStatus confirms that a generated mind map was saved: it looks up the
// AI log row for the given task, marks it as attached to the given mind map,
// and returns the recorded token cost and language to the client.
func AiCreateStatus(c *gin.Context) {
	var req CreateStatus
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"code": 400, "msg": "参数错误"})
		return
	}

	Userid, err := v4.GetUserID(c)
	if err != nil {
		// Token invalid or expired.
		c.JSON(http.StatusOK, gin.H{
			"code": e.ERROR_AUTH_CHECK_TOKEN_TIMEOUT,
			"msg":  e.GetMsg(e.ERROR_AUTH_CHECK_TOKEN_TIMEOUT),
			"data": make(map[string]interface{}),
		})
		return
	}

	aiLog := models.ShowAiLog(req.TaksId)
	payload := map[string]interface{}{
		"token": aiLog.TotalTokens,
		"lang":  aiLog.Lang,
	}

	// Attach the task to the mind map; success determines the response code.
	if models.UpdateAiStatus(Userid, req.TaksId, req.MindId, aiLog.Lang) {
		c.JSON(http.StatusOK, gin.H{
			"code": e.SUCCESS,
			"msg":  e.GetMsg(e.SUCCESS),
			"data": payload,
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"code": e.ERROR,
		"msg":  e.GetMsg(e.ERROR),
	})
}
