package chatimpl

import (
	"chatplus/core/types"
	"chatplus/store/model"
	"chatplus/store/vo"
	"chatplus/utils"
	"context"
	"crypto/tls"
	"fmt"
	"github.com/google/generative-ai-go/genai"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
	"html/template"
	"net/http"
	"net/url"
	"strings"
	"time"
	"unicode/utf8"
)

// 整合APIKey和代理的 Transport
type APIKeyProxyTransport struct {
	// APIKey is the API Key to set on requests.
	APIKey string

	// Transport is the underlying HTTP transport.
	// If nil, http.DefaultTransport is used.
	Transport http.RoundTripper

	// ProxyURL is the URL of the proxy server. If empty, no proxy is used.
	ProxyURL string
}

func (t *APIKeyProxyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	rt := t.Transport
	if rt == nil {
		rt = http.DefaultTransport
	}

	// 如果提供了 ProxyURL，则对 Transport 设置代理
	if t.ProxyURL != "" {
		proxyURL, err := url.Parse(t.ProxyURL)
		if err != nil {
			return nil, err
		}
		if transport, ok := rt.(*http.Transport); ok {
			// 只有当 rt 为 *http.Transport 类型时，才设置代理
			transport.Proxy = http.ProxyURL(proxyURL)
			transport.TLSClientConfig = &tls.Config{
				InsecureSkipVerify: true,
			}
		} else {
			// 如果 rt 不是 *http.Transport 类型，则创建一个新的带代理的 http.Transport
			rt = &http.Transport{
				Proxy: http.ProxyURL(proxyURL),
				TLSClientConfig: &tls.Config{
					InsecureSkipVerify: true,
				},
			}
		}
	}

	// 克隆请求以避免修改原始请求
	newReq := *req
	args := newReq.URL.Query()
	args.Set("key", t.APIKey)
	newReq.URL.RawQuery = args.Encode()

	// 执行 HTTP 请求，并处理可能的错误
	resp, err := rt.RoundTrip(&newReq)
	if err != nil {
		// 返回网络请求中的错误
		return nil, fmt.Errorf("error during round trip: %v", err)
	}

	return resp, nil
}

// sendGeminiMessage streams a chat completion from Google Gemini and relays
// each chunk to the client over the websocket connection.
//
// Flow: pick the least-recently-used enabled API key for the session's
// platform, build an HTTP client (through the configured proxy when set),
// stream the model response, then persist the prompt/reply history messages,
// deduct the user's power, and create the chat item on first use.
//
// Business-level failures (no API key, client creation error, user cancel)
// are reported to the user via ws and return nil; only unexpected stream
// errors are returned to the caller.
func (h *ChatHandler) sendGeminiMessage(
	chatCtx []types.Message,
	req types.ApiRequest,
	userVo vo.User,
	ctx context.Context,
	session *types.ChatSession,
	role model.ChatRole,
	prompt string,
	ws *types.WsClient) error {
	promptCreatedAt := time.Now() // time the question was asked
	start := time.Now()
	var apiKey = model.ApiKey{}
	res := h.DB.Where("platform = ?", session.Model.Platform).Where("type = ?", "chat").Where("enabled = ?", true).Order("last_used_at ASC").First(&apiKey)
	if res.Error != nil {
		utils.ReplyMessage(ws, "抱歉😔😔😔，系统已经没有可用的 API KEY，请联系管理员！")
		return nil
	}
	// Touch the key so the LRU ordering above rotates keys round-robin.
	h.DB.Model(&apiKey).UpdateColumn("last_used_at", time.Now().Unix())

	// https://github.com/google/generative-ai-go/issues/17
	var c *http.Client
	if h.App.Config.ProxyURL != "" { // route through the configured proxy
		c = &http.Client{Transport: &APIKeyProxyTransport{
			APIKey:   apiKey.Value,
			ProxyURL: h.App.Config.ProxyURL,
		}}
	} else {
		c = http.DefaultClient
	}
	client, err := genai.NewClient(ctx, option.WithHTTPClient(c), option.WithAPIKey(apiKey.Value))
	if err != nil {
		utils.ReplyMessage(ws, "创建请求gemini 的 http client 失败！")
		return nil
	}
	// Close the client when done; the original leaked its gRPC/HTTP resources.
	defer client.Close()

	geminiModel := client.GenerativeModel(req.Model)
	geminiModel.SetTemperature(0.9)
	geminiModel.SetTopP(0.2)
	geminiModel.SetTopK(1)
	geminiModel.SetMaxOutputTokens(4000)
	geminiModel.SetCandidateCount(1)

	// Disable all safety blocking so replies are not silently filtered.
	// https://github.com/google/generative-ai-go/issues/35
	geminiModel.SafetySettings = []*genai.SafetySetting{
		{
			Category:  genai.HarmCategoryDangerousContent,
			Threshold: genai.HarmBlockNone,
		},
		{
			Category:  genai.HarmCategorySexuallyExplicit,
			Threshold: genai.HarmBlockNone,
		},
		{
			Category:  genai.HarmCategoryHateSpeech,
			Threshold: genai.HarmBlockNone,
		},
		{
			Category:  genai.HarmCategoryHarassment,
			Threshold: genai.HarmBlockNone,
		},
	}
	chatSession := geminiModel.StartChat()
	iter := chatSession.SendMessageStream(ctx, genai.Text(prompt))
	logger.Info("gemini client 请求完成，耗时：", time.Since(start))
	var message = types.Message{}
	var contents = make([]string, 0)
	var content string
	replyCreatedAt := time.Now() // time the reply started
	for {
		nextResp, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			if strings.Contains(err.Error(), "context canceled") {
				logger.Info("用户取消了请求：", prompt)
				return nil
			} else if strings.Contains(err.Error(), "no available key") {
				utils.ReplyMessage(ws, "抱歉😔😔😔，系统已经没有可用的 API KEY，请联系管理员！")
				return nil
			} else {
				logger.Error(err)
			}

			utils.ReplyMessage(ws, ErrorMsg)
			utils.ReplyMessage(ws, ErrImg)
			return err
		}
		// Signal stream start before the first chunk is sent.
		if len(content) == 0 {
			utils.ReplyChunkMessage(ws, types.WsMessage{Type: types.WsStart})
		}

		// Concatenate every part of every candidate; the original kept only
		// the last part, dropping text from multi-part chunks.
		var chunk strings.Builder
		for _, cand := range nextResp.Candidates {
			if cand.Content != nil {
				for _, part := range cand.Content.Parts {
					chunk.WriteString(fmt.Sprint(part))
				}
			}
		}
		content = chunk.String()
		contents = append(contents, content)
		utils.ReplyChunkMessage(ws, types.WsMessage{
			Type:    types.WsMiddle,
			Content: utils.InterfaceToString(content),
		})
	}
	if len(contents) > 0 {
		// message is freshly zero-valued here, so the role is always unset.
		message.Role = "assistant"
		message.Content = strings.Join(contents, "")
		useMsg := types.Message{Role: "user", Content: prompt}

		// Update the conversation context (skipped for function calls).
		if h.App.SysConfig.EnableContext {
			chatCtx = append(chatCtx, useMsg)  // the question
			chatCtx = append(chatCtx, message) // the reply
			h.App.ChatContexts.Put(session.ChatId, chatCtx)
		}

		// Persist the prompt history record.
		promptToken, err := utils.CalcTokens(prompt, req.Model)
		if err != nil {
			logger.Error(err)
		}
		historyUserMsg := model.ChatMessage{
			UserId:     userVo.Id,
			ChatId:     session.ChatId,
			RoleId:     role.Id,
			Type:       types.PromptMsg,
			Icon:       userVo.Avatar,
			Content:    template.HTMLEscapeString(prompt),
			Tokens:     promptToken,
			UseContext: true,
			Model:      req.Model,
		}
		historyUserMsg.CreatedAt = promptCreatedAt
		historyUserMsg.UpdatedAt = promptCreatedAt
		// Create (not Save) for consistency with the reply record below;
		// both rows are always new.
		res := h.DB.Create(&historyUserMsg)
		if res.Error != nil {
			logger.Error("failed to save prompt history message: ", res.Error)
		}

		// Total tokens consumed by this exchange.
		replyTokens, _ := utils.CalcTokens(message.Content, req.Model)
		replyTokens += getTotalTokens(req)

		historyReplyMsg := model.ChatMessage{
			UserId:     userVo.Id,
			ChatId:     session.ChatId,
			RoleId:     role.Id,
			Type:       types.ReplyMsg,
			Icon:       role.Icon,
			Content:    message.Content,
			Tokens:     replyTokens,
			UseContext: true,
			Model:      req.Model,
		}
		historyReplyMsg.CreatedAt = replyCreatedAt
		historyReplyMsg.UpdatedAt = replyCreatedAt
		res = h.DB.Create(&historyReplyMsg)
		if res.Error != nil {
			logger.Error("failed to save reply history message: ", res.Error)
		}

		// Deduct the user's power for this exchange.
		h.subUserPower(userVo, session, promptToken, replyTokens)

		// Create the chat item on first message of this chat.
		var chatItem model.ChatItem
		res = h.DB.Where("chat_id = ?", session.ChatId).First(&chatItem)
		if res.Error != nil {
			chatItem.ChatId = session.ChatId
			chatItem.UserId = session.UserId
			chatItem.RoleId = role.Id
			chatItem.ModelId = session.Model.Id
			// Truncate the title by runes, not bytes, so CJK text is safe.
			if utf8.RuneCountInString(prompt) > 30 {
				chatItem.Title = string([]rune(prompt)[:30]) + "..."
			} else {
				chatItem.Title = prompt
			}
			chatItem.Model = req.Model
			h.DB.Create(&chatItem)
		}
	}
	return nil
}
