package com.neshai.webapp.controller;

import com.alibaba.fastjson.JSON;
import com.neshai.webapp.enmus.MessageType;
import com.neshai.webapp.exception.CommonException;
import com.neshai.webapp.service.UsageService;
import com.neshai.webapp.service.UserChatService;
import com.neshai.webapp.service.dto.Message;
import com.neshai.webapp.utils.R;
import com.neshai.webapp.utils.api.OpenAiWebClient;
import jakarta.servlet.http.HttpServletRequest;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;

import org.springframework.http.MediaType;
import org.springframework.util.Assert;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.reactive.function.server.ServerResponse;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

@Log4j2
@RestController
@RequestMapping({"/api"})
// @RequiredArgsConstructor (Lombok) generates a constructor covering all final and @NonNull fields.
@RequiredArgsConstructor
public class OpenAiController {

    private final UserChatService userChatService;
    private final OpenAiWebClient openAiWebClient;
    private final UsageService usageService;

    private static final String ERROR_MSG = "error";

    // Pool used only to delay error-stream completion; see getErrorRes for why the delay exists.
    private static final Executor EXECUTOR = Executors.newFixedThreadPool(10);
    private static final Random RANDOM = new Random();

    /**
     * Streams chat completions over SSE. POST is used (rather than GET) so that
     * special characters and very long prompts survive transport intact.
     *
     * @param param   request body; expects keys "sessionId", "prompt" and "model"
     * @param request servlet request; its "id" attribute (presumably set by an
     *                upstream auth filter — TODO confirm) identifies the user for quota checks
     * @return SSE flux of completion chunks; on failure, a flux that delivers a single
     *         JSON-encoded {@code R.fail} payload instead of aborting the connection
     */
    @PostMapping(value = "/openai/completions/stream", produces = MediaType.TEXT_EVENT_STREAM_VALUE)
    public Flux<String> streamCompletionsPost(@RequestBody Map<String, String> param, HttpServletRequest request) {
        String sessionId = param.get("sessionId");
        String prompt = param.get("prompt");
        String model = param.get("model");
        Assert.hasLength(sessionId, "sessionId is null");
        Assert.hasLength(prompt, "prompt is null");
        Assert.hasLength(model, "model is null");

        // NOTE(review): assumes the "id" attribute is a numeric String; a missing
        // attribute or a non-String value throws here — confirm the upstream filter's contract.
        int userId = Integer.parseInt((String) request.getAttribute("id"));

        // gpt-3.5-turbo draws from the basic quota; every other model draws from the premium quota.
        boolean allowed = "gpt-3.5-turbo".equals(model)
                ? usageService.useBasicService(userId)
                : usageService.usePremiumService(userId);
        if (!allowed) {
            return getErrorRes("reached the limit");
        }

        try {
            return userChatService.send(MessageType.TEXT, prompt, sessionId, model);
        } catch (CommonException e) {
            // Business error: safe to expose its message to the client.
            log.warn("e:{}", e.getMessage());
            return getErrorRes(e.getMessage());
        } catch (Exception e) {
            // Unexpected error: log the full stack trace, return a generic message.
            log.error("e:{}", e.getMessage(), e);
            return getErrorRes(ERROR_MSG);
        }
    }

    /** Runs the given text through the OpenAI content-moderation check. */
    @GetMapping("/openai/checkContent")
    public Mono<ServerResponse> checkContent(@RequestParam String content) {
        log.info("req:{}", content);
        return openAiWebClient.checkContent(content);
    }

    /**
     * Returns the stored chat history for a session.
     *
     * @param sessionId the session whose history is requested; must be non-empty
     * @return the message list wrapped in a Mono
     */
    @GetMapping(value = "/openai/history")
    public Mono<List<Message>> history(String sessionId) {
        // Message fixed: previously said "user is null" for a missing sessionId.
        Assert.hasLength(sessionId, "sessionId is null");
        return Mono.just(userChatService.getHistory(sessionId));
    }

    /**
     * Error handling for the SSE endpoint. Rather than aborting the stream with an
     * exception — which leaves the client with only "connection closed" and no reason —
     * emit a normal payload whose non-zero code marks the error; clients branch on the
     * code. See {@link R}.
     *
     * <p>The short sleep before completing exists because, behind an nginx load
     * balancer, closing an SSE stream immediately after an error makes nginx treat
     * the connection as failed and retry, duplicating the request to the backend.
     * Delete the executor/sleep if you do not sit behind such a proxy.
     *
     * @param msg error message forwarded to the client
     * @return flux that emits the failure payload and then completes normally
     */
    private Flux<String> getErrorRes(String msg) {
        return Flux.create(emitter -> {
            emitter.next(" ");
            emitter.next(" ");
            EXECUTOR.execute(() -> {
                try {
                    // Sleep 100–199 ms so nginx sees a "normal" connection before it closes.
                    Thread.sleep(Math.max(RANDOM.nextInt(200), 100));
                } catch (InterruptedException e) {
                    log.info("e:", e);
                    Thread.currentThread().interrupt(); // restore the interrupt flag for the pool thread
                }
                emitter.next(JSON.toJSONString(R.fail(msg)));
                emitter.complete();
            });
        });
    }
}
