<?php
// +----------------------------------------------------------------------
// | 控制器 - APP
// +----------------------------------------------------------------------
declare(strict_types=1);

namespace app\controller\app;

use Symfony\Component\HttpClient\HttpClient;
use app\BaseController;
use Exception;
use think\Response;
use think\exception\HttpException;

/**
 * AI chat controller.
 *
 * Requires a locally installed LLM server exposing an OpenAI-compatible API;
 * currently verified against Ollama and LM Studio.
 *
 * Class Llm
 * @package app\controller\app
 */
class Llm extends BaseController
{
    // Base address; endpoints follow the OpenAI API scheme. Verified with:
    // LM Studio: http://localhost:1234
    // Ollama: http://127.0.0.1:11434
    private const BASE_URL = 'http://localhost:1234';

    /**
     * @OA\Get(
     *   path="/api/app/llm/models",
     *   tags={"AI聊天"},
     *   summary="可用模型列表",
     *   description="获取可用模型列表",
     *   security={{"bearerAuth":{}}},
     *   @OA\Response(response=401, description="未携带TOKEN或已过期"),
     *   @OA\Response(
     *     response="200",
     *     description="Success",
     *     @OA\JsonContent(
     *       type="object",
     *       @OA\Property(property="success", type="boolean", example=true),
     *       @OA\Property(property="message", type="string", example="操作成功"),
     *       @OA\Property(
     *           property="data",
     *           type="array",
     *           @OA\Items(
     *             type="object",
     *             @OA\Property(property="modelKey", type="string", example="qwen3:8b"),
     *             @OA\Property(property="displayName", type="string", example="qwen3:8b")
     *           )
     *       )
     *     )
     *   )
     * )
     * @return Response
     */
    public function getModels(): Response
    {
        $client = HttpClient::create();

        $response = $client->request('GET', self::BASE_URL . '/v1/models');
        $this->assertUpstreamOk($response);

        $data = $response->toArray();

        // Reduce each upstream model entry to the lightweight shape the client expects.
        return $this->response(array_map(static function (array $item): array {
            return [
                'modelKey'    => $item['id'],
                'displayName' => $item['id'],
            ];
        }, $data['data']));
    }

    /**
     * @OA\Post(
     *   path="/api/app/llm/chat/generate",
     *   tags={"AI聊天"},
     *   summary="提示词生成",
     *   description="向模型发送提示词并生成一段助手响应文本",
     *   @OA\RequestBody(
     *   @OA\MediaType(
     *     mediaType="application/json",
     *       @OA\Schema(
     *         @OA\Property(description="模型", property="model", type="string", example="qwen3-8b"),
     *         @OA\Property(description="提示文本", property="prompt", type="string", example="text prompt"),
     *         @OA\Property(description="模型温度", property="temperature", type="number", format="float", example=0.7),
     *         @OA\Property(description="上下文长度", property="max_tokens", type="integer", example=1000),
     *         required={"model","prompt"})
     *     )
     *   ),
     *   security={{"bearerAuth":{}}},
     *   @OA\Response(response=401, description="未携带TOKEN或已过期"),
     *   @OA\Response(
     *     response="200",
     *     description="Success",
     *     @OA\JsonContent(
     *       type="object",
     *       @OA\Property(property="success", type="boolean", example=true),
     *       @OA\Property(property="message", type="string", example="操作成功"),
     *       @OA\Property(
     *         property="data",
     *         type="object",
     *         @OA\Property(property="model", type="string", example="qwen3-8b"),
     *         @OA\Property(
     *           property="usage",
     *           type="object",
     *           @OA\Property(property="prompt_tokens", type="number"),
     *           @OA\Property(property="completion_tokens", type="number"),
     *           @OA\Property(property="total_tokens", type="number")
     *         ),
     *         @OA\Property(property="content", type="string", example="返回消息"),
     *         @OA\Property(property="create_at", type="string", example="2025-01-01 12:00:00")
     *       )
     *     )
     *   )
     * )
     * @return Response
     */
    public function chatGenerate(): Response
    {
        $client = HttpClient::create();

        // POST a JSON payload: pass model/temperature/max_tokens through from
        // the request, and wrap the prompt as a single user message.
        $response = $client->request(
            'POST',
            self::BASE_URL . '/v1/chat/completions',
            [
                'json' => array_merge($this->request->only(['model', 'temperature', 'max_tokens']), [
                    'messages' => [
                        ['role' => 'user', 'content' => $this->request->post('prompt')],
                    ],
                ]),
                // Uncomment to authenticate against a protected endpoint:
                // 'headers' => [
                //     'Authorization' => 'Bearer API_KEY'
                // ]
            ]
        );
        $this->assertUpstreamOk($response);

        $data = $response->toArray();
        return $this->response([
            'model'     => $data['model'],
            'content'   => $data['choices'][0]['message']['content'],
            'usage'     => $data['usage'],
            // NOTE(review): key is 'create_at' (sic), kept as-is for client
            // compatibility; the swagger annotation above matches it.
            'create_at' => date('Y-m-d H:i:s', $data['created']),
        ]);
    }

    /**
     * @OA\Post(
     *   path="/api/app/llm/chat/completions",
     *   tags={"AI聊天"},
     *   summary="聊天补全",
     *   description="向模型发送聊天历史以预测下一个助手响应",
     *   @OA\RequestBody(
     *   @OA\MediaType(
     *     mediaType="application/json",
     *       @OA\Schema(
     *         @OA\Property(description="模型", property="model", type="string", example="qwen3-8b"),
     *         @OA\Property(
     *           description="消息",
     *           property="messages",
     *           type="array",
     *           @OA\Items(
     *             type="object",
     *             @OA\Property(property="role", type="string", example="user", description="角色"),
     *             @OA\Property(property="content", type="string", example="你好", description="内容")
     *           )
     *         ),
     *         @OA\Property(description="模型温度", property="temperature", type="number", format="float", example=0.7),
     *         @OA\Property(description="上下文长度", property="max_tokens", type="integer", example=1000),
     *         @OA\Property(description="stream", property="stream", type="boolean", example=false),
     *         required={"model","messages"})
     *     )
     *   ),
     *   security={{"bearerAuth":{}}},
     *   @OA\Response(response=401, description="未携带TOKEN或已过期"),
     *   @OA\Response(
     *     response="200",
     *     description="Success"
     *   )
     * )
     * @return void
     */
    public function chatCompletions(): void
    {
        // NOTE:
        // symfony/http-client is used to relay the SSE stream. It works, but
        // has rough edges: because headers are emitted manually, CORS and
        // other tp8 route-level configuration is bypassed, and the behaviour
        // has not been verified in every server environment. SSE error
        // handling is still a work in progress.

        // -----------------------------------

        // Drop any active output buffer so chunks reach the client
        // immediately instead of being held until the script ends.
        if (ob_get_level() > 0) {
            ob_end_clean();
        }
        // SSE response headers.
        header("Access-Control-Allow-Origin:*");
        header('Content-Type: text/event-stream');
        header('Cache-Control: no-cache');
        header('Connection: keep-alive');
        header('X-Accel-Buffering: no'); // disable nginx proxy buffering

        try {
            $client = HttpClient::create();

            // Forward the client's chat payload verbatim to the LLM server.
            $response = $client->request(
                'POST',
                self::BASE_URL . '/v1/chat/completions',
                [
                    'json' => $this->request->only(['model', 'messages', 'temperature', 'max_tokens', 'stream']),
                    // Uncomment to authenticate against a protected endpoint:
                    // 'headers' => [
                    //     'Authorization' => 'Bearer API_KEY'
                    // ]
                ]
            );

            // The streaming handling below has only been verified against LM Studio.
            $this->assertUpstreamOk($response);

            // Relay the upstream SSE chunks to the client as they arrive.
            foreach ($client->stream($response) as $chunk) {
                $data = $chunk->getContent();
                echo "event: message\n$data\n\n";
                flush();

                if (str_contains($data, 'data: [DONE]')) {
                    break;
                }
                // Throttle slightly between chunks to avoid pegging the CPU.
                usleep(10000); // 10 ms
            }
        } catch (Exception $e) {
            // Emit the error in the same chunk format as normal messages so
            // clients can parse it with the same code path, then terminate
            // the stream with the standard [DONE] sentinel.
            $payload = json_encode([
                "id" => uniqid('chatcmpl-', true),
                "object" => "chat.completion.chunk",
                "created" => time(),
                "model" => $this->request->post('model'),
                "system_fingerprint" => $this->request->post('model'),
                "error" => $e->getMessage()
            ]);
            echo "event: message\ndata: $payload\n\n";
            echo "data: [DONE]";
            flush();
        }
    }

    /**
     * Throw an HttpException when the upstream response is not HTTP 200.
     *
     * Decodes the upstream error body defensively: the original code did
     * `json_decode($body)->error->message`, which under strict_types raises a
     * TypeError (null passed to HttpException's string parameter) whenever the
     * body is not JSON or lacks the expected shape, masking the real error.
     *
     * @param \Symfony\Contracts\HttpClient\ResponseInterface $response upstream response
     * @return void
     * @throws HttpException when the upstream status code is not 200
     */
    private function assertUpstreamOk(\Symfony\Contracts\HttpClient\ResponseInterface $response): void
    {
        $status = $response->getStatusCode();
        if ($status === 200) {
            return;
        }
        // getContent(false) suppresses the client's own 3xx-5xx exception so
        // we can surface the upstream error message ourselves.
        $error = json_decode($response->getContent(false), true);
        throw new HttpException($status, $error['error']['message'] ?? 'Upstream LLM request failed');
    }
}
