// main.ts (Corrected Version)
// ====================================================================
// goblin-2api: Deno Edition
// A single-file, self-contained Deno script to proxy goblin.tools
// into an OpenAI-compatible API.
//
// Author: Top-Tier Software Engineer (AI)
// Version: 1.0.1
// ====================================================================

// --- 1. Configuration Namespace ---
// Replaces app/core/config.py and .env loading logic.
// Every value is resolved once, at startup, from environment variables.
namespace Settings {
    export const APP_NAME = "goblin-2api (Deno Edition)";
    export const APP_VERSION = "1.0.1";
    export const DESCRIPTION = "一个将 goblin.tools 转换为兼容 OpenAI 格式 API 的高性能 Deno 代理。";

    // Populated via `deno run --env`.
    // The default "1" is treated by the request handler as "auth disabled".
    export const API_MASTER_KEY = Deno.env.get("API_MASTER_KEY") || "1";
    // NOTE(review): the listen port is read from NGINX_PORT — confirm that
    // env-var name is intentional for this service.
    export const PORT = Number.parseInt(Deno.env.get("NGINX_PORT") || "8088", 10);

    // Upstream request timeout, in milliseconds.
    export const API_REQUEST_TIMEOUT = 120 * 1000;

    // Friendly model name -> upstream goblin.tools endpoint.
    export const MODEL_MAPPING: Record<string, string> = {
        "语气评判": "https://goblin.tools/api/ToneJudger/JudgeTone",
        "回应建议": "https://goblin.tools/api/ToneJudger/SuggestResponse",
    };
    export const DEFAULT_MODEL = "语气评判";

    // Derived list of model ids, served by /v1/models.
    export const KNOWN_MODELS = Object.keys(MODEL_MAPPING);
}

// --- 2. SSE (Server-Sent Events) Utilities ---
// Replaces app/utils/sse_utils.py
namespace SseUtils {
    const encoder = new TextEncoder();

    /** Pre-encoded terminal sentinel every OpenAI-style stream ends with. */
    export const DONE_CHUNK = encoder.encode("data: [DONE]\n\n");

    /**
     * Serializes a JSON-able object into a single SSE "data:" frame.
     * @param data The object to serialize.
     * @returns UTF-8 bytes of the framed message.
     */
    export function createSseData(data: Record<string, any>): Uint8Array {
        const frame = `data: ${JSON.stringify(data)}\n\n`;
        return encoder.encode(frame);
    }

    /**
     * Builds one OpenAI-compatible `chat.completion.chunk` object.
     * @param requestId Unique id shared by all chunks of one request.
     * @param model Model name echoed back to the client.
     * @param content Delta text carried by this chunk.
     * @param finishReason "stop" on the final chunk, otherwise null.
     * @returns The chunk object (not yet SSE-encoded).
     */
    export function createChatCompletionChunk(
        requestId: string,
        model: string,
        content: string,
        finishReason: string | null = null
    ): Record<string, any> {
        const delta = { content };
        return {
            id: requestId,
            object: "chat.completion.chunk",
            created: Math.floor(Date.now() / 1000),
            model,
            choices: [{ index: 0, delta, finish_reason: finishReason }],
        };
    }
}

// --- 3. Core Provider Logic ---
// Replaces app/providers/goblin_provider.py
class GoblinProvider {
    /**
     * Prepares the headers for the upstream goblin.tools request.
     * The Origin/Referer/Cookie/User-Agent combination mimics a real
     * browser session, which the upstream's bot protection expects.
     * @returns A populated Headers object.
     */
    private _prepareHeaders(): Headers {
        const headers = new Headers();
        headers.set("Accept", "*/*");
        headers.set("Accept-Language", "zh-CN,zh;q=0.9,en;q=0.8");
        headers.set("Content-Type", "application/json");
        headers.set("Origin", "https://goblin.tools");
        headers.set("Referer", "https://goblin.tools/Judge");
        headers.set("Cookie", "gt_lang=zh-CN");
        // Add a realistic User-Agent, which cloudscraper does automatically.
        headers.set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36");
        return headers;
    }

    /**
     * Extracts the most recent user message from the OpenAI-style
     * `messages` array and wraps it in the upstream payload shape.
     * @param requestData The JSON body from the client request.
     * @returns The payload object ({ Texts: [...] }) for the upstream API.
     * @throws Error when no non-empty message with role "user" is present.
     */
    private _preparePayload(requestData: any): { Texts: string[] } {
        const messages = requestData.messages || [];
        // Copy before reversing so the caller's array is not mutated.
        const lastUserMessage = messages
            .slice()
            .reverse()
            .find((m: any) => m.role === 'user')?.content;

        if (!lastUserMessage) {
            throw new Error("请求中未找到用户消息 (User message not found in request).");
        }

        return { "Texts": [lastUserMessage] };
    }

    /**
     * Handles the /v1/chat/completions endpoint.
     * Fetches the full response from the upstream API, then replays it to
     * the client one character at a time as SSE chunks (pseudo-stream /
     * typewriter effect).
     *
     * Fixes over the previous version:
     *  - the request-timeout timer is cleared on ALL exit paths (it
     *    previously leaked when fetch rejected);
     *  - enqueue/close are guarded so a client disconnect mid-stream no
     *    longer raises a secondary error from the stream controller.
     * @param requestData The JSON body from the client request.
     * @returns A streaming SSE Response, or a JSON error response (400).
     */
    public async chatCompletion(requestData: any): Promise<Response> {
        const model = requestData.model || Settings.DEFAULT_MODEL;
        const upstreamUrl = Settings.MODEL_MAPPING[model];

        if (!upstreamUrl) {
            return new Response(
                JSON.stringify({ error: `不支持的模型 (Unsupported model): ${model}` }),
                { status: 400, headers: { "Content-Type": "application/json" } }
            );
        }

        const requestId = `chatcmpl-${crypto.randomUUID()}`;
        let payload: { Texts: string[] };
        try {
            payload = this._preparePayload(requestData);
        } catch (e) {
            return new Response(
                JSON.stringify({ error: e instanceof Error ? e.message : String(e) }),
                { status: 400, headers: { "Content-Type": "application/json" } }
            );
        }

        const headers = this._prepareHeaders();
        const stream = new ReadableStream({
            async start(controller) {
                // Once the client disconnects, every controller call throws.
                // Track liveness and swallow those secondary failures.
                let open = true;
                const safeEnqueue = (bytes: Uint8Array) => {
                    if (!open) return;
                    try {
                        controller.enqueue(bytes);
                    } catch {
                        open = false; // stream cancelled/closed by the consumer
                    }
                };

                const abortController = new AbortController();
                const timeoutId = setTimeout(
                    () => abortController.abort(),
                    Settings.API_REQUEST_TIMEOUT,
                );
                try {
                    console.log(`[INFO] 向 ${upstreamUrl} 发送请求...`);
                    const response = await fetch(upstreamUrl, {
                        method: "POST",
                        headers: headers,
                        body: JSON.stringify(payload),
                        signal: abortController.signal,
                    });

                    console.log(`[INFO] 上游服务返回状态码: ${response.status}`);
                    if (!response.ok) {
                        const errorText = await response.text();
                        throw new Error(`上游错误 (Upstream error) ${response.status}: ${errorText}`);
                    }

                    const fullText = await response.text();
                    console.log(`[INFO] 收到完整响应: ${fullText.substring(0, 100)}...`);

                    // Pseudo-stream: one SSE chunk per character, with a short delay.
                    for (const char of fullText) {
                        if (!open) break; // client gone — stop burning timers
                        safeEnqueue(SseUtils.createSseData(
                            SseUtils.createChatCompletionChunk(requestId, model, char, null),
                        ));
                        await new Promise((resolve) => setTimeout(resolve, 10)); // Typewriter delay
                    }

                    // Terminal chunk with finish_reason "stop", then the DONE sentinel.
                    safeEnqueue(SseUtils.createSseData(
                        SseUtils.createChatCompletionChunk(requestId, model, "", "stop"),
                    ));
                    safeEnqueue(SseUtils.DONE_CHUNK);
                    console.log("[INFO] 伪流式传输完成。");
                } catch (e) {
                    const message = e instanceof Error ? e.message : String(e);
                    console.error(`[ERROR] 处理流时发生错误: ${message}`);
                    // Report the failure in-band as a final SSE chunk, since the
                    // HTTP status line has already been sent.
                    const errorMessage = `内部服务器错误 (Internal Server Error): ${message}`;
                    safeEnqueue(SseUtils.createSseData(
                        SseUtils.createChatCompletionChunk(requestId, model, errorMessage, "stop"),
                    ));
                    safeEnqueue(SseUtils.DONE_CHUNK);
                } finally {
                    clearTimeout(timeoutId); // always reached — no leaked abort timer
                    if (open) {
                        try {
                            controller.close();
                        } catch {
                            // Controller already closed/errored; nothing left to do.
                        }
                    }
                }
            },
        });

        return new Response(stream, {
            headers: {
                "Content-Type": "text/event-stream",
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
            },
        });
    }

    /**
     * Handles the /v1/models endpoint.
     * @returns A JSON Response listing every configured model id.
     */
    public async getModels(): Promise<Response> {
        const modelData = {
            object: "list",
            data: Settings.KNOWN_MODELS.map(name => ({
                id: name,
                object: "model",
                created: Math.floor(Date.now() / 1000),
                owned_by: "goblin-2api",
            })),
        };
        return Response.json(modelData);
    }
}

// --- 4. Server and Routing ---
// Replaces main.py and FastAPI routing.
// Single module-level provider instance; GoblinProvider holds no mutable
// state, so one shared instance can serve all requests.
const provider = new GoblinProvider();

/**
 * Top-level request handler for the Deno server: applies bearer-token
 * authentication (unless disabled), then routes by method + path.
 * @param req The incoming Request object.
 * @returns A Response object.
 */
async function mainHandler(req: Request): Promise<Response> {
    const pathname = new URL(req.url).pathname;
    const method = req.method;

    // --- Authentication Middleware ---
    // A master key of "1" (the default) disables auth entirely; the root
    // path is always exempt so health checks need no credentials.
    const authEnabled = Settings.API_MASTER_KEY && Settings.API_MASTER_KEY !== "1";
    if (authEnabled && pathname !== "/") {
        const authHeader = req.headers.get("Authorization") ?? "";
        if (!authHeader.toLowerCase().startsWith("bearer ")) {
            return new Response("需要 Bearer Token 认证 (Bearer token required).", { status: 401 });
        }
        // Strip the 7-character "Bearer " prefix to get the raw token.
        if (authHeader.slice(7) !== Settings.API_MASTER_KEY) {
            return new Response("无效的 API Key (Invalid API Key).", { status: 403 });
        }
    }

    // --- Routing ---
    if (method === "GET") {
        if (pathname === "/") {
            // Health/welcome endpoint.
            return Response.json({
                message: `欢迎来到 ${Settings.APP_NAME} v${Settings.APP_VERSION}. 服务运行正常。`
            });
        }
        if (pathname === "/v1/models") {
            return provider.getModels();
        }
    }

    if (method === "POST" && pathname === "/v1/chat/completions") {
        // Reject requests that arrive with no body at all.
        if (!req.body) {
            return new Response(`请求体为空 (Request body is empty)`, { status: 400 });
        }
        try {
            const requestData = await req.json();
            return provider.chatCompletion(requestData);
        } catch (e) {
            return new Response(`无效的 JSON 请求体 (Invalid JSON body): ${(e as Error).message}`, { status: 400 });
        }
    }

    return new Response("未找到 (Not Found)", { status: 404 });
}

// --- 5. Server Initialization ---
console.log(`[INFO] 应用启动中... ${Settings.APP_NAME} v${Settings.APP_VERSION}`);
console.log("[INFO] 服务已进入 'Deno & Pseudo-Stream' 模式。");

// Bind the HTTP server; onListen fires once the socket is ready.
Deno.serve(
    {
        port: Settings.PORT,
        onListen(addr) {
            console.log(`[INFO] 服务已启动，正在监听 http://${addr.hostname}:${addr.port}`);
        },
    },
    mainHandler,
);
