import { NextResponse } from 'next/server';
import { AI_MODEL_CONFIG } from '@/lib/config';

/**
 * Health-check endpoint for the configured AI model service.
 *
 * Sends the cheapest possible chat-completion probe (1 max token, minimal
 * prompt) to `AI_MODEL_CONFIG.apiUrl` and reports the outcome.
 *
 * @returns 200 with `status: 'healthy' | 'unhealthy'` when the probe got an
 *          HTTP response (healthy iff 2xx), or 503 with `status: 'error'`
 *          when the probe itself failed (network error, DNS failure, or the
 *          5s timeout). All shapes include `service` and `config` echoes so
 *          clients can see exactly what was tested.
 */
export async function GET() {
  // Static description of the probed service, shared by every response shape.
  const serviceInfo = {
    name: 'AI Model Service',
    url: AI_MODEL_CONFIG.apiUrl,
    model: AI_MODEL_CONFIG.defaultModel,
  };
  // Echo of the generation parameters currently configured.
  const configInfo = {
    maxTokens: AI_MODEL_CONFIG.maxTokens,
    temperature: AI_MODEL_CONFIG.temperature,
    topP: AI_MODEL_CONFIG.topP,
  };

  // Start the clock before the probe so both success and error paths can
  // report an actual round-trip time instead of the former placeholder null.
  const startedAt = Date.now();

  try {
    // Probe the AI model service with a minimal request to keep the
    // health check cheap for the upstream provider.
    const testResponse = await fetch(AI_MODEL_CONFIG.apiUrl, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: AI_MODEL_CONFIG.defaultModel,
        messages: [{
          role: 'user',
          content: 'test'
        }],
        max_tokens: 1,
        temperature: 0.1
      }),
      // Short timeout so a hung upstream cannot stall the health endpoint.
      signal: AbortSignal.timeout(5000)
    });

    const responseTime = Date.now() - startedAt; // milliseconds
    const isHealthy = testResponse.ok;

    return NextResponse.json({
      status: isHealthy ? 'healthy' : 'unhealthy',
      timestamp: new Date().toISOString(),
      service: {
        ...serviceInfo,
        statusCode: testResponse.status,
        responseTime,
      },
      config: configInfo,
    });
  } catch (error: unknown) {
    // fetch rejected: network failure, DNS error, or AbortSignal timeout.
    // Narrow `unknown` instead of trusting `any` (strict-mode idiom).
    const message =
      error instanceof Error ? error.message : 'Unknown error occurred';

    return NextResponse.json(
      {
        status: 'error',
        timestamp: new Date().toISOString(),
        service: {
          ...serviceInfo,
          statusCode: null,
          responseTime: Date.now() - startedAt,
        },
        error: message,
        config: configInfo,
      },
      { status: 503 }
    );
  }
}