/**
 * Static model catalog, keyed by provider id.
 *
 * Each entry describes one chat model:
 * - `max_tokens` / `max_input_tokens` / `max_output_tokens`: token limits
 *   (context window for input, generation cap for output).
 * - `*_cost_per_token`: price in USD per single token
 *   (e.g. 0.00003 = $30 per 1M tokens).
 * - `*_cost_per_token_batches`: discounted per-token price via the
 *   provider's batch API.
 * - `cache_read_input_token_cost` / `cache_creation_input_token_cost` /
 *   `input_cost_per_token_cache_hit`: prompt-caching prices.
 * - `supports_*`: capability flags used for feature-based routing.
 * - `deprecation_date` / `source` / `rpm` / `tpm`: provider-published
 *   metadata where available.
 */
const modelCatalog = {
  // OpenAI chat models (https://api.openai.com).
  "openai": [
      {
          "model": "gpt-4",
          "max_tokens": 4096,
          "max_input_tokens": 8192,
          "max_output_tokens": 4096,
          "input_cost_per_token": 0.00003,
          "output_cost_per_token": 0.00006,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
      {
          "model": "gpt-4o",
          "max_tokens": 16384,
          "max_input_tokens": 128000,
          "max_output_tokens": 16384,
          "input_cost_per_token": 0.0000025,
          "output_cost_per_token": 0.00001,
          "input_cost_per_token_batches": 0.00000125,
          "output_cost_per_token_batches": 0.000005,
          "cache_read_input_token_cost": 0.00000125,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_response_schema": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
      {
          "model": "gpt-4.5-preview",
          "max_tokens": 16384,
          "max_input_tokens": 128000,
          "max_output_tokens": 16384,
          "input_cost_per_token": 0.000075,
          "output_cost_per_token": 0.00015,
          "input_cost_per_token_batches": 0.0000375,
          "output_cost_per_token_batches": 0.000075,
          "cache_read_input_token_cost": 0.0000375,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_response_schema": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
      {
          "model": "gpt-4.5-preview-2025-02-27",
          "max_tokens": 16384,
          "max_input_tokens": 128000,
          "max_output_tokens": 16384,
          "input_cost_per_token": 0.000075,
          "output_cost_per_token": 0.00015,
          "input_cost_per_token_batches": 0.0000375,
          "output_cost_per_token_batches": 0.000075,
          "cache_read_input_token_cost": 0.0000375,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_response_schema": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
      {
          "model": "gpt-4o-mini",
          "max_tokens": 16384,
          "max_input_tokens": 128000,
          "max_output_tokens": 16384,
          "input_cost_per_token": 1.5e-7,
          "output_cost_per_token": 6e-7,
          "input_cost_per_token_batches": 7.5e-8,
          "output_cost_per_token_batches": 3e-7,
          "cache_read_input_token_cost": 7.5e-8,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_response_schema": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
      {
          "model": "gpt-4o-mini-2024-07-18",
          "max_tokens": 16384,
          "max_input_tokens": 128000,
          "max_output_tokens": 16384,
          "input_cost_per_token": 1.5e-7,
          "output_cost_per_token": 6e-7,
          "input_cost_per_token_batches": 7.5e-8,
          "output_cost_per_token_batches": 3e-7,
          "cache_read_input_token_cost": 7.5e-8,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_response_schema": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
      {
          "model": "o1",
          "max_tokens": 100000,
          "max_input_tokens": 200000,
          "max_output_tokens": 100000,
          "input_cost_per_token": 0.000015,
          "output_cost_per_token": 0.00006,
          "cache_read_input_token_cost": 0.0000075,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "o3-mini",
          "max_tokens": 100000,
          "max_input_tokens": 200000,
          "max_output_tokens": 100000,
          "input_cost_per_token": 0.0000011,
          "output_cost_per_token": 0.0000044,
          "cache_read_input_token_cost": 5.5e-7,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": false,
          "supports_vision": false,
          "supports_prompt_caching": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "o3-mini-2025-01-31",
          "max_tokens": 100000,
          "max_input_tokens": 200000,
          "max_output_tokens": 100000,
          "input_cost_per_token": 0.0000011,
          "output_cost_per_token": 0.0000044,
          "cache_read_input_token_cost": 5.5e-7,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": false,
          "supports_vision": false,
          "supports_prompt_caching": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "o1-2024-12-17",
          "max_tokens": 100000,
          "max_input_tokens": 200000,
          "max_output_tokens": 100000,
          "input_cost_per_token": 0.000015,
          "output_cost_per_token": 0.00006,
          "cache_read_input_token_cost": 0.0000075,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "chatgpt-4o-latest",
          "max_tokens": 4096,
          "max_input_tokens": 128000,
          "max_output_tokens": 4096,
          "input_cost_per_token": 0.000005,
          "output_cost_per_token": 0.000015,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
      {
          "model": "gpt-4o-2024-05-13",
          "max_tokens": 4096,
          "max_input_tokens": 128000,
          "max_output_tokens": 4096,
          "input_cost_per_token": 0.000005,
          "output_cost_per_token": 0.000015,
          "input_cost_per_token_batches": 0.0000025,
          "output_cost_per_token_batches": 0.0000075,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
      {
          "model": "gpt-4o-2024-08-06",
          "max_tokens": 16384,
          "max_input_tokens": 128000,
          "max_output_tokens": 16384,
          "input_cost_per_token": 0.0000025,
          "output_cost_per_token": 0.00001,
          "input_cost_per_token_batches": 0.00000125,
          "output_cost_per_token_batches": 0.000005,
          "cache_read_input_token_cost": 0.00000125,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_response_schema": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
      {
          "model": "gpt-4o-2024-11-20",
          "max_tokens": 16384,
          "max_input_tokens": 128000,
          "max_output_tokens": 16384,
          "input_cost_per_token": 0.0000025,
          "output_cost_per_token": 0.00001,
          "input_cost_per_token_batches": 0.00000125,
          "output_cost_per_token_batches": 0.000005,
          "cache_read_input_token_cost": 0.00000125,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_response_schema": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
      {
          "model": "gpt-4-turbo",
          "max_tokens": 4096,
          "max_input_tokens": 128000,
          "max_output_tokens": 4096,
          "input_cost_per_token": 0.00001,
          "output_cost_per_token": 0.00003,
          "provider": "openai",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_parallel_function_calling": true,
          "supports_vision": true,
          "supports_prompt_caching": true,
          "supports_system_messages": true,
          "supports_tool_choice": true
      },
  ],
  // Mistral AI models (https://api.mistral.ai).
  "mistral": [
      {
          "model": "mistral-small",
          "max_tokens": 8191,
          "max_input_tokens": 32000,
          "max_output_tokens": 8191,
          "input_cost_per_token": 0.000001,
          "output_cost_per_token": 0.000003,
          "provider": "mistral",
          "supports_function_calling": true,
          "mode": "chat",
          "supports_assistant_prefill": true,
          "supports_tool_choice": true
      },
      {
          "model": "mistral-small-latest",
          "max_tokens": 8191,
          "max_input_tokens": 32000,
          "max_output_tokens": 8191,
          "input_cost_per_token": 0.000001,
          "output_cost_per_token": 0.000003,
          "provider": "mistral",
          "supports_function_calling": true,
          "mode": "chat",
          "supports_assistant_prefill": true,
          "supports_tool_choice": true
      },
      {
          "model": "mistral-large-latest",
          "max_tokens": 128000,
          "max_input_tokens": 128000,
          "max_output_tokens": 128000,
          "input_cost_per_token": 0.000002,
          "output_cost_per_token": 0.000006,
          "provider": "mistral",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_assistant_prefill": true,
          "supports_tool_choice": true
      },
      {
          "model": "open-mixtral-8x7b",
          "max_tokens": 8191,
          "max_input_tokens": 32000,
          "max_output_tokens": 8191,
          "input_cost_per_token": 7e-7,
          "output_cost_per_token": 7e-7,
          "provider": "mistral",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_assistant_prefill": true,
          "supports_tool_choice": true
      },
      {
          "model": "open-mixtral-8x22b",
          "max_tokens": 8191,
          // Fixed: upstream data listed 65336, a typo for the model's
          // 64k-token context window (64 * 1024 = 65536).
          "max_input_tokens": 65536,
          "max_output_tokens": 8191,
          "input_cost_per_token": 0.000002,
          "output_cost_per_token": 0.000006,
          "provider": "mistral",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_assistant_prefill": true,
          "supports_tool_choice": true
      }
  ],
  // DeepSeek models (https://api.deepseek.com).
  "deepseek": [
      {
          "model": "deepseek-reasoner",
          "max_tokens": 8192,
          "max_input_tokens": 65536,
          "max_output_tokens": 8192,
          "input_cost_per_token": 5.5e-7,
          "input_cost_per_token_cache_hit": 1.4e-7,
          "output_cost_per_token": 0.00000219,
          "provider": "deepseek",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_assistant_prefill": true,
          "supports_tool_choice": true,
          "supports_prompt_caching": true
      },
      {
          "model": "deepseek-chat",
          "max_tokens": 8192,
          "max_input_tokens": 65536,
          "max_output_tokens": 8192,
          "input_cost_per_token": 2.7e-7,
          "input_cost_per_token_cache_hit": 7e-8,
          "cache_read_input_token_cost": 7e-8,
          "cache_creation_input_token_cost": 0,
          "output_cost_per_token": 0.0000011,
          "provider": "deepseek",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_assistant_prefill": true,
          "supports_tool_choice": true,
          "supports_prompt_caching": true
      },
      {
          "model": "deepseek-coder",
          "max_tokens": 4096,
          "max_input_tokens": 128000,
          "max_output_tokens": 4096,
          "input_cost_per_token": 1.4e-7,
          "input_cost_per_token_cache_hit": 1.4e-8,
          "output_cost_per_token": 2.8e-7,
          "provider": "deepseek",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_assistant_prefill": true,
          "supports_tool_choice": true,
          "supports_prompt_caching": true
      }
  ],
  // xAI models (https://api.x.ai).
  "xai": [
      {
          "model": "grok-beta",
          "max_tokens": 131072,
          "max_input_tokens": 131072,
          "max_output_tokens": 131072,
          "input_cost_per_token": 0.000005,
          "output_cost_per_token": 0.000015,
          "provider": "xai",
          "mode": "chat",
          "supports_function_calling": true,
          // NOTE(review): vision was historically exposed as a separate
          // grok-vision-beta model — confirm this flag against xAI docs.
          "supports_vision": true,
          "supports_tool_choice": true
      },
  ],
  // Groq-hosted open-weight models (https://api.groq.com).
  "groq": [
      {
          "model": "llama-3.3-70b-versatile",
          "max_tokens": 8192,
          "max_input_tokens": 128000,
          "max_output_tokens": 8192,
          "input_cost_per_token": 5.9e-7,
          "output_cost_per_token": 7.9e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama2-70b-4096",
          "max_tokens": 4096,
          "max_input_tokens": 4096,
          "max_output_tokens": 4096,
          "input_cost_per_token": 7e-7,
          "output_cost_per_token": 8e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama3-8b-8192",
          "max_tokens": 8192,
          "max_input_tokens": 8192,
          "max_output_tokens": 8192,
          "input_cost_per_token": 5e-8,
          "output_cost_per_token": 8e-8,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama-3.2-1b-preview",
          "max_tokens": 8192,
          "max_input_tokens": 8192,
          "max_output_tokens": 8192,
          "input_cost_per_token": 4e-8,
          "output_cost_per_token": 4e-8,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama-3.2-3b-preview",
          "max_tokens": 8192,
          "max_input_tokens": 8192,
          "max_output_tokens": 8192,
          "input_cost_per_token": 6e-8,
          "output_cost_per_token": 6e-8,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama-3.2-11b-text-preview",
          "max_tokens": 8192,
          "max_input_tokens": 8192,
          "max_output_tokens": 8192,
          "input_cost_per_token": 1.8e-7,
          "output_cost_per_token": 1.8e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama-3.2-90b-text-preview",
          "max_tokens": 8192,
          "max_input_tokens": 8192,
          "max_output_tokens": 8192,
          "input_cost_per_token": 9e-7,
          "output_cost_per_token": 9e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama3-70b-8192",
          "max_tokens": 8192,
          "max_input_tokens": 8192,
          "max_output_tokens": 8192,
          "input_cost_per_token": 5.9e-7,
          "output_cost_per_token": 7.9e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama-3.1-8b-instant",
          "max_tokens": 8000,
          "max_input_tokens": 8000,
          "max_output_tokens": 8000,
          "input_cost_per_token": 5e-8,
          "output_cost_per_token": 8e-8,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama-3.1-70b-versatile",
          "max_tokens": 8000,
          "max_input_tokens": 8000,
          "max_output_tokens": 8000,
          "input_cost_per_token": 5.9e-7,
          "output_cost_per_token": 7.9e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama-3.1-405b-reasoning",
          "max_tokens": 8000,
          "max_input_tokens": 8000,
          "max_output_tokens": 8000,
          "input_cost_per_token": 5.9e-7,
          "output_cost_per_token": 7.9e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "mixtral-8x7b-32768",
          "max_tokens": 32768,
          "max_input_tokens": 32768,
          "max_output_tokens": 32768,
          "input_cost_per_token": 2.4e-7,
          "output_cost_per_token": 2.4e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "gemma-7b-it",
          "max_tokens": 8192,
          "max_input_tokens": 8192,
          "max_output_tokens": 8192,
          "input_cost_per_token": 7e-8,
          "output_cost_per_token": 7e-8,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "gemma2-9b-it",
          "max_tokens": 8192,
          "max_input_tokens": 8192,
          "max_output_tokens": 8192,
          "input_cost_per_token": 2e-7,
          "output_cost_per_token": 2e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama3-groq-70b-8192-tool-use-preview",
          "max_tokens": 8192,
          "max_input_tokens": 8192,
          "max_output_tokens": 8192,
          "input_cost_per_token": 8.9e-7,
          "output_cost_per_token": 8.9e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      },
      {
          "model": "llama3-groq-8b-8192-tool-use-preview",
          "max_tokens": 8192,
          "max_input_tokens": 8192,
          "max_output_tokens": 8192,
          "input_cost_per_token": 1.9e-7,
          "output_cost_per_token": 1.9e-7,
          "provider": "groq",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_response_schema": true,
          "supports_tool_choice": true
      }
  ],
  // Anthropic Claude models (https://api.anthropic.com).
  "anthropic": [
      {
          "model": "claude-3-5-haiku-latest",
          "max_tokens": 8192,
          "max_input_tokens": 200000,
          "max_output_tokens": 8192,
          "input_cost_per_token": 0.000001,
          "output_cost_per_token": 0.000005,
          "cache_creation_input_token_cost": 0.00000125,
          "cache_read_input_token_cost": 1e-7,
          "provider": "anthropic",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_vision": true,
          "tool_use_system_prompt_tokens": 264,
          "supports_assistant_prefill": true,
          "supports_prompt_caching": true,
          "supports_response_schema": true,
          "deprecation_date": "2025-10-01",
          "supports_tool_choice": true
      },
      {
          "model": "claude-3-opus-latest",
          "max_tokens": 4096,
          "max_input_tokens": 200000,
          "max_output_tokens": 4096,
          "input_cost_per_token": 0.000015,
          "output_cost_per_token": 0.000075,
          "cache_creation_input_token_cost": 0.00001875,
          "cache_read_input_token_cost": 0.0000015,
          "provider": "anthropic",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_vision": true,
          "tool_use_system_prompt_tokens": 395,
          "supports_assistant_prefill": true,
          "supports_prompt_caching": true,
          "supports_response_schema": true,
          "deprecation_date": "2025-03-01",
          "supports_tool_choice": true
      },
      {
          "model": "claude-3-7-sonnet-latest",
          "max_tokens": 8192,
          "max_input_tokens": 200000,
          "max_output_tokens": 8192,
          "input_cost_per_token": 0.000003,
          "output_cost_per_token": 0.000015,
          "cache_creation_input_token_cost": 0.00000375,
          "cache_read_input_token_cost": 3e-7,
          "provider": "anthropic",
          "mode": "chat",
          "supports_function_calling": true,
          "supports_vision": true,
          "tool_use_system_prompt_tokens": 159,
          "supports_assistant_prefill": true,
          "supports_prompt_caching": true,
          "supports_response_schema": true,
          "deprecation_date": "2025-06-01",
          "supports_tool_choice": true
      },
  ],
  // Google Gemini models; these entries carry extra multimodal limits
  // and rate-limit metadata (rpm/tpm) published by Google.
  "gemini": [
      {
          "model": "gemini-2.0-flash",
          "max_tokens": 8192,
          "max_input_tokens": 1048576,
          "max_output_tokens": 8192,
          "max_images_per_prompt": 3000,
          "max_videos_per_prompt": 10,
          "max_video_length": 1,
          "max_audio_length_hours": 8.4,
          "max_audio_per_prompt": 1,
          "max_pdf_size_mb": 30,
          "input_cost_per_audio_token": 7e-7,
          "input_cost_per_token": 1e-7,
          "output_cost_per_token": 4e-7,
          "provider": "gemini",
          "mode": "chat",
          "rpm": 10000,
          "tpm": 10000000,
          "supports_system_messages": true,
          "supports_function_calling": true,
          "supports_vision": true,
          "supports_response_schema": true,
          "supports_audio_output": true,
          "supports_tool_choice": true,
          "source": "https://ai.google.dev/pricing#2_0flash"
      },
      {
          "model": "gemini-2.0-flash-lite",
          "max_tokens": 8192,
          "max_input_tokens": 1048576,
          "max_output_tokens": 8192,
          "max_images_per_prompt": 3000,
          "max_videos_per_prompt": 10,
          "max_video_length": 1,
          "max_audio_length_hours": 8.4,
          "max_audio_per_prompt": 1,
          "max_pdf_size_mb": 30,
          "input_cost_per_audio_token": 7.5e-8,
          "input_cost_per_token": 7.5e-8,
          "output_cost_per_token": 3e-7,
          "provider": "gemini",
          "mode": "chat",
          "rpm": 60000,
          "tpm": 10000000,
          "supports_system_messages": true,
          "supports_function_calling": true,
          "supports_vision": true,
          "supports_response_schema": true,
          "supports_audio_output": false,
          "supports_tool_choice": true,
          "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash-lite"
      },
      {
          // Experimental model: all costs are 0 (free during preview).
          "model": "gemini-2.0-flash-thinking-exp",
          "max_tokens": 8192,
          "max_input_tokens": 1048576,
          "max_output_tokens": 8192,
          "max_images_per_prompt": 3000,
          "max_videos_per_prompt": 10,
          "max_video_length": 1,
          "max_audio_length_hours": 8.4,
          "max_audio_per_prompt": 1,
          "max_pdf_size_mb": 30,
          "input_cost_per_image": 0,
          "input_cost_per_video_per_second": 0,
          "input_cost_per_audio_per_second": 0,
          "input_cost_per_token": 0,
          "input_cost_per_character": 0,
          "input_cost_per_token_above_128k_tokens": 0,
          "input_cost_per_character_above_128k_tokens": 0,
          "input_cost_per_image_above_128k_tokens": 0,
          "input_cost_per_video_per_second_above_128k_tokens": 0,
          "input_cost_per_audio_per_second_above_128k_tokens": 0,
          "output_cost_per_token": 0,
          "output_cost_per_character": 0,
          "output_cost_per_token_above_128k_tokens": 0,
          "output_cost_per_character_above_128k_tokens": 0,
          "provider": "gemini",
          "mode": "chat",
          "supports_system_messages": true,
          "supports_function_calling": true,
          "supports_vision": true,
          "supports_response_schema": true,
          "supports_audio_output": true,
          "tpm": 4000000,
          "rpm": 10,
          "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
          "supports_tool_choice": true
      },
  ],
  // Local Ollama models are discovered at runtime, so no static entries.
  "ollama": [

  ]
};

export default modelCatalog;


/**
 * Registry of supported providers, keyed by provider id.
 *
 * Each entry holds the human-readable display name and the base URL of the
 * provider's OpenAI-compatible HTTP endpoint. An optional `status: "wip"`
 * flags integrations that are still work-in-progress.
 */
export const providers = {
  openai:     { name: "OpenAI",     baseURL: "https://api.openai.com/v1" },
  openrouter: { name: "OpenRouter", baseURL: "https://openrouter.ai/api/v1" },
  // Work-in-progress integration.
  anthropic:  { name: "Anthropic",  baseURL: "https://api.anthropic.com/v1", status: "wip" },
  // Google's OpenAI-compatibility endpoint.
  gemini:     { name: "Gemini",     baseURL: "https://generativelanguage.googleapis.com/v1beta/openai" },
  // Local Ollama daemon on its default port.
  ollama:     { name: "Ollama",     baseURL: "http://localhost:11434/v1" },
  mistral:    { name: "Mistral",    baseURL: "https://api.mistral.ai/v1" },
  // DeepSeek's base URL carries no /v1 suffix.
  deepseek:   { name: "DeepSeek",   baseURL: "https://api.deepseek.com" },
  xai:        { name: "xAI",        baseURL: "https://api.x.ai/v1" },
  groq:       { name: "Groq",       baseURL: "https://api.groq.com/openai/v1" },
};
