Update config.json
config.json (+9, -9)
@@ -7,7 +7,7 @@
   },
   "llmModels": [
     {
-      "model": "gpt-3.5-turbo
+      "model": "gpt-3.5-turbo",
       "name": "gpt-3.5-turbo",
       "maxContext": 16000,
       "maxResponse": 4000,
@@ -25,8 +25,8 @@
       "defaultConfig": {}
     },
     {
-      "model": "
-      "name": "
+      "model": "command",
+      "name": "command",
       "maxContext": 16000,
       "maxResponse": 4000,
       "quoteMaxToken": 13000,
@@ -62,8 +62,8 @@
       "defaultConfig": {}
     },
     {
-      "model": "gpt-4
-      "name": "gpt-4
+      "model": "gpt-4",
+      "name": "gpt-4",
       "maxContext": 125000,
       "maxResponse": 4000,
       "quoteMaxToken": 100000,
@@ -80,8 +80,8 @@
       "defaultConfig": {}
     },
     {
-      "model": "gpt-
-      "name": "gpt-
+      "model": "gpt-4o",
+      "name": "gpt-4o",
       "maxContext": 128000,
       "maxResponse": 4000,
       "quoteMaxToken": 100000,
@@ -98,8 +98,8 @@
       "defaultConfig": {}
     },
     {
-      "model": "
-      "name": "
+      "model": "google-gemini",
+      "name": "google-gemini",
       "maxContext": 200000,
       "maxResponse": 4000,
       "quoteMaxToken": 100000,
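
Pieced together from the fragments visible in these hunks, one updated entry in the "llmModels" array would look roughly like the sketch below. Only fields that actually appear in the diff are included; whatever the real config defines between "quoteMaxToken" and "defaultConfig" is not visible here and is omitted, so treat this as an assumption-laden sketch rather than the exact file content.

    {
      "model": "gpt-4o",
      "name": "gpt-4o",
      "maxContext": 128000,
      "maxResponse": 4000,
      "quoteMaxToken": 100000,
      "defaultConfig": {}
    }

The other four entries (gpt-3.5-turbo, command, gpt-4, google-gemini) follow the same shape, with the context and quote limits shown in their respective hunks.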