Spaces:
Sleeping
Sleeping
dotaevo-linux-do commited on
Commit ·
9459a6f
1
Parent(s): 4551ac9
new file: .gitignore
Browse filesnew file: Dockerfile
modified: README.md
new file: package.json
new file: prompts/fix_json.txt
new file: prompts/function_call.txt
new file: server.js
- .gitignore +4 -0
- Dockerfile +32 -0
- README.md +1 -0
- package.json +14 -0
- prompts/fix_json.txt +4 -0
- prompts/function_call.txt +36 -0
- server.js +304 -0
.gitignore
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
node_modules
|
| 2 |
+
errors
|
| 3 |
+
.env
|
| 4 |
+
package-lock.json
|
Dockerfile
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use a lightweight Node.js base image
FROM node:22-alpine

# Create app directory
WORKDIR /app

# Install dependencies first (better layer caching)
COPY package.json package-lock.json* yarn.lock* pnpm-lock.yaml* ./
# `--only=production` / `--production` are deprecated on the npm that ships
# with Node 22; `--omit=dev` is the supported spelling.
RUN npm ci --omit=dev || npm install --omit=dev

# Copy source
COPY . .

# Copy prompts
COPY prompts ./prompts

# Create errors directory (server writes debug dumps there)
RUN mkdir -p errors

# Set environment defaults
ENV PORT=3000
# ENV DEBUG=false
# ENV OPENAI_BASE_URLS=https://api.openai.com,...
# ENV OPENAI_KEYS=sk-xxx,...
# ENV MODELS=gpt5,...
# ENV DEFAULT_MODEL=gpt5

# Expose server port
EXPOSE ${PORT:-3000}

# Start the server
CMD ["node", "server.js"]
|
README.md
CHANGED
|
@@ -5,6 +5,7 @@ colorFrom: indigo
|
|
| 5 |
colorTo: red
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
|
|
|
| 8 |
---
|
| 9 |
|
| 10 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 5 |
colorTo: red
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
| 8 |
+
app-port: 3000
|
| 9 |
---
|
| 10 |
|
| 11 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
package.json
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "chat2code",
|
| 3 |
+
"version": "1.0.0",
|
| 4 |
+
"type": "module",
|
| 5 |
+
"main": "server.js",
|
| 6 |
+
"scripts": {
|
| 7 |
+
"start": "DEBUG=true OPENAI_BASE_URLS=http://gptload.maomaotou.hidns.co:3001/proxy/dhcoder OPENAI_KEYS=sk-DsfX5Q3gNVhkKj0I_b2xtzMnMHmnoWjMDuDvI2mIMBKrlVPN MODELS=gpt-4.1-dhcoder DEFAULT_MODEL=gpt-4.1-dhcoder node server.js",
|
| 8 |
+
"dev": "NODE_ENV=development node server.js"
|
| 9 |
+
},
|
| 10 |
+
"dependencies": {
|
| 11 |
+
"@anthropic-ai/tokenizer": "^0.0.4",
|
| 12 |
+
"express": "^4.19.2"
|
| 13 |
+
}
|
| 14 |
+
}
|
prompts/fix_json.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
The following is the content of a JSON text, but due to an error during transmission it can no longer be successfully parsed into a JSON object. Please try to repair it, and wrap the repaired content in '<Json></Json>' tags:
|
| 2 |
+
```
|
| 3 |
+
<$JSON$>
|
| 4 |
+
```
|
prompts/function_call.txt
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
You will act as a code assistant, and the following text is your system prompt:
|
| 2 |
+
```system_prompt
|
| 3 |
+
<$SystemPrompt$>
|
| 4 |
+
```
|
| 5 |
+
|
| 6 |
+
To help you solve problems better, you can use the following tools:
|
| 7 |
+
```json
|
| 8 |
+
<$Tools$>
|
| 9 |
+
```
|
| 10 |
+
Now, think about how the assistant should answer according to the following dialogue in json, and give your answer in json form as well:
|
| 11 |
+
```
|
| 12 |
+
<$Messages$>
|
| 13 |
+
```
|
| 14 |
+
Your answer should be formatted like this (don't forget the '<AnswerInJson>' tag):
|
| 15 |
+
```
|
| 16 |
+
<AnswerInJson>
|
| 17 |
+
{
|
| 18 |
+
"role": "assistant",
|
| 19 |
+
"content": [
|
| 20 |
+
{
|
| 21 |
+
"type": "text",
|
| 22 |
+
"text": "..."
|
| 23 |
+
},
|
| 24 |
+
{
|
| 25 |
+
"type": "tool_use",
|
| 26 |
+
"name": "tool's name",
|
| 27 |
+
"input": {
|
| 28 |
+
"arg1's name": "arg1's value",
|
| 29 |
+
"arg2's name": "arg2's value",
|
| 30 |
+
...
|
| 31 |
+
}
|
| 32 |
+
}
|
| 33 |
+
]
|
| 34 |
+
}
|
| 35 |
+
</AnswerInJson>
|
| 36 |
+
```
|
server.js
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import express from 'express';
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import crypto from 'crypto';
import { countTokens } from '@anthropic-ai/tokenizer';

// ES modules have no __dirname; derive it from the module URL.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const env = process.env;
// When DEBUG=true, failed requests are dumped to errors/<timestamp>.txt.
const DEBUG = env.DEBUG === 'true';
// model name -> { baseUrl, key }, built from MODELS / OPENAI_BASE_URLS / OPENAI_KEYS.
const modelMap = parseModelMap();
const defaultModel = env.DEFAULT_MODEL || Object.keys(modelMap)[0];
// In-memory response cache keyed by md5(prompt)+model.
// NOTE(review): unbounded — consider an LRU if traffic is sustained.
const responseCache = {};

// Prompt templates are loaded once at startup; a missing file aborts startup.
const FUNCTION_CALL_TPL = readPrompt('function_call');
const FIX_JSON_TPL = readPrompt('fix_json');

const app = express();
app.use(express.json({ limit: '10mb' }));

// Small response helpers shared by the route handlers.
const jsonOk = (res, data, status = 200) => res.status(status).json(data);
const jsonError = (res, status, message) => jsonOk(res, { error: { code: status, message } }, status);
const getAuthHeader = (req) => req.get('authorization') || req.get('Authorization');
// len random hex characters (len must be even: len/2 random bytes).
const uuidHex = (len) => crypto.randomBytes(len / 2).toString('hex');
// md5 is used only as a cache key here, not for anything security-sensitive.
const md5 = (text) => crypto.createHash('md5').update(text).digest('hex');
|
| 27 |
+
|
| 28 |
+
/**
 * Builds the model routing table from environment variables.
 * MODELS, OPENAI_BASE_URLS and OPENAI_KEYS are parallel comma-separated
 * lists; entry i of each list describes one upstream model.
 * Entries are trimmed so stray whitespace around commas is tolerated.
 * @returns {Object<string, {baseUrl: string, key: string}>} empty when MODELS is unset
 * @throws {Error} when the three lists have different lengths
 */
function parseModelMap()
{
  const splitList = (value) => (value ? value.split(',').map((s) => s.trim()) : []);
  // MODELS=gpt5,...
  const models = splitList(process.env.MODELS);
  // OPENAI_BASE_URLS=https://api.openai.com,...
  const baseUrls = splitList(process.env.OPENAI_BASE_URLS);
  // OPENAI_KEYS=sk-xxx,...
  const keys = splitList(process.env.OPENAI_KEYS);
  if (models.length !== baseUrls.length || models.length !== keys.length)
    throw new Error('MODELS, OPENAI_BASE_URLS, OPENAI_KEYS must have the same length');
  const map = {};
  for (let i = 0; i < models.length; i++)
  {
    map[models[i]] = { baseUrl: baseUrls[i], key: keys[i] };
  }
  return map;
}
|
| 45 |
+
|
| 46 |
+
/**
 * Loads a prompt template from prompts/<promptName>.txt (UTF-8).
 * @param {string} promptName - base name of the template file
 * @returns {string} the file contents
 * @throws {Error} when the template file does not exist
 */
function readPrompt(promptName)
{
  const promptPath = path.join(__dirname, `prompts/${promptName}.txt`);
  if (!fs.existsSync(promptPath)) throw new Error(`${promptName}.txt not found`);
  return fs.readFileSync(promptPath, 'utf-8');
}
|
| 52 |
+
|
| 53 |
+
/**
 * Reads a fetch Response body as text, falling back to a placeholder
 * when the body cannot be read.
 * @param {Response} res - upstream fetch response
 * @returns {Promise<string>} body text, or 'Unknown error' on failure
 */
async function safeText(res)
{
  try
  {
    const bodyText = await res.text();
    return bodyText;
  }
  catch (_err)
  {
    return 'Unknown error';
  }
}
|
| 64 |
+
|
| 65 |
+
/**
 * Normalizes an Anthropic `system` field to plain text.
 * Accepts a string, an array of {type:'text', text} parts, or nothing.
 * @param {string|Array|undefined} system
 * @returns {string} joined text parts, or '' when there is none
 */
function extractSystemText(system)
{
  if (typeof system === 'string') return system;
  if (Array.isArray(system))
  {
    const texts = [];
    for (const part of system)
    {
      // Only text parts contribute; images and malformed entries are skipped.
      if (part && part.type === 'text') texts.push(String(part.text ?? ''));
    }
    return texts.join('\n');
  }
  return '';
}
|
| 77 |
+
|
| 78 |
+
/**
 * Flattens an Anthropic message `content` value into one plain-text string.
 * Strings pass through unchanged; arrays of parts are rendered line by line:
 * text parts verbatim, tool_use as "[tool_use:<name>] <json input>",
 * tool_result as "[tool_result] <text>". Unknown part types are skipped.
 * @param {string|Array|Object|undefined} content
 * @returns {string}
 */
function serializeContentToText(content)
{
  if (typeof content === 'string') return content;
  if (Array.isArray(content))
  {
    const lines = [];
    for (const part of content)
    {
      if (!part || typeof part !== 'object') continue;
      switch (part.type)
      {
        case 'text':
          lines.push(String(part.text ?? ''));
          break;
        case 'tool_use':
          lines.push(`[tool_use:${String(part.name ?? '')}] ${JSON.stringify(part.input ?? {})}`);
          break;
        case 'tool_result':
        {
          // tool_result content may itself be an array of text parts.
          const innerText = Array.isArray(part.content)
            ? part.content.map((c) => (c?.type === 'text' ? String(c.text ?? '') : '')).join('\n')
            : String(part.text ?? '');
          lines.push(`[tool_result] ${innerText}`);
          break;
        }
        default:
          break;
      }
    }
    return lines.join('\n');
  }
  if (content && typeof content === 'object' && 'text' in content) return String(content.text ?? '');
  return '';
}
|
| 105 |
+
|
| 106 |
+
/**
 * Renders an Anthropic messages array as plain text: one
 * "role:<role>\n<content>" section per message, separated by "\n---\n".
 * Missing roles default to 'user'.
 * @param {Array|undefined} messages
 * @returns {string} '' when messages is not an array
 */
function messagesToText(messages)
{
  if (!Array.isArray(messages)) return '';
  const sections = [];
  for (const message of messages)
  {
    const role = String(message?.role ?? 'user');
    sections.push(`role:${role}\n${serializeContentToText(message?.content)}`);
  }
  return sections.join('\n---\n');
}
|
| 116 |
+
|
| 117 |
+
/**
 * Best-effort local token count for an Anthropic-style request using
 * @anthropic-ai/tokenizer. The system text and serialized messages are
 * joined with "\n---\n" and tokenized as a single string.
 * @param {Array} messages - Anthropic messages array
 * @param {string|Array} system - Anthropic system field
 * @returns {number} token count, or 0 when tokenization fails
 */
function countAnthropicTokensLocal(messages, system)
{
  try
  {
    const combined = [extractSystemText(system), messagesToText(messages)]
      .filter(Boolean)
      .join('\n---\n');
    const tokenCount = countTokens(combined);
    // Number.isFinite also rejects non-number values, matching the old
    // typeof + isFinite pair.
    return Number.isFinite(tokenCount) ? tokenCount : 0;
  }
  catch (_err)
  {
    return 0;
  }
}
|
| 132 |
+
|
| 133 |
+
/**
 * Local timestamp formatted as "YYYY-MM-DD-HH:MM:SS"; used to name
 * error-dump files under errors/.
 * @returns {string}
 */
function getNowTimeText()
{
  const now = new Date();
  const pad = (n) => String(n).padStart(2, '0');
  const datePart = `${now.getFullYear()}-${pad(now.getMonth() + 1)}-${pad(now.getDate())}`;
  const timePart = `${pad(now.getHours())}:${pad(now.getMinutes())}:${pad(now.getSeconds())}`;
  return `${datePart}-${timePart}`;
}
|
| 144 |
+
|
| 145 |
+
/**
 * Asks the upstream default model to repair a malformed JSON string.
 * The model is prompted (fix_json template) to wrap the repaired JSON in
 * <Json></Json> tags.
 * @param {string} auth - Authorization header value to forward upstream
 * @param {string} jsonText - the broken JSON text to repair
 * @returns {Promise<Object|null>} the parsed object, or null when repair fails
 */
async function tryToFixJson(auth, jsonText)
{
  const jsonResp = await fetch(`${modelMap[defaultModel].baseUrl}/v1/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: auth,
    },
    body: JSON.stringify({
      model: defaultModel,
      messages: [{ role: 'user', content: FIX_JSON_TPL.replace('<$JSON$>', jsonText) }],
      stream: false,
    }),
  });
  if (!jsonResp.ok) return null;
  const openai = await jsonResp.json();
  let content = String(openai?.choices?.[0]?.message?.content ?? '');
  // Drop any chain-of-thought block, then unescape tags the model may have
  // backslash-escaped. (The previous code stripped the real <Json>...</Json>
  // span here before matching it, so the match below could never succeed.)
  content = content
    .replace(/<think>[\s\S]*?<\/think>/g, '')
    .replace(/\\<Json\\>/g, '<Json>')
    .replace(/\\<\/Json\\>/g, '</Json>');
  const match = content.match(/<Json>([\s\S]*?)<\/Json>/);
  if (!match) return null;
  try
  {
    console.log('fix json');
    return JSON.parse(match[1].trim());
  }
  catch (e)
  {
    console.log('but failed');
    return null;
  }
}
|
| 176 |
+
|
| 177 |
+
/**
 * Anthropic-compatible /v1/messages endpoint.
 * Translates the Anthropic request into one OpenAI chat completion using
 * the function_call prompt template, parses the model's <AnswerInJson>
 * reply back into Anthropic message shape, and caches responses by
 * md5(prompt)+model.
 */
app.post('/v1/messages', async (req, res) =>
{
  const auth = getAuthHeader(req);
  if (!auth) return jsonError(res, 401, 'missing authorization header');
  // Guard against a missing JSON body: req.body.model used to be
  // dereferenced unconditionally, outside the try, and could crash.
  const body = req.body || {};
  // Fall back to the default model when the requested one is unknown.
  if (!modelMap[body.model]) body.model = defaultModel;
  const nowTimeText = getNowTimeText();
  let logText = `[Request]\nUrl: ${req.url}\nMethod: ${req.method}\nBody: ${JSON.stringify(body)}\n\n`;
  const systemText = extractSystemText(body?.system);
  const toolsStr = JSON.stringify(body?.tools ?? []);
  const messagesStr = JSON.stringify(body?.messages ?? []);
  const fcPrompt = FUNCTION_CALL_TPL.replaceAll('<$SystemPrompt$>', systemText)
    .replaceAll('<$Tools$>', toolsStr)
    .replaceAll('<$Messages$>', messagesStr);
  const openaiReq = {
    model: body?.model,
    messages: [{ role: 'user', content: fcPrompt }],
    stream: false,
    max_tokens: body?.max_tokens,
    temperature: body?.temperature,
  };
  try
  {
    const answerTag = `${md5(JSON.stringify(openaiReq.messages))}-${openaiReq.model}`;
    if (responseCache[answerTag])
    {
      // Cache hit: reuse the parsed answer with a fresh id and zeroed usage.
      const resp = responseCache[answerTag];
      resp.id = uuidHex(32);
      resp.usage.input_tokens = 0;
      resp.usage.output_tokens = 0;
      resp.usage.cache_read_input_tokens = 0;
      return jsonOk(res, resp);
    }
    const chatResp = await fetch(`${modelMap[body.model].baseUrl}/v1/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: auth,
      },
      body: JSON.stringify(openaiReq),
    });
    if (!chatResp.ok)
    {
      const message = await safeText(chatResp);
      logText += `[Response]\nStatus: ${chatResp.status}\nBody: ${message}\n\n`;
      if (DEBUG) fs.writeFileSync(`errors/${nowTimeText}.txt`, logText, 'utf-8');
      return jsonError(res, chatResp.status, message);
    }
    const openai = await chatResp.json();
    logText += `[Response]\nStatus: ${chatResp.status}\nBody: ${JSON.stringify(openai)}\n\n`;
    let content = String(openai?.choices?.[0]?.message?.content ?? '');
    // Strip chain-of-thought lazily (the old greedy [\s\S]* could swallow
    // the answer between two <think> blocks), then unescape the answer tags.
    content = content
      .replace(/<think>[\s\S]*?<\/think>/g, '')
      .replace(/\\<AnswerInJson\\>/g, '<AnswerInJson>')
      .replace(/\\<\/AnswerInJson\\>/g, '</AnswerInJson>');
    const match = content.match(/<AnswerInJson>([\s\S]*)<\/AnswerInJson>/);
    if (!match)
    {
      logText += `[ParseFailed]\nParse failed : ${content}\n\n`;
      if (DEBUG) fs.writeFileSync(`errors/${nowTimeText}.txt`, logText, 'utf-8');
      return jsonError(res, 500, `parse failed : ${content}`);
    }
    let raw;
    let answerObj;
    try
    {
      // Normalize smart quotes and escaped underscores everywhere, not just
      // at the first occurrence (the old replaces lacked /g, and the quote
      // replacement was a no-op '"' -> '"'), then trim to the outermost {...}.
      raw = match[1].trim().replace(/[\u201C\u201D]/g, '"').replace(/\\_/g, '_');
      const headIndex = raw.indexOf('{');
      if (headIndex > 0) raw = raw.substring(headIndex);
      const tailIndex = raw.lastIndexOf('}');
      if (tailIndex > 0) raw = raw.substring(0, tailIndex + 1);
      answerObj = JSON.parse(raw);
    }
    catch (e)
    {
      // Last resort: ask the model itself to repair the JSON.
      // (Previously called as tryToFixJson(raw), dropping the required
      // auth argument, so the upstream repair request carried no key.)
      answerObj = await tryToFixJson(auth, raw);
      if (!answerObj)
      {
        logText += `[JsonFailed]\nRaw:\n${raw}\nError:\n${e.message}\n`;
        if (DEBUG) fs.writeFileSync(`errors/${nowTimeText}.txt`, logText, 'utf-8');
        return jsonError(res, 500, `parse failed : ${content}`);
      }
    }
    // Anthropic clients expect tool_use blocks to carry ids.
    if (Array.isArray(answerObj?.content))
    {
      for (const item of answerObj.content)
      {
        if (item && item.type === 'tool_use') item.id = `call_${uuidHex(24)}`;
      }
    }
    // countAnthropicTokensLocal takes (messages, system); the old call
    // passed (model, messages, system) and therefore tokenized the wrong text.
    const inputTokens = countAnthropicTokensLocal(body?.messages, body?.system);
    let outputTokens = 0;
    try
    {
      const n = countTokens(content);
      if (typeof n === 'number' && isFinite(n)) outputTokens = n;
    }
    catch (_)
    {
      outputTokens = 0;
    }
    const response = {
      id: uuidHex(32),
      type: 'message',
      role: 'assistant',
      content: answerObj.content,
      model: body?.model,
      stop_reason:
        Array.isArray(answerObj?.content) && answerObj.content.find((i) => i?.type === 'tool_use')
          ? 'tool_use'
          : 'end_turn',
      usage: {
        input_tokens: inputTokens,
        output_tokens: outputTokens,
        cache_read_input_tokens: 0,
      },
    };
    responseCache[answerTag] = response;
    return jsonOk(res, response);
  }
  catch (e)
  {
    return jsonError(res, 500, String(e?.message || e));
  }
});
|
| 300 |
+
|
| 301 |
+
// Bind the HTTP server; PORT defaults to 3000 (also the Dockerfile default).
const port = Number(env.PORT || 3000);
app.listen(port, () => {
  console.log(`Server listening on http://localhost:${port}`);
});
|