from datetime import datetime
import json
import uuid
import asyncio
import random
from typing import Any, Dict, Optional

import httpx
from fastapi import HTTPException
from api import validate  # Import validate to use getHid
from api.config import (
    MODEL_MAPPING,
    get_headers_api_chat,
    get_headers_chat,
    BASE_URL,
    AGENT_MODE,
    TRENDING_AGENT_MODE,
    MODEL_PREFIXES,
)
from api.models import ChatRequest
from api.logger import setup_logger

logger = setup_logger(__name__)

# Helper function to create chat completion data
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }

# Function to convert message to dictionary format, ensuring base64 data and optional model prefix
def message_to_dict(message, model_prefix: Optional[str] = None):
    content = message.content if isinstance(message.content, str) else message.content[0]["text"]
    if model_prefix:
        content = f"{model_prefix} {content}"
    if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
        # Ensure base64 images are always included for all models
        return {
            "role": message.role,
            "content": content,
            "data": {
                "imageBase64": message.content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}

# Process streaming response with headers from config.py
async def process_streaming_response(request: ChatRequest):
    logger.info(f"Processing streaming response for Model: {request.model}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    headers_api_chat = get_headers_api_chat(BASE_URL)
    validated_token = validate.getHid()  # Get the validated token from validate.py
    logger.info(f"Retrieved validated token: {validated_token}")

    if request.model == 'o1-preview':
        delay_seconds = random.randint(1, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": validated_token,
        "visitFromDelta": False,
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # Initialize timestamp so the final "stop" chunk is valid even if the stream yields no lines
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        # Directly yield each line without additional processing
                        yield f"data: {json.dumps(create_chat_completion_data(line, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))

# Process non-streaming response with headers from config.py
async def process_non_streaming_response(request: ChatRequest):
    logger.info(f"Processing non-streaming response for Model: {request.model}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    headers_api_chat = get_headers_api_chat(BASE_URL)
    headers_chat = get_headers_chat(BASE_URL, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
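    # headers_chat is built for the chat-page request flow; the API call below is sent with headers_api_chat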
    validated_token = validate.getHid()  # Get the validated token from validate.py

    if request.model == 'o1-preview':
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": validated_token,
        "visitFromDelta": False,
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }
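
# Usage sketch (illustrative only): one way these helpers might be mounted as an
# OpenAI-compatible endpoint. The route path and the `stream` flag on ChatRequest
# are assumptions; the project's real routing lives elsewhere in the api package.
#
# from fastapi import APIRouter
# from fastapi.responses import StreamingResponse
#
# router = APIRouter()
#
# @router.post("/v1/chat/completions")
# async def chat_completions(request: ChatRequest):
#     if getattr(request, "stream", False):
#         return StreamingResponse(
#             process_streaming_response(request),
#             media_type="text/event-stream",
#         )
#     return await process_non_streaming_response(request)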