format: ruff format line-length 145
- app/api/chat_api.py +18 -6
- app/config/db.py +4 -1
- app/config/log.py +4 -1
- app/config/secret.py +4 -1
- app/core/api_response.py +6 -10
- app/model/chat_model.py +9 -8
- app/repository/chat_repository.py +3 -12
- app/schema/chat_schema.py +9 -17
- app/schema/conversation.py +13 -34
- app/security/auth_service.py +4 -1
- app/service/chat_service.py +15 -14
- gradio_chatbot.py +27 -18
- main.py +1 -5
- pyproject.toml +4 -0
- scripts/api_key_genenerator.py +3 -9
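
The pyproject.toml change (+4 -0) is not expanded on this page. As a rough sketch only, a Ruff line-length setting of this kind normally lives under the [tool.ruff] table in pyproject.toml; the keys below are an assumption based on the commit title, not the actual lines added:

    [tool.ruff]
    # Assumed configuration implied by the commit title; the real +4 lines are not shown above.
    line-length = 145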
    	
app/api/chat_api.py  CHANGED

@@ -8,7 +8,10 @@ from app.schema.chat_schema import (
     MessageResponse,
     PlotResponse,
 )
-from app.schema.conversation import …
+from app.schema.conversation import (
+    ConversationResponse,
+    ConversationItemResponse,
+)
 from app.service.chat_service import ChatService
 from app.security.auth_service import AuthService
 from app.core.api_response import api_response

@@ -46,7 +49,8 @@ async def create_chat_completion(
 @router.get("/chat/completions", response_model=List[ChatCompletionResponse])
 @api_response()
 async def list_chat_completions(
-    request: Request, …
+    request: Request,
+    username: str = Depends(auth_service.verify_credentials),
 ):
     """
     Get all chat completions

@@ -66,7 +70,10 @@ async def list_chat_completions(


 # get a chat completion by id
-@router.get(…
+@router.get(
+    "/chat/completions/{completion_id}",
+    response_model=ChatCompletionResponse,
+)
 @api_response()
 async def retrieve_chat_completion(
     completion_id: str,

@@ -85,7 +92,8 @@ async def retrieve_chat_completion(

 # get all messages for a chat completion
 @router.get(
-    "/chat/completions/{completion_id}/messages", …
+    "/chat/completions/{completion_id}/messages",
+    response_model=List[MessageResponse],
 )
 @api_response()
 async def list_messages(

@@ -139,7 +147,8 @@ async def retrieve_plot(
 @router.get("/conversations", response_model=ConversationResponse)
 @api_response()
 async def list_conversations(
-    request: Request, …
+    request: Request,
+    username: str = Depends(auth_service.verify_credentials),
 ):
     """
     Get all conversations

@@ -156,7 +165,10 @@ async def list_conversations(
 # get a conversation by id


-@router.get(…
+@router.get(
+    "/conversations/{completion_id}",
+    response_model=ConversationItemResponse,
+)
 @api_response()
 async def retrieve_conversation(
     completion_id: str,
    	
app/config/db.py  CHANGED

@@ -3,7 +3,10 @@ from pydantic_settings import BaseSettings, SettingsConfigDict

 class DBConfig(BaseSettings):
     model_config = SettingsConfigDict(
-        env_prefix="DB_", …
+        env_prefix="DB_",
+        env_file=".env",
+        env_file_encoding="utf-8",
+        extra="ignore",
     )

     MONGO_USER: str = "root"
    	
app/config/log.py  CHANGED

@@ -16,7 +16,10 @@ class LogConfig(BaseSettings):
         )

     model_config = SettingsConfigDict(
-        env_prefix="LOG_", …
+        env_prefix="LOG_",
+        env_file=".env",
+        env_file_encoding="utf-8",
+        extra="ignore",
     )

     LEVEL: str = "DEBUG"
    	
app/config/secret.py  CHANGED

@@ -5,7 +5,10 @@ class SecretConfig(BaseSettings):
     """Secret configuration to be set in env variables"""

     model_config = SettingsConfigDict(
-        env_prefix="SECRET_", …
+        env_prefix="SECRET_",
+        env_file=".env",
+        env_file_encoding="utf-8",
+        extra="ignore",
     )

     KEY: str = "mock-secret-key"
    	
app/core/api_response.py  CHANGED

@@ -48,13 +48,9 @@ def url_to_filename(url: str, method: str) -> str:
     return result


-def get_mock_response(
-    url_path: str, python_module_name: str, python_method_name: str
-) -> Dict:
+def get_mock_response(url_path: str, python_module_name: str, python_method_name: str) -> Dict:
     """Get mock response from JSON file."""
-    logger.trace(
-        f"BEGIN: url_path: {url_path} python_module_name: {python_module_name} python_method_name: {python_method_name}"
-    )
+    logger.trace(f"BEGIN: url_path: {url_path} python_module_name: {python_module_name} python_method_name: {python_method_name}")
     filename = None
     file_path = None
     try:

@@ -115,12 +111,12 @@ def api_response():
             if USE_MOCK:
                 python_method_name = func.__name__
                 python_module_name = func.__module__
-                logger.warning(
-                    f"Using mock response for {request.url.path} [{request.method}] > {python_module_name}.{python_method_name}"
-                )
+                logger.warning(f"Using mock response for {request.url.path} [{request.method}] > {python_module_name}.{python_method_name}")
                 try:
                     result = get_mock_response(
-                        request.url.path, …
+                        request.url.path,
+                        python_module_name,
+                        python_method_name,
                     )
                     logger.trace(f"Mock response: {result}")
                     return result
    	
app/model/chat_model.py  CHANGED

@@ -72,11 +72,16 @@ class ChatMessage(BaseModel):
     role: MessageRole = Field(
         ...,
         description="The role of the message sender",
-        examples=[…
+        examples=[
+            MessageRole.USER,
+            MessageRole.ASSISTANT,
+            MessageRole.SYSTEM,
+        ],
     )
     content: str = Field(..., description="The content of the message")
     timestamp: datetime = Field(
-        default_factory=datetime.now, …
+        default_factory=datetime.now,
+        description="The timestamp of the message",
     )


@@ -97,9 +102,7 @@ class ChatCompletion(BaseModel):
         description="The model used for the chat completion",
         examples=["gpt-4o-mini", "gpt-4o", "gpt-3.5-turbo"],
     )
-    messages: List[ChatMessage] = Field(
-        ..., description="The messages in the chat completion"
-    )
+    messages: List[ChatMessage] = Field(..., description="The messages in the chat completion")

     # not implemented yet
     # temperature: float = Field(default=0.7,ge=0.0, le=1.0, description="What sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.")

@@ -120,9 +123,7 @@ class ChatCompletion(BaseModel):
         default_factory=datetime.now,
         description="The date and time the chat completion was created",
     )
-    last_updated_by: str = Field(
-        ..., description="The user who last updated the chat completion"
-    )
+    last_updated_by: str = Field(..., description="The user who last updated the chat completion")
     last_updated_date: datetime = Field(
         default_factory=datetime.now,
         description="The date and time the chat completion was last updated",
    	
app/repository/chat_repository.py  CHANGED

@@ -17,9 +17,7 @@ class ChatRepository:

         result = self.db.chat_completion.update_one(query, update, upsert=True)

-        completion_id = (
-            str(result.upserted_id) if result.upserted_id else entity.completion_id
-        )
+        completion_id = str(result.upserted_id) if result.upserted_id else entity.completion_id
         entity.completion_id = completion_id

         # save conversation if new chat completion

@@ -43,12 +41,7 @@ class ChatRepository:
         skip = (page - 1) * limit
         sort = sort if sort else {"created_date": -1}

-        result = (
-            self.db.chat_completion.find(query, project)
-            .skip(skip)
-            .limit(limit)
-            .sort(sort)
-        )
+        result = self.db.chat_completion.find(query, project).skip(skip).limit(limit).sort(sort)
         return [ChatCompletion(**item) for item in result]

     def find_by_id(self, completion_id: str, project: dict = None) -> ChatCompletion:

@@ -56,9 +49,7 @@ class ChatRepository:
         Find a chat completion by a given id.
         Example : completion_id = "123"
         """
-        result = self.db.chat_completion.find_one(
-            {"completion_id": completion_id}, project
-        )
+        result = self.db.chat_completion.find_one({"completion_id": completion_id}, project)
         return ChatCompletion(**result)

     def find_messages(self, completion_id: str) -> List[ChatMessage]:
    	
app/schema/chat_schema.py  CHANGED

@@ -41,9 +41,7 @@ class ChatCompletionRequest(ChatCompletionBase):
     """

     model: str = Field(..., description="The model to use for the chat completion")
-    messages: List[MessageRequest] = Field(
-        ..., description="The messages to use for the chat completion"
-    )
+    messages: List[MessageRequest] = Field(..., description="The messages to use for the chat completion")
     stream: bool = Field(..., description="Whether to stream the chat completion")


@@ -63,12 +61,8 @@ class ChoiceResponse(BaseModel):
         ...,
         description="The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters",
     )
-    index: int = Field(
-        ..., description="The index of the choice in the list of choices."
-    )
-    message: MessageResponse = Field(
-        ..., description="The message to use for the chat completion"
-    )
+    index: int = Field(..., description="The index of the choice in the list of choices.")
+    message: MessageResponse = Field(..., description="The message to use for the chat completion")
     # logprobs: str = None # not implemented yet


@@ -77,12 +71,8 @@ class ChatCompletionResponse(ChatCompletionBase):
     Represents a chat completion response returned by model, based on the provided input.
     """

-    completion_id: str = Field(
-        ..., description="The unique identifier for the chat completion"
-    )
-    choices: List[ChoiceResponse] = Field(
-        ..., description="A list of chat completion choices."
-    )
+    completion_id: str = Field(..., description="The unique identifier for the chat completion")
+    choices: List[ChoiceResponse] = Field(..., description="A list of chat completion choices.")
     created: int = Field(
         ...,
         description="The Unix timestamp (in seconds) of when the chat completion was created.",

@@ -107,7 +97,8 @@ class PlotRequest(BaseModel):
     """

     message_id: str = Field(
-        ..., …
+        ...,
+        description="The unique identifier for the message that the plot is for",
     )


@@ -122,6 +113,7 @@ class PlotResponse(BaseModel):
         description="The unique identifier for the chat completion that the plot is for",
     )
     message_id: str = Field(
-        ..., …
+        ...,
+        description="The unique identifier for the message that the plot is for",
     )
     figure: dict = Field(..., description="The figure data to be visualized")
    	
app/schema/conversation.py  CHANGED

@@ -6,18 +6,12 @@ from typing import List, Optional
 class ConversationItemResponse(BaseModel):
     """Represents an individual conversation record in the platform's chat history."""

-    completion_id: str = Field(
-        description="Unique identifier for the conversation in UUID format."
-    )
-    title: str = Field(
-        description="Title or name of the conversation, describing its content or purpose."
-    )
+    completion_id: str = Field(description="Unique identifier for the conversation in UUID format.")
+    title: str = Field(description="Title or name of the conversation, describing its content or purpose.")
     create_time: datetime = Field(
         description="Timestamp when the conversation was created, in ISO 8601 format (e.g., '2025-05-22T10:54:37.569747Z')."
     )
-    update_time: datetime = Field(
-        description="Timestamp when the conversation was last updated, in ISO 8601 format."
-    )
+    update_time: datetime = Field(description="Timestamp when the conversation was last updated, in ISO 8601 format.")
     mapping: Optional[dict] = Field(
         default=None,
         description="Optional dictionary containing additional conversation metadata or mappings, if applicable.",

@@ -34,9 +28,7 @@ class ConversationItemResponse(BaseModel):
         default=None,
         description="Identifier for the gizmo or tool associated with the conversation, if applicable.",
     )
-    is_archived: bool = Field(
-        description="Indicates whether the conversation is archived."
-    )
+    is_archived: bool = Field(description="Indicates whether the conversation is archived.")
     is_starred: Optional[bool] = Field(
         default=None,
         description="Indicates whether the conversation is marked as starred or favorite, if set.",

@@ -45,9 +37,7 @@ class ConversationItemResponse(BaseModel):
         default=None,
         description="Indicates whether the conversation is excluded from memory or history, if set.",
     )
-    memory_scope: str = Field(
-        description="Scope of the conversation's memory, e.g., 'global_enabled' for global memory access."
-    )
+    memory_scope: str = Field(description="Scope of the conversation's memory, e.g., 'global_enabled' for global memory access.")
     workspace_id: Optional[str] = Field(
         default=None,
         description="Identifier for the workspace the conversation belongs to, if applicable.",

@@ -56,14 +46,11 @@ class ConversationItemResponse(BaseModel):
         default=None,
         description="Status of any asynchronous operations related to the conversation, if applicable.",
     )
-    safe_urls: List[str] = Field(
-        description="List of URLs deemed safe for the conversation context."
-    )
-    blocked_urls: List[str] = Field(
-        description="List of URLs blocked for the conversation context."
-    )
+    safe_urls: List[str] = Field(description="List of URLs deemed safe for the conversation context.")
+    blocked_urls: List[str] = Field(description="List of URLs blocked for the conversation context.")
     conversation_origin: Optional[str] = Field(
-        default=None, …
+        default=None,
+        description="Origin or source of the conversation, if specified.",
     )
     snippet: Optional[str] = Field(
         default=None,

@@ -74,15 +61,7 @@ class ConversationItemResponse(BaseModel):
 class ConversationResponse(BaseModel):
     """Represents the response object containing a list of conversation records and pagination metadata from the platform."""

-    items: List[ConversationItemResponse] = Field(
-        description="List of conversation items representing the user's chat history."
-    )
-    total: int = Field(
-        description="Total number of conversations available in the user's history."
-    )
-    limit: int = Field(
-        description="Maximum number of conversation items returned in this response."
-    )
-    offset: int = Field(
-        description="Starting index of the conversation items in this response, used for pagination."
-    )
+    items: List[ConversationItemResponse] = Field(description="List of conversation items representing the user's chat history.")
+    total: int = Field(description="Total number of conversations available in the user's history.")
+    limit: int = Field(description="Maximum number of conversation items returned in this response.")
+    offset: int = Field(description="Starting index of the conversation items in this response, used for pagination.")
    	
app/security/auth_service.py  CHANGED

@@ -52,7 +52,10 @@ class AuthService:
                 )

             # Debug için JSON verilerini logla
-            json_data = {…
+            json_data = {
+                "username": data["username"],
+                "created_at": data["created_at"],
+            }
             json_str = json.dumps(json_data)
             logger.trace(f"JSON data for signature: {json_str}")
             logger.trace(f"Secret key: {self.secret.KEY}")
    	
app/service/chat_service.py  CHANGED

@@ -17,13 +17,9 @@ class ChatService:
     def __init__(self):
         self.chat_repository = ChatRepository()

-    async def handle_chat_completion(
-        self, request: ChatCompletionRequest
-    ) -> ChatCompletionResponse:
+    async def handle_chat_completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse:
         last_user_message = request.messages[-1].content
-        response_content = (
-            f"TODO implement ai-agent response for this message: {last_user_message}"
-        )
+        response_content = f"TODO implement ai-agent response for this message: {last_user_message}"
         username = "admin"

         entity = ChatCompletion(**request.model_dump())

@@ -38,24 +34,29 @@ class ChatService:
         entity = self.chat_repository.save(entity)

         result = ChatCompletionResponse(**entity.model_dump())
-        messages = [
-            MessageResponse(**{"role": "assistant", "content": response_content})
-        ]  # TODO: implement ai-agent response
+        messages = [MessageResponse(**{"role": "assistant", "content": response_content})]  # TODO: implement ai-agent response
         result.choices = [
             ChoiceResponse(
-                **{…
+                **{
+                    "index": 0,
+                    "message": messages[0],
+                    "finish_reason": "stop",
+                }
             )
         ]
         return result

     async def find(
-        self, …
+        self,
+        query: dict,
+        page: int,
+        limit: int,
+        sort: dict,
+        project: dict = None,
     ) -> List[ChatCompletion]:
         return self.chat_repository.find(query, page, limit, sort, project)

-    async def find_by_id(
-        self, completion_id: str, project: dict = None
-    ) -> ChatCompletion:
+    async def find_by_id(self, completion_id: str, project: dict = None) -> ChatCompletion:
         return self.chat_repository.find_by_id(completion_id, project)

     async def find_messages(self, completion_id: str) -> List[ChatMessage]:
    	
        gradio_chatbot.py
    CHANGED
    
    | @@ -15,11 +15,7 @@ env.read_env() | |
| 15 |  | 
| 16 | 
             
            # Hugging Face Spaces için özel yapılandırma
         | 
| 17 | 
             
            IS_HF_SPACE = os.environ.get("SPACE_ID") is not None
         | 
| 18 | 
            -
            SPACE_URL =  | 
| 19 | 
            -
                "https://lokumai-openai-openapi-template.hf.space"
         | 
| 20 | 
            -
                if IS_HF_SPACE
         | 
| 21 | 
            -
                else "http://localhost:7860"
         | 
| 22 | 
            -
            )
         | 
| 23 |  | 
| 24 | 
             
            # API Configuration
         | 
| 25 | 
             
            BASE_URL = env.str("BASE_URL", SPACE_URL)
         | 
| @@ -43,6 +39,7 @@ def app_auth(username: str, password: str) -> bool: | |
| 43 | 
             
                logger.debug(f"AUTH_USERNAME: {AUTH_USERNAME}")
         | 
| 44 | 
             
                return username == AUTH_USERNAME and password == AUTH_PASSWORD
         | 
| 45 |  | 
|  | |
| 46 | 
             
            # Custom CSS for fonts
         | 
| 47 | 
             
            CUSTOM_CSS = """
         | 
| 48 | 
             
            @font-face {{
         | 
| @@ -183,7 +180,7 @@ class ChatAPI: | |
| 183 | 
             
                            logger.trace("######################## BEGIN API response #########################")
         | 
| 184 | 
             
                            logger.trace(json.dumps(result, indent=4))
         | 
| 185 | 
             
                            logger.trace("######################## END API response #########################")
         | 
| 186 | 
            -
             | 
| 187 | 
             
                            if "choices" in result and len(result["choices"]) > 0:
         | 
| 188 | 
             
                                message = result["choices"][0].get("message", {})
         | 
| 189 | 
             
                                figure = message.get("figure", None)
         | 
| @@ -191,7 +188,9 @@ class ChatAPI: | |
| 191 | 
             
                                content = message.get("content", "Content not found")
         | 
| 192 | 
             
                                logger.trace(f"Last message: {content}")
         | 
| 193 | 
             
                                return MessageResponse(
         | 
| 194 | 
            -
                                    status=MessageStatus.SUCCESS, | 
|  | |
|  | |
| 195 | 
             
                                )
         | 
| 196 | 
             
                            else:
         | 
| 197 | 
             
                                logger.error("Invalid API response")
         | 
| @@ -200,7 +199,7 @@ class ChatAPI: | |
| 200 | 
             
                                    content="",
         | 
| 201 | 
             
                                    error="Invalid API response",
         | 
| 202 | 
             
                                )
         | 
| 203 | 
            -
             | 
| 204 | 
             
                    except httpx.TimeoutException:
         | 
| 205 | 
             
                        logger.error("API request timed out")
         | 
| 206 | 
             
                        return MessageResponse(
         | 
| @@ -211,7 +210,9 @@ class ChatAPI: | |
| 211 | 
             
                    except Exception as e:
         | 
| 212 | 
             
                        logger.error(f"Error: {str(e)}")
         | 
| 213 | 
             
                        return MessageResponse(
         | 
| 214 | 
            -
                            status=MessageStatus.ERROR, | 
|  | |
|  | |
| 215 | 
             
                        )
         | 
| 216 |  | 
| 217 |  | 
| @@ -276,13 +277,11 @@ class ChatInterface: | |
| 276 | 
             
                                last_message = gr.Textbox(label="Last Message", interactive=False)
         | 
| 277 |  | 
| 278 | 
             
                        # Event handlers
         | 
| 279 | 
            -
                        async def user_message(
         | 
| 280 | 
            -
                            message: str, history: List[List[str]]
         | 
| 281 | 
            -
                        ) -> Tuple[List[List[str]], str, str, str, object]:
         | 
| 282 | 
             
                            """Handle user message submission"""
         | 
| 283 | 
             
                            if not message.strip():
         | 
| 284 | 
             
                                return history, "", "Please enter a message.", "", None
         | 
| 285 | 
            -
             | 
| 286 | 
             
                            logger.debug(f"User message: {message}")
         | 
| 287 |  | 
| 288 | 
             
                            history.append([message, ""])
         | 
| @@ -301,14 +300,24 @@ class ChatInterface: | |
| 301 | 
             
                                    except Exception as e:
         | 
| 302 | 
             
                                        logger.error(f"Error creating plotly figure: {e}")
         | 
| 303 | 
             
                                        figure = None
         | 
| 304 | 
            -
                                        history[-1][1] +=  | 
| 305 | 
            -
                                            "\n\n⚠️ Graph data is not valid, cannot be displayed."
         | 
| 306 | 
            -
                                        )
         | 
| 307 | 
             
                                history[-1][1] = content
         | 
| 308 | 
            -
                                return  | 
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
| 309 | 
             
                            else:
         | 
| 310 | 
             
                                history[-1][1] = f"❌ {response.error}"
         | 
| 311 | 
            -
                                return  | 
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
| 312 |  | 
| 313 | 
             
                        def clear_history() -> Tuple[List[List[str]], str, str, str, dict]:
         | 
| 314 | 
             
                            """Clear chat history"""
         | 
 15
 16  # Special configuration for Hugging Face Spaces
 17  IS_HF_SPACE = os.environ.get("SPACE_ID") is not None
 18 + SPACE_URL = "https://lokumai-openai-openapi-template.hf.space" if IS_HF_SPACE else "http://localhost:7860"
 19
 20  # API Configuration
 21  BASE_URL = env.str("BASE_URL", SPACE_URL)

 39      logger.debug(f"AUTH_USERNAME: {AUTH_USERNAME}")
 40      return username == AUTH_USERNAME and password == AUTH_PASSWORD
 41
 42 +
 43  # Custom CSS for fonts
 44  CUSTOM_CSS = """
 45  @font-face {{

180              logger.trace("######################## BEGIN API response #########################")
181              logger.trace(json.dumps(result, indent=4))
182              logger.trace("######################## END API response #########################")
183 +
184              if "choices" in result and len(result["choices"]) > 0:
185                  message = result["choices"][0].get("message", {})
186                  figure = message.get("figure", None)
188                  content = message.get("content", "Content not found")
189                  logger.trace(f"Last message: {content}")
190                  return MessageResponse(
191 +                    status=MessageStatus.SUCCESS,
192 +                    content=content,
193 +                    figure=figure,
194                  )
195              else:
196                  logger.error("Invalid API response")
199                      content="",
200                      error="Invalid API response",
201                  )
202 +
203      except httpx.TimeoutException:
204          logger.error("API request timed out")
205          return MessageResponse(
210      except Exception as e:
211          logger.error(f"Error: {str(e)}")
212          return MessageResponse(
213 +            status=MessageStatus.ERROR,
214 +            content="",
215 +            error=f"Error: {str(e)}",
216          )
217
218

277         last_message = gr.Textbox(label="Last Message", interactive=False)
278
279     # Event handlers
280 +   async def user_message(message: str, history: List[List[str]]) -> Tuple[List[List[str]], str, str, str, object]:
281         """Handle user message submission"""
282         if not message.strip():
283             return history, "", "Please enter a message.", "", None
284 +
285         logger.debug(f"User message: {message}")
286
287         history.append([message, ""])

300                 except Exception as e:
301                     logger.error(f"Error creating plotly figure: {e}")
302                     figure = None
303 +                   history[-1][1] += "\n\n⚠️ Graph data is not valid, cannot be displayed."
304             history[-1][1] = content
305 +           return (
306 +               history,
307 +               "",
308 +               "Message sent successfully.",
309 +               content,
310 +               figure,
311 +           )
312         else:
313             history[-1][1] = f"❌ {response.error}"
314 +           return (
315 +               history,
316 +               "",
317 +               f"Error: {response.error}",
318 +               "",
319 +               None,
320 +           )
321
322     def clear_history() -> Tuple[List[List[str]], str, str, str, dict]:
323         """Clear chat history"""
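Note on the reformatted handler above: user_message still returns a five-item tuple (updated history, cleared input, status text, last message content, figure), so it must be wired to five output components. The sketch below shows one plausible way to bind such a handler and the AUTH_USERNAME/AUTH_PASSWORD check in Gradio; the component names (msg, chatbot, status, plot) and the credentials are assumptions, not taken from gradio_chatbot.py.

    # Minimal sketch, not the project's actual wiring; component names and credentials are assumed.
    import gradio as gr

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        msg = gr.Textbox(label="Message")
        status = gr.Textbox(label="Status", interactive=False)
        last_message = gr.Textbox(label="Last Message", interactive=False)
        plot = gr.Plot(label="Plot")

        async def user_message(message, history):
            # Stand-in for the real handler: it must return exactly five values,
            # one per output component listed in msg.submit() below.
            history = (history or []) + [[message, f"echo: {message}"]]
            return history, "", "Message sent successfully.", message, None

        msg.submit(user_message, inputs=[msg, chatbot], outputs=[chatbot, msg, status, last_message, plot])


    def authenticate(username: str, password: str) -> bool:
        # Mirrors the username/password comparison shown above, with dummy values.
        return username == "admin" and password == "change-me"


    if __name__ == "__main__":
        demo.launch(auth=authenticate)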
    	
        main.py
    CHANGED
    
@@ -21,11 +21,7 @@ STORAGE_TYPE = env.str("STORAGE_TYPE", "mongodb")

 21
 22   # Special configuration for Hugging Face Spaces
 23   IS_HF_SPACE = os.environ.get("SPACE_ID") is not None
 24 - SPACE_URL = (
 25 -     "https://lokumai-openai-openapi-template.hf.space"
 26 -     if IS_HF_SPACE
 27 -     else "http://localhost:7860"
 28 - )
 29
 30
 31   @asynccontextmanager

 21
 22   # Special configuration for Hugging Face Spaces
 23   IS_HF_SPACE = os.environ.get("SPACE_ID") is not None
 24 + SPACE_URL = "https://lokumai-openai-openapi-template.hf.space" if IS_HF_SPACE else "http://localhost:7860"
 25
 26
 27   @asynccontextmanager
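The behaviour of the joined line is unchanged: the Space URL is used whenever a SPACE_ID environment variable is present, localhost otherwise, and (as in gradio_chatbot.py above) a BASE_URL environment variable can still override the computed default via env.str. A small self-contained illustration of that resolution, with dummy values only:

    # Illustrative only; mirrors the SPACE_URL / BASE_URL pattern shown in the diff above.
    import os

    from environs import Env

    env = Env()
    env.read_env()  # load a local .env file if one exists

    IS_HF_SPACE = os.environ.get("SPACE_ID") is not None
    SPACE_URL = "https://lokumai-openai-openapi-template.hf.space" if IS_HF_SPACE else "http://localhost:7860"

    # env.str returns the BASE_URL environment variable when set, otherwise the computed default.
    BASE_URL = env.str("BASE_URL", SPACE_URL)
    print(BASE_URL)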
    	
        pyproject.toml
    CHANGED
    
@@ -18,3 +18,7 @@ dependencies = [

 18     "requests>=2.32.3",
 19     "uvicorn>=0.34.2",
 20 ]

 18     "requests>=2.32.3",
 19     "uvicorn>=0.34.2",
 20 ]
 21 +
 22 + # ruff line length
 23 + [tool.ruff]
 24 + line-length = 145
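Raising line-length to 145 under [tool.ruff] is what lets ruff format keep on one line the statements that the default 88-character limit had wrapped, which accounts for the joins in the hunks above and below. A self-contained illustration of that effect, with dummy values, mirroring the hmac.new join in scripts/api_key_genenerator.py:

    # Effect of line-length = 145 on ruff format; values are dummies.
    import hashlib
    import hmac

    secret_key = "dummy-secret"
    json_str = '{"username": "demo"}'

    # With the default 88-character limit the formatter keeps this call wrapped:
    # signature = hmac.new(
    #     secret_key.encode(), json_str.encode(), hashlib.sha256
    # ).hexdigest()

    # With line-length = 145 the same call fits on a single line:
    signature = hmac.new(secret_key.encode(), json_str.encode(), hashlib.sha256).hexdigest()
    print(signature)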
    	
        scripts/api_key_genenerator.py
    CHANGED
    
@@ -46,9 +46,7 @@ def generate_api_key(username: str, secret_key: str) -> str:

 46     logger.debug(f"Secret key: {secret_key}")
 47
 48     # Add HMAC signature for additional security
 49 -   signature = hmac.new(
 50 -       secret_key.encode(), json_str.encode(), hashlib.sha256
 51 -   ).hexdigest()
 52
 53     logger.debug(f"Generated signature: {signature}")
 54

@@ -69,9 +67,7 @@ def save_api_key(username: str, api_key: str, timestamp: int):

 69         api_key (str): Generated API key
 70         timestamp (int): Creation timestamp
 71     """
 72 -   formatted_timestamp = datetime.fromtimestamp(timestamp).strftime(
 73 -       "%Y-%m-%d %H:%M:%S"
 74 -   )
 75     with open("api_keys.txt", "a") as f:
 76         f.write(f"Username: {username}\n")
 77         f.write(f"API Key: {api_key}\n")

@@ -100,9 +96,7 @@ def main():

100         print('curl -X POST "http://localhost:8000/v1/chat/completions" \\')
101         print(f'     -H "Authorization: Bearer {api_key}" \\')
102         print('     -H "Content-Type: application/json" \\')
103 -       print(
104 -           '     -d \'{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello!"}]}\''
105 -       )
106
107     except argparse.ArgumentError:
108         print("Please provide a username and secret key", file=sys.stderr)

 46     logger.debug(f"Secret key: {secret_key}")
 47
 48     # Add HMAC signature for additional security
 49 +   signature = hmac.new(secret_key.encode(), json_str.encode(), hashlib.sha256).hexdigest()
 50
 51     logger.debug(f"Generated signature: {signature}")
 52

 67         api_key (str): Generated API key
 68         timestamp (int): Creation timestamp
 69     """
 70 +   formatted_timestamp = datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
 71     with open("api_keys.txt", "a") as f:
 72         f.write(f"Username: {username}\n")
 73         f.write(f"API Key: {api_key}\n")

 96         print('curl -X POST "http://localhost:8000/v1/chat/completions" \\')
 97         print(f'     -H "Authorization: Bearer {api_key}" \\')
 98         print('     -H "Content-Type: application/json" \\')
 99 +       print('     -d \'{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello!"}]}\'')
100
101     except argparse.ArgumentError:
102         print("Please provide a username and secret key", file=sys.stderr)