ngwakomadikwe committed · Commit de4e354 · verified · 1 Parent(s): 491ff1d

Create app/services/openai_service.py

Files changed (1): app/services/openai_service.py (+105, -0)
app/services/openai_service.py ADDED
@@ -0,0 +1,105 @@
+ """
+ OpenAI service for handling chat completions
+ """
+ import os
+ import logging
+ from typing import Dict, Any, Optional
+ from openai import OpenAI
+
+ logger = logging.getLogger(__name__)
+
+ class OpenAIService:
+     """Service class for OpenAI API interactions"""
+
+     def __init__(self, api_key: str):
+         """Initialize OpenAI client"""
+         self.client = OpenAI(api_key=api_key)
+         self.default_model = "gpt-3.5-turbo"
+         self.default_temperature = 0.7
+         self.max_tokens = 1000
+
+     def chat_completion(
+         self,
+         message: str,
+         model: Optional[str] = None,
+         temperature: Optional[float] = None,
+         max_tokens: Optional[int] = None,
+         system_message: Optional[str] = None
+     ) -> Dict[str, Any]:
+         """
+         Generate chat completion using OpenAI API
+
+         Args:
+             message: User message
+             model: OpenAI model to use
+             temperature: Response randomness (0-2)
+             max_tokens: Maximum tokens in response
+             system_message: Optional system message
+
+         Returns:
+             Dict containing response and metadata
+
+         Raises:
+             Exception: If API call fails
+         """
+         try:
+             # Prepare messages
+             messages = []
+
+             if system_message:
+                 messages.append({"role": "system", "content": system_message})
+
+             messages.append({"role": "user", "content": message})
+
+             # Use provided parameters or defaults
+             model = model or self.default_model
+             temperature = temperature if temperature is not None else self.default_temperature
+             max_tokens = max_tokens or self.max_tokens
+
+             logger.info(f"Making OpenAI API call with model: {model}")
+
+             # Make API call with new OpenAI client
+             response = self.client.chat.completions.create(
+                 model=model,
+                 messages=messages,
+                 temperature=temperature,
+                 max_tokens=max_tokens
+             )
+
+             # Extract response data
+             reply = response.choices[0].message.content
+
+             result = {
+                 "reply": reply,
+                 "model": model,
+                 "usage": {
+                     "prompt_tokens": response.usage.prompt_tokens,
+                     "completion_tokens": response.usage.completion_tokens,
+                     "total_tokens": response.usage.total_tokens
+                 }
+             }
+
+             logger.info(f"OpenAI API call successful. Tokens used: {result['usage']['total_tokens']}")
+             return result
+
+         except Exception as e:
+             logger.error(f"OpenAI API call failed: {str(e)}")
+             raise Exception(f"Failed to generate response: {str(e)}")
+
+     def validate_message(self, message: str) -> tuple[bool, str]:
+         """
+         Validate user message
+
+         Args:
+             message: User input message
+
+         Returns:
+             Tuple of (is_valid, error_message)
+         """
+         if not message or not message.strip():
+             return False, "Message cannot be empty"
+
+         if len(message) > 4000:  # Reasonable limit
+             return False, "Message too long (max 4000 characters)"
+
+         return True, ""