Hammad712 committed
Commit 9929084
1 Parent(s): bcc2979

Added ADs Module

app/ads/descriptions_service.py ADDED
@@ -0,0 +1,146 @@
# app/services/descriptions_service.py
import os
import json
import logging
import time
from typing import List

import google.generativeai as genai

from app.ads.schemas import DescriptionsRequest, Persona

logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())

# Ensure genai configured (harmless if already configured elsewhere)
API_KEY = os.getenv("GEMINI_API_KEY")
if API_KEY:
    try:
        genai.configure(api_key=API_KEY)
        logger.debug("Configured google.generativeai in descriptions service")
    except Exception:
        logger.exception("Failed to configure google.generativeai in descriptions service")


def _extract_json_array(raw: str) -> str:
    start = raw.find('[')
    end = raw.rfind(']')
    if start != -1 and end != -1 and end > start:
        return raw[start:end + 1]
    return raw


def _build_descriptions_prompt(req: DescriptionsRequest) -> str:
    """
    Build prompt asking Gemini to return ONLY a JSON array of strings (ad descriptions).
    """
    try:
        personas_json = json.dumps([p.dict() for p in req.selected_personas], indent=2)
    except Exception:
        personas_json = json.dumps(req.selected_personas, indent=2)

    main_goal_value = req.main_goal.value
    main_goal_desc = getattr(req.main_goal, "description", "")

    prompt = f"""
You are an expert ad copywriter specialized in short, high-converting ad descriptions for digital ads.

Task:
Produce exactly {req.num_descriptions} ad descriptions (each 1-2 short sentences) tailored to the business and the selected persona(s). RETURN ONLY a JSON array of strings (e.g. ["Desc 1", "Desc 2", ...]) and nothing else.

Requirements:
- Each description should be concise (max ~140 characters preferred), benefit-focused, and aligned with the business value and main goal.
- Vary tone across descriptions (e.g., urgent, aspirational, trust-building, practical).
- Include the main value or offer where appropriate (e.g., "MVP development", "AI integration", "fast time-to-market", "trusted SaaS partner").
- If the selected personas have different priorities, generate descriptions that address those priorities.
- Goal: "{main_goal_value}" - {main_goal_desc}

Business inputs:
- Business name: {req.business_name}
- Business category: {req.business_category}
- Business description: {req.business_description}
- Promotion type: {req.promotion_type}
- Offer description: {req.offer_description}
- Value proposition: {req.value}
- Main goal: {main_goal_value} - {main_goal_desc}
- Serving clients info: {req.serving_clients_info}
- Serving clients location: {req.serving_clients_location}

Selected persona(s) (use these to shape descriptions):
{personas_json}

Now generate exactly {req.num_descriptions} unique ad descriptions as a JSON array of strings. No explanation, no extra text.
"""
    logger.debug("Built descriptions prompt (len=%d) for business '%s'", len(prompt), req.business_name)
    return prompt.strip()


def generate_descriptions(req: DescriptionsRequest) -> List[str]:
    prompt = _build_descriptions_prompt(req)

    model_name = "gemini-2.5-pro"
    logger.info("Generating %d descriptions for business '%s' using model %s",
                req.num_descriptions, req.business_name, model_name)

    try:
        model = genai.GenerativeModel(model_name)
    except Exception as e:
        logger.exception("Failed to init Gemini model for descriptions: %s", e)
        raise RuntimeError(f"Gemini model init failed: {e}")

    try:
        start = time.perf_counter()
        response = model.generate_content(prompt)
        duration = time.perf_counter() - start
        logger.info("Gemini generate_content (descriptions) completed in %.2fs", duration)
    except Exception as e:
        logger.exception("Gemini generate_content failed for descriptions")
        raise RuntimeError(f"Gemini request failed: {e}")

    # extract raw text
    raw = None
    try:
        if response and hasattr(response, "text") and response.text:
            raw = response.text
            logger.debug("Received response.text (len=%d) for descriptions", len(raw))
        elif response and getattr(response, "candidates", None):
            first = response.candidates[0]
            if getattr(first, "finish_reason", "").upper() == "SAFETY":
                msg = "Gemini descriptions generation blocked by safety filter"
                logger.error(msg)
                raise RuntimeError(msg)
            raw = getattr(first, "content", None) or getattr(first, "text", None) or str(response)
            logger.debug("Received candidate response for descriptions (len=%d)", len(raw) if raw else 0)
        else:
            raw = str(response)
            logger.debug("Converted descriptions response to string (len=%d)", len(raw))
    except Exception as e:
        logger.exception("Failed to extract raw text from Gemini descriptions response")
        raise RuntimeError(f"Failed to extract Gemini response text: {e}")

    if not raw:
        logger.error("Empty response from Gemini when generating descriptions")
        raise RuntimeError("Empty response from Gemini")

    snippet = _extract_json_array(raw)
    try:
        parsed = json.loads(snippet)
    except json.JSONDecodeError:
        logger.exception("Failed to parse JSON from descriptions response. Raw response: %s", raw)
        raise RuntimeError(f"Failed to parse Gemini response as JSON array of strings.\nRaw: {raw}")

    if not isinstance(parsed, list) or not all(isinstance(i, str) for i in parsed):
        logger.error("Parsed descriptions JSON is not a list of strings. Parsed type: %s", type(parsed))
        raise RuntimeError("Gemini did not return a JSON array of strings as expected.")

    descriptions = parsed
    if len(descriptions) < req.num_descriptions:
        logger.warning("Gemini returned %d descriptions; expected %d. Returning what we have.",
                       len(descriptions), req.num_descriptions)
    elif len(descriptions) > req.num_descriptions:
        descriptions = descriptions[: req.num_descriptions]
        logger.debug("Trimmed descriptions to requested num_descriptions=%d", req.num_descriptions)

    descriptions = [d.strip() for d in descriptions]
    logger.info("Generated %d descriptions for business '%s'", len(descriptions), req.business_name)
    return descriptions
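For reference, a minimal standalone sketch of what the bracket-slicing helper `_extract_json_array` does when the model wraps its JSON in commentary (the reply text below is made up for illustration):

# Standalone sketch, not part of the module; the reply string is invented.
import json

def _extract_json_array(raw: str) -> str:
    start = raw.find('[')
    end = raw.rfind(']')
    if start != -1 and end != -1 and end > start:
        return raw[start:end + 1]
    return raw

raw_reply = 'Sure! Here are your descriptions:\n["Launch faster with expert MVP development.", "AI integration without the hiring headache."]\nHope this helps.'
snippet = _extract_json_array(raw_reply)
print(json.loads(snippet))  # a clean list[str], even though the reply had extra prose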
app/ads/headings_service.py ADDED
@@ -0,0 +1,157 @@
# app/services/headings_service.py
import os
import json
import logging
import time
from typing import List

import google.generativeai as genai

from app.ads.schemas import HeadingsRequest, Persona

logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())

# Ensure Gemini SDK is configured (harmless if already configured elsewhere)
API_KEY = os.getenv("GEMINI_API_KEY")
if not API_KEY:
    logger.error("GEMINI_API_KEY not set; headings generation will fail if called without configuration.")
else:
    try:
        genai.configure(api_key=API_KEY)
        logger.debug("Configured google.generativeai in headings service")
    except Exception as e:
        logger.exception("Failed to configure google.generativeai in headings service: %s", e)


def _extract_json_array(raw: str) -> str:
    """
    Return the first JSON array substring from raw (from '[' to ']') to be robust
    against extra commentary in model output.
    """
    start = raw.find('[')
    end = raw.rfind(']')
    if start != -1 and end != -1 and end > start:
        return raw[start:end + 1]
    return raw


def _build_headings_prompt(req: HeadingsRequest) -> str:
    """
    Build a clear prompt asking Gemini to return ONLY a JSON array of strings (headings).
    """
    # Convert selected_personas to compact JSON for context
    personas_json = json.dumps([p.dict() for p in req.selected_personas], indent=2)

    main_goal_value = req.main_goal.value
    main_goal_desc = getattr(req.main_goal, "description", "")

    prompt = f"""
You are an expert copywriter specialized in short, high-converting ad headlines for digital ads.

Task:
Produce exactly {req.num_headings} short, punchy ad headings (strings) for a paid ad campaign that target the selected personas and align with the business goal. RETURN ONLY a JSON array of strings (e.g. ["Heading 1", "Heading 2", ...]) and nothing else.

Requirements:
- Each heading should be concise (max ~60 characters), benefit-focused, and tailored to the provided personas and business goal.
- Use active language and mention the key value when appropriate (e.g., "scale", "AI", "launch", "MVP", "secure funding", "reduce time-to-market").
- Vary the tone across the {req.num_headings} headings (e.g., urgent, aspirational, trust-building, and practical).
- Avoid punctuation-only headlines, and do not include numbering in text.
- If the main goal is "{main_goal_value}", use that intention as a primary framing. Goal description: {main_goal_desc}

Business Inputs:
- Business name: {req.business_name}
- Business category: {req.business_category}
- Business description: {req.business_description}
- Promotion type: {req.promotion_type}
- Offer description: {req.offer_description}
- Value proposition: {req.value}
- Main goal: {main_goal_value} - {main_goal_desc}
- Serving clients info: {req.serving_clients_info}
- Serving clients location: {req.serving_clients_location}

Selected persona(s) (use these to shape headings):
{personas_json}

Now generate exactly {req.num_headings} unique ad headings as a JSON array of strings. No explanation, no extra text.
"""
    logger.debug("Built headings prompt (len=%d) for business '%s'", len(prompt), req.business_name)
    return prompt.strip()


def generate_headings(req: HeadingsRequest) -> List[str]:
    """
    Call Gemini to generate ad headings and return list[str].
    """
    prompt = _build_headings_prompt(req)

    model_name = "gemini-2.5-pro"
    logger.info("Generating %d headings for business '%s' using model %s",
                req.num_headings, req.business_name, model_name)

    try:
        model = genai.GenerativeModel(model_name)
    except Exception as e:
        logger.exception("Failed to create GenerativeModel: %s", e)
        raise RuntimeError(f"Gemini model init failed: {e}")

    try:
        start = time.perf_counter()
        response = model.generate_content(prompt)
        duration = time.perf_counter() - start
        logger.info("Gemini generate_content (headings) completed in %.2fs", duration)
    except Exception as e:
        logger.exception("Gemini generate_content failed for headings")
        raise RuntimeError(f"Gemini request failed: {e}")

    # Extract raw text from response
    raw = None
    try:
        if response and hasattr(response, "text") and response.text:
            raw = response.text
            logger.debug("Received response.text (len=%d) for headings", len(raw))
        elif response and getattr(response, "candidates", None):
            first = response.candidates[0]
            if getattr(first, "finish_reason", "").upper() == "SAFETY":
                msg = "Gemini headings generation blocked by safety filter"
                logger.error(msg)
                raise RuntimeError(msg)
            raw = getattr(first, "content", None) or getattr(first, "text", None) or str(response)
            logger.debug("Received candidate response for headings (len=%d)", len(raw) if raw else 0)
        else:
            raw = str(response)
            logger.debug("Converted headings response to string (len=%d)", len(raw))
    except Exception as e:
        logger.exception("Failed to extract raw text from Gemini headings response")
        raise RuntimeError(f"Failed to extract Gemini response text: {e}")

    if not raw:
        logger.error("Empty response from Gemini when generating headings")
        raise RuntimeError("Empty response from Gemini")

    # Robust JSON extraction & parsing
    snippet = _extract_json_array(raw)
    try:
        parsed = json.loads(snippet)
    except json.JSONDecodeError:
        logger.exception("Failed to parse JSON from headings response. Raw response: %s", raw)
        raise RuntimeError(f"Failed to parse Gemini response as JSON array of strings.\nRaw: {raw}")

    if not isinstance(parsed, list) or not all(isinstance(i, str) for i in parsed):
        logger.error("Parsed headings JSON is not a list of strings. Parsed type: %s", type(parsed))
        raise RuntimeError("Gemini did not return a JSON array of strings as expected.")

    # Normalize: ensure exactly num_headings items
    headings = parsed
    if len(headings) < req.num_headings:
        logger.warning("Gemini returned %d headings; expected %d. Returning what we have.",
                       len(headings), req.num_headings)
    elif len(headings) > req.num_headings:
        headings = headings[: req.num_headings]
        logger.debug("Trimmed headings to requested num_headings=%d", req.num_headings)

    # Final basic cleanup (strip whitespace)
    headings = [h.strip() for h in headings]

    logger.info("Generated %d headings for business '%s'", len(headings), req.business_name)
    return headings
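A hedged sketch of calling the service directly, outside the API. It assumes GEMINI_API_KEY is exported and the app package is importable; the business description, offer, and persona details are invented for illustration (only "GrowthAspired" and "Software House" come from the schema examples):

# Illustrative values only; requires GEMINI_API_KEY in the environment.
from app.ads.schemas import HeadingsRequest, Persona, GoalEnum
from app.ads.headings_service import generate_headings

req = HeadingsRequest(
    business_name="GrowthAspired",
    business_category="Software House",
    business_description="Custom software and MVP development studio.",
    promotion_type="Lead generation campaign",
    offer_description="Free discovery call for new clients.",
    value="Fast time-to-market with senior engineers.",
    main_goal=GoalEnum.GENERATE_LEADS,
    serving_clients_info="B2B startups and SMEs",
    serving_clients_location="United Kingdom",
    selected_personas=[
        Persona(
            name="Startup Founders",
            headline="Entrepreneurs launching new businesses",
            age_range="25-40",
            location="United Kingdom",
            interests=["Entrepreneurship", "Startups"],
            description="Need an MVP partner they can trust to ship quickly.",
        )
    ],
    num_headings=4,
)

headings = generate_headings(req)  # list[str], trimmed to num_headings
print(headings)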
app/ads/image_service.py ADDED
@@ -0,0 +1,70 @@
import io
import logging
from typing import Tuple
from google import genai
from google.genai import types

from app.ads.schemas import ImageRequest, Persona

logger = logging.getLogger(__name__)

def _persona_to_text(persona: Persona) -> str:
    """Return a one-line description for a Persona model."""
    interests = ", ".join(persona.interests) if persona.interests else "no listed interests"
    return f"{persona.name} (age: {persona.age_range}, location: {persona.location}, interests: {interests})"

def generate_image(req: ImageRequest) -> Tuple[bytes, str]:
    """
    Generate an ad image using the Gemini 2.0 Flash experimental image model.
    Raises a RuntimeError if Gemini returns only text and no image data.
    """
    # Safely convert Persona objects into strings
    if req.selected_personas and len(req.selected_personas) > 0:
        personas_text = "; ".join(_persona_to_text(p) for p in req.selected_personas)
    else:
        personas_text = "general target audience"

    prompt = (
        f"Create a professional advertisement image for the business '{req.business_name}'. "
        f"Category: {req.business_category}. "
        f"Description: {req.business_description}. "
        f"Promotion type: {req.promotion_type}. "
        f"Offer details: {req.offer_description}. "
        f"Value proposition: {req.value}. "
        f"Main goal: {req.main_goal.value}. "
        f"Serving clients info: {req.serving_clients_info}. "
        f"Location: {req.serving_clients_location}. "
        f"Target persona(s): {personas_text}. "
        "The image should be modern, vibrant, and suitable for a social media advertisement."
    )

    logger.info("Requesting Gemini image generation for business '%s'", req.business_name)

    try:
        client = genai.Client()
        response = client.models.generate_content(
            model="gemini-2.0-flash-exp",
            contents=prompt,
            config=types.GenerateContentConfig(
                response_modalities=["text", "Image"]
            ),
        )

        # Parse Gemini response for images
        for part in response.candidates[0].content.parts:
            if getattr(part, "inline_data", None) and part.inline_data.data:
                image_bytes = part.inline_data.data
                return image_bytes, "image/png"

        # Fallback: Gemini returned text (prompt or explanation)
        for part in response.candidates[0].content.parts:
            if getattr(part, "text", None):
                logger.warning("Gemini returned text instead of image: %s", part.text[:300])
                raise RuntimeError("Gemini returned text only, no image data found.")

        raise RuntimeError("No image data found in Gemini response.")

    except Exception as e:
        logger.exception("Gemini image generation failed: %s", e)
        raise RuntimeError(f"Gemini image generation failed: {e}")
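A small consumer-side sketch for this function: it assumes an `ImageRequest` has already been built (as in the schemas file below) and simply writes the returned bytes to an arbitrary path.

# Sketch only; the output filename is arbitrary and the caller supplies the ImageRequest.
from app.ads.image_service import generate_image
from app.ads.schemas import ImageRequest

def save_ad_image(req: ImageRequest, path: str = "ad_image.png") -> str:
    """Generate the ad image and write the raw bytes to disk."""
    image_bytes, mime = generate_image(req)  # raises RuntimeError if no image data came back
    with open(path, "wb") as fh:
        fh.write(image_bytes)
    return f"saved {len(image_bytes)} bytes ({mime}) to {path}"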
app/ads/persona_routes.py ADDED
@@ -0,0 +1,63 @@
# app/routes/persona_routes.py
from fastapi import APIRouter, HTTPException
from typing import List
from fastapi.responses import StreamingResponse

from app.ads.schemas import BusinessInput, Persona, RegenerateRequest, HeadingsRequest, DescriptionsRequest, ImageRequest
import io
import app.ads.image_service as image_service
import app.ads.headings_service as headings_service
import app.ads.descriptions_service as descriptions_service
from app.ads.persona_service import generate_personas, regenerate_personas

router = APIRouter(prefix="/Ads", tags=["Ads"])

@router.post("/create", response_model=List[Persona])
def create_personas(payload: BusinessInput):
    try:
        personas = generate_personas(payload)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return personas

@router.post("/regenerate", response_model=List[Persona])
def regenerate_personas_endpoint(payload: RegenerateRequest):
    """
    Regenerate personas given all business inputs AND a list of previous personas.
    The endpoint returns a new list of personas (same schema).
    """
    try:
        personas = regenerate_personas(payload, payload.previous_personas)
    except Exception as e:
        # return the error message to the client for quick debugging
        raise HTTPException(status_code=500, detail=str(e))
    return personas

@router.post("/Headings", response_model=List[str])
def create_headings(payload: HeadingsRequest):
    """
    Generate ad headings (returns list[str]).
    """
    try:
        return headings_service.generate_headings(payload)
    except Exception as e:
        # Log error server-side; return HTTP 500 with message for debugging
        raise HTTPException(status_code=500, detail=str(e))

@router.post("/Descriptions", response_model=List[str])
def create_descriptions(payload: DescriptionsRequest):
    try:
        return descriptions_service.generate_descriptions(payload)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.post("/Image", response_class=StreamingResponse)
def create_image(payload: ImageRequest):
    """
    Generate a marketing image for an ad using Gemini.
    """
    try:
        img_bytes, mime = image_service.generate_image(payload)
        return StreamingResponse(io.BytesIO(img_bytes), media_type=mime)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
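A hedged end-to-end sketch of hitting the new router from a client. It assumes the FastAPI app is running locally on http://localhost:8000 (host and port are assumptions) and that `requests` is installed; all payload values except the schema examples "GrowthAspired" and "Software House" are invented:

# Hypothetical client call against the /Ads prefix defined above.
import requests

payload = {
    "business_name": "GrowthAspired",
    "business_category": "Software House",
    "business_description": "Custom software and MVP development studio.",
    "promotion_type": "Lead generation campaign",
    "offer_description": "Free discovery call for new clients.",
    "value": "Fast time-to-market with senior engineers.",
    "main_goal": "Generate Leads",          # plain string value of GoalEnum
    "serving_clients_info": "B2B startups and SMEs",
    "serving_clients_location": "United Kingdom",
    "num_personas": 3,
}

resp = requests.post("http://localhost:8000/Ads/create", json=payload, timeout=120)
resp.raise_for_status()
personas = resp.json()  # list of persona objects matching the Persona schema
print(personas[0]["name"])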
app/ads/persona_service.py ADDED
@@ -0,0 +1,345 @@
# app/services/persona_service.py
import os
import json
import logging
import time
from typing import List
import google.generativeai as genai
from pydantic import ValidationError

from app.ads.schemas import Persona, BusinessInput

# module logger
logger = logging.getLogger(__name__)
# Avoid configuring global logging here; app-level config should set handlers/levels.
# But ensure we have at least a NullHandler to avoid "No handler" warnings in some apps.
logger.addHandler(logging.NullHandler())


# initialize client (reads GEMINI_API_KEY from environment)
API_KEY = os.getenv("GEMINI_API_KEY")
if not API_KEY:
    logger.error("GEMINI_API_KEY environment variable not set")
    raise RuntimeError("Please set the GEMINI_API_KEY environment variable")

# Configure the genai SDK (reference pattern)
try:
    genai.configure(api_key=API_KEY)
    logger.info("Configured google.generativeai with provided API key")
except Exception as e:
    logger.exception("Failed to configure google.generativeai: %s", e)
    raise


def _build_prompt(b: BusinessInput) -> str:

    examples_json = [
        {
            "name": "Startup Founders",
            "headline": "Entrepreneurs launching new businesses",
            "age_range": "25-40",
            "location": "United Kingdom",
            "interests": [
                "Entrepreneurship",
                "Startups",
                "Business coaching",
                "Tech tools"
            ],
            "description": "Likely to need professional websites to establish credibility; motivated by investor/customer trust and fast go-to-market. Prefer outreach via LinkedIn, Twitter, and startup meetups."
        },
        {
            "name": "Local Shop Owners",
            "headline": "Owners of brick-and-mortar retail shops",
            "age_range": "35-55",
            "location": "London and Midlands",
            "interests": [
                "Small business",
                "Retail management",
                "Local advertising"
            ],
            "description": "They want affordable websites to attract local customers and show store hours/offers. Respond well to local ads, Facebook community groups, and in-store flyers."
        },
        {
            "name": "Freelancers & Consultants",
            "headline": "Independent professionals offering services online",
            "age_range": "22-45",
            "location": "United Kingdom",
            "interests": [
                "Personal branding",
                "Online marketing",
                "Networking",
                "LinkedIn"
            ],
            "description": "Need personal websites to showcase expertise, attract clients and build credibility; motivated by lead generation and portfolio presentation. Prefer LinkedIn, industry communities, and content marketing."
        }
    ]

    # Use both the enum value and the associated description
    main_goal_value = b.main_goal.value
    main_goal_desc = getattr(b.main_goal, "description", "")

    prompt = f'''
You are a senior marketing strategist specialized in creating *ideal-customer / target-audience personas* for businesses. Produce exactly {b.num_personas} distinct IDEAL-CUSTOMER personas tailored to the business described below.

**Output format (required):** Return ONLY a JSON array of objects. Each object must contain these properties in this exact order:
1. name (string)
2. headline (string; 3-6 words)
3. age_range (string; numeric range like "25-40")
4. location (string)
5. interests (array of short strings; 3-6 items)
6. description (string; 1-3 sentences)

**Description field requirements:** The `description` must summarize the persona as an *ideal customer*:
- who they are (role / brief demographic),
- top 1–2 pain points or needs,
- primary buying trigger or motivation,
- preferred channels to reach them (e.g., Instagram, LinkedIn, email, local events),
- why they would choose this business / how the offer solves their need.

**Do NOT include any extra top-level keys, comments, or explanation text. JSON array only.**

Below are three example personas showing the exact style and level of detail I want your output to match. Use them as format examples, but produce personas specific to the business inputs that follow.

EXAMPLE PERSONAS (format example):
{json.dumps(examples_json, indent=2)}

Business inputs:
- Business name: {b.business_name}
- Business category: {b.business_category}
- Business description: {b.business_description}
- Promotion type: {b.promotion_type}
- Offer description: {b.offer_description}
- Value proposition: {b.value}
- Main goal: {main_goal_value} - {main_goal_desc}
- Serving clients info: {b.serving_clients_info}
- Serving clients location: {b.serving_clients_location}

Generate the {b.num_personas} personas now as a JSON array that exactly matches the schema and style shown above.
'''
    built = prompt.strip()
    logger.debug("Built persona prompt (goal=%s): %s", main_goal_value, built[:400] + ("…" if len(built) > 400 else ""))
    return built


def _extract_json_array(raw: str) -> str:
    """
    Find and return the first JSON array substring in raw text (from '[' to ']').
    If not found, return raw as-is (parsing will attempt).
    """
    start = raw.find('[')
    end = raw.rfind(']')
    if start != -1 and end != -1 and end > start:
        snippet = raw[start:end + 1]
        logger.debug("Extracted JSON array snippet from raw response (length=%d)", len(snippet))
        return snippet
    logger.debug("No JSON array brackets found in raw response; returning full raw text for parsing")
    return raw


def generate_personas(b: BusinessInput) -> List[Persona]:
    """
    Generate personas using Gemini. Returns a list of Persona Pydantic models.
    Logs important steps and errors for easier debugging.
    """
    prompt = _build_prompt(b)

    try:
        model_name = "gemini-2.5-pro"
        logger.info("Initializing Gemini model: %s", model_name)
        model = genai.GenerativeModel(model_name)

        logger.info("Sending generation request to Gemini for business '%s'", b.business_name)
        start_ts = time.perf_counter()
        response = model.generate_content(prompt)
        duration = time.perf_counter() - start_ts
        logger.info("Gemini generate_content completed in %.2fs", duration)

    except Exception as e:
        # surface Gemini initialization / network errors with stack trace
        logger.exception("Gemini request failed for business '%s': %s", b.business_name, e)
        raise RuntimeError(f"Gemini request failed: {e}")

    # Inspect response for text or safety block
    raw = None
    try:
        if response and hasattr(response, "text") and response.text:
            raw = response.text
            logger.debug("Received response.text (length=%d)", len(raw))
        elif response and getattr(response, "candidates", None):
            first = response.candidates[0]
            if getattr(first, "finish_reason", "").upper() == "SAFETY":
                msg = "Gemini generation blocked by safety filter"
                logger.error(msg)
                raise RuntimeError(msg)
            raw = getattr(first, "content", None) or getattr(first, "text", None) or str(response)
            logger.debug("Received candidate-based response (length=%d)", len(raw) if raw else 0)
        else:
            raw = str(response)
            logger.debug("Converted response object to string (length=%d)", len(raw))
    except Exception as e:
        logger.exception("Failed to extract raw text from Gemini response for business '%s'", b.business_name)
        raise RuntimeError(f"Failed to extract Gemini response text: {e}")

    if not raw:
        logger.error("Empty response received from Gemini for business '%s'", b.business_name)
        raise RuntimeError("Empty response received from Gemini")

    # Extract JSON array substring (robust for extra commentary)
    json_snippet = _extract_json_array(raw)
    logger.info("Attempting to parse JSON snippet for business '%s' (snippet length=%d)", b.business_name, len(json_snippet))

    # Attempt to parse JSON and validate against Persona schema
    try:
        parsed = json.loads(json_snippet)

        # If model returned a dict wrapper, attempt to find the list inside
        if isinstance(parsed, dict):
            # common wrapper keys to check
            for key in ("items", "personas", "data", "results"):
                if key in parsed and isinstance(parsed[key], list):
                    parsed = parsed[key]
                    logger.debug("Found persona list inside wrapper key '%s'", key)
                    break

        if not isinstance(parsed, list):
            logger.error("Parsed JSON is not a list for business '%s' (type=%s)", b.business_name, type(parsed))
            raise ValueError("Expected top-level JSON array of persona objects")

        personas: List[Persona] = []
        for idx, obj in enumerate(parsed):
            try:
                persona = Persona.parse_obj(obj)
                personas.append(persona)
                logger.debug("Validated persona %d: %s", idx, persona.name)
            except ValidationError as ve:
                # include which item failed for better debugging
                logger.error("Persona validation failed for item %s: %s\nRaw item: %s", idx, ve, obj)
                raise

        logger.info("Successfully generated and validated %d personas for business '%s'", len(personas), b.business_name)
        return personas

    except (json.JSONDecodeError, ValidationError, ValueError) as e:
        # Provide helpful debug output including raw Gemini text
        logger.exception("Failed to parse/validate Gemini JSON output for business '%s'", b.business_name)
        raise RuntimeError(
            f"Failed to parse Gemini response as JSON Personas: {e}\n\nRaw Gemini response:\n{raw}"
        )


def regenerate_personas(b: BusinessInput, previous_personas: List[Persona]) -> List[Persona]:
    """
    Generate a new set of personas given the business input AND a list of
    previously generated personas. The model is instructed to avoid duplicating
    the previous personas and produce distinct/improved target-audience personas.
    """
    # Build base prompt from existing function
    base_prompt = _build_prompt(b)

    # Convert previous_personas to simple dicts (Pydantic models -> plain dicts)
    try:
        prev_list = [p.dict() if hasattr(p, "dict") else p for p in previous_personas]
    except Exception:
        # Defensive: if previous_personas are plain dicts already
        prev_list = previous_personas

    prev_json = json.dumps(prev_list, indent=2)

    # Append clear instructions about previous personas and uniqueness requirement
    extra_instructions = f"""
Previous personas provided (do NOT repeat these exactly):
{prev_json}

Instructions:
- Produce exactly {b.num_personas} personas tailored to the same business inputs above.
- **Do not duplicate** persona names or core audience segments included in the previous list.
- If a previous persona should be refined, produce a refined version but change the name slightly
  and mention in the description what was improved.
- Aim for personas that are distinct, actionable, and aligned with the business's main goal:
  "{getattr(b, 'main_goal', '')}".
- Output MUST be ONLY a JSON array of persona objects matching the schema:
  name, headline, age_range, location, interests, description (in that order).
"""
    prompt = base_prompt + "\n\n" + extra_instructions

    logger.info("Regenerating personas for business '%s' with %d previous personas",
                b.business_name, len(prev_list))

    # call model (same pattern as generate_personas)
    try:
        model_name = "gemini-2.5-pro"
        logger.info("Initializing Gemini model for regeneration: %s", model_name)
        model = genai.GenerativeModel(model_name)

        start_ts = time.perf_counter()
        response = model.generate_content(prompt)
        duration = time.perf_counter() - start_ts
        logger.info("Gemini regenerate_content completed in %.2fs", duration)

    except Exception as e:
        logger.exception("Gemini regenerate request failed for business '%s': %s", b.business_name, e)
        raise RuntimeError(f"Gemini regenerate request failed: {e}")

    # Extract raw text (same robust logic)
    raw = None
    try:
        if response and hasattr(response, "text") and response.text:
            raw = response.text
            logger.debug("Regenerate response.text length=%d", len(raw))
        elif response and getattr(response, "candidates", None):
            first = response.candidates[0]
            if getattr(first, "finish_reason", "").upper() == "SAFETY":
                msg = "Gemini regeneration blocked by safety filter"
                logger.error(msg)
                raise RuntimeError(msg)
            raw = getattr(first, "content", None) or getattr(first, "text", None) or str(response)
            logger.debug("Regenerate candidate response length=%d", len(raw) if raw else 0)
        else:
            raw = str(response)
            logger.debug("Regenerate response converted to string length=%d", len(raw))
    except Exception as e:
        logger.exception("Failed to extract raw text from Gemini regenerate response")
        raise RuntimeError(f"Failed to extract Gemini response text: {e}")

    if not raw:
        logger.error("Empty regenerate response from Gemini for business '%s'", b.business_name)
        raise RuntimeError("Empty response received from Gemini")

    # Extract JSON and validate (reuse helper)
    json_snippet = _extract_json_array(raw)
    logger.info("Attempting to parse regenerated JSON snippet for business '%s' (len=%d)",
                b.business_name, len(json_snippet))

    try:
        parsed = json.loads(json_snippet)

        # If wrapper -> find list
        if isinstance(parsed, dict):
            for key in ("items", "personas", "data", "results"):
                if key in parsed and isinstance(parsed[key], list):
                    parsed = parsed[key]
                    logger.debug("Found regenerated persona list inside wrapper key '%s'", key)
                    break

        if not isinstance(parsed, list):
            logger.error("Parsed regenerated JSON is not a list (type=%s)", type(parsed))
            raise ValueError("Expected top-level JSON array of persona objects (regenerate)")

        personas: List[Persona] = []
        for idx, obj in enumerate(parsed):
            try:
                persona = Persona.parse_obj(obj)
                personas.append(persona)
                logger.debug("Validated regenerated persona %d: %s", idx, persona.name)
            except ValidationError as ve:
                logger.error("Regenerated persona validation failed for index %d: %s\nRaw item: %s",
                             idx, ve, obj)
                raise

        logger.info("Successfully regenerated %d personas for business '%s'", len(personas), b.business_name)
        return personas

    except (json.JSONDecodeError, ValidationError, ValueError) as e:
        logger.exception("Failed to parse/validate regenerated Gemini JSON output for business '%s'", b.business_name)
        raise RuntimeError(
            f"Failed to parse Gemini regenerate response as JSON Personas: {e}\n\nRaw Gemini response:\n{raw}"
        )
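The services above lean on `Persona.parse_obj` to reject malformed items from the model. A small sketch of that validation step (pydantic v1 behaviour; the dicts below are illustrative, modelled on the example personas in the prompt):

# Sketch of the per-item validation the service performs on parsed JSON.
from pydantic import ValidationError
from app.ads.schemas import Persona

good = {
    "name": "Startup Founders",
    "headline": "Entrepreneurs launching new businesses",
    "age_range": "25-40",
    "location": "United Kingdom",
    "interests": ["Entrepreneurship", "Startups"],
    "description": "Need a credible website fast.",
}
print(Persona.parse_obj(good).name)  # "Startup Founders"

bad = {"name": "Missing fields"}     # lacks headline, age_range, location, interests, description
try:
    Persona.parse_obj(bad)
except ValidationError as exc:
    print(f"rejected: {len(exc.errors())} validation errors")  # the service re-raises this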
app/ads/schemas.py ADDED
@@ -0,0 +1,104 @@
# app/ads/schemas.py
from enum import Enum
from pydantic import BaseModel, Field
from typing import List, Optional

class GoalEnum(str, Enum):
    GET_MORE_WEBSITE_VISITORS = (
        "Get more website visitors",
        "Drive quality traffic to your website and increase page views"
    )
    GENERATE_LEADS = (
        "Generate Leads",
        "Collect contact information from potential customers"
    )
    INCREASE_SALES = (
        "Increase Sales",
        "Drive purchases and boost your revenue"
    )
    BRAND_AWARENESS = (
        "Brand Awareness",
        "Increase visibility and recognition of your brand"
    )

    def __new__(cls, value, description):
        obj = str.__new__(cls, value)
        obj._value_ = value
        obj.description = description
        return obj

class Persona(BaseModel):
    name: str
    headline: str
    age_range: str
    location: str
    interests: List[str]
    description: str

class BusinessInput(BaseModel):
    business_name: str = Field(..., example="GrowthAspired")
    business_category: str = Field(..., example="Software House")
    business_description: str
    promotion_type: str
    offer_description: str
    value: str
    main_goal: GoalEnum = Field(..., description="Primary marketing goal (enum)", example=GoalEnum.GENERATE_LEADS.value)
    serving_clients_info: str
    serving_clients_location: str
    num_personas: int = 3


class RegenerateRequest(BusinessInput):
    """
    Request model for regenerating personas:
    - includes the same business inputs as BusinessInput
    - plus previous_personas (list of Persona objects) to inform regeneration
    """
    previous_personas: List[Persona]


class HeadingsRequest(BaseModel):
    business_name: str = Field(..., example="GrowthAspired")
    business_category: str = Field(..., example="Software House")
    business_description: str
    promotion_type: str
    offer_description: str
    value: str
    main_goal: GoalEnum = Field(..., description="Primary marketing goal (enum)")
    serving_clients_info: str
    serving_clients_location: str
    # list of previously generated or selected persona objects (use Persona model)
    selected_personas: List[Persona] = Field(..., description="List of selected persona objects to target")
    # optional: preferred number of headings (defaults to 4)
    num_headings: Optional[int] = Field(4, description="How many headings to generate")


class DescriptionsRequest(BaseModel):
    business_name: str = Field(..., example="GrowthAspired")
    business_category: str = Field(..., example="Software House")
    business_description: str
    promotion_type: str
    offer_description: str
    value: str
    main_goal: GoalEnum = Field(..., description="Primary marketing goal (enum)")
    serving_clients_info: str
    serving_clients_location: str
    selected_personas: List[Persona] = Field(..., description="List of selected persona objects to target")
    num_descriptions: Optional[int] = Field(4, description="How many ad descriptions to generate (default 4)")


class ImageRequest(BaseModel):
    business_name: str = Field(..., example="GrowthAspired")
    business_category: str = Field(..., example="Software House")
    business_description: str
    promotion_type: str
    offer_description: str
    value: str
    main_goal: GoalEnum = Field(..., description="Primary marketing goal (enum)")
    serving_clients_info: str
    serving_clients_location: str
    selected_personas: List[Persona] = Field(..., description="List of selected persona objects to target")
    # optional style/size params
    style: Optional[str] = Field("modern", description="Desired art style or mood (e.g. modern, minimal, illustrative)")
    width: Optional[int] = Field(1200, description="Image width in px")
    height: Optional[int] = Field(628, description="Image height in px")
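The `__new__` override gives each goal member both a plain string value and a human-readable description, which is exactly what the prompt builders interpolate. A short sketch of that behaviour (pure Python, no API calls; assumes only the schema above):

# GoalEnum members are str subclasses carrying an extra .description attribute.
from app.ads.schemas import GoalEnum

goal = GoalEnum.GENERATE_LEADS
print(goal.value)        # "Generate Leads"  (used as main_goal_value in the prompts)
print(goal.description)  # "Collect contact information from potential customers"
print(goal == "Generate Leads")  # True, because GoalEnum subclasses str

# Pydantic request models therefore accept the bare string value in payloads:
print(GoalEnum("Generate Leads") is GoalEnum.GENERATE_LEADS)  # True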
app/main.py CHANGED
@@ -21,7 +21,7 @@ from app.content_relevence import routes as content_relevance_routes
 from app.keywords.routes import router as keywords_router
 from app.uiux import routes as uiux_routes
 from app.mobile_usability import routes as mobile_usability
-
+from app.ads.persona_routes import router as persona_router
 # ─────────────────────────────────────────────
 # Suppress warnings
 # ─────────────────────────────────────────────
@@ -82,6 +82,7 @@ app.include_router(page_speed_routes.router)
 app.include_router(keywords_router)
 app.include_router(uiux_routes.router)
 app.include_router(mobile_usability.router)
+app.include_router(persona_router)
 
 # CORS
 app.add_middleware(
app/rag/embeddings.py CHANGED
@@ -1,24 +1,68 @@
 import os
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from dotenv import load_dotenv
-
-load_dotenv()
-
-
-def get_llm():
-    """
-    Returns a ChatGroq LLM instance using the GROQ API key
-    stored in the environment.
+from typing import Any, Optional
+import logging
+logger = logging.getLogger(__name__)
+logger.addHandler(logging.NullHandler())
+
+def get_llm(model: str = "gemini-2.5-flash",
+            temperature: float = 0.0,
+            max_tokens: Optional[int] = None,
+            timeout: Optional[int] = None,
+            max_retries: int = 3) -> Any:
+    """
+    Return a LangChain ChatGoogleGenerativeAI LLM configured to use Gemini.
+
+    - Reads GEMINI_API_KEY from environment.
+    - Default model: 'gemini-2.5-flash' (change if you need another).
+    - Temperature default 0 for deterministic responses.
+    - max_tokens/timeout can be None to allow defaults from the underlying client.
+
+    Returns:
+        An instance of langchain_google_genai.ChatGoogleGenerativeAI (or raises an informative error).
     """
-    from langchain_groq import ChatGroq
-
-    llm = ChatGroq(
-        model="openai/gpt-oss-120b",
-        temperature=0,
-        max_tokens=1024,
-        api_key=os.getenv("GROQ_API_KEY", "")
-    )
-    return llm
+    try:
+        from langchain_google_genai import ChatGoogleGenerativeAI
+    except Exception as e:
+        logger.exception("langchain ChatGoogleGenerativeAI import failed")
+        raise RuntimeError(
+            "langchain (with ChatGoogleGenerativeAI) is required but not installed. "
+            "Install with: pip install 'langchain[google]' or refer to your LangChain version docs."
+        ) from e
+
+    # Prefer explicit environment variable or other configured setting
+    api_key = os.getenv("GEMINI_API_KEY")
+    if not api_key:
+        msg = "GEMINI_API_KEY environment variable not set. Set it to your Gemini API key."
+        logger.error(msg)
+        raise RuntimeError(msg)
+
+    # Build client config
+    try:
+        llm = ChatGoogleGenerativeAI(
+            model=model,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            timeout=timeout,
+            max_retries=max_retries,
+            api_key=api_key,
+        )
+        logger.info("Initialized ChatGoogleGenerativeAI LLM (model=%s)", model)
+        return llm
+    except TypeError:
+        # Some langchain versions may accept different parameter names (api_key vs openai_api_key etc.)
+        # Try a safer fallback with only the most common args.
+        try:
+            llm = ChatGoogleGenerativeAI(model=model, temperature=temperature, api_key=api_key)
+            logger.info("Initialized ChatGoogleGenerativeAI LLM (fallback constructor) model=%s", model)
+            return llm
+        except Exception as e:
+            logger.exception("Failed to create ChatGoogleGenerativeAI instance")
+            raise RuntimeError(f"Failed to initialize Gemini LLM: {e}") from e
+    except Exception as e:
+        logger.exception("Failed to initialize Gemini LLM")
+        raise RuntimeError(f"Failed to initialize Gemini LLM: {e}") from e
+
 
 # ──────────────────────────────────────────────────────────────────────────────
 # 1. Text Splitter (512 tokens per chunk, 100 token overlap)
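A hedged usage sketch for the reworked `get_llm` helper: it assumes `langchain-google-genai` is installed and GEMINI_API_KEY is exported, and the prompt text is arbitrary.

# Sketch only; requires the langchain-google-genai package and a valid GEMINI_API_KEY.
from app.rag.embeddings import get_llm

llm = get_llm()                      # defaults to "gemini-2.5-flash", temperature 0
reply = llm.invoke("Summarize what a landing-page audit covers in one sentence.")
print(reply.content)                 # LangChain chat models return a message with .content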