MBilal-72 committed on
Commit
2e6803f
·
verified ·
1 Parent(s): 0bb1c44

update with rag utils/optimizer.py

Browse files
Files changed (1) hide show
  1. utils/optimizer.py +503 -458
utils/optimizer.py CHANGED
@@ -1,503 +1,557 @@
1
  """
2
- Content Optimization Module
3
- Enhances content for better AI/LLM performance and GEO scores
4
  """
5
 
6
  import json
7
  import re
8
  from typing import Dict, Any, List, Optional
9
  from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
 
10
 
11
 
12
  class ContentOptimizer:
13
- """Main class for optimizing content for AI search engines"""
14
 
15
- def __init__(self, llm):
16
  self.llm = llm
 
17
  self.setup_prompts()
 
18
 
19
- def setup_prompts(self):
20
- """Initialize optimization prompts"""
21
-
22
- # Main content enhancement prompt
23
- self.enhancement_prompt = (
24
- "You are an AI Content Enhancement Specialist. Your purpose is to optimize user-provided text to maximize its effectiveness for large language models (LLMs) in search, question-answering, and conversational AI systems.\n\n"
25
- "Evaluate the input text based on the following criteria, assigning a score from 1-10 for each:\n"
26
- "- Clarity: How easily can the content be understood?\n"
27
- "- Structuredness: How well-organized and coherent is the content?\n"
28
- "- LLM Answerability: How easily can an LLM extract precise answers from the content?\n\n"
29
- "Identify the most salient keywords.\n\n"
30
- "Rewrite the text to improve:\n"
31
- "- Clarity and precision\n"
32
- "- Logical structure and flow\n"
33
- "- Suitability for LLM-based information retrieval\n\n"
34
- "Present your analysis and optimized text in the following JSON format:\n"
35
- "```json\n"
36
- "{{\n"
37
- " \"scores\": {{\n"
38
- " \"clarity\": 8.5,\n"
39
- " \"structuredness\": 7.0,\n"
40
- " \"answerability\": 9.0\n"
41
- " }},\n"
42
- " \"keywords\": [\"example\", \"installation\", \"setup\"],\n,"
43
- " \"optimized_text\": \"...\"\n,"
44
- "}}\n"
45
- "```"
46
- )
47
-
48
- # SEO-style optimization prompt
49
- self.seo_style_prompt = (
50
- "You are an AI-first SEO specialist. Optimize this content for AI search engines and LLM systems. "
51
- "Focus on:\n"
52
- "1. Semantic keyword optimization\n"
53
- "2. Question-answer format enhancement\n"
54
- "3. Factual accuracy and authority signals\n"
55
- "4. Conversational readiness\n"
56
- "5. Citation-worthy structure\n"
57
- "Provide analysis and optimization in JSON:\n"
58
- "```json\n"
59
- "{{\n"
60
- " \"seo_analysis\": {{\n"
61
- " \"keyword_density\": \"analysis of current keywords\",\n"
62
- " \"semantic_gaps\": [\"missing semantic terms\"],\n"
63
- " \"readability_score\": 8.5,\n"
64
- " \"authority_signals\": [\"credentials\", \"citations\"]\n"
65
- " }},\n"
66
- " \"optimized_content\": {{\n"
67
- " \"title_suggestions\": [\"optimized title 1\", \"optimized title 2\"],\n"
68
- " \"meta_description\": \"AI-optimized meta description\",\n"
69
- " \"enhanced_content\": \"full optimized content...\",\n"
70
- " \"structured_data_suggestions\": [\"schema markup recommendations\"]\n"
71
- " }},\n"
72
- " \"improvement_summary\": {{\n"
73
- " \"changes_made\": [\"change 1\", \"change 2\"],\n"
74
- " \"expected_impact\": \"description of expected improvements\"\n"
75
- " }}\n"
76
- "}}\n"
77
- "```"
78
- )
79
-
80
- # Competitive content analysis prompt
81
- # self.competitive_analysis_prompt = ("Analyze the following content for AI search optimization gaps in entities, questions, clarity, flow, and semantic links. Return JSON with gaps and actionable recommendations.\nContent: {content}")
82
- self.competitive_analysis_prompt = (
83
- "Analyze the following content for AI search optimization gaps in entities, questions, clarity, flow, and semantic links. "
84
- "Return JSON with gaps and actionable recommendations.\n"
85
- "Content: {content}\n"
86
- "Provide competitive analysis in JSON format:\n"
87
- "{{\n"
88
- " \"competitive_analysis\": {{\n"
89
- " \"entity_gaps\": [\"gap1\", \"gap2\"],\n"
90
- " \"question_coverage\": \"summary of coverage\",\n"
91
- " \"factual_clarity\": \"assessment\",\n"
92
- " \"conversational_flow\": \"assessment\",\n"
93
- " \"semantic_relationships\": [\"relationship1\", \"relationship2\"]\n"
94
- " }},\n"
95
- " \"recommendations\": [\"recommendation 1\", \"recommendation 2\"]\n"
96
- "}}\n"
97
- )
98
- self.voice_prompt = (
99
  """
100
- Optimize this content for voice search and conversational AI systems.
101
- Focus on:
102
- 1. Natural language patterns
103
- 2. Question-based structure
104
- 3. Conversational tone
105
- 4. Clear, direct answers
106
- 5. Featured snippet optimization
107
- Original content: {content}
108
- Provide optimization in JSON:
109
- ```json
110
- {{
111
- "voice_optimized_content": "conversational version...",
112
- "question_answer_pairs": [
113
- {{"question": "What is...", "answer": "Direct answer..."}},
114
- {{"question": "How does...", "answer": "Step by step..."}}
115
- ],
116
- "featured_snippet_candidates": ["snippet 1", "snippet 2"],
117
- "natural_language_improvements": ["improvement 1", "improvement 2"],
118
- "conversational_score": 8.5
119
- }}
120
- ```
121
  """
122
- )
123
 
124
-
125
- # Dedicated prompt for rewriting/optimizing content
126
- self.optimization_rewrite_prompt = (
127
- "You are an expert AI content optimizer. Rewrite the provided text to maximize clarity, logical structure, and suitability for LLM-based search and conversational AI. "
128
- "Your rewritten version should be more precise, well-organized, and easier for AI systems to extract answers from. "
129
- "Return your output in the following JSON format:\n"
130
- "```json\n"
131
- "{{\n"
132
- " \"optimized_text\": \"...your rewritten content here...\"\n"
133
- "}}\n"
134
- "```"
135
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
136
 
137
- def optimize_content(self, content: str, analyze_only: bool = False,
138
- include_keywords: bool = True, optimization_type: str = "seo") -> Dict[str, Any]:
139
  """
140
- Main content optimization function
141
- Args:
142
- content (str): Content to optimize
143
- analyze_only (bool): If True, only analyze without rewriting
144
- include_keywords (bool): Whether to include keyword analysis
145
- optimization_type (str): Type of optimization ("standard", "seo", "competitive")
146
- Returns:
147
- Dict: Optimization results with scores and enhanced content
 
148
  """
149
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
150
  # Choose optimization approach
151
- if optimization_type == "seo" and not analyze_only:
152
- return self._seo_style_optimization(content, analyze_only)
153
- elif optimization_type == "competitive" and not analyze_only:
154
- return self._competitive_optimization(content)
155
  else:
156
- return self._standard_optimization(content, analyze_only, include_keywords)
157
 
158
  except Exception as e:
159
- return {'error': f"Optimization failed: {str(e)}"}
160
-
161
- def _standard_optimization(self, content: str, analyze_only: bool, include_keywords: bool) -> Dict[str, Any]:
162
- """Standard content optimization using enhancement prompt"""
163
- try:
164
- # Always assign prompt_text
165
- if analyze_only is True:
166
- prompt_text = self.enhancement_prompt
167
- prompt_text = prompt_text.replace(
168
- "Rewrite the text to improve:",
169
- "Analyze the text for potential improvements in:"
170
- ).replace(
171
- '"optimized_text": "..."',
172
- '"optimization_suggestions": ["suggestion 1", "suggestion 2"]'
173
- )
174
- if not include_keywords:
175
- prompt_text = prompt_text.replace(
176
- '"keywords": ["example", "installation", "setup"],',
177
- ''
178
- )
179
- else:
180
- # Use dedicated rewrite prompt for optimization
181
- prompt_text = self.optimization_rewrite_prompt
182
-
183
- prompt_template = ChatPromptTemplate.from_messages([
184
- SystemMessagePromptTemplate.from_template(prompt_text),
185
- HumanMessagePromptTemplate.from_template(content[:6000])
186
- ])
187
-
188
- chain = prompt_template | self.llm
189
- result = chain.invoke({})
190
-
191
- result_content = result.content if hasattr(result, 'content') else str(result)
192
- parsed_result = self._parse_optimization_result(result_content)
193
-
194
- parsed_result.update({
195
- 'optimization_type': 'standard',
196
- 'analyze_only': analyze_only,
197
- 'original_length': len(content),
198
- 'original_word_count': len(content.split())
199
- })
200
 
201
- return parsed_result
202
-
203
- except Exception as e:
204
- return {'error': f"Standard optimization failed: {str(e)}"}
205
- def _seo_style_optimization(self, content: str, analyze_only: bool) -> Dict[str, Any]:
206
- """SEO-focused optimization for AI search engines"""
207
  try:
208
  prompt_template = ChatPromptTemplate.from_messages([
209
- SystemMessagePromptTemplate.from_template(self.seo_style_prompt),
210
- HumanMessagePromptTemplate.from_template(f"Optimize this content for AI search engines:\n\n{content[:6000]}")
211
  ])
212
-
213
 
214
  chain = prompt_template | self.llm
215
- result = chain.invoke({})
 
 
 
216
 
217
  result_content = result.content if hasattr(result, 'content') else str(result)
218
  parsed_result = self._parse_optimization_result(result_content)
219
 
220
- # Add SEO-specific metadata
221
  parsed_result.update({
222
- 'optimization_type': 'seo',
 
223
  'analyze_only': analyze_only,
224
- 'seo_focused': True
 
225
  })
226
 
227
  return parsed_result
228
 
229
  except Exception as e:
230
- return {'error': f"SEO optimization failed: {str(e)}"}
231
-
232
- def _competitive_optimization(self, content: str) -> Dict[str, Any]:
233
- """Competitive analysis-based optimization"""
234
  try:
235
- formatted_prompt = self.competitive_analysis_prompt.format(content=content[:5000])
236
-
237
  prompt_template = ChatPromptTemplate.from_messages([
238
- SystemMessagePromptTemplate.from_template(formatted_prompt),
239
- HumanMessagePromptTemplate.from_template("Perform the competitive analysis and provide optimization recommendations.")
240
  ])
241
- # ("system", formatted_prompt),
242
- # ("user", "Perform the competitive analysis and provide optimization recommendations.")
243
 
244
  chain = prompt_template | self.llm
245
- result = chain.invoke({})
 
 
 
246
 
247
  result_content = result.content if hasattr(result, 'content') else str(result)
248
  parsed_result = self._parse_optimization_result(result_content)
249
 
250
  parsed_result.update({
251
- 'optimization_type': 'competitive',
 
252
  'competitive_analysis': True
253
  })
254
 
255
  return parsed_result
256
 
257
  except Exception as e:
258
- return {'error': f"Competitive optimization failed: {str(e)}"}
259
-
260
- # def batch_optimize_content(self, content_list: List[str], optimization_type: str = "standard") -> List[Dict[str, Any]]:
261
- # """
262
- # Optimize multiple pieces of content in batch
263
-
264
- # Args:
265
- # content_list (List[str]): List of content pieces to optimize
266
- # optimization_type (str): Type of optimization to apply
267
-
268
- # Returns:
269
- # List[Dict]: List of optimization results
270
- # """
271
- # results = []
272
-
273
- # for i, content in enumerate(content_list):
274
- # try:
275
- # result = self.optimize_content(
276
- # content,
277
- # optimization_type=optimization_type
278
- # )
279
- # result['batch_index'] = i
280
- # results.append(result)
281
-
282
- # except Exception as e:
283
- # results.append({
284
- # 'batch_index': i,
285
- # 'error': f"Batch optimization failed: {str(e)}"
286
- # })
287
-
288
- # return results
289
-
290
- # def generate_content_variations(self, content: str, num_variations: int = 3) -> List[Dict[str, Any]]:
291
- # """
292
- # Generate multiple optimized variations of the same content
293
-
294
- # Args:
295
- # content (str): Original content
296
- # num_variations (int): Number of variations to generate
297
-
298
- # Returns:
299
- # List[Dict]: List of content variations with analysis
300
- # """
301
- # variations = []
302
-
303
- # variation_prompts = [
304
- # "Create a more conversational version optimized for AI chat responses",
305
- # "Create a more authoritative version optimized for citations",
306
- # "Create a more structured version optimized for question-answering"
307
- # ]
308
-
309
- # for i in range(min(num_variations, len(variation_prompts))):
310
- # try:
311
- # custom_prompt = f"""You are optimizing content for AI systems. {variation_prompts[i]}.
312
 
313
- # Original content: {content[:4000]}
314
-
315
- # Provide the optimized variation in JSON format:
316
- # ```json
317
- # {{
318
- # "variation_type": "conversational/authoritative/structured",
319
- # "optimized_content": "the rewritten content...",
320
- # "key_changes": ["change 1", "change 2"],
321
- # "target_use_case": "description of ideal use case"
322
- # }}
323
- # ```
324
- # """
325
-
326
- # prompt_template = ChatPromptTemplate.from_messages([
327
- # SystemMessagePromptTemplate.from_template(custom_prompt),
328
- # HumanMessagePromptTemplate.from_template("Generate the variation.")
329
- # ])
330
- # # ("system", custom_prompt),
331
- # # ("user", "Generate the variation.")
332
-
333
- # chain = prompt_template | self.llm
334
- # result = chain.invoke({})
335
-
336
- # result_content = result.content if hasattr(result, 'content') else str(result)
337
- # parsed_result = self._parse_optimization_result(result_content)
338
-
339
- # parsed_result.update({
340
- # 'variation_index': i,
341
- # 'variation_prompt': variation_prompts[i]
342
- # })
343
-
344
- # variations.append(parsed_result)
345
-
346
- # except Exception as e:
347
- # variations.append({
348
- # 'variation_index': i,
349
- # 'error': f"Variation generation failed: {str(e)}"
350
- # })
351
-
352
- # return variations
353
-
354
- def analyze_content_readability(self, content: str) -> Dict[str, Any]:
355
  """
356
- Analyze content readability for AI systems
357
 
358
  Args:
359
- content (str): Content to analyze
 
360
 
361
  Returns:
362
- Dict: Readability analysis results
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
363
  """
364
  try:
365
- # Basic readability metrics
366
  words = content.split()
367
  sentences = re.split(r'[.!?]+', content)
368
  sentences = [s.strip() for s in sentences if s.strip()]
369
-
370
  paragraphs = [p.strip() for p in content.split('\n\n') if p.strip()]
371
 
372
- # Calculate metrics
373
- avg_words_per_sentence = len(words) / len(sentences) if sentences else 0
374
- avg_sentences_per_paragraph = len(sentences) / len(paragraphs) if paragraphs else 0
375
-
376
- # Character-based metrics
377
- avg_word_length = sum(len(word) for word in words) / len(words) if words else 0
378
-
379
- # Complexity indicators
380
- long_sentences = [s for s in sentences if len(s.split()) > 20]
381
- complex_words = [w for w in words if len(w) > 6]
 
 
 
 
 
 
 
382
 
383
  return {
384
- 'basic_metrics': {
385
  'total_words': len(words),
386
  'total_sentences': len(sentences),
387
  'total_paragraphs': len(paragraphs),
388
- 'avg_words_per_sentence': avg_words_per_sentence,
389
- 'avg_sentences_per_paragraph': avg_sentences_per_paragraph,
390
- 'avg_word_length': avg_word_length
 
 
391
  },
392
- 'complexity_indicators': {
393
- 'long_sentences_count': len(long_sentences),
394
- 'long_sentences_percentage': len(long_sentences) / len(sentences) * 100 if sentences else 0,
395
- 'complex_words_count': len(complex_words),
396
- 'complex_words_percentage': len(complex_words) / len(words) * 100 if words else 0
 
397
  },
398
- 'ai_readability_score': self._calculate_ai_readability_score({
399
- 'avg_words_per_sentence': avg_words_per_sentence,
400
- 'avg_word_length': avg_word_length,
401
- 'complex_words_percentage': len(complex_words) / len(words) * 100 if words else 0
402
- }),
403
- 'recommendations': self._generate_readability_recommendations({
404
- 'avg_words_per_sentence': avg_words_per_sentence,
405
- 'long_sentences_percentage': len(long_sentences) / len(sentences) * 100 if sentences else 0,
406
- 'complex_words_percentage': len(complex_words) / len(words) * 100 if words else 0
407
  })
408
  }
409
 
410
  except Exception as e:
411
- return {'error': f"Readability analysis failed: {str(e)}"}
412
-
413
- # def extract_key_entities(self, content: str) -> Dict[str, Any]:
414
- # """
415
- # Extract key entities and topics for optimization
416
-
417
- # Args:
418
- # content (str): Content to analyze
419
-
420
- # Returns:
421
- # Dict: Extracted entities and topics
422
- # """
423
- # try:
424
- # entity_prompt = """Extract key entities, topics, and concepts from this content for AI optimization.
425
 
426
- # Content: {content}
427
-
428
- # Identify:
429
- # 1. Named entities (people, places, organizations)
430
- # 2. Key concepts and topics
431
- # 3. Technical terms and jargon
432
- # 4. Potential semantic keywords
433
- # 5. Question-answer opportunities
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
434
 
435
- # Format as JSON:
436
- # ```json
437
- # {{
438
- # "named_entities": ["entity1", "entity2"],
439
- # "key_topics": ["topic1", "topic2"],
440
- # "technical_terms": ["term1", "term2"],
441
- # "semantic_keywords": ["keyword1", "keyword2"],
442
- # "question_opportunities": ["What is...", "How does..."],
443
- # "entity_relationships": ["relationship descriptions"]
444
- # }}
445
- # ```
446
- # """
447
-
448
- # prompt_template = ChatPromptTemplate.from_messages([
449
- # SystemMessagePromptTemplate.from_template(entity_prompt.format(content=content[:5000])),
450
- # HumanMessagePromptTemplate.from_template("Extract the entities and topics.")
451
- # ])
452
- # # ("system", entity_prompt.format(content=content[:5000])),
453
- # # ("user", "Extract the entities and topics.")
454
-
455
- # chain = prompt_template | self.llm
456
- # result = chain.invoke({})
457
-
458
- # result_content = result.content if hasattr(result, 'content') else str(result)
459
- # return self._parse_optimization_result(result_content)
460
-
461
- # except Exception as e:
462
- # return {'error': f"Entity extraction failed: {str(e)}"}
463
-
464
- def optimize_for_voice_search(self, content: str) -> Dict[str, Any]:
465
  """
466
- Optimize content specifically for voice search and conversational AI
 
 
467
 
468
- Args:
469
- content (str): Content to optimize
 
 
 
 
 
 
 
 
470
 
471
- Returns:
472
- Dict: Voice search optimization results
473
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
474
  try:
475
- # self.voice_prompt = ("Optimize the following content for voice search and conversational AI by improving natural language flow, question-based structure, tone, and featured snippet potential. Return JSON with improved content, Q&A pairs, snippet candidates, and a conversational score.\nContent: {content}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
476
 
 
 
477
 
478
- prompt_template = ChatPromptTemplate.from_messages([
479
- SystemMessagePromptTemplate.from_template(voice_prompt.format(content=content[:4000])),
480
- HumanMessagePromptTemplate.from_template("Optimize for voice search.")
481
- ])
482
- # ("system", voice_prompt.format(content=content[:4000])),
483
- # ("user", "Optimize for voice search.")
 
484
 
485
- chain = prompt_template | self.llm
486
- result = chain.invoke({})
487
 
488
- result_content = result.content if hasattr(result, 'content') else str(result)
489
- parsed_result = self._parse_optimization_result(result_content)
490
 
491
- parsed_result.update({
492
- 'optimization_type': 'voice_search',
493
- 'voice_optimized': True
494
- })
495
 
496
- return parsed_result
 
497
 
498
- except Exception as e:
499
- return {'error': f"Voice search optimization failed: {str(e)}"}
500
-
 
 
501
  def _parse_optimization_result(self, response_text: str) -> Dict[str, Any]:
502
  """Parse LLM response and extract structured results"""
503
  try:
@@ -508,73 +562,64 @@ class ContentOptimizer:
508
  if json_start != -1 and json_end != -1:
509
  json_str = response_text[json_start:json_end]
510
  parsed = json.loads(json_str)
511
-
512
- # Ensure consistent structure
513
- if 'scores' not in parsed and 'score' in parsed:
514
- parsed['scores'] = parsed['score']
515
-
516
  return parsed
517
  else:
518
- # If no JSON found, return raw response with error flag
519
  return {
520
  'raw_response': response_text,
521
  'parsing_error': 'No JSON structure found in response',
522
- 'scores': {'clarity': 0, 'structuredness': 0, 'answerability': 0}
 
 
 
 
 
 
 
523
  }
524
 
525
  except json.JSONDecodeError as e:
526
  return {
527
  'raw_response': response_text,
528
  'parsing_error': f'JSON decode error: {str(e)}',
529
- 'scores': {'clarity': 0, 'structuredness': 0, 'answerability': 0}
 
 
 
 
 
 
 
530
  }
531
  except Exception as e:
532
  return {
533
  'raw_response': response_text,
534
  'parsing_error': f'Unexpected parsing error: {str(e)}',
535
- 'scores': {'clarity': 0, 'structuredness': 0, 'answerability': 0}
 
 
 
 
 
 
 
536
  }
537
-
538
- def _calculate_ai_readability_score(self, metrics: Dict[str, float]) -> float:
539
- """Calculate AI-specific readability score"""
540
- try:
541
- # Optimal ranges for AI consumption
542
- optimal_words_per_sentence = 15 # Sweet spot for AI processing
543
- optimal_word_length = 5 # Balance of complexity and clarity
544
- optimal_complex_words_percentage = 15 # Some complexity is good for authority
545
-
546
- # Calculate deviations from optimal
547
- sentence_score = max(0, 10 - abs(metrics['avg_words_per_sentence'] - optimal_words_per_sentence) * 0.5)
548
- word_length_score = max(0, 10 - abs(metrics['avg_word_length'] - optimal_word_length) * 2)
549
- complexity_score = max(0, 10 - abs(metrics['complex_words_percentage'] - optimal_complex_words_percentage) * 0.3)
550
-
551
- # Weighted average
552
- overall_score = (sentence_score * 0.4 + word_length_score * 0.3 + complexity_score * 0.3)
553
-
554
- return round(overall_score, 1)
555
-
556
- except Exception:
557
- return 5.0 # Default neutral score
558
-
559
- def _generate_readability_recommendations(self, metrics: Dict[str, float]) -> List[str]:
560
- """Generate specific readability improvement recommendations"""
561
- recommendations = []
562
-
563
- try:
564
- if metrics['avg_words_per_sentence'] > 20:
565
- recommendations.append("Break down long sentences for better AI processing")
566
- elif metrics['avg_words_per_sentence'] < 8:
567
- recommendations.append("Consider combining very short sentences for better context")
568
-
569
- if metrics['long_sentences_percentage'] > 30:
570
- recommendations.append("Reduce the number of complex sentences (>20 words)")
571
-
572
- if metrics['complex_words_percentage'] > 25:
573
- recommendations.append("Simplify vocabulary where possible for broader accessibility")
574
- elif metrics['complex_words_percentage'] < 5:
575
- recommendations.append("Add more specific terminology to establish authority")
576
-
577
- return recommendations
578
-
579
- except Exception:
580
- return ["Unable to generate specific recommendations"]
 
1
  """
2
+ Enhanced Content Optimization Module with RAG for GEO
3
+ Integrates RAG functionality for better Generative Engine Optimization
4
  """
5
 
6
  import json
7
  import re
8
  from typing import Dict, Any, List, Optional
9
  from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
10
+ from langchain.schema import Document
11
 
12
 
13
  class ContentOptimizer:
14
+ """Enhanced Content Optimizer with RAG capabilities for GEO"""
15
 
16
+ def __init__(self, llm, vector_chunker=None):
17
  self.llm = llm
18
+ self.vector_chunker = vector_chunker
19
  self.setup_prompts()
20
+ self.setup_geo_knowledge_base()
21
 
22
+ def setup_geo_knowledge_base(self):
23
+ """Initialize GEO best practices knowledge base"""
24
+ self.geo_knowledge = [
25
+ """
26
+ Generative Engine Optimization (GEO) Best Practices:
27
+
28
+ 1. Structure for AI Consumption:
29
+ - Use clear headings and subheadings
30
+ - Include bullet points and numbered lists
31
+ - Provide direct, concise answers to common questions
32
+ - Use schema markup when possible
33
+
34
+ 2. Content Format for LLMs:
35
+ - Answer questions directly in the first sentence
36
+ - Use "what, why, how" question patterns
37
+ - Include relevant entities and proper nouns
38
+ - Maintain factual accuracy with citations
39
+
40
+ 3. Semantic Optimization:
41
+ - Include related terms and synonyms
42
+ - Use entity-rich content (people, places, organizations)
43
+ - Connect concepts with clear relationships
44
+ - Optimize for topic clusters, not just keywords
45
+ """,
46
+
47
+ """
48
+ AI Search Visibility Optimization:
49
+
50
+ 1. Query Intent Matching:
51
+ - Address user intent explicitly
52
+ - Use natural language patterns
53
+ - Include question-answer pairs
54
+ - Optimize for conversational queries
55
+
56
+ 2. Citation Worthiness:
57
+ - Include authoritative sources and data
58
+ - Use specific facts and statistics
59
+ - Provide expert opinions and insights
60
+ - Maintain consistent tone and expertise
61
+
62
+ 3. Multi-Query Coverage:
63
+ - Address related questions in the same content
64
+ - Use comprehensive topic coverage
65
+ - Include long-tail and specific queries
66
+ - Provide context for complex topics
67
+ """,
68
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  """
70
+ Content Structure for AI Systems:
71
+
72
+ 1. Information Architecture:
73
+ - Lead with key information
74
+ - Use inverted pyramid structure
75
+ - Include table of contents for long content
76
+ - Break complex topics into digestible sections
77
+
78
+ 2. Conversational Readiness:
79
+ - Write in active voice
80
+ - Use clear, direct language
81
+ - Include transitional phrases
82
+ - Optimize sentence length (12-20 words)
83
+
84
+ 3. Context Completeness:
85
+ - Define technical terms
86
+ - Provide background information
87
+ - Include relevant examples
88
+ - Connect to broader topic context
 
 
89
  """
90
+ ]
91
 
92
+ def setup_prompts(self):
93
+ """Initialize optimization prompts with RAG integration"""
94
+
95
+ self.rag_enhancement_prompt = """
96
+ You are a Generative Engine Optimization (GEO) specialist with access to best practices knowledge.
97
+
98
+ Based on the provided GEO knowledge and the user's content, optimize the content for:
99
+ 1. AI search engines (ChatGPT, Claude, Gemini)
100
+ 2. LLM-based question answering systems
101
+ 3. Conversational AI interfaces
102
+ 4. Citation and reference systems
103
+
104
+ Use the knowledge base to inform your optimization decisions.
105
+
106
+ Knowledge Base Context:
107
+ {context}
108
+
109
+ Original Content:
110
+ {content}
111
+
112
+ Provide comprehensive GEO optimization in JSON format:
113
+ ```json
114
+ {{
115
+ "geo_analysis": {{
116
+ "current_geo_score": 7.5,
117
+ "ai_search_visibility": 8.0,
118
+ "query_intent_matching": 7.0,
119
+ "conversational_readiness": 8.5,
120
+ "citation_worthiness": 6.5,
121
+ "context_completeness": 7.5
122
+ }},
123
+ "optimization_opportunities": [
124
+ {{
125
+ "type": "Structure Enhancement",
126
+ "description": "Add clear headings and Q&A format",
127
+ "priority": "high",
128
+ "expected_impact": "Improve AI parsing by 25%"
129
+ }}
130
+ ],
131
+ "optimized_content": {{
132
+ "enhanced_text": "Your optimized content here...",
133
+ "structural_improvements": ["Added FAQ section", "Improved headings"],
134
+ "semantic_enhancements": ["Added related terms", "Improved entity density"]
135
+ }},
136
+ "geo_keywords": {{
137
+ "primary_entities": ["entity1", "entity2"],
138
+ "semantic_terms": ["term1", "term2"],
139
+ "question_patterns": ["What is...", "How does..."],
140
+ "related_concepts": ["concept1", "concept2"]
141
+ }},
142
+ "recommendations": [
143
+ "Add more specific examples",
144
+ "Include authoritative citations",
145
+ "Improve conversational flow"
146
+ ]
147
+ }}
148
+ ```
149
+ """
150
+
151
+ self.competitive_geo_prompt = """
152
+ Analyze the content against GEO best practices and identify competitive optimization opportunities.
153
+
154
+ GEO Knowledge Base:
155
+ {context}
156
+
157
+ Content to Analyze:
158
+ {content}
159
+
160
+ Provide competitive GEO analysis:
161
+ ```json
162
+ {{
163
+ "competitive_gaps": {{
164
+ "missing_question_patterns": ["What questions aren't covered"],
165
+ "entity_gaps": ["Important entities not mentioned"],
166
+ "semantic_opportunities": ["Related terms to include"],
167
+ "structural_weaknesses": ["Formatting issues for AI"]
168
+ }},
169
+ "benchmark_comparison": {{
170
+ "current_performance": {{
171
+ "ai_answerability": 6.5,
172
+ "semantic_richness": 7.0,
173
+ "structural_clarity": 8.0
174
+ }},
175
+ "optimization_potential": {{
176
+ "ai_answerability": 9.0,
177
+ "semantic_richness": 8.5,
178
+ "structural_clarity": 9.5
179
+ }}
180
+ }},
181
+ "action_plan": [
182
+ {{
183
+ "priority": "high",
184
+ "action": "Add FAQ section",
185
+ "rationale": "Improves direct question answering"
186
+ }}
187
+ ]
188
+ }}
189
+ ```
190
+ """
191
 
192
+ def optimize_content_with_rag(self, content: str, optimization_type: str = "geo_standard",
193
+ analyze_only: bool = False) -> Dict[str, Any]:
194
  """
195
+ Main RAG-enhanced content optimization for GEO
196
+
197
+ Args:
198
+ content (str): Content to optimize
199
+ optimization_type (str): Type of GEO optimization
200
+ analyze_only (bool): Whether to only analyze without rewriting
201
+
202
+ Returns:
203
+ Dict: Comprehensive GEO optimization results
204
  """
205
  try:
206
+ # Create knowledge base documents
207
+ knowledge_docs = [Document(page_content=knowledge, metadata={"source": "geo_best_practices"})
208
+ for knowledge in self.geo_knowledge]
209
+
210
+ if self.vector_chunker:
211
+ # Use RAG to get relevant knowledge
212
+ qa_chain = self.vector_chunker.create_qa_chain(knowledge_docs, self.llm)
213
+
214
+ # Query for relevant GEO practices
215
+ geo_query = f"How to optimize this type of content for AI search engines: {content[:500]}"
216
+ context_result = qa_chain({"query": geo_query})
217
+ context = context_result.get("result", "")
218
+ else:
219
+ # Fallback to using all knowledge if vector_chunker not available
220
+ context = "\n\n".join(self.geo_knowledge)
221
+
222
  # Choose optimization approach
223
+ if optimization_type == "competitive_geo":
224
+ return self._competitive_geo_optimization(content, context)
 
 
225
  else:
226
+ return self._standard_geo_optimization(content, context, analyze_only)
227
 
228
  except Exception as e:
229
+ return {'error': f"RAG-enhanced optimization failed: {str(e)}"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
230
 
231
+ def _standard_geo_optimization(self, content: str, context: str, analyze_only: bool) -> Dict[str, Any]:
232
+ """Standard GEO optimization with RAG context"""
 
 
 
 
233
  try:
234
  prompt_template = ChatPromptTemplate.from_messages([
235
+ SystemMessagePromptTemplate.from_template(self.rag_enhancement_prompt),
236
+ HumanMessagePromptTemplate.from_template("Optimize this content using GEO best practices.")
237
  ])
 
238
 
239
  chain = prompt_template | self.llm
240
+ result = chain.invoke({
241
+ "context": context,
242
+ "content": content[:5000] # Limit content length
243
+ })
244
 
245
  result_content = result.content if hasattr(result, 'content') else str(result)
246
  parsed_result = self._parse_optimization_result(result_content)
247
 
248
+ # Add metadata
249
  parsed_result.update({
250
+ 'optimization_type': 'geo_standard',
251
+ 'rag_enhanced': True,
252
  'analyze_only': analyze_only,
253
+ 'original_length': len(content),
254
+ 'knowledge_sources': len(self.geo_knowledge)
255
  })
256
 
257
  return parsed_result
258
 
259
  except Exception as e:
260
+ return {'error': f"Standard GEO optimization failed: {str(e)}"}
261
+
262
+ def _competitive_geo_optimization(self, content: str, context: str) -> Dict[str, Any]:
263
+ """Competitive GEO analysis with RAG context"""
264
  try:
 
 
265
  prompt_template = ChatPromptTemplate.from_messages([
266
+ SystemMessagePromptTemplate.from_template(self.competitive_geo_prompt),
267
+ HumanMessagePromptTemplate.from_template("Perform competitive GEO analysis.")
268
  ])
 
 
269
 
270
  chain = prompt_template | self.llm
271
+ result = chain.invoke({
272
+ "context": context,
273
+ "content": content[:5000]
274
+ })
275
 
276
  result_content = result.content if hasattr(result, 'content') else str(result)
277
  parsed_result = self._parse_optimization_result(result_content)
278
 
279
  parsed_result.update({
280
+ 'optimization_type': 'competitive_geo',
281
+ 'rag_enhanced': True,
282
  'competitive_analysis': True
283
  })
284
 
285
  return parsed_result
286
 
287
  except Exception as e:
288
+ return {'error': f"Competitive GEO optimization failed: {str(e)}"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
289
 
290
+ def batch_optimize_with_rag(self, content_list: List[str], optimization_type: str = "geo_standard") -> List[Dict[str, Any]]:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
291
  """
292
+ Batch optimize multiple content pieces with RAG
293
 
294
  Args:
295
+ content_list: List of content to optimize
296
+ optimization_type: Type of optimization
297
 
298
  Returns:
299
+ List of optimization results
300
+ """
301
+ results = []
302
+
303
+ for i, content in enumerate(content_list):
304
+ try:
305
+ result = self.optimize_content_with_rag(
306
+ content,
307
+ optimization_type=optimization_type
308
+ )
309
+ result['batch_index'] = i
310
+ results.append(result)
311
+
312
+ except Exception as e:
313
+ results.append({
314
+ 'batch_index': i,
315
+ 'error': f"Batch GEO optimization failed: {str(e)}"
316
+ })
317
+
318
+ return results
319
+
320
+ def analyze_geo_readability(self, content: str) -> Dict[str, Any]:
321
+ """
322
+ Analyze content readability specifically for GEO/AI systems
323
  """
324
  try:
325
+ # Basic metrics
326
  words = content.split()
327
  sentences = re.split(r'[.!?]+', content)
328
  sentences = [s.strip() for s in sentences if s.strip()]
 
329
  paragraphs = [p.strip() for p in content.split('\n\n') if p.strip()]
330
 
331
+ # GEO-specific analysis
332
+ questions = len(re.findall(r'\?', content))
333
+ headings = len(re.findall(r'^#+\s', content, re.MULTILINE))
334
+ lists = len(re.findall(r'^\s*[-*+]\s', content, re.MULTILINE))
335
+ numbers = len(re.findall(r'\b\d+\.?\d*\b', content))
336
+
337
+ # Entity-like patterns (proper nouns)
338
+ entities = len(re.findall(r'\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b', content))
339
+
340
+ # Calculate GEO readability score
341
+ geo_score = self._calculate_geo_readability_score({
342
+ 'avg_words_per_sentence': len(words) / len(sentences) if sentences else 0,
343
+ 'questions_ratio': questions / len(sentences) if sentences else 0,
344
+ 'structure_elements': headings + lists,
345
+ 'entity_density': entities / len(words) if words else 0,
346
+ 'numeric_data': numbers / len(words) if words else 0
347
+ })
348
 
349
  return {
350
+ 'geo_readability_metrics': {
351
  'total_words': len(words),
352
  'total_sentences': len(sentences),
353
  'total_paragraphs': len(paragraphs),
354
+ 'questions_count': questions,
355
+ 'headings_count': headings,
356
+ 'lists_count': lists,
357
+ 'entity_mentions': entities,
358
+ 'numeric_data_points': numbers
359
  },
360
+ 'geo_readability_score': geo_score,
361
+ 'ai_optimization_indicators': {
362
+ 'question_ratio': questions / len(sentences) if sentences else 0,
363
+ 'structure_score': min(10, (headings + lists) * 2),
364
+ 'entity_density': entities / len(words) if words else 0,
365
+ 'data_richness': numbers / len(words) if words else 0
366
  },
367
+ 'geo_recommendations': self._generate_geo_recommendations({
368
+ 'questions': questions,
369
+ 'headings': headings,
370
+ 'lists': lists,
371
+ 'entities': entities,
372
+ 'sentences': len(sentences)
 
 
 
373
  })
374
  }
375
 
376
  except Exception as e:
377
+ return {'error': f"GEO readability analysis failed: {str(e)}"}
 
 
 
 
 
 
 
 
 
 
 
 
 
378
 
379
+ def extract_geo_entities(self, content: str) -> Dict[str, Any]:
380
+ """
381
+ Extract entities and concepts relevant for GEO optimization
382
+ """
383
+ try:
384
+ if not self.vector_chunker:
385
+ return {'error': 'Vector chunker not available for entity extraction'}
386
+
387
+ # Create knowledge context about entity extraction
388
+ entity_knowledge = [Document(
389
+ page_content="""
390
+ For GEO optimization, important entities include:
391
+ 1. Named entities: People, organizations, locations, brands
392
+ 2. Technical concepts: Industry terms, methodologies, tools
393
+ 3. Topical entities: Core subjects, themes, categories
394
+ 4. Relational entities: Connected concepts, dependencies
395
+ 5. Question entities: What users commonly ask about
396
+ """,
397
+ metadata={"source": "entity_extraction_guide"}
398
+ )]
399
+
400
+ qa_chain = self.vector_chunker.create_qa_chain(entity_knowledge, self.llm)
401
+
402
+ # Extract different types of entities
403
+ extraction_queries = [
404
+ "What are the main named entities (people, places, organizations) in this content?",
405
+ "What are the key technical concepts and terms?",
406
+ "What questions might users have about this content?",
407
+ "What related topics and concepts are mentioned?"
408
+ ]
409
+
410
+ extracted_data = {}
411
+ for query in extraction_queries:
412
+ full_query = f"{query}\n\nContent: {content[:3000]}"
413
+ result = qa_chain({"query": full_query})
414
+ query_key = query.split('?')[0].lower().replace(' ', '_').replace('what_are_the_', '')
415
+ extracted_data[query_key] = result.get("result", "")
416
+
417
+ return {
418
+ 'geo_entities': extracted_data,
419
+ 'extraction_method': 'rag_enhanced',
420
+ 'content_length': len(content),
421
+ 'extraction_success': True
422
+ }
423
+
424
+ except Exception as e:
425
+ return {'error': f"GEO entity extraction failed: {str(e)}"}
426
 
427
    def generate_geo_variations(self, content: str, num_variations: int = 3) -> List[Dict[str, Any]]:
        """Generate GEO-optimized content variations using RAG.

        For each of up to ``num_variations`` predefined variation styles
        (FAQ-focused, conversational, authoritative), retrieves style-specific
        guidance through the QA chain, asks the LLM to rewrite *content* in
        that style, and parses the JSON response.

        Args:
            content: Source content (truncated to 4000 chars per prompt).
            num_variations: How many of the predefined styles to generate.

        Returns:
            A list of parsed variation dicts; per-variation failures are
            recorded in-place with an ``'error'`` key. Returns a single-item
            error list if the vector chunker is unavailable or setup fails.
        """
        variations = []

        # (type tag, human-readable goal) — order defines generation priority.
        variation_types = [
            ("faq_focused", "Transform into FAQ format optimized for AI Q&A systems"),
            ("conversational", "Optimize for conversational AI and voice search"),
            ("authoritative", "Enhance with authoritative tone for citation systems")
        ]

        try:
            # Get GEO context
            knowledge_docs = [Document(page_content=knowledge, metadata={"source": "geo_practices"})
                              for knowledge in self.geo_knowledge]

            if self.vector_chunker:
                qa_chain = self.vector_chunker.create_qa_chain(knowledge_docs, self.llm)

                for i, (variation_type, description) in enumerate(variation_types[:num_variations]):
                    # Each variation is attempted independently; a failure is
                    # recorded in the output list without stopping the others.
                    try:
                        # Get specific guidance for this variation type
                        context_query = f"How to optimize content for {variation_type} in AI systems?"
                        context_result = qa_chain({"query": context_query})
                        context = context_result.get("result", "")

                        # NOTE(review): this f-string turns `{{`/`}}` into literal
                        # `{`/`}`, and the result is then fed to from_template(),
                        # which treats `{...}` as template variables — the JSON
                        # example braces may need re-escaping. Confirm against
                        # langchain's prompt-template brace handling.
                        variation_prompt = f"""
                        Create a {variation_type} version of the content optimized for GEO.

                        Context: {context}

                        Original Content: {content[:4000]}

                        Variation Goal: {description}

                        Return JSON:
                        {{
                            "variation_type": "{variation_type}",
                            "optimized_content": "the rewritten content...",
                            "geo_improvements": ["improvement 1", "improvement 2"],
                            "target_ai_systems": ["ChatGPT", "Claude", "etc"],
                            "expected_geo_benefits": ["benefit 1", "benefit 2"]
                        }}
                        """

                        prompt_template = ChatPromptTemplate.from_messages([
                            SystemMessagePromptTemplate.from_template(variation_prompt),
                            HumanMessagePromptTemplate.from_template("Generate the GEO-optimized variation.")
                        ])

                        chain = prompt_template | self.llm
                        # All data is baked into the system prompt, so invoke
                        # with no template variables.
                        result = chain.invoke({})

                        result_content = result.content if hasattr(result, 'content') else str(result)
                        parsed_result = self._parse_optimization_result(result_content)

                        parsed_result.update({
                            'variation_index': i,
                            'rag_enhanced': True,
                            'geo_optimized': True
                        })

                        variations.append(parsed_result)

                    except Exception as e:
                        variations.append({
                            'variation_index': i,
                            'variation_type': variation_type,
                            'error': f"GEO variation generation failed: {str(e)}"
                        })
            else:
                return [{'error': 'Vector chunker not available for variation generation'}]

        except Exception as e:
            return [{'error': f"GEO variation generation failed: {str(e)}"}]

        return variations
505
+
506
+ def _calculate_geo_readability_score(self, metrics: Dict[str, float]) -> float:
507
+ """Calculate GEO-specific readability score"""
508
  try:
509
+ # GEO-optimized scoring
510
+ sentence_score = max(0, 10 - abs(metrics['avg_words_per_sentence'] - 15) * 0.3)
511
+ question_score = min(10, metrics['questions_ratio'] * 50) # Reward questions
512
+ structure_score = min(10, metrics['structure_elements'] * 1.5) # Reward headings/lists
513
+ entity_score = min(10, metrics['entity_density'] * 100) # Reward entities
514
+ data_score = min(10, metrics['numeric_data'] * 200) # Reward data points
515
+
516
+ # Weighted for GEO priorities
517
+ overall_score = (
518
+ sentence_score * 0.2 +
519
+ question_score * 0.25 +
520
+ structure_score * 0.25 +
521
+ entity_score * 0.15 +
522
+ data_score * 0.15
523
+ )
524
+
525
+ return round(overall_score, 1)
526
 
527
+ except Exception:
528
+ return 5.0
529
 
530
+ def _generate_geo_recommendations(self, metrics: Dict[str, int]) -> List[str]:
531
+ """Generate GEO-specific recommendations"""
532
+ recommendations = []
533
+
534
+ try:
535
+ if metrics['questions'] == 0:
536
+ recommendations.append("Add FAQ section or question-based headings for better AI Q&A performance")
537
 
538
+ if metrics['headings'] < 2:
539
+ recommendations.append("Add more structured headings to improve AI content parsing")
540
 
541
+ if metrics['lists'] == 0:
542
+ recommendations.append("Include bullet points or numbered lists for better information extraction")
543
 
544
+ if metrics['entities'] < 5:
545
+ recommendations.append("Include more specific entities (names, places, organizations) for authority")
 
 
546
 
547
+ if metrics['questions'] / metrics['sentences'] < 0.1:
548
+ recommendations.append("Consider transforming statements into question-answer pairs")
549
 
550
+ return recommendations
551
+
552
+ except Exception:
553
+ return ["Unable to generate specific GEO recommendations"]
554
+
555
  def _parse_optimization_result(self, response_text: str) -> Dict[str, Any]:
556
  """Parse LLM response and extract structured results"""
557
  try:
 
562
  if json_start != -1 and json_end != -1:
563
  json_str = response_text[json_start:json_end]
564
  parsed = json.loads(json_str)
 
 
 
 
 
565
  return parsed
566
  else:
567
+ # If no JSON found, return structured error
568
  return {
569
  'raw_response': response_text,
570
  'parsing_error': 'No JSON structure found in response',
571
+ 'geo_analysis': {
572
+ 'current_geo_score': 0,
573
+ 'ai_search_visibility': 0,
574
+ 'query_intent_matching': 0,
575
+ 'conversational_readiness': 0,
576
+ 'citation_worthiness': 0,
577
+ 'context_completeness': 0
578
+ }
579
  }
580
 
581
  except json.JSONDecodeError as e:
582
  return {
583
  'raw_response': response_text,
584
  'parsing_error': f'JSON decode error: {str(e)}',
585
+ 'geo_analysis': {
586
+ 'current_geo_score': 0,
587
+ 'ai_search_visibility': 0,
588
+ 'query_intent_matching': 0,
589
+ 'conversational_readiness': 0,
590
+ 'citation_worthiness': 0,
591
+ 'context_completeness': 0
592
+ }
593
  }
594
  except Exception as e:
595
  return {
596
  'raw_response': response_text,
597
  'parsing_error': f'Unexpected parsing error: {str(e)}',
598
+ 'geo_analysis': {
599
+ 'current_geo_score': 0,
600
+ 'ai_search_visibility': 0,
601
+ 'query_intent_matching': 0,
602
+ 'conversational_readiness': 0,
603
+ 'citation_worthiness': 0,
604
+ 'context_completeness': 0
605
+ }
606
  }
607
+
608
+ # Legacy methods for backward compatibility
609
+ def optimize_content(self, content: str, analyze_only: bool = False,
610
+ include_keywords: bool = True, optimization_type: str = "standard") -> Dict[str, Any]:
611
+ """
612
+ Legacy method - redirects to RAG-enhanced optimization
613
+ """
614
+ if optimization_type == "standard":
615
+ return self.optimize_content_with_rag(content, "geo_standard", analyze_only)
616
+ elif optimization_type == "seo":
617
+ return self.optimize_content_with_rag(content, "geo_standard", analyze_only)
618
+ elif optimization_type == "competitive":
619
+ return self.optimize_content_with_rag(content, "competitive_geo", analyze_only)
620
+ else:
621
+ return self.optimize_content_with_rag(content, "geo_standard", analyze_only)
622
+
623
    def analyze_content_readability(self, content: str) -> Dict[str, Any]:
        """Legacy method - redirects to GEO readability analysis.

        Kept for backward compatibility with callers that predate the
        GEO-specific API; see ``analyze_geo_readability`` for the result shape.
        """
        return self.analyze_geo_readability(content)