text
stringlengths
0
131
return processed_lines
def _create_pre_chunks(self, lines: List[Line]) -> List[PreChunkSegment]:
    """Split *lines* into token-bounded segments with trailing overlap.

    A segment is closed as soon as appending the next line would push it
    past ``config.llm_token_limit``; the tail of the closed segment (up to
    ``config.overlap_token_count`` tokens) is carried into the next one so
    the LLM sees continuity across segment boundaries.
    """
    logger.debug(f"Segmenting lines (Limit: {self.config.llm_token_limit})...")
    segments: List[PreChunkSegment] = []
    buffer: List[Line] = []
    buffer_tokens = 0
    for idx, line in enumerate(lines):
        if buffer and buffer_tokens + line.token_count > self.config.llm_token_limit:
            segments.append(PreChunkSegment(list(buffer)))
            # Walk backwards from the line before the cut, collecting the
            # tail of the just-closed segment as overlap seed. Stop once the
            # overlap budget is reached (inclusive of the line that crosses
            # it) or we step outside the closed segment.
            tail: List[Line] = []
            tail_tokens = 0
            for prev in reversed(lines[:idx]):
                if prev not in buffer:
                    break
                tail.insert(0, prev)
                tail_tokens += prev.token_count
                if tail_tokens >= self.config.overlap_token_count:
                    break
            buffer = list(tail)
            buffer_tokens = tail_tokens
        buffer.append(line)
        buffer_tokens += line.token_count
    # Flush whatever remains as the final segment.
    if buffer:
        segments.append(PreChunkSegment(buffer))
    return segments
def _call_openai(self, segment_text: str, available_lines: List[int]) -> List[List[int]]:
    """Ask the LLM to group the given lines; degrade to singletons on failure.

    Sends the segment text with a runtime constraint restricting the model
    to *available_lines*, expecting a JSON object with a "groups" key. Any
    API error, JSON parse failure, or malformed "groups" payload falls back
    to one group per line so downstream processing never stalls.
    """
    runtime_constraint = f"\nCRITICAL CONSTRAINT: Only use the line numbers provided in this specific range: {available_lines}"
    full_system_prompt = self.config.system_prompt_base + runtime_constraint
    user_prompt = f"Input Lines:\n{segment_text}\n\nOutput JSON:"
    # Safe result used by every failure path: each line is its own group.
    fallback = [[n] for n in available_lines]
    try:
        logger.debug(f"Calling OpenAI (Lines {available_lines[0]}-{available_lines[-1]})...")
        completion = self.client.chat.completions.create(
            model=self.config.llm_model_name,
            messages=[
                {"role": "system", "content": full_system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            response_format={"type": "json_object"},
            temperature=self.config.temperature,
        )
        payload = json.loads(completion.choices[0].message.content)
        groups = payload.get("groups", [])
        # Only accept a list-of-lists shape; anything else is malformed.
        if not isinstance(groups, list) or any(not isinstance(g, list) for g in groups):
            return fallback
        return groups
    except Exception as e:
        logger.error(f"OpenAI Call Failed: {e}")
        return fallback
def _get_semantic_groupings(self, segments: List[PreChunkSegment]) -> List[List[int]]:
    """Collect LLM-proposed line groupings across all pre-chunk segments.

    Each segment is sent to the LLM independently via ``_call_openai``;
    the per-segment group lists are concatenated in segment order
    (overlapping groups are reconciled later by the merge step).

    Args:
        segments: Ordered pre-chunk segments produced by ``_create_pre_chunks``.

    Returns:
        A flat list of raw line-number groups, one sublist per group.
    """
    # Fix: the original iterated with `enumerate` but never used the index,
    # and used the ambiguous name `l` for lines.
    all_raw_groups: List[List[int]] = []
    for seg in segments:
        available_lines = [line.number for line in seg.lines]
        all_raw_groups.extend(self._call_openai(seg.formatted_text, available_lines))
    return all_raw_groups
def resolve_overlaps(raw_groups: List[List[int]], all_lines_map: Dict[int, Line]) -> List[SemanticGroup]:
"""
Merges groups based on overlapping line number ranges.
Uses a standard 'Merge Intervals' algorithm.
"""
intervals: List[GroupInterval] = []
# 1. Convert raw groups to Intervals
for group in raw_groups:
if not group:
continue
# Filter for valid lines only
valid_lines = {g for g in group if g in all_lines_map}
if not valid_lines:
continue
# Define range based on min and max line numbers in the group
intervals.append(GroupInterval(
start=min(valid_lines),
end=max(valid_lines),
line_numbers=valid_lines
))