# NOTE(review): dataset-viewer table-header residue removed here ("text stringlengths 0 131" / "|---|");
# it was extraction junk, not part of the original source file.
if not intervals: |
return [] |
# 2. Sort by start time |
intervals.sort(key=lambda x: x.start) |
# 3. Merge overlapping intervals |
merged: List[GroupInterval] = [] |
for current in intervals: |
if not merged: |
merged.append(current) |
continue |
last = merged[-1] |
# Check for overlap: |
# If current starts before (or exactly when) last ends, they overlap. |
# e.g. [84, 795] and [788, 887] -> 788 <= 795, so merge. |
if current.start <= last.end: |
# Merge logic: |
# 1. Extend the end if needed |
last.end = max(last.end, current.end) |
# 2. Combine the sets of line numbers |
last.line_numbers.update(current.line_numbers) |
else: |
# No overlap, start a new cluster |
merged.append(current) |
# 4. Convert back to SemanticGroups |
results = [SemanticGroup(group.line_numbers) for group in merged] |
return sorted(results, key=lambda x: min(x.line_numbers) if x.line_numbers else 0) |
def _finalize_chunk(self, content: str, line_numbers: List[int], parent_id: Optional[str] = None) -> List[Dict[str, Any]]: |
count = self._count_tokens(content) |
if count <= self.config.model_token_limit: |
return [{ |
"content": content, |
"line_numbers": line_numbers, |
"token_estimate": count, |
"metadata": {"parent_id": parent_id} |
}] |
if len(line_numbers) <= 1: |
return [{ |
"content": content, |
"line_numbers": line_numbers, |
"token_estimate": count, |
"metadata": {"parent_id": parent_id, "warning": "oversized"} |
}] |
mid = len(line_numbers) // 2 |
left_lines = line_numbers[:mid] |
right_lines = line_numbers[mid:] |
left_text = "\n".join([self.current_doc_map[n].text for n in left_lines]) |
right_text = "\n".join([self.current_doc_map[n].text for n in right_lines]) |
cid = parent_id if parent_id else str(uuid.uuid4())[:8] |
results = [] |
results.extend(self._finalize_chunk(left_text, left_lines, parent_id=cid)) |
results.extend(self._finalize_chunk(right_text, right_lines, parent_id=cid)) |
return results |
def process_document(self, plaintext: str) -> str:
    """Run the full chunking pipeline over *plaintext*.

    Prepares lines, builds pre-chunks, groups them semantically, merges
    overlapping groups, and finalizes each group into token-bounded chunks.

    Args:
        plaintext: Raw document text to process.

    Returns:
        A JSON string (indented) containing the list of final chunk dicts.
    """
    logger.info(f">>> Processing Document [Mode: {self.config.tokenizer_method.upper()}]")

    doc_lines = self._prepare_lines(plaintext)
    # Index lines by number so later stages can rebuild text from line ids.
    self.current_doc_map = {line.number: line for line in doc_lines}

    # Pre-chunk, group semantically, then merge any overlapping groups.
    groupings = self._get_semantic_groupings(self._create_pre_chunks(doc_lines))
    merged = self._resolve_overlaps(groupings, self.current_doc_map)

    logger.info("Finalizing chunks...")
    final_output: List[Dict[str, Any]] = []
    for grp in merged:
        nums = sorted(grp.line_numbers)
        body = "\n".join(self.current_doc_map[i].text for i in nums)
        final_output += self._finalize_chunk(body, nums)

    logger.info(f"<<< Done. Generated {len(final_output)} chunks.")
    return json.dumps(final_output, indent=2)
# ----------------------------------------------------------------------------- |
# Main Execution |
# ----------------------------------------------------------------------------- |
if __name__ == "__main__": |
sample_text = """The history of Artificial Intelligence is fascinating. |
It begins with the Turing Test proposed by Alan Turing. |
Early AI research focused on symbolic logic and problem solving. |
However, computing power was limited in the 1950s. |
Decades later, machine learning emerged as a dominant paradigm. |
Neural networks, inspired by the human brain, gained popularity. |
Deep learning revolutionized the field in the 2010s. |
Transformers, introduced by Google, changed NLP forever. |
Large Language Models like GPT-4 are now commonplace. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.