# NOTE(review): dataset-viewer table-header artifact ("text stringlengths 0 131")
# — residue of the export, not part of the original source file.
llm_token_limit=lim.get('llm_context_window', 300), |
overlap_token_count=lim.get('window_overlap', 50), |
model_token_limit=lim.get('target_chunk_size', 100), |
system_prompt_base=prompts.get('system_instructions', '') |
) |
# -----------------------------------------------------------------------------
# Data Structures
# -----------------------------------------------------------------------------
@dataclass
class Line:
    """A single non-empty line of the source document.

    Attributes:
        number: 1-based position of the line in the original document.
        text: Raw line content (without the trailing newline).
        token_count: Token count of ``text`` per the configured tokenizer.
    """
    number: int
    text: str
    token_count: int
@dataclass
class PreChunkSegment:
    """A contiguous run of document lines awaiting semantic chunking.

    Attributes:
        lines: The lines in this segment (forward-referenced so this class
            compiles even when evaluated before ``Line`` is in scope).
        segment_id: Random UUID string identifying the segment.
    """
    lines: List["Line"]
    segment_id: str = field(default_factory=lambda: str(uuid.uuid4()))

    @property
    def formatted_text(self) -> str:
        """Render the segment as ``<number> | <text>`` rows, one line per row."""
        return "\n".join(f"{line.number} | {line.text}" for line in self.lines)
@dataclass
class SemanticGroup:
    """A group of related document lines, identified by their line numbers.

    Attributes:
        line_numbers: 1-based line numbers belonging to this group
            (presumably produced by the LLM grouping step — confirm upstream).
    """
    line_numbers: Set[int]
# -----------------------------------------------------------------------------
# Service Implementation
# -----------------------------------------------------------------------------
class DocumentChunkingService:
    """Splits documents into semantic chunks via an LLM, with a pluggable tokenizer."""

    def __init__(self, config_path: str = "config.yaml"):
        """Load configuration, select a tokenizer backend, and build the OpenAI client.

        Args:
            config_path: Path to the YAML configuration file.

        Exits the process (``sys.exit(1)``) on unrecoverable setup errors:
        unreadable config, a requested-but-missing HuggingFace tokenizer,
        or a failing OpenAI client constructor.
        """
        # 1. Load config — the service cannot run at all without it.
        try:
            self.config = ChunkingConfig.from_yaml(config_path)
        except Exception as e:
            logger.critical(f"Failed to load config: {e}")
            sys.exit(1)

        # 2. Tokenizer backend: exact HF token counts when requested,
        # otherwise the chars-per-token heuristic.
        self.hf_tokenizer = None
        if self.config.tokenizer_method == "huggingface":
            if not TRANSFORMERS_AVAILABLE:
                logger.critical("Config requests 'huggingface', but library is missing. Install 'transformers'.")
                sys.exit(1)
            try:
                logger.info(f"Initializing HuggingFace Tokenizer: {self.config.hf_model_name}")
                # Silence the fork-related parallelism warning from `tokenizers`.
                os.environ["TOKENIZERS_PARALLELISM"] = "false"
                self.hf_tokenizer = AutoTokenizer.from_pretrained(self.config.hf_model_name)
            except Exception as e:
                logger.critical(f"Failed to load HF Tokenizer: {e}")
                sys.exit(1)
        elif self.config.tokenizer_method == "heuristic":
            logger.info(f"Using Heuristic Tokenizer ({self.config.heuristic_chars_per_token} chars/token)")
        else:
            logger.warning(f"Unknown tokenizer method '{self.config.tokenizer_method}'. Defaulting to heuristic.")

        # 3. OpenAI client. NOTE(review): a missing key logs critical but leaves
        # self.client = None instead of exiting, unlike the other failure paths —
        # callers must tolerate a None client. Confirm this is intended.
        if self.config.api_key == "MISSING_KEY":
            logger.critical("No valid API Key found.")
            self.client = None
        else:
            try:
                self.client = OpenAI(api_key=self.config.api_key)
            except Exception as e:
                logger.error(f"Failed to initialize OpenAI Client: {e}")
                sys.exit(1)
def _count_tokens(self, text: str) -> int: |
""" |
Determines token count based on the configured method. |
""" |
if not text: |
return 0 |
if self.config.tokenizer_method == "huggingface" and self.hf_tokenizer: |
# HuggingFace Count |
return len(self.hf_tokenizer.encode(text, add_special_tokens=False)) |
else: |
# Heuristic Count |
return math.ceil(len(text) / self.config.heuristic_chars_per_token) |
def _prepare_lines(self, document_text: str) -> List[Line]: |
logger.debug(f"Preparing lines using {self.config.tokenizer_method} method...") |
raw_lines = document_text.split('\n') |
processed_lines = [] |
for idx, text in enumerate(raw_lines, start=1): |
if not text.strip(): continue |
count = self._count_tokens(text) |
processed_lines.append(Line(idx, text, count)) |
# NOTE(review): the export is truncated here (dataset-viewer "Subsets and Splits"
# footer). The remainder of _prepare_lines (including its return statement) and
# any further methods of DocumentChunkingService are missing from this view.