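"""Index Polish legal codes (kodeksy) into a ChromaDB vector collection.

KodeksProcessor reads statute .txt files, extracts the Dz.U. (Journal of Laws)
reference and act title, splits each act into chapter/article/paragraph chunks,
and stores them for semantic search via sentence-transformer embeddings.
"""
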
import os
import re
from typing import List, Dict, Tuple
import chromadb
from chromadb.utils import embedding_functions
from config import CHUNK_SIZE, CHUNK_OVERLAP, DATABASE_DIR, EMBEDDING_MODEL

class KodeksProcessor:
    def __init__(self):
        self.client = chromadb.PersistentClient(path=DATABASE_DIR)
        # get_or_create_collection replaces the bare try/except and ensures
        # the same embedding function is attached whether the collection
        # already exists or is being created.
        self.collection = self.client.get_or_create_collection(
            name="kodeksy",
            embedding_function=embedding_functions.SentenceTransformerEmbeddingFunction(
                model_name=EMBEDDING_MODEL
            )
        )

    def extract_metadata(self, text: str) -> Dict:
        """Extract the Dz.U. (Journal of Laws) reference and the act's
        date and title from the text."""
        metadata = {}
        dz_u_match = re.search(r'Dz\.U\.(\d{4})\.(\d+)\.(\d+)', text)
        if dz_u_match:
            metadata['dz_u'] = f"Dz.U.{dz_u_match.group(1)}.{dz_u_match.group(2)}.{dz_u_match.group(3)}"
            metadata['rok'] = dz_u_match.group(1)
        
        nazwa_match = re.search(r'USTAWA\s+z dnia(.*?)\n(.*?)\n', text)
        if nazwa_match:
            metadata['data_ustawy'] = nazwa_match.group(1).strip()
            metadata['nazwa'] = nazwa_match.group(2).strip()
        
        return metadata
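
    # Illustrative header (an assumed sample shaped like the Civil Code's
    # published header, not taken from this repository's data files):
    #
    #   Dz.U.1964.16.93
    #   USTAWA
    #   z dnia 23 kwietnia 1964 r.
    #   Kodeks cywilny
    #
    # extract_metadata would then return:
    #   {'dz_u': 'Dz.U.1964.16.93', 'rok': '1964',
    #    'data_ustawy': '23 kwietnia 1964 r.', 'nazwa': 'Kodeks cywilny'}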

    def split_header_and_content(self, text: str) -> Tuple[str, str]:
        """Split the raw file into everything before "USTAWA" and the act itself."""
        parts = text.split("USTAWA", 1)
        if len(parts) > 1:
            return parts[0], "USTAWA" + parts[1]
        return "", text

    def process_article(self, article_text: str) -> Dict:
        """Split a single article into its numbered paragraphs (§), if any."""
        art_num_match = re.match(r'Art\.\s*(\d+)', article_text)
        article_num = art_num_match.group(1) if art_num_match else ""
        
        paragraphs = re.findall(r'§\s*(\d+)[.\s]+(.*?)(?=§\s*\d+|$)', article_text, re.DOTALL)
        
        if not paragraphs:
            return {
                "article_num": article_num,
                "content": article_text.strip(),
                "has_paragraphs": False
            }
        
        return {
            "article_num": article_num,
            "paragraphs": paragraphs,
            "has_paragraphs": True
        }
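
    # Worked example (assumed input, not from the source data):
    #   process_article("Art. 5 § 1. Tekst pierwszy. § 2. Tekst drugi.")
    # returns:
    #   {"article_num": "5",
    #    "paragraphs": [("1", "Tekst pierwszy. "), ("2", "Tekst drugi.")],
    #    "has_paragraphs": True}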

    def split_into_chunks(self, text: str, metadata: Dict) -> List[Dict]:
        """Split the act into per-article (or per-paragraph) chunks, each
        carrying the shared metadata plus its chapter and article number."""
        chunks = []
        # Capture chapter headings ("Rozdział N" plus the title line) as
        # their own list items; [^\n]+ keeps the title to a single line.
        chapters = re.split(r'(Rozdział \d+\n\n[^\n]+)\n', text)
        current_chapter = ""
        
        for section in chapters:
            if section.startswith('Rozdział'):
                current_chapter = section.strip()
                continue
            
            # re.DOTALL lets a single article span multiple lines.
            articles = re.split(r'(Art\.\s*\d+.*?)(?=Art\.\s*\d+|$)', section, flags=re.DOTALL)
            
            for article in articles:
                if not article.strip():
                    continue
                
                if article.startswith('Art.'):
                    processed_article = self.process_article(article)
                    
                    chunk_metadata = {
                        **metadata,
                        "chapter": current_chapter,
                        "article": processed_article["article_num"]
                    }
                    
                    if processed_article["has_paragraphs"]:
                        for par_num, par_content in processed_article["paragraphs"]:
                            chunks.append({
                                "text": f"Art. {processed_article['article_num']} § {par_num}. {par_content}",
                                "metadata": {**chunk_metadata, "paragraph": par_num}
                            })
                    else:
                        chunks.append({
                            "text": processed_article["content"],
                            "metadata": chunk_metadata
                        })
        
        return chunks

    def process_file(self, filepath: str) -> None:
        print(f"Processing file: {filepath}")
        
        with open(filepath, 'r', encoding='utf-8') as file:
            content = file.read()
        
        _header, main_content = self.split_header_and_content(content)
        metadata = self.extract_metadata(main_content)
        metadata['filename'] = os.path.basename(filepath)
        
        chunks = self.split_into_chunks(main_content, metadata)
        
        # Batch all chunks into a single add() call; each id combines the
        # filename, article number and position so that ids stay unique.
        if chunks:
            self.collection.add(
                documents=[chunk["text"] for chunk in chunks],
                metadatas=[chunk["metadata"] for chunk in chunks],
                ids=[f"{metadata['filename']}_{chunk['metadata']['article']}_{i}"
                     for i, chunk in enumerate(chunks)]
            )
        
        print(f"Added {len(chunks)} chunks from file {metadata['filename']}")

    def process_all_files(self, directory: str) -> None:
        for filename in os.listdir(directory):
            if filename.endswith('.txt'):
                filepath = os.path.join(directory, filename)
                self.process_file(filepath)

    def search(self, query: str, n_results: int = 3) -> Dict:
        """Return the n_results chunks most similar to the query text."""
        results = self.collection.query(
            query_texts=[query],
            n_results=n_results
        )
        return results
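

# Minimal usage sketch (assumptions: a local "data/" directory of UTF-8 .txt
# statute files laid out the way the regexes above expect; the directory name
# and the query are illustrative, not taken from this project's config).
if __name__ == "__main__":
    processor = KodeksProcessor()
    processor.process_all_files("data")
    # Query in Polish ("liability for damage"), matching the indexed corpus.
    results = processor.search("odpowiedzialność za szkodę")
    # chromadb returns parallel lists per query text.
    for doc, meta in zip(results["documents"][0], results["metadatas"][0]):
        print(meta.get("article"), "->", doc[:80])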