Spaces:
Sleeping
Sleeping
Laurine Sottani
committed on
Commit
·
4b0ec54
1
Parent(s):
d9f69e5
minor changes
Browse files
- file_cleaning.py +0 -107
- file_cleaning_ui.py +2 -2
- webscraping_cleaning.py +0 -101
file_cleaning.py
DELETED
|
@@ -1,107 +0,0 @@
|
|
| 1 |
-
#!/usr/bin/env python
|
| 2 |
-
import argparse
|
| 3 |
-
import re
|
| 4 |
-
import pdfplumber
|
| 5 |
-
import os
|
| 6 |
-
|
| 7 |
-
def clean_text_for_rag(text: str) -> str:
    """
    Clean text for RAG ingestion.

    First normalises typographic special characters to ASCII-friendly
    equivalents, then keeps only letters (incl. accents), digits,
    whitespace and simple punctuation, collapsing whitespace runs to
    single spaces.
    """
    # One-pass normalisation of typographic characters; characters not
    # listed here pass through unchanged.
    table = str.maketrans({
        "’": "'", "‘": "'", "“": '"', "”": '"', "«": '"', "»": '"',
        "–": "-", "—": "-", "…": "...", "œ": "oe", "Œ": "OE",
        "æ": "ae", "Æ": "AE", "©": "(c)", "®": "(R)", "™": "TM",
        "±": "+/-", "×": "x", "÷": "/",
    })
    text = text.translate(table)

    # Strict filter: drop everything except letters, digits, whitespace
    # and a small whitelist of punctuation marks.
    text = re.sub(r'[^a-zA-ZÀ-ÿæ-œ0-9\s\.\,\:\;\!\?\-\_\'\"\\(\)\–\…]', '', text)
    # Collapse any whitespace run (incl. non-breaking spaces) to one space.
    return re.sub(r'\s+', ' ', text).strip()
|
| 28 |
-
|
| 29 |
-
def extract_and_clean_pdf(pdf_path, output_path):
    """
    Extract the text of every page of a PDF, clean it for RAG and write
    the result to ``output_path``.
    """
    print(f"[+] Extraction du texte depuis : {pdf_path}")

    # Collect the text of each page; pages with no extractable text
    # (e.g. scanned images) are skipped.
    pages = []
    with pdfplumber.open(pdf_path) as pdf:
        for page in pdf.pages:
            page_text = page.extract_text()
            if page_text:
                pages.append(page_text)

    # Join all pages, then run the RAG cleaning pass over the whole text.
    cleaned_text = clean_text_for_rag(' '.join(pages))

    with open(output_path, 'w', encoding='utf-8') as f:
        f.write(cleaned_text)

    print(f"[+] Texte nettoyé sauvegardé dans : {output_path}")
|
| 52 |
-
|
| 53 |
-
def extract_text(file_path, output_path):
    """
    Extract text from non-PDF files (TXT, MD, ...), clean each line for
    RAG and save the result to ``output_path`` as Markdown.
    """
    print(f"[+] Extraction du texte depuis : {file_path}")
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()

        # Clean every non-blank line individually, then rejoin with
        # newlines so the output stays a readable .md file.
        cleaned_text = '\n'.join(
            clean_text_for_rag(stripped)
            for stripped in (line.strip() for line in lines)
            if stripped
        )

        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(cleaned_text)

        print(f"[+] Texte nettoyé sauvegardé dans : {output_path}")
    except Exception as e:
        # Report which file failed, then propagate to the caller.
        print(f"Erreur lors de l'ouverture du fichier {file_path} : {e}")
        raise
|
| 76 |
-
|
| 77 |
-
def main():
    """
    CLI entry point: parse arguments, validate the input file and
    dispatch to the right extraction routine based on the extension.
    """
    parser = argparse.ArgumentParser(description='Extraire et nettoyer un fichier pour le RAG. Sortie toujours en .md.')
    parser.add_argument('input_file', type=str, help='Chemin vers le fichier à traiter (PDF, TXT, MD, etc.).')
    parser.add_argument('output_md', type=str, help='Chemin du fichier Markdown (.md) de sortie.')
    args = parser.parse_args()

    input_path = args.input_file
    output_path = args.output_md

    # Refuse to continue when the input file is missing.
    if not os.path.exists(input_path):
        print(f"Erreur : Le fichier {input_path} n'existe pas.")
        return

    # Force a .md extension on the output file, warning the user.
    if not output_path.lower().endswith('.md'):
        print(f"[!] Avertissement : Le fichier de sortie n'a pas l'extension .md. Il sera renommé en .md.")
        output_path = os.path.splitext(output_path)[0] + '.md'

    # PDFs need pdfplumber; everything else is read as plain text.
    _, ext = os.path.splitext(input_path.lower())
    if ext == '.pdf':
        extract_and_clean_pdf(input_path, output_path)
    else:
        extract_text(input_path, output_path)

if __name__ == '__main__':
    main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
file_cleaning_ui.py
CHANGED
|
@@ -81,9 +81,9 @@ def process_file(input_file: gr.File, output_name: str) -> str:
|
|
| 81 |
return out_path
|
| 82 |
|
| 83 |
with gr.Blocks(title="Nettoyage de texte pour RAG") as demo:
|
| 84 |
-
gr.Markdown("# 📄 Nettoyage de texte pour RAG
|
| 85 |
gr.Markdown(
|
| 86 |
-
"Déposez un fichier
|
| 87 |
"et vous pourrez le télécharger sous **le nom que vous choisissez**."
|
| 88 |
)
|
| 89 |
|
|
|
|
| 81 |
return out_path
|
| 82 |
|
| 83 |
with gr.Blocks(title="Nettoyage de texte pour RAG") as demo:
|
| 84 |
+
gr.Markdown("# 📄 Nettoyage de texte pour RAG")
|
| 85 |
gr.Markdown(
|
| 86 |
+
"Déposez un fichier, le contenu textuel sera extrait, nettoyé "
|
| 87 |
"et vous pourrez le télécharger sous **le nom que vous choisissez**."
|
| 88 |
)
|
| 89 |
|
webscraping_cleaning.py
DELETED
|
@@ -1,101 +0,0 @@
|
|
| 1 |
-
#!/usr/bin/env python
|
| 2 |
-
# -*- coding: utf-8 -*-
|
| 3 |
-
|
| 4 |
-
import re, tempfile, requests, os
|
| 5 |
-
from bs4 import BeautifulSoup
|
| 6 |
-
from markdownify import markdownify as md
|
| 7 |
-
from readability import Document
|
| 8 |
-
import gradio as gr
|
| 9 |
-
|
| 10 |
-
def clean_text_for_rag(text: str) -> str:
|
| 11 |
-
text = re.sub(
|
| 12 |
-
r"[’‘“”«»–—\u00A0\u202F…œŒæÆ©®™§°±×÷]",
|
| 13 |
-
lambda m: {
|
| 14 |
-
"’": "'", "‘": "'", "“": '"', "”": '"',
|
| 15 |
-
"«": '"', "»": '"', "–": "-", "—": "-",
|
| 16 |
-
"…": "...", "œ": "oe", "Œ": "OE",
|
| 17 |
-
"æ": "ae", "Æ": "AE", "©": "(c)", "®": "(R)",
|
| 18 |
-
"™": "TM", "§": "§", "°": "°", "±": "+/-",
|
| 19 |
-
"×": "x", "÷": "/"
|
| 20 |
-
}.get(m.group(0), m.group(0)),
|
| 21 |
-
text,
|
| 22 |
-
)
|
| 23 |
-
text = re.sub(r'[^a-zA-ZÀ-ÿæ-œ0-9\s\.\,\:\;\!\?\-\_\'\"\\\(\)]', '', text)
|
| 24 |
-
return re.sub(r'\s+', ' ', text).strip()
|
| 25 |
-
|
| 26 |
-
def fetch_html(url: str) -> str:
    """
    Download a page with a desktop-browser User-Agent and return its
    HTML, fixing the encoding when requests falls back to Latin-1.
    """
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/124.0 Safari/537.36"
        )
    }
    response = requests.get(url, headers=headers, timeout=20)
    response.raise_for_status()
    # requests assumes ISO-8859-1 when the server omits a charset;
    # trust the content-sniffed (apparent) encoding instead.
    if response.encoding == "ISO-8859-1":
        response.encoding = response.apparent_encoding
    return response.text
|
| 36 |
-
|
| 37 |
-
def extract_main(html: str) -> str:
    """
    Extract the main content of a web page, discarding boilerplate.

    - readability.Document narrows the HTML down to the article zone.
    - Navigation, comments, widgets, ads, etc. are removed.
    - Only semantically relevant tags (article, p, h1-6, li, ...) are kept.
    """
    soup = BeautifulSoup(Document(html).content(), "html.parser")

    # Drop structurally irrelevant elements (ads, menus, scripts, ...).
    for junk in soup(["script", "style", "noscript", "footer", "header", "nav", "form", "iframe", "aside", "button"]):
        junk.decompose()

    # Remove div/section containers carrying too little text to matter.
    for container in soup.find_all(["div", "section"]):
        if len(container.get_text(strip=True)) < 80:  # adjustable threshold
            container.decompose()

    # Keep only the text of semantically relevant tags, skipping
    # share/follow/comments/ad boilerplate lines.
    keep_tags = ["article", "p", "h1", "h2", "h3", "h4", "h5", "h6", "blockquote", "ul", "ol", "li", "pre", "code"]
    clean_parts = [
        chunk
        for chunk in (tag.get_text(" ", strip=True) for tag in soup.find_all(keep_tags))
        if chunk and not re.match(r"^(Partager|Suivre|Commentaires|Lire aussi|Publicité)", chunk, re.I)
    ]

    return "\n\n".join(clean_parts).strip()
|
| 70 |
-
|
| 71 |
-
def to_markdown(text: str) -> str:
    """
    Convert the extracted text to Markdown (ATX headings) and run the
    RAG cleaning pass over the result.
    """
    wrapped = f"<div>{text}</div>"
    return clean_text_for_rag(md(wrapped, heading_style="ATX"))
|
| 74 |
-
|
| 75 |
-
def process(url: str, out_name: str) -> str:
    """
    Fetch *url*, keep only its main content, convert it to Markdown and
    write it to a temporary file whose path is returned.
    """
    markdown = to_markdown(extract_main(fetch_html(url)))

    # Normalise the requested file name and force a .md suffix.
    out_name = out_name.strip()
    if not out_name.lower().endswith(".md"):
        out_name += ".md"

    # A fresh temp directory avoids collisions between requests.
    out_path = os.path.join(tempfile.mkdtemp(), out_name)
    with open(out_path, "w", encoding="utf-8") as f:
        f.write(markdown)
    return out_path
|
| 88 |
-
|
| 89 |
-
# Gradio UI: one input column (URL to scrape + output file name) and
# one output column (the generated Markdown file for download).
with gr.Blocks(title="Web → Markdown") as demo:
    gr.Markdown("# 🌐 Scraper web vers Markdown")
    with gr.Row():
        with gr.Column():
            url_in = gr.Textbox(label="URL à scraper")
            out_name = gr.Textbox(label="Nom du fichier (.md)", value="output.md")
            btn = gr.Button("🛠️ Générer", variant="primary")
        with gr.Column():
            file_out = gr.File(label="Fichier Markdown")
    # Run the full scrape -> clean -> Markdown pipeline on click.
    # NOTE(review): nesting reconstructed from a whitespace-mangled
    # scrape — confirm layout against the original file if available.
    btn.click(fn=process, inputs=[url_in, out_name], outputs=file_out)

if __name__ == "__main__":
    # share=True exposes a public Gradio link when run directly.
    demo.launch(share=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|