import PyPDF2
from openpyxl import load_workbook
from pptx import Presentation
import gradio as gr
import io
import re
import zipfile
import xml.etree.ElementTree as ET
import filetype
import os
import mimetypes
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import urllib3
# Constants
CHUNK_SIZE = 32000
# --- Custom HTTP Session and Response Classes ---
class CustomSession:
    def __init__(self):
        self.pool_manager = urllib3.PoolManager()

    def get(self, url):
        response = self.pool_manager.request('GET', url)
        return CustomResponse(response)

class CustomResponse:
    def __init__(self, response):
        self.status_code = response.status
        self.headers = response.headers
        self.content = response.data

    def json(self):
        import json
        return json.loads(self.content)

    def text(self):
        return self.content.decode('utf-8')

    def soup(self):
        return BeautifulSoup(self.content, 'lxml')

    def clean_text(self):
        soup = self.soup()
        cleaned_text = soup.get_text().replace('\n', ' ').replace('\r', ' ')
        # Collapse runs of consecutive spaces into a single space.
        while '  ' in cleaned_text:
            cleaned_text = cleaned_text.replace('  ', ' ')
        return cleaned_text.strip()

def get(url):
    session = CustomSession()
    return session.get(url)
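
# A minimal usage sketch (kept as a comment so it doesn't run at import time):
# fetch a page with the helper above and print its cleaned text. The URL is a
# placeholder, not part of the app.
#
#   resp = get("https://example.com")
#   if resp.status_code == 200:
#       print(resp.clean_text()[:200])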
# --- Utility Functions ---
def xml2text(xml):
    """Extracts text from XML data."""
    text = u''
    root = ET.fromstring(xml)
    for child in root.iter():
        text += child.text + " " if child.text is not None else ''
    return text
def clean_text(content):
    """Normalizes whitespace: converts newlines and tabs to spaces and collapses runs."""
    content = content.replace('\n', ' ')
    content = content.replace('\r', ' ')
    content = content.replace('\t', ' ')
    content = re.sub(r'\s+', ' ', content)
    return content
def extract_texts(soup):
    """Extracts all text content from the soup."""
    return [text for text in soup.stripped_strings]
def extract_links(soup, base_url):
    """Extracts all valid links from the soup."""
    links = []
    for link in soup.find_all('a', href=True):
        href = link['href']
        full_url = urljoin(base_url, href) if not href.startswith(("http://", "https://")) else href
        link_text = link.get_text(strip=True) or "No Text"
        links.append({"Text": link_text, "URL": full_url})
    return links
def extract_images(soup, base_url):
    """Extracts all valid image URLs and their alt text from the soup."""
    images = []
    for img in soup.find_all('img', src=True):
        img_url = img['src']
        full_img_url = urljoin(base_url, img_url) if not img_url.startswith(("http://", "https://")) else img_url
        alt_text = img.get('alt', 'No Alt Text')
        images.append({"Alt Text": alt_text, "Image URL": full_img_url})
    return images
def format_detailed_output(structured_data):
    """Formats the structured data into a Markdown string."""
    result = "### Structured Page Content\n\n"
    result += "**Texts:**\n" + (" ".join(structured_data["Texts"]) if structured_data["Texts"] else "No textual content found.") + "\n\n"
    result += "**Links:**\n"
    if structured_data["Links"]:
        result += "\n".join(f"[{link['Text']}]({link['URL']})" for link in structured_data["Links"]) + "\n"
    else:
        result += "No links found.\n"
    result += "**Images:**\n"
    if structured_data["Images"]:
        result += "\n".join(f"![{img['Alt Text']}]({img['Image URL']})" for img in structured_data["Images"]) + "\n"
    else:
        result += "No images found.\n"
    return result
# --- Document Reading Functions ---
def extract_text_from_docx(docx_data, clean=True):
    """Extracts text from DOCX files."""
    text = u''
    with zipfile.ZipFile(io.BytesIO(docx_data)) as zipf:
        filelist = zipf.namelist()
        # Headers first, then the main document body, then footers.
        header_xmls = 'word/header[0-9]*.xml'
        for fname in filelist:
            if re.match(header_xmls, fname):
                text += xml2text(zipf.read(fname))
        doc_xml = 'word/document.xml'
        text += xml2text(zipf.read(doc_xml))
        footer_xmls = 'word/footer[0-9]*.xml'
        for fname in filelist:
            if re.match(footer_xmls, fname):
                text += xml2text(zipf.read(fname))
    if clean:
        text = clean_text(text)
    return text, len(text)
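
# A minimal usage sketch (comment only): extracting text from a local .docx
# with the helper above. "sample.docx" is a hypothetical path.
#
#   with open("sample.docx", "rb") as fh:
#       text, length = extract_text_from_docx(fh.read())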
def extract_text_from_pptx(pptx_data, clean=True):
    """Extracts text from PPTX files."""
    text = u''
    with zipfile.ZipFile(io.BytesIO(pptx_data)) as zipf:
        filelist = zipf.namelist()
        # Extract text from slide notes
        notes_xmls = 'ppt/notesSlides/notesSlide[0-9]*.xml'
        for fname in filelist:
            if re.match(notes_xmls, fname):
                text += xml2text(zipf.read(fname))
        # Extract text from slide content (shapes and text boxes)
        slide_xmls = 'ppt/slides/slide[0-9]*.xml'
        for fname in filelist:
            if re.match(slide_xmls, fname):
                text += xml2text(zipf.read(fname))
    if clean:
        text = clean_text(text)
    return text, len(text)
def read_document(file_path, clean=True, url=""):
    """Reads a local file and dispatches on its detected MIME type."""
    with open(file_path, "rb") as f:
        file_content = f.read()
    kind = filetype.guess(file_content)
    # filetype cannot identify text-based formats; fall back to HTML handling.
    if kind is None:
        mime = "text/html"
    else:
        mime = kind.mime
    if mime == "application/pdf":
        try:
            pdf_reader = PyPDF2.PdfReader(io.BytesIO(file_content))
            content = ''
            for page in pdf_reader.pages:
                content += page.extract_text()
            if clean:
                content = clean_text(content)
            return content, len(repr(content))
        except Exception as e:
            return f"Error reading PDF: {e}", 0
    elif mime == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
        try:
            wb = load_workbook(io.BytesIO(file_content))
            content = ''
            for sheet in wb.worksheets:
                for row in sheet.rows:
                    for cell in row:
                        if cell.value is not None:
                            content += str(cell.value) + ' '
            if clean:
                content = clean_text(content)
            return content, len(repr(content))
        except Exception as e:
            return f"Error reading XLSX: {e}", 0
    elif mime == "text/plain":
        try:
            content = file_content.decode('utf-8')
            if clean:
                content = clean_text(content)
            return content, len(repr(content))
        except Exception as e:
            return f"Error reading TXT file: {e}", 0
    elif mime == "text/csv":
        try:
            content = file_content.decode('utf-8')
            if clean:
                content = clean_text(content)
            return content, len(repr(content))
        except Exception as e:
            return f"Error reading CSV file: {e}", 0
    elif mime == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        try:
            return extract_text_from_docx(file_content, clean)
        except Exception as e:
            return f"Error reading DOCX: {e}", 0
    elif mime == "application/vnd.openxmlformats-officedocument.presentationml.presentation":
        try:
            return extract_text_from_pptx(file_content, clean)
        except Exception as e:
            return f"Error reading PPTX: {e}", 0
    elif mime == "text/html":  # Handle HTML content
        try:
            soup = BeautifulSoup(file_content, 'lxml')
            structured_data = {
                "Texts": extract_texts(soup),
                "Links": extract_links(soup, url),
                "Images": extract_images(soup, url)
            }
            return format_detailed_output(structured_data), 0
        except Exception as e:
            return f"Error parsing HTML content: {e}", 0
    else:
        # Unknown type: attempt a plain UTF-8 decode as a last resort.
        try:
            content = file_content.decode('utf-8')
            if clean:
                content = clean_text(content)
            return content, len(repr(content))
        except Exception as e:
            return f"Error reading file: {e}", 0
def download_and_process_file(url, clean=True):
    """Downloads a file from a URL and processes it according to its type."""
    if not url.startswith("http://") and not url.startswith("https://"):
        url = "http://" + url  # Prepend "http://" if no scheme is present
    try:
        response = get(url)
        # Derive a filesystem-safe local filename from the URL.
        original_filename = os.path.basename(url)
        temp_filename = re.sub(r'[^\w\-_\. ]', '_', original_filename)
        content_type = response.headers.get('content-type', '')
        ext = mimetypes.guess_extension(content_type)
        if ext and not temp_filename.endswith(ext):  # Append extension if not already present
            temp_filename += ext
        with open(temp_filename, 'wb') as f:
            f.write(response.content)
        kind = filetype.guess(temp_filename)
        if kind and kind.mime.startswith('image/'):
            return f"![]({url})", 0  # Return Markdown image syntax if it's an image
        else:
            return read_document(temp_filename, clean, url)  # Otherwise, process as a document
    except urllib3.exceptions.HTTPError as e:
        return f"Error: {e}", 0
    except Exception as e:
        return f"Error downloading file: {e}", 0
# --- Gradio Interface ---
iface = gr.Interface(
    fn=download_and_process_file,
    inputs=[
        gr.Textbox(lines=1, placeholder="Enter URL of the file"),
        gr.Checkbox(label="Clean Text", value=True),
    ],
    outputs=[
        gr.Markdown(label="Document Content/Image Markdown/Web Page Content"),
        gr.Number(label="Document Length (characters)"),
    ],
    title="Enhanced File and Web Page Processor for Hugging Face Chat Tools",
    description="Enter the URL of an image, video, document, or web page. The tool will handle it accordingly: images will be displayed as Markdown, documents will have their text extracted, and web pages will have their content structured and displayed. This tool is designed for use with Hugging Face Chat Tools. \n [https://hf.co/chat/tools/66f1a8159d41ad4398ebb711](https://hf.co/chat/tools/66f1a8159d41ad4398ebb711)",
    concurrency_limit=None,
    api_name="main"
)

iface.launch()