# NOTE: the three lines below are webpage artifacts (a HuggingFace dataset
# viewer error banner) pasted in by mistake; commented out so the file
# remains valid Python.
# The dataset viewer is taking too long to fetch the data. Try to refresh this page.
# Server-side error
# Error code:   ClientConnectionError
# -*- coding: utf-8 -*-
"""
Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1iAhLoc8FxHXijhyljdKhrIJbn342bhPD
"""

# Commented out IPython magic to ensure Python compatibility.
# %pip install --upgrade langchain datasets

import requests
from bs4 import BeautifulSoup

# Pipeline configuration: identity of the source BOE document plus the
# chunking parameters used by the text splitter further below.
CONFIG = {
    "title": "Constitución Española",
    "url": "https://www.boe.es/diario_boe/xml.php?id=BOE-A-1978-31229",
    "chunk_size": 1300,     # maximum characters per chunk
    "chunk_overlap": 150,   # characters shared between consecutive chunks
}

"""# Downloading BOE document"""

# Download the BOE XML document and save its plain text to disk.
# Fail fast on HTTP errors; a timeout prevents hanging forever on a
# dead connection (requests has no default timeout).
response = requests.get(CONFIG['url'], timeout=30)
response.raise_for_status()
soup = BeautifulSoup(response.text, "lxml")

filename = "constitucion.txt"
# Guard against schema changes in the BOE XML: select_one returns None
# when the selector matches nothing, which would otherwise surface as an
# opaque AttributeError.
texto_node = soup.select_one("documento > texto")
if texto_node is None:
    raise ValueError("BOE XML does not contain a <documento><texto> element")
# Explicit UTF-8 so the output is identical on every platform (the
# locale-dependent default encoding is not UTF-8 on Windows).
with open(filename, 'w', encoding='utf-8') as fn:
    fn.write(texto_node.get_text())

"""# Splitting by chunks the document"""

from langchain_community.document_loaders import TextLoader

# Load the downloaded text file as a list with one LangChain Document.
# Explicit UTF-8 avoids relying on the platform's locale default encoding
# (TextLoader raises on decode failure otherwise).
loader = TextLoader(filename, encoding="utf-8")
document = loader.load()

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Split the document into overlapping character chunks so each piece fits
# comfortably in an embedding / LLM context window; the overlap preserves
# context across chunk boundaries.
splitter_kwargs = {
    "chunk_size": CONFIG["chunk_size"],
    "chunk_overlap": CONFIG["chunk_overlap"],
}
text_splitter = RecursiveCharacterTextSplitter(**splitter_kwargs)

docs_chunks = text_splitter.split_documents(document)

# Sanity check: report how many chunks the splitter produced.
chunk_count = len(docs_chunks)
print(chunk_count)

# Bare expression: displays the chunk list when run in a notebook cell;
# it is a no-op when executed as a plain script.
docs_chunks

"""# Loading chunks in a dataset"""

from datasets import Dataset

# Assemble the chunks into a HuggingFace Dataset: one row per chunk with a
# sequential id, the shared source url/title, and the chunk text.
# Built with comprehensions instead of a manual append loop (the original
# also mixed 2-space indentation into a 4-space file).
n_chunks = len(docs_chunks)
data_dict = {
    'id': list(range(n_chunks)),
    'url': [CONFIG['url']] * n_chunks,
    'title': [CONFIG['title']] * n_chunks,
    'content': [chunk.page_content for chunk in docs_chunks],
}

dataset = Dataset.from_dict(data_dict)

"""# Loading to HuggingFace"""

# !huggingface-cli login

# Publish the dataset to the HuggingFace Hub (requires a prior
# `huggingface-cli login`).
hub_repo_id = "dariolopez/justicio-BOE-A-1978-31229-constitucion-100-chunks"
dataset.push_to_hub(hub_repo_id)
# NOTE: the lines below are webpage artifacts (HuggingFace dataset-card
# footer) pasted in by mistake; commented out so the file remains valid
# Python.
# Downloads last month
# 9
# Edit dataset card