ariG23498 (HF staff) committed
Commit 66f114c
1 Parent(s): feb6557

Update README.md

Files changed (1):
1. README.md +74 -0
README.md CHANGED
@@ -19,3 +19,77 @@ configs:
  - split: train
    path: data/train-*
---
+
+
+Created by the following code:
+```py
+!pip install -Uq datasets
+
+import requests
+from bs4 import BeautifulSoup, Comment
+import pandas as pd
+from datasets import Dataset
+
+
+def get_content(url):
+    # Fetch a page and return its parsed soup (None if the request fails)
+    response = requests.get(url)
+    if response.status_code == 200:
+        soup = BeautifulSoup(response.text, 'html.parser')
+        return soup
+
+
+url = "https://huggingface.co/blog/community"
+
+# Scrape the community blog index for every article's title and link
+soup = get_content(url)
+articles = soup.find_all("article")
+titles = [article.h4.text for article in articles]
+links = [f'https://hf.co{article.find("a", class_="block px-3 py-2 cursor-pointer").get("href")}' for article in articles]
+
+
+def get_article(soup):
+    # Find all comments in the document
+    comments = soup.find_all(string=lambda text: isinstance(text, Comment))
+
+    # Initialize variables to store the start and end comments
+    start_comment = None
+    end_comment = None
+
+    # Identify the start and end comments
+    for comment in comments:
+        comment_text = comment.strip()
+        if comment_text == 'HTML_TAG_START':
+            start_comment = comment
+        elif comment_text == 'HTML_TAG_END':
+            end_comment = comment
+
+    # Check if both comments were found
+    if start_comment and end_comment:
+        # Collect all elements between the start and end comments
+        contents = []
+        current = start_comment.next_sibling
+        while current and current != end_comment:
+            contents.append(current)
+            current = current.next_sibling
+
+        # Convert the contents to a string
+        between_content = ''.join(str(item) for item in contents)
+
+        # Output the extracted content
+        return between_content
+    else:
+        return "Start or end comment not found."
+
+
+# Fetch each article page and extract its body
+article_soups = [get_content(link) for link in links]
+articles = [get_article(article_soup) for article_soup in article_soups]
+
+# Assuming titles, links, articles are your lists
+df = pd.DataFrame({
+    'title': titles,
+    'link': links,
+    'article': articles
+})
+
+# Create a Hugging Face Dataset object
+dataset = Dataset.from_pandas(df)
+
+# Push the dataset to the Hugging Face Hub
+dataset.push_to_hub("ariG23498/community-blogs")
+```
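
Once pushed, the dataset can be loaded back from the Hub with `datasets.load_dataset`. A minimal usage sketch, assuming the `train` split and repository id shown above:

```py
from datasets import load_dataset

# Load the scraped blog posts from the Hugging Face Hub
dataset = load_dataset("ariG23498/community-blogs", split="train")

# Each row holds a post's title, link, and extracted article HTML
print(dataset[0]["title"], dataset[0]["link"])
```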