Makima57 committed on
Commit
e67d52b
·
verified ·
1 Parent(s): c2d6566

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -31
app.py CHANGED
@@ -1,19 +1,26 @@
1
- import streamlit as st
2
- from googlesearch import search
3
- import requests
4
- from bs4 import BeautifulSoup
5
 
6
- # Function to perform Google search and return the first link
7
  def google_search(query):
8
  try:
9
  query = query + "/t site:https://medium.com/"
10
  # Perform the search and get an iterator of results
11
  search_results = search(query, num_results=10) # Get up to 10 results
12
- first_link = next(search_results, None) # Get the first result
13
- return first_link
 
 
 
 
 
 
 
14
  except Exception as e:
15
  st.error(f"An error occurred: {e}")
16
- return None
17
 
18
  # Function to fetch webpage content
19
  def fetch_webpage_content(url):
@@ -23,7 +30,7 @@ def fetch_webpage_content(url):
23
  return response.text
24
  except Exception as e:
25
  st.error(f"Failed to fetch the webpage content: {e}")
26
- return None
27
 
28
  # Function to scrape text from webpage content using Beautiful Soup
29
  def scrape_text(webpage_content):
@@ -43,35 +50,38 @@ def scrape_text(webpage_content):
43
  return text
44
  except Exception as e:
45
  st.error(f"Failed to scrape text from webpage content: {e}")
46
- return None
47
 
48
  # Streamlit app UI
49
- st.title("Search Link Finder")
50
 
51
  # Input field for search query
52
- query = st.text_input("Enter search query", "")
53
 
54
  # Button to trigger search
55
  if st.button("Search"):
56
  if query:
57
- first_link = google_search(query)
58
- if first_link:
59
- st.success(f"First link: [Click here]({first_link})")
60
- # Fetch webpage content
61
- webpage_content = fetch_webpage_content(first_link)
62
- if webpage_content:
63
- # Scrape text from webpage content
64
- scraped_text = scrape_text(webpage_content)
65
- if scraped_text:
66
- st.write(scraped_text)
67
- # Download button for the webpage content
68
- st.download_button(
69
- label="Download Webpage Content",
70
- data=scraped_text,
71
- file_name="webpage_content.txt",
72
- mime="text/plain"
73
- )
 
 
 
74
  else:
75
- st.warning("No results found")
76
  else:
77
- st.error("Please enter a query")
 
1
+ import streamlit as st
2
+ from googlesearch import search
3
+ import requests
4
+ from bs4 import BeautifulSoup
5
 
6
# Function to perform a Google search restricted to medium.com and
# return up to ``num_links`` result URLs.
def google_search(query, num_links=2):
    """Search Google for *query* (scoped to medium.com) and return top links.

    Parameters
    ----------
    query : str
        The user's search terms; a medium.com site filter is appended.
    num_links : int, optional
        Maximum number of result URLs to return (default 2, matching the
        previous hard-coded behavior).

    Returns
    -------
    list | None
        Up to ``num_links`` result URLs (possibly fewer, possibly empty),
        or ``None`` if the search raised an exception.
    """
    try:
        # NOTE(review): the "/t" here looks like a typo (tab or space
        # intended?) — preserved byte-for-byte to keep behavior identical;
        # confirm the intended query template before changing it.
        query = query + "/t site:https://medium.com/"
        # Ask the engine for up to 10 results, then keep only the first
        # num_links of them.
        search_results = search(query, num_results=10)
        links = []
        for link in search_results:
            links.append(link)
            if len(links) >= num_links:
                break
        return links
    except Exception as e:
        # Surface the failure in the Streamlit UI instead of crashing.
        st.error(f"An error occurred: {e}")
        return None
24
 
25
  # Function to fetch webpage content
26
  def fetch_webpage_content(url):
 
30
  return response.text
31
  except Exception as e:
32
  st.error(f"Failed to fetch the webpage content: {e}")
33
+ return None
34
 
35
  # Function to scrape text from webpage content using Beautiful Soup
36
  def scrape_text(webpage_content):
 
50
  return text
51
  except Exception as e:
52
  st.error(f"Failed to scrape text from webpage content: {e}")
53
+ return None
54
 
55
# Streamlit app UI
st.title("Search Link Finder")

# Input field for search query
query = st.text_input("Enter search query", "")

# Button to trigger search
if st.button("Search"):
    if not query:
        # Guard clause: nothing to search for.
        st.error("Please enter a query")
    else:
        found_links = google_search(query)
        if not found_links:
            st.warning("No results found")
        else:
            # Show each link, then fetch, scrape, and offer its content
            # for download (numbering starts at 1 for the user).
            for idx, url in enumerate(found_links, start=1):
                st.success(f"Link {idx}: [Click here]({url})")

                page_html = fetch_webpage_content(url)
                if not page_html:
                    continue  # fetch failed; error already shown
                page_text = scrape_text(page_html)
                if not page_text:
                    continue  # scrape failed; error already shown

                st.write(f"Scraped Content from Link {idx}:")
                st.write(page_text)
                # Download button for the scraped text of this link.
                st.download_button(
                    label=f"Download Webpage Content from Link {idx}",
                    data=page_text,
                    file_name=f"webpage_content_{idx}.txt",
                    mime="text/plain",
                )