#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：url_spider.py
@File ：twitter_content_spider.py
@IDE  ：PyCharm
@Author ：李涵彬
@Date ：2024/5/9 下午10:35
"""

import os
import random
import time
from typing import List

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from tqdm import tqdm
from webdriver_manager.chrome import ChromeDriverManager


def download_image(url: str, folder_path: str, index: int) -> None:
	"""
	Download an image and save it into the given folder as ``<index>.jpg``.

	:param url: image URL
	:param folder_path: destination directory (must already exist)
	:param index: file-name index used as the basename
	:raises requests.HTTPError: if the server answers with an error status
	:raises requests.Timeout: if the download takes longer than 30 seconds
	"""
	# timeout prevents the scraper from hanging forever on a stalled
	# connection; raise_for_status avoids silently writing an HTML error
	# page to disk with a .jpg extension.
	response = requests.get(url, timeout=30)
	response.raise_for_status()
	with open(os.path.join(folder_path, f"{index}.jpg"), "wb") as f:
		f.write(response.content)


def save_text(text: str, folder_path: str, index: int) -> None:
	"""
	Write text content into the given folder as ``<index>.txt`` (UTF-8).

	:param text: content to write
	:param folder_path: destination directory (must already exist)
	:param index: file-name index used as the basename
	"""
	target = os.path.join(folder_path, f"{index}.txt")
	with open(target, "w", encoding="utf-8") as handle:
		handle.write(text)


def scrape_posts(url: str) -> None:
	"""
	Scrape all posts currently rendered on a Twitter profile page.

	For every post found, a folder named after the post date (YYYY-MM-DD)
	is created under the current working directory; the post text is saved
	as ``<i>.txt`` and its images as numbered ``.jpg`` files.

	:param url: Twitter profile page URL
	"""
	# Create a Chrome instance with the automation fingerprint hidden so
	# the page is served the normal (non-bot) markup.
	options = Options()
	options.add_argument("--disable-blink-features=AutomationControlled")
	service = Service(ChromeDriverManager().install())
	driver = webdriver.Chrome(service=service, options=options)

	# Load the page, then wait a random interval to mimic human browsing.
	driver.get(url)
	time.sleep(random.randint(5, 10))

	# pbar is created inside the try block; pre-initialize it so the
	# finally clause can't raise NameError if parsing fails before then.
	pbar = None
	try:
		soup = BeautifulSoup(driver.page_source, "html.parser")

		# NOTE(review): these class strings are Twitter's generated CSS
		# class names and break whenever the site is redeployed — confirm
		# they still match the live markup before relying on this scraper.
		posts = soup.find_all("div", class_="css-175oi2r r-j5o65s r-qklmqi r-1adg3ll r-1ny4l3l")
		pbar = tqdm(total=len(posts), desc="Scraping posts")

		for i, post in enumerate(posts, start=1):
			# Post date lives in a <time> element inside the permalink
			# anchor. Skip posts with missing date markup instead of
			# crashing the whole run with an AttributeError.
			date_anchor = post.find("a",
									class_="css-1rynq56 r-bcqeeo r-qvutc0 r-1qd0xha r-a023e6 r-rjixqe r-16dba41 r-xoduu5 r-1q142lx r-1w6e6rj r-9aw3ui r-3s2u2q r-1loqt21")
			date_element = date_anchor.find("time") if date_anchor else None
			if date_element is None:
				pbar.update(1)
				continue
			date = date_element["datetime"].split("T")[0]  # keep only YYYY-MM-DD

			# One folder per date; several posts can share it.
			folder_path = os.path.join(os.getcwd(), date)
			os.makedirs(folder_path, exist_ok=True)

			# Post text (may legitimately be absent, e.g. image-only posts).
			text_element = post.find("div",
									 class_="css-1rynq56 r-8akbws r-krxsd3 r-dnmrzs r-1udh08x r-bcqeeo r-qvutc0 r-1qd0xha r-a023e6 r-rjixqe r-16dba41 r-bnwqim")
			text = text_element.text.strip() if text_element else ""
			save_text(text, folder_path, i)

			# Images: strip the "&name=small" suffix to get full-size media.
			image_elements = post.find_all("img", class_="css-9pa8cd")
			for j, image_element in enumerate(image_elements, start=1):
				image_url = image_element["src"].replace("&name=small", "")
				# Skip avatar thumbnails.
				if "_normal.jpg" in image_url:
					continue
				# Combine post and image index so two posts dated the same
				# day don't overwrite each other's images (a post holds at
				# most 4 images, so i*100+j is collision-free).
				download_image(image_url, folder_path, i * 100 + j)
			pbar.update(1)
			time.sleep(random.randint(5, 10))  # random wait to mimic human browsing
	except Exception as e:
		# Top-level boundary: report the failure and fall through to cleanup.
		print(f"Error occurred: {e}")
	finally:
		if pbar is not None:
			pbar.close()
		driver.quit()


if __name__ == "__main__":
	# Scrape the target profile's currently loaded posts.
	scrape_posts("https://twitter.com/wugudehaore")
