import requests
from bs4 import BeautifulSoup
import pandas as pd
from beeize.scraper import Scraper
import os
scraper = Scraper()

# Scraping parameters. Environment variables override the defaults when set;
# the original code called int(os.getenv(...)) unconditionally, which raised
# TypeError whenever PAGE_ALL / CODE were missing from the environment.
page_all = int(os.getenv('PAGE_ALL') or 2)   # number of listing pages to scrape
code = int(os.getenv('CODE') or 9824)        # community (forum) id in the URL

# Optional proxy configuration; None values simply disable proxying in requests.
proxies = {
    'http': os.getenv('PROXY_URL'),
    'https': os.getenv('PROXY_URL'),
}

# Accumulates every scraped card as a dict (one per post).
data_list = []

# The request headers are loop-invariant — build them once, not per page.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0"
}

for page in range(1, page_all + 1):
    # Fetch one page of the community's "hot" listing.
    url = f"https://www.dongchedi.com/community/{code}/hot-{page}"
    # timeout prevents an unresponsive server from hanging the scraper forever;
    # raise_for_status fails fast instead of parsing an HTTP error page as data.
    response = requests.get(url, headers=headers, proxies=proxies, timeout=30)
    response.raise_for_status()

    # Parse the HTML payload.
    soup = BeautifulSoup(response.text, 'html.parser')

    # Each post is rendered as a <section class="community-card">.
    for card in soup.find_all('section', class_='community-card'):
        news_item = {}

        # Username + profile link: the first anchor carrying both href and title.
        user_info = card.find('a', href=True, title=True)
        if user_info:
            user_name = user_info['title']
            user_link = 'https://www.dongchedi.com' + user_info['href']
            news_item["username"] = user_name
            news_item["user_profile_link"] = user_link
            print(f"用户名: {user_name}")
            print(f"用户主页链接: {user_link}")

        # Post body text.
        content = card.find('p', class_='tw-relative')
        if content:
            news_item["content"] = content.text.strip()
            print(f"内容: {content.text.strip()}")

        # Publication timestamp.
        timestamp = card.find('span', class_='tw-text-video-shallow-gray')
        if timestamp:
            news_item["timestamp"] = timestamp.text.strip()
            print(f"时间戳: {timestamp.text.strip()}")

        # Comment count — `string=` replaces the deprecated bs4 `text=` keyword.
        comments = card.find('a', string=lambda text: text and "评论" in text)
        if comments:
            news_item["comments"] = comments.text.strip()
            print(f"评论量: {comments.text.strip()}")

        # Like count.
        likes = card.find('button', string=lambda text: text and "点赞" in text)
        if likes:
            news_item["likes"] = likes.text.strip()
            print(f"点赞量: {likes.text.strip()}")

        # Push even partially-filled items so downstream sees every card,
        # matching the original behavior.
        scraper.push_data(news_item)
        data_list.append(news_item)

        print("\n" + "-" * 50 + "\n")