import random
import sys
import time

import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager

def data_option(data, attribute):
    """Extract a named attribute from each element, dropping empty/None values.

    Args:
        data: iterable of objects exposing ``get_attribute(name)``
              (e.g. Selenium WebElements).
        attribute: attribute name to read from each element.

    Returns:
        list of the truthy attribute values, in input order.
    """
    # Call get_attribute() only once per element: with Selenium WebElements
    # each call is a round-trip to the driver, so the original
    # call-then-call-again pattern doubled the network traffic.
    return [value for item in data if (value := item.get_attribute(attribute))]

class BrowserMain:
    """Scrape Bilibili UP-user search results (username, bio, avatar URL)
    page by page and append them to an Excel workbook.

    Usage: ``BrowserMain("keyword", max_pages=5).run()``.
    """

    def __init__(self, search_key, max_pages=5):
        # Browser configuration: hide the automation flag and start maximized.
        chrome_options = Options()
        chrome_options.add_argument('--disable-blink-features=AutomationControlled')
        chrome_options.add_argument('--start-maximized')

        # Selenium 4 removed the positional executable-path argument;
        # the driver path must be wrapped in a Service object.
        self.driver = webdriver.Chrome(
            service=Service(ChromeDriverManager().install()),
            options=chrome_options,
        )
        self.keyword = search_key
        self.max_pages = max_pages
        self.BASE_URL = f'https://search.bilibili.com/upuser?keyword={search_key}&from_source=webtop_search&spm_id_from=333.1007&search_source=5'
        self.page = 1
        # Result offset, advanced by 12 per page. Currently unused elsewhere;
        # kept for compatibility with the site's paging scheme.
        self.o = 12
        # Column names are the Chinese headers written to the Excel file:
        # 用户名 = username, 简介 = bio, 头像 = avatar URL.
        self.user_data = {
            '用户名': [],
            '简介': [],
            '头像': []
        }

    def init_driver(self):
        """Open the search page and block until results start rendering."""
        print(f"正在打开页面: {self.BASE_URL}")
        self.driver.get(self.BASE_URL)
        # Wait for at least one bio element ('b_text') to appear.
        # NOTE: the original code was missing the closing parenthesis of
        # this until() call, which was a SyntaxError.
        WebDriverWait(self.driver, 10).until(
            EC.presence_of_element_located((By.CLASS_NAME, "b_text"))
        )
        time.sleep(random.uniform(1, 3))  # random delay to look less bot-like

    def collect_data(self):
        """Harvest usernames, bios and avatar URLs from the current page."""
        print(f"正在收集第 {self.page} 页数据...")

        # User bios (title attribute of .b_text elements).
        user_desc_ele = self.driver.find_elements(by=By.CLASS_NAME, value='b_text')
        self.user_data['简介'].extend(data_option(user_desc_ele, 'title'))

        # Usernames (title attribute of .p_relative elements).
        user_name_ele = self.driver.find_elements(by=By.CLASS_NAME, value='p_relative')
        self.user_data['用户名'].extend(data_option(user_name_ele, 'title'))

        # Avatar image URLs (src attribute of .bili-avatar-img elements).
        user_avatar_ele = self.driver.find_elements(by=By.CLASS_NAME, value='bili-avatar-img')
        self.user_data['头像'].extend(data_option(user_avatar_ele, 'src'))

        print(f"当前已收集 {len(self.user_data['用户名'])} 条数据")

    def save_to_excel(self):
        """Append collected rows to ./bilibili_up_data.xlsx (created if absent)."""
        excel_path = './bilibili_up_data.xlsx'
        df = pd.DataFrame(self.user_data)

        # EAFP: try to read the existing sheet and append; fall back to a
        # fresh frame when the workbook does not exist yet.
        try:
            existing_df = pd.read_excel(excel_path)
            combined_df = pd.concat([existing_df, df], ignore_index=True)
        except FileNotFoundError:
            combined_df = df

        combined_df.to_excel(excel_path, index=False, engine='openpyxl')
        print(f"数据已保存到 {excel_path}")

    def next_page(self):
        """Advance to the next result page.

        Returns:
            True if the next page was opened, False when the page cap is
            reached or the next-page button cannot be clicked.
        """
        if self.page >= self.max_pages:
            print(f"已达到最大页数 {self.max_pages}，停止采集")
            return False

        # Scroll down gradually so lazy-loaded content and the pager render.
        for i in range(5):
            self.driver.execute_script(f"window.scrollTo(0, {(i+1)*300})")
            time.sleep(0.5)

        # Click the "next" pager button. NOTE(review): the XPath predicate
        # [2] picks the second matching sibling in its context — presumed to
        # be the right-hand (next) side button; verify against the live DOM.
        try:
            next_btn = WebDriverWait(self.driver, 10).until(
                EC.element_to_be_clickable((By.XPATH, '//button[contains(@class, "vui_pagenation--btn-side")][2]')))
            next_btn.click()

            self.page += 1
            self.o += 12
            time.sleep(random.uniform(2, 4))  # random wait between pages
            return True
        except Exception as e:
            # Broad catch is deliberate: any failure here (timeout, stale
            # element, missing button) just ends the crawl gracefully.
            print(f"翻页失败: {str(e)}")
            return False

    def run(self):
        """Drive the full crawl: open, collect each page, save, clean up."""
        try:
            self.init_driver()

            while True:
                self.collect_data()
                if not self.next_page():
                    break

            self.save_to_excel()
            print("数据采集完成！")

        except Exception as e:
            print(f"程序运行出错: {str(e)}")
            self.save_to_excel()  # salvage whatever was collected so far
        finally:
            # Always release the browser, even on failure.
            self.driver.quit()

if __name__ == '__main__':
    # Take the keyword from the command line when provided,
    # otherwise prompt interactively.
    keyword = sys.argv[1] if len(sys.argv) > 1 else input("请输入要搜索的关键词: ")

    # Maximum number of pages to crawl defaults to 5.
    BrowserMain(keyword, max_pages=5).run()