from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import pandas as pd

# Constants: target site, stock code to query, fixed pause between page
# actions, and the DOM ids of the three statement tabs on the SSE XBRL page.
URL = 'https://listxbrl.sse.com.cn/'
STOCK_CODE = '603949'
DELAY_SEC = 2
TABLE_IDS = ['zcfzb', 'lrb', 'xjllb']
# Chinese display names keyed by tab id; used in log messages and output
# filenames (zcfzb = balance sheet, lrb = income statement,
# xjllb = cash flow statement).
TABLE_NAMES = {
    'zcfzb': '资产负债表',
    'lrb': '利润表',
    'xjllb': '现金流量表'
}


def setup_driver(url, delay=DELAY_SEC):
    """Launch a Chrome WebDriver, open *url*, and pause for dynamic content.

    Args:
        url: Address to load once the browser starts.
        delay: Seconds to sleep after the initial load so the page's
            script-driven content can render. Defaults to the module-level
            DELAY_SEC, so existing callers are unaffected.

    Returns:
        The live ``webdriver.Chrome`` instance; the caller is responsible
        for calling ``quit()`` on it.
    """
    driver = webdriver.Chrome()
    driver.get(url)
    # Fixed pause: the page populates its tables after the initial document
    # load, so give it a moment before callers start locating elements.
    time.sleep(delay)
    return driver


def scrape_data(driver):
    """Scrape the currently displayed financial-statement table.

    Waits (up to 10 s) for the table's ``tbody`` to appear, then reads the
    text of the first ``<div>`` inside each ``<td>`` — cells without a
    ``<div>`` yield an empty string.

    Args:
        driver: An active Selenium WebDriver showing the statement page.

    Returns:
        A list of rows, each a list of cell strings. Rows collected before
        any error are kept; an empty list is returned if nothing was read.
    """
    def _cell_text(td):
        # The value lives in the cell's first nested <div>; blank otherwise.
        divs = td.find_elements(By.TAG_NAME, 'div')
        return divs[0].text.strip() if divs else ''

    all_data = []
    try:
        tbody = WebDriverWait(driver, 10).until(EC.presence_of_element_located(
            (By.XPATH, '/html/body/div[5]/div/div[1]/div/div/div[2]/div[2]/table/tbody')))
        for tr in tbody.find_elements(By.TAG_NAME, 'tr'):
            all_data.append(
                [_cell_text(td) for td in tr.find_elements(By.TAG_NAME, 'td')])
    except Exception as e:
        print(f"Error scraping data: {e}")
    return all_data


def save_to_excel(data, output_file):
    """Write *data* (a list of rows) to *output_file* as a bare Excel sheet.

    Neither an index column nor a header row is emitted, so the sheet
    mirrors the scraped table exactly.
    """
    pd.DataFrame(data).to_excel(output_file, header=False, index=False)


def main():
    """Run the full scrape: search the stock code, then visit each
    statement tab, scrape its table, and save one Excel file per statement.
    """
    driver = setup_driver(URL)
    try:
        # Enter the stock code and trigger the search.
        stock_code_input = driver.find_element(By.XPATH, '//*[@id="sel"]')
        stock_code_input.clear()
        stock_code_input.send_keys(STOCK_CODE)
        search_button = driver.find_element(By.XPATH, '//*[@id="search"]')
        search_button.click()
        time.sleep(DELAY_SEC)

        # Iterate over the three statement tabs.
        for table_id in TABLE_IDS:
            # Switch to the requested statement tab.
            table = driver.find_element(By.XPATH, f'//*[@id="{table_id}"]')
            table.click()
            time.sleep(DELAY_SEC)

            # Scrape the table currently shown.
            print(f'爬取雪龙集团{TABLE_NAMES[table_id]}数据...')
            data = scrape_data(driver)

            # Persist the rows to one workbook per statement.
            output_file = f"雪龙集团{TABLE_NAMES[table_id]}.xlsx"
            save_to_excel(data, output_file)
            print(f"数据已保存到 {output_file}")
    finally:
        # Always close the browser — without this, any locator failure
        # above would leak the Chrome process.
        driver.quit()


if __name__ == "__main__":
    main()
