from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
import time
import pandas as pd

# Page-load delay in seconds after each navigation/filter action;
# increase this value if the network is slow.
DELAY_SEC = 1
# Inclusive year range to scrape (one row set per quarter per year).
START_YEAR = 2018
END_YEAR = 2023


def generate_quarter_dates(start_year=START_YEAR, end_year=END_YEAR):
    """Build the list of quarter-end dates (YYYY-MM-DD) for the given inclusive year range."""
    quarter_ends = ('03-31', '06-30', '09-30', '12-31')
    dates = []
    for yr in range(start_year, end_year + 1):
        for q_end in quarter_ends:
            dates.append(f"{yr}-{q_end}")
    return dates


def setup_driver(url):
    """Start a Chrome WebDriver session, navigate it to *url*, and return it."""
    browser = webdriver.Chrome()
    browser.get(url)
    return browser


def select_industry(driver, industry_name):
    """Click the industry-filter list item whose data-value matches *industry_name*."""
    entry_xpath = f'//*[@id="filter_hy"]/ul/li[@data-value="{industry_name}"]'
    driver.find_element(By.XPATH, entry_xpath).click()


def scrape_data(driver, dates):
    """Scrape the income-statement table for every quarter-end date in *dates*.

    For each date: select it in the date dropdown, then walk through the
    result pages via the "go to page" form, collecting the text of every
    table row.

    Args:
        driver: an already-navigated Selenium WebDriver on the report page.
        dates: iterable of 'YYYY-MM-DD' quarter-end strings.

    Returns:
        A list of rows, each row a list of cell-text strings.
    """
    all_data = []
    select_element = driver.find_element(By.XPATH, '//*[@id="filter_date"]')
    select = Select(select_element)

    for date in dates:
        select.select_by_value(date)
        time.sleep(DELAY_SEC)
        print(f"抓取{date.split('-')[0]}年第{int(date.split('-')[1]) // 3}季度的利润表数据...")

        # Count the pagination links to estimate how many pages to visit.
        # NOTE(review): range(1, page_num) visits pages 1..page_num-1 — this
        # presumably accounts for a non-page link (e.g. "next") in the button
        # list; confirm against the live page that the last data page is not
        # being skipped.
        page_buttons = driver.find_elements(By.XPATH, '//*[@id="dataview"]/div[3]/div[1]/a')
        page_num = len(page_buttons)

        for i in range(1, page_num):
            # Re-locate the form controls on every iteration: each submit
            # re-renders the pagination area, so elements found before the
            # previous click would be stale (StaleElementReferenceException).
            input_element = driver.find_element(By.XPATH, '//*[@id="gotopageindex"]')
            submit_button = driver.find_element(By.XPATH, '//*[@id="dataview"]/div[3]/div[2]/form/input[2]')
            input_element.clear()
            input_element.send_keys(str(i))
            submit_button.click()
            time.sleep(DELAY_SEC)

            # Harvest every row of the current page's table body.
            data_table = driver.find_elements(By.XPATH, '//*[@id="dataview"]/div[2]/div[2]/table/tbody/tr')
            for tr in data_table:
                tds = tr.find_elements(By.TAG_NAME, 'td')
                row_data = [td.text for td in tds]
                all_data.append(row_data)

    return all_data


def save_to_excel(header, data, output_file='汽车零部件行业利润表.xlsx'):
    """Persist the scraped rows to an Excel workbook (no index column)."""
    frame = pd.DataFrame.from_records(data, columns=header)
    frame.to_excel(output_file, index=False)
    print(f"数据已保存到 {output_file}")


def get_table_header(driver):
    """Read the two-row table header and merge it into one flat column list.

    Args:
        driver: Selenium WebDriver currently showing the data table.

    Returns:
        A list of column-name strings with newlines stripped.
    """
    # Give the table time to load. NOTE(review): implicitly_wait changes a
    # driver-wide setting, not just this lookup — confirm that is intended.
    driver.implicitly_wait(1)

    # First header row (top-level captions) and second row (sub-columns).
    top_table_headers = driver.find_elements(By.XPATH, '//*[@id="dataview"]/div[2]/div[2]/table/thead/tr[1]/th/div')
    bottom_table_headers = driver.find_elements(By.XPATH, '//*[@id="dataview"]/div[2]/div[2]/table/thead/tr[2]/th/div')

    # Extract the text, dropping embedded line breaks.
    headers1 = [txt.text.replace("\n", '') for txt in top_table_headers]
    headers2 = [txt.text.replace("\n", '') for txt in bottom_table_headers]

    # Splice the sub-column row after the first eight top-level headers
    # (assumed site layout — verify if the page structure changes).
    headers = headers1[:8] + headers2 + headers1[8:]
    # '营业总支出' is a grouping caption rather than a data column. Guard the
    # removal: the original unconditional list.remove raised ValueError and
    # aborted the whole run whenever that caption was absent.
    if '营业总支出' in headers:
        headers.remove('营业总支出')

    return headers


if __name__ == "__main__":
    # Entry point: open the report page, filter to the target industry,
    # scrape every quarter in the configured range, then export to Excel.
    target_url = "https://data.eastmoney.com/bbsj/201803/lrb.html"
    target_industry = '汽车零部件'
    quarter_dates = generate_quarter_dates()

    browser = setup_driver(target_url)
    try:
        select_industry(browser, target_industry)
        scraped_rows = scrape_data(browser, quarter_dates)
        column_names = get_table_header(browser)
    finally:
        # Always shut the browser down, even if scraping fails mid-way.
        browser.quit()

    save_to_excel(column_names, scraped_rows)
