import time
from io import StringIO
from selenium import webdriver
from selenium.webdriver.common.by import By
import pandas as pd
from bs4 import BeautifulSoup

# Launch Chrome and open Eastmoney's research-report center page.
browser = webdriver.Chrome()
browser.maximize_window()
browser.get('https://data.eastmoney.com/report/')
time.sleep(2)  # crude wait for the page to finish rendering

# Type the target stock code into the search box and submit the query.
stock_code = '601633'
query_input = browser.find_element(By.CSS_SELECTOR, 'input.sinput.noieclear')
query_input.send_keys(stock_code)
time.sleep(1)
submit_btn = browser.find_element(By.CSS_SELECTOR, 'input.submit_new_btn')
submit_btn.click()

# The results open in a new tab/window — switch to the most recent handle
# so subsequent page-source reads target the report-detail listing.
browser.switch_to.window(browser.window_handles[-1])

# Capture the raw HTML of the first `max_page` result pages by clicking
# through the pagination. The browser is released in a finally block so a
# failed locator/click mid-loop cannot leak the Chrome process (the
# original called quit() only on the success path).
html_list = []
max_page = 2
try:
    for page in range(1, max_page + 1):
        time.sleep(3)  # crude wait for the results table to render
        html_list.append(browser.page_source)
        if page < max_page:
            # Link text is Chinese for "next page"
            next_page = browser.find_element(By.LINK_TEXT, '下一页')
            next_page.click()
finally:
    browser.quit()

# Turn each captured page into a DataFrame: pandas pulls the report table,
# BeautifulSoup pulls the per-row detail-page link from the second column.
data_list = []
for page_html in html_list:
    # Extract the table with class "table-model" (first and only match)
    frame = pd.read_html(io=StringIO(page_html), attrs={'class': 'table-model'})[0]
    # Grab the <a> element in column 2 of every row; its href is the
    # (relative) URL of the report's detail page
    doc = BeautifulSoup(page_html, 'lxml')
    anchors = doc.select('table.table-model > tbody > tr > td:nth-child(2) > a')
    frame['详情页'] = [anchor.get('href') for anchor in anchors]
    data_list.append(frame)

# Stitch the per-page tables into one frame, absolutize the relative
# detail-page URLs, and export to CSV. utf-8-sig adds a BOM so Excel
# displays the Chinese headers correctly.
df = pd.concat(data_list, ignore_index=True)
base_url = 'https://data.eastmoney.com'
df['详情页'] = base_url + df['详情页']
df.to_csv(f'研报数据_{stock_code}.csv', index=False, encoding='utf-8-sig')
