from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import csv
from bs4 import BeautifulSoup
import re


# 解析商品信息
def _extract_count(elem):
    """Return the digit count inside full-width parentheses（N）of *elem*'s
    text, or "0" when the element is missing or the pattern is absent.

    The original code called ``re.search(...).group(1)`` directly, which
    raises AttributeError whenever the text lacks the ``（N）`` pattern.
    """
    if elem is None:
        return "0"
    match = re.search(r"（(\d+)）", elem.get_text())
    return match.group(1) if match else "0"


def get_product_info(driver, div, csv_writer, csvfile=None):
    """Parse one search-result item and write a row to the CSV.

    Opens the product's detail page in a new browser tab to scrape review
    counts, then always closes that tab and switches back to the results
    window.

    Args:
        driver: Selenium WebDriver currently on the search-result page.
        div: BeautifulSoup element for one result item (an ``li.lineN`` node).
        csv_writer: ``csv.DictWriter`` configured with the expected fields.
        csvfile: Optional underlying file object; when provided it is flushed
            after each row so data survives an interrupted run. (The original
            code flushed a module-level global, which raises NameError when
            the function is used outside the script's ``with`` block.)
    """
    try:
        # Basic fields
        name = div.find("p", class_="name").get_text().strip()
        price = div.find("span", class_="search_now_price").get_text().strip()

        # Author / publisher / publication date
        isbn_info = div.find("p", class_="search_book_author")
        if isbn_info:
            spans = isbn_info.find_all("span")
            # Guard every index: some result items omit spans entirely.
            author = (spans[0].find("a").get_text().strip().replace("/", "")
                      if spans and spans[0].find("a") else "未知")
            publisher = (spans[2].find("a").get_text().strip().replace("/", "")
                         if len(spans) > 2 and spans[2].find("a") else "未知")
            publish_date = (spans[1].get_text().strip().replace("/", "")
                            if len(spans) > 1 else "未知")
        else:
            # Fallback layout: author/publisher are <a> tags, date is a <span>.
            # Hoist the find_all calls (the original repeated each one twice).
            author_links = div.find_all("a", class_="search_book_author")
            author = author_links[0].get_text().strip() if author_links else "未知"
            publisher = (author_links[1].get_text().strip()
                         if len(author_links) > 1 else "未知")
            date_spans = div.find_all("span", class_="search_book_author")
            publish_date = (date_spans[1].get_text().strip()
                            if len(date_spans) > 1 else "未知")

        # Detail-page link; scheme-relative URLs need an explicit https: prefix.
        link = div.find("p", class_="name").find("a").get("href", "")
        if link and not link.startswith("http"):
            link = "https:" + link

        # Open the detail page in a new tab. Pass the URL as a script argument
        # instead of interpolating it into the JS source: a quote character in
        # the URL would otherwise break (or inject into) the script.
        driver.execute_script("window.open(arguments[0], '_blank');", link)
        windows = driver.window_handles
        driver.switch_to.window(windows[-1])

        # Defaults used when the review page fails to load.
        comment_num = good_count = common_count = bad_count = "0"
        try:
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, "li[id='comment_tab']")))
            remark_link = driver.find_element(By.CSS_SELECTOR, "li[id='comment_tab']")
            driver.execute_script("arguments[0].click();", remark_link)
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, "span.on")))

            remark_page_soup = BeautifulSoup(driver.page_source, "html.parser")

            # Total review count plus good / neutral / bad breakdown.
            comment_num = _extract_count(remark_page_soup.find("span", class_="on"))
            good_count = _extract_count(remark_page_soup.find("span", {"data-type": "2"}))
            common_count = _extract_count(remark_page_soup.find("span", {"data-type": "3"}))
            bad_count = _extract_count(remark_page_soup.find("span", {"data-type": "4"}))

        except Exception as e:
            print(f"获取评论失败: {e}")

        finally:
            # Always close the detail tab and return to the results window.
            driver.close()
            driver.switch_to.window(windows[0])

        info = {
            "书名": name,
            "价格": price,
            "作者": author,
            "出版社": publisher,
            "出版年份": publish_date,
            "评论数量": comment_num,
            "好评数": good_count,
            "中评数": common_count,
            "差评数": bad_count,
        }

        csv_writer.writerow(info)
        if csvfile is not None:
            csvfile.flush()  # write rows out incrementally
        print(f"已保存: {name}")

    except Exception as e:
        print(f"解析商品失败: {e}")


if __name__ == "__main__":
    # Headless Chrome configuration.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.add_argument("--disable-gpu")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--window-size=1920,1080")
    chrome_options.add_argument(
        "user-agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'"
    )

    driver = webdriver.Chrome(options=chrome_options)
    keyword = "人工智能"
    total_pages = 5
    filename = f"{keyword}.csv"
    fields = ["书名", "价格", "作者", "出版社", "出版年份", "评论数量", "好评数", "中评数", "差评数"]

    try:
        # utf-8-sig so Excel recognizes the encoding; newline='' per csv docs.
        with open(filename, 'w', newline='', encoding='utf-8-sig') as csvfile:
            csv_writer = csv.DictWriter(csvfile, fieldnames=fields)
            csv_writer.writeheader()

            for page_num in range(1, total_pages + 1):
                url = f"http://search.dangdang.com/?key={keyword}&act=input&page_index={page_num}"
                driver.get(url)
                # Wait until at least one result item is rendered.
                WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located((By.CSS_SELECTOR, "li[class^='line']")))

                soup = BeautifulSoup(driver.page_source, "html.parser")
                # Result items carry classes like "line1", "line2", ...
                # Raw string fixes the invalid "\d" escape sequence warning.
                div_list = soup.find_all("li", class_=re.compile(r"line\d+"))

                for div in div_list:
                    get_product_info(driver, div, csv_writer)

                print(f"第 {page_num} 页爬取完成")

    finally:
        driver.quit()
        # Fixed: the message previously printed a literal placeholder instead
        # of interpolating the actual output filename.
        print(f"数据已保存到 {filename}")