# Standard library
import math
import time
from calendar import month  # NOTE(review): unused -- verify before removing
from urllib.parse import urljoin

# Third-party
import chardet  # auto-detect response encoding to avoid mojibake
import pymysql
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver import Keys
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

print("新华网数据爬虫抓取")
# Launch a local Chrome instance via Selenium to drive the Xinhua News search site.
# Path to the local chromedriver binary -- replace with the path on your machine.
chrome_driver_path = "D:/ChromeDriver/chromedriver-win64/chromedriver.exe"  # adjust to your chromedriver location
service = ChromeService(executable_path=chrome_driver_path)
options = webdriver.ChromeOptions()
driver = webdriver.Chrome(service=service, options=options)
count = 0  # rows saved so far; re-seeded from the DB row count before scraping
# 解析网页数据
# 解析网页数据
def soupXml(page_source):
    """Parse one search-result page and yield a detail dict per news item.

    Walks every ``div.item`` on the page, fetches each item's detail page via
    soupDetailsxml(), then clicks the "next page" control and recurses until
    no further page exists.

    page_source -- HTML of the current result page (driver.page_source).
    Yields the dicts produced by soupDetailsxml() ({} on a failed fetch).
    Uses the module-level Selenium ``driver`` for pagination.
    """
    print("准备解析列表数据")
    soup = BeautifulSoup(page_source, 'lxml')
    for item in soup.find_all("div", class_="item"):
        title_ele = item.find("div", class_="title")
        # Skip malformed result cards that carry no title link.
        if title_ele is None or title_ele.a is None:
            continue
        yield soupDetailsxml(title_ele.a.get("href"))

    print("开始执行下一页")
    try:
        next_button = WebDriverWait(driver, 30).until(
            EC.presence_of_element_located((By.CLASS_NAME, "ant-pagination-next"))
        )
    except TimeoutException:
        # No pagination control rendered at all -- nothing more to fetch.
        # (The old `if next_button is not None` check was always true because
        # WebDriverWait either returns an element or raises.)
        print("没有下一页了")
        return
    # On the last page the Ant Design "next" button stays in the DOM but is
    # disabled -- presumably via aria-disabled; TODO confirm against the site.
    if next_button.get_attribute("aria-disabled") == "true":
        print("没有下一页了")
        return
    # Scroll the button into view before clicking.
    driver.execute_script("arguments[0].scrollIntoView(true);", next_button)
    WebDriverWait(driver, 30).until(
        EC.element_to_be_clickable((By.CLASS_NAME, "ant-pagination-next"))
    )
    print("准备跳转下一页")
    next_button.click()
    print("跳转下一页成功")
    # Wait for the loading spinner (shown during the AJAX refresh) to go stale,
    # i.e. for the new page of results to be rendered.
    WebDriverWait(driver, 30).until(
        EC.staleness_of(driver.find_element(By.CLASS_NAME, "ant-spin-dot"))
    )
    print("下一页抓取完成")
    yield from soupXml(driver.page_source)  # recurse into the next page

# 解析新闻详情数据
def soupDetailsxml(url):
    """Fetch one news detail page and return its fields as a dict.

    url -- absolute URL of the article page.
    Returns a dict with keys title/source/link/time/content/img, or an empty
    dict when the request or the parse fails (callers treat {} as "skip").
    """
    print("正在解析新闻详情数据" + url)
    try:
        # timeout prevents a single dead link from hanging the whole crawl;
        # catching RequestException keeps the generator pipeline alive.
        response = requests.get(url, timeout=30)
    except requests.RequestException as e:
        print(f"解析新闻详情数据失败:{e}")
        return {}
    if response.status_code != 200:
        print('请求失败')
        return {}
    # Sniff the real encoding so Chinese text does not decode as mojibake.
    response.encoding = chardet.detect(response.content)['encoding']
    soup = BeautifulSoup(response.text, 'lxml')
    try:
        year = soup.find("span", class_="year").find("em").get_text()
        # The day element holds "MM/DD", e.g. "02/24".
        month, day = soup.find("span", class_="day").get_text().split("/")
        content_ele = soup.find("span", id="detailContent")
        # urljoin resolves relative src values against the article URL and
        # leaves already-absolute URLs untouched (the old manual prefixing
        # corrupted absolute src values). Images without a src are skipped.
        img_srcs = ','.join(
            urljoin(url, img.get("src"))
            for img in content_ele.find_all("img")
            if img.get("src")
        )
        return {
            "title": soup.find("span", class_="title").get_text(),
            "source": soup.find("div", class_="source").get_text().strip().split("：")[1],
            "link": url,
            "time": f"{year}年{month}月{day}日",
            "content": content_ele.get_text(),
            "img": img_srcs
        }
    except Exception as e:
        # Any missing element (None.find, bad split, ...) lands here.
        print(f"解析新闻详情数据失败:{e}")
        return {}


# 连接数据库
def mysqlConnect():
    """Open the MySQL connection and ensure the ``news`` table exists.

    Returns the open pymysql connection; the caller owns it and must close it
    (saveDataToMysql does so when it finishes).
    """
    print("正在连接数据库")
    conn = pymysql.connect(
        host='127.0.0.1',
        port=3306,
        user='root',
        password='123456',
        db='runboot',
        charset='utf8mb4'
    )
    # pymysql cursors are context managers: the cursor is closed even if the
    # DDL fails (the original leaked it).
    with conn.cursor() as cursor:
        cursor.execute("CREATE TABLE IF NOT EXISTS news("
                       "id INT AUTO_INCREMENT PRIMARY KEY, "
                       "title VARCHAR(255), "
                       "source VARCHAR(255), "
                       "link VARCHAR(255), "
                       "time VARCHAR(255), "
                       "content LONGTEXT, "
                       "img TEXT)"
                       )
    conn.commit()
    print("数据库连接成功")
    return conn


# 保存数据到数据库
def saveDataToMysql(data):
    """Insert each scraped item into the ``news`` table.

    data -- iterable of detail dicts as produced by soupDetailsxml(); empty
    dicts (failed fetches) are skipped instead of raising KeyError.
    Uses the module-level ``conn`` and increments the module-level ``count``
    for progress reporting. The connection is always closed when iteration
    ends -- the original's for/else only closed it on a clean run.
    """
    global count
    try:
        # One reusable cursor instead of a fresh, never-closed cursor per row.
        with conn.cursor() as cursor:
            for item in data:
                if not item:
                    continue  # failed fetch -- nothing to store
                try:
                    count += 1
                    print(f"正在保存第{count}条数据")
                    # LONGTEXT holds up to 4294967295 bytes; truncate defensively.
                    content = item["content"][:4294967295]
                    cursor.execute(
                        "INSERT INTO news(title, source,link,time,content,img) VALUES(%s, %s, %s, %s, %s, %s)",
                        (item['title'], item["source"], item["link"], item["time"], content, item["img"]))
                    conn.commit()
                except Exception as e:
                    # Best-effort: one bad row must not abort the whole crawl.
                    print(f"保存数据失败:{e}")
    finally:
        conn.close()

try:
    # Main flow: connect to the DB, open the Xinhua search page, resume from
    # the page implied by the existing row count, then scrape and persist.
    # Connect to the database (also creates the news table if missing).
    conn = mysqlConnect()
    print("正在打开新华网搜索页")
    driver.get("https://so.news.cn")
    driver.set_window_size(1920, 1080)

    # Locate the search box, type the query and submit with Enter.
    input_ele = driver.find_element(By.CLASS_NAME, "input")
    input_ele.send_keys("加勒比")
    input_ele.send_keys(Keys.RETURN)
    # Wait up to 30s until at least one result card (class "item") is rendered.
    WebDriverWait(driver, 30).until(
        EC.presence_of_element_located((By.CLASS_NAME, "item"))
    )

    print(driver.title)

    cursor = conn.cursor()
    cursor.execute("select count(*) from news")
    total = cursor.fetchone()[0]
    print(f"数据库中已有数据:{total}条")
    if total > 0:
        # Resume support: assumes 10 results per page -- TODO confirm against
        # the site -- and jumps to the first page not yet fully saved.
        count = total
        jumpPage = math.ceil(total / 10)+1
        print(f"将要跳转到第几页：{jumpPage}")
        jump_input = (driver.find_element(By.CLASS_NAME, "ant-pagination-options-quick-jumper")
                      .find_elements("tag name", "input"))[0]
        jump_input.send_keys(str(jumpPage))
        jump_input.send_keys(Keys.RETURN)
        # Wait for the loading spinner to go stale, i.e. the jump completed.
        WebDriverWait(driver, 30).until(
            EC.staleness_of(driver.find_element(By.CLASS_NAME, "ant-spin-dot"))
        )
    # 隐式等待
    # driver.implicitly_wait(30)
    # 解析列表数据

    # soupXml is a generator: detail pages are fetched lazily while saving.
    data = soupXml(driver.page_source)  # NOTE: generator, not a list
    # Persist everything; saveDataToMysql closes the DB connection when done.
    saveDataToMysql(data)

except Exception as e:
    print(e)
finally:
    print("程序结束")
    # driver.quit()
    pass
