import scrapy
from bs4 import BeautifulSoup
from mySpider.items import MyspiderItem
import time
import random


def remove_html_tags(text):
    """Strip HTML markup from *text* and return the plain-text content.

    Args:
        text: An HTML fragment, or None/empty (e.g. when an XPath
            ``.get()`` found no match on the page).

    Returns:
        The extracted text with all HTML tags removed; an empty string
        when *text* is None or empty.
    """
    if not text:
        # response.xpath(...).get() yields None when the selector misses;
        # BeautifulSoup(None, ...) would raise TypeError, so short-circuit.
        return ''
    soup = BeautifulSoup(text, 'html.parser')
    return soup.get_text()


class MySpider(scrapy.Spider):
    """Crawl invt.com.cn news detail pages, following each "next article" link.

    Every page yields one MyspiderItem whose ``news`` field is the
    tag-stripped title concatenated with the tag-stripped body text.
    """

    name = 'mySpider'
    start_urls = ['https://www.invt.com.cn/news-detail-3179-52.html']

    # NOTE: calling time.sleep() inside a callback blocks Scrapy's Twisted
    # event loop and stalls *every* in-flight request.  Let the downloader
    # apply the politeness delay instead; with RANDOMIZE_DOWNLOAD_DELAY
    # (enabled by default) the actual wait is 0.5x-1.5x DOWNLOAD_DELAY.
    custom_settings = {
        'DOWNLOAD_DELAY': 5,
    }

    def parse(self, response):
        """Extract the title and body of one news page, then follow the
        "next article" link if present.

        Args:
            response: The scrapy Response for a news-detail page.

        Yields:
            MyspiderItem: one item with the combined ``news`` text, then
            optionally a Request for the next article.
        """
        # default='' keeps remove_html_tags from receiving None when the
        # page layout changes and an XPath stops matching.
        title = remove_html_tags(
            response.xpath("/html/body/div[4]/div[1]/div/div[1]/h3").get(default=''))
        detail = remove_html_tags(
            response.xpath("/html/body/div[4]/div[1]/div/div[2]").get(default=''))
        news = title + detail
        self.logger.info(news)  # spider logger instead of bare print()

        item = MyspiderItem()
        item['news'] = news
        yield item

        # The "next article" button lives in <div class="page-to-next page-to-btn">;
        # pull the href of its <a> directly with one combined selector.
        next_page_url = response.css('div.page-to-next.page-to-btn a::attr(href)').get()
        if next_page_url:
            # urljoin resolves relative hrefs against the current page URL.
            yield scrapy.Request(response.urljoin(next_page_url), callback=self.parse)
