# -*- coding: utf-8 -*-
import scrapy
from cnblog.items import CnblogItem
from scrapy import signals
from selenium import webdriver
from lxml import html
from bs4 import BeautifulSoup

class CnblogSpiderSpider(scrapy.Spider):
    """Spider that crawls article pages on stjj.guizhou.gov.cn.

    Page URLs come from the second column of ``gzstjj_tjfbyjd.csv``;
    pages are fetched one after another by chaining the ``parse``
    callback through the de-duplicated URL list.
    """

    name = "cnblog_spider_tjfxbg"
    allowed_domains = ["stjj.guizhou.gov.cn"]
    offset = 0       # index into csvUrls of the page currently being crawled
    csvUrls = []

    # Read the URL from the second column of gzstjj_tjfbyjd.csv into a list.
    # NOTE(review): this runs at class-definition (import) time and assumes
    # no quoted commas inside fields — kept as-is to preserve behavior.
    with open('gzstjj_tjfbyjd.csv', 'r', encoding='utf-8') as f:
        for line in f:
            if line.strip():
                csvUrls.append(line.split(',')[1].strip())
    # De-duplicate the URL list (set() does not preserve CSV order).
    csvUrls = list(set(csvUrls))
    # The first URL is the crawl entry point.
    start_urls = [csvUrls[0]]

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider, attach a Chrome webdriver and a close hook.

        The driver is created here (not in __init__) so the spider_closed
        signal can reliably tear it down when the crawl ends.
        """
        spider = super(CnblogSpiderSpider, cls).from_crawler(crawler, *args, **kwargs)
        spider.driver = webdriver.Chrome()
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Shut down the Chrome webdriver when the spider finishes."""
        spider.logger.info('Spider closed: %s', spider.name)
        # BUG FIX: quit() ends the whole browser session and the chromedriver
        # process; close() only closes the current window and leaks the driver.
        spider.driver.quit()

    def parse(self, response):
        """Extract one article page into a CnblogItem, then request the next URL.

        Yields the populated item first, then (if any URLs remain) a
        scrapy.Request for the next page chained back to this callback.
        """
        item = CnblogItem()
        item['title'] = response.xpath('//div[@id="NewsArticleTitle"]/text()').extract()
        item['type'] = response.xpath('//div[@id="NewsArticleType"]/text()').extract()
        item['source'] = response.xpath('//div[@id="NewsArticleSource"]/text()').extract()
        item['publish_date'] = response.xpath('//div[@id="NewsArticlePubDay"]/text()').extract()
        # BUG FIX: record the URL of the page that produced this item.  The old
        # code indexed csvUrls *after* offset had been advanced, so every item
        # carried the URL of the NEXT page instead of its own.
        item['url'] = response.url

        # Pull the article body out of the TRS_UEDITOR div with BeautifulSoup.
        soup = BeautifulSoup(response.text, 'html.parser')
        main_content_div = soup.find('div', class_='TRS_UEDITOR')
        main_content_text = ''
        if main_content_div:
            main_content_text = main_content_div.get_text(strip=True)
            # Strip the boilerplate "do not reprint" notices from the body.
            main_content_text = main_content_text.replace('未经同意请勿转载', '').replace('未经允许禁止转载', '')
            print("正文内容:" + main_content_text)
        else:
            print("未找到正文内容")
        item['content'] = main_content_text

        print("第{0}页爬取完成".format(self.offset))
        self.offset += 1

        # BUG FIX: yield the item BEFORE checking for a next page.  The old
        # code returned early on the last page and silently dropped its data.
        yield item

        # Chain to the next URL from the CSV, if any remain.
        if self.offset < len(self.csvUrls):
            yield scrapy.Request(url=self.csvUrls[self.offset], callback=self.parse)
        else:
            print("所有页面爬取完成")

