import scrapy
from HFUUScrapy.items import HfuuscrapyItem
from fake_headers import Headers




class HfuuspiderSpider(scrapy.Spider):
    """Crawl the article index of hfuu.edu.cn (section 4155) and yield one
    HfuuscrapyItem per article with title, publisher, update date and body text.
    """

    name = 'HFUUSpider'
    allowed_domains = ['hfuu.edu.cn']
    # start_urls = ['http://hfuu.edu.cn/']
    url_format = 'http://www.hfuu.edu.cn/4155/list{}.htm'

    # Fake-header generator; .generate() is called per request so each
    # request carries a fresh randomized header set (basic anti-bot evasion).
    header = Headers(
        browser="chrome",  # Generate only Chrome UA
        os="win",  # Generate only Windows platform
        headers=True  # generate misc headers
    )

    @staticmethod
    def _after_colon(text):
        """Return the text after the first full-width colon '：', stripped.

        Safe against missing data: returns '' when *text* is None and the
        original text (stripped) when no colon is present, instead of the
        AttributeError / IndexError the naive split("：")[1] would raise.
        maxsplit=1 keeps values that themselves contain '：' intact.
        """
        if not text:
            return ''
        parts = text.split("：", 1)
        return parts[1].strip() if len(parts) > 1 else text.strip()

    def start_requests(self):
        """Yield requests for list pages 1..42 of the article index."""
        for i in range(1, 43):
            url = self.url_format.format(i)
            yield scrapy.Request(url=url, callback=self.parse, headers=self.header.generate())

    def parse(self, response):
        """Parse one list page: follow every article link to parse2."""
        aList = response.xpath("//ul[@class='wp_article_list']/li/div/span/a")
        for a in aList:
            href = a.attrib.get("href")
            if not href:
                # Skip anchors without a target; urljoin(None) would raise.
                continue
            url = response.urljoin(href)
            # print(url)
            yield scrapy.Request(url=url, callback=self.parse2, headers=self.header.generate())

    def parse2(self, response):
        """Parse one article page and yield a populated HfuuscrapyItem."""
        # .get() returns None instead of raising IndexError when the node
        # is missing (the previous [0].get() crashed on pages without it).
        title = response.xpath("//h1[@class='arti_title']/text()").get()
        # Labels look like "发布人：X" / "时间：Y"; keep only the value part.
        arti_publisher = self._after_colon(
            response.xpath("//span[@class='arti_publisher']/text()").get())
        arti_update = self._after_colon(
            response.xpath("//span[@class='arti_update']/text()").get())
        # string(.) concatenates all descendant text of the content div;
        # default='' guards against articles with no content container.
        arti_content = response.xpath(
            "//div[@class='wp_articlecontent']").xpath("string(.)").get(default='').strip()
        hfuuItem = HfuuscrapyItem()
        hfuuItem['title'] = title
        hfuuItem['arti_publisher'] = arti_publisher
        hfuuItem['arti_update'] = arti_update
        hfuuItem['arti_content'] = arti_content
        yield hfuuItem
