import scrapy
from fake_headers import Headers
from MasuScrapy.items import *


class MasuspiderSpider(scrapy.Spider):
    """Crawl report listing pages on masu.edu.cn (section 338) and follow
    each on-site report link to extract the article detail into an item.

    Yields:
        MasuScrapyReportArrticle items from ``reportParse``.
    """

    name = 'MasuSpider'
    allowed_domains = ['masu.edu.cn']
    url_format = 'http://masu.edu.cn/338/list%d.htm'

    # Randomized desktop Chrome-on-mac headers to look like a normal browser.
    fake_headers = Headers(
        os="mac",
        browser="chrome",
        headers=True
    )

    def start_requests(self):
        """Yield requests for listing pages 1..20, each with fresh fake headers."""
        for page in range(1, 21):
            yield scrapy.Request(
                url=self.url_format % page,
                callback=self.parse,
                headers=self.fake_headers.generate(),
            )

    def parse(self, response):
        """Parse one listing page and follow every ``.htm`` report link."""
        for li in response.xpath("//ul[@class='cols_list clearfix']/li"):
            report_link = li.xpath("span/a").attrib.get("href")
            # Guard: an <a> without href (or a missing <a>) yields None,
            # which would raise AttributeError on endswith() below.
            if not report_link:
                continue
            # Only follow on-site article pages; other hrefs (e.g. external
            # links or downloads) are skipped.
            if report_link.endswith(".htm"):
                yield scrapy.Request(
                    url=response.urljoin(report_link),
                    headers=self.fake_headers.generate(),
                    callback=self.reportParse,
                )

    def reportParse(self, response):
        """Extract title, date, source, view count and body text from a
        report detail page and yield it as an item."""
        article = response.xpath("//div[@class='article']")
        # Some pages (e.g. ones redirecting off-campus) have no article div;
        # skip them rather than crash.
        if not article:
            return
        content_div = article[0]
        # Fall back to "" so the string cleanups below never hit None.
        title = content_div.xpath("h1/text()").get() or ""
        report_date = content_div.xpath("p/span[@class='arti-update']/text()").get() or ""
        report_source = content_div.xpath("p/span[@class='arti-info']/text()").get() or ""
        report_views = content_div.xpath(
            "p/span[@class='arti-views']/span[@class='WP_VisitCount']/text()").get()
        content = content_div.xpath("string(div[@class='entry'])").get() or ""

        masuSR = MasuScrapyReportArrticle()
        # Strip zero-width spaces that appear in scraped titles/body text.
        masuSR["title"] = title.replace('\u200b', '')
        # Labels look like "发布时间：2021-01-01"; split once on the fullwidth
        # colon and take the value.  [-1] keeps the raw string (instead of
        # raising IndexError) when the label/colon is absent.
        masuSR["report_date"] = report_date.split("：", 1)[-1]
        masuSR["report_source"] = report_source.split("：", 1)[-1]
        masuSR["report_views"] = report_views
        masuSR["content"] = content.replace('\xa0', ' ').replace('\u200b', '')
        yield masuSR
