import scrapy
import os
from fake_headers import Headers


# Shared fake-header generator: random Chrome-on-Windows user agents
# plus miscellaneous request headers.
header = Headers(
    browser="chrome",  # Chrome user agents only
    os="win",          # Windows platform only
    headers=True,      # also generate misc headers
)


class ABCSpider(scrapy.Spider):
    """Crawl the news listing pages of www.abc.edu.cn and save each
    article's plain text to ``output/<page>/<title>``."""

    name = "ABCSpider"
    allowed_domains = ["www.abc.edu.cn"]

    # One randomized header set, reused on every request.
    headers = header.generate()

    def start_requests(self):
        """Yield one request per listing page (1..69), creating the
        per-page output directory up front."""
        urlList = ['https://www.abc.edu.cn/xwb/4.html']
        urlFormat = 'https://www.abc.edu.cn/xwb/4_{}.html'
        urlList.extend(urlFormat.format(i) for i in range(2, 70))
        for page, url in enumerate(urlList, start=1):
            # makedirs(exist_ok=True) also creates the missing "output"
            # parent and avoids the exists()/mkdir() race of the original.
            os.makedirs("output/{}".format(page), exist_ok=True)
            yield scrapy.Request(url=url, callback=self.parse,
                                 meta={"page": page}, headers=self.headers)

    def parse(self, response):
        """Extract article links from a listing page and follow each one,
        forwarding the listing page number via meta."""
        page = response.meta['page']
        for li in response.xpath("//ul[@class='newslist']/li"):
            link = li.xpath("div/div/a").attrib.get("href")
            if link is None:
                # List item without a link — skip instead of joining None.
                continue
            yield scrapy.Request(url=response.urljoin(link),
                                 callback=self.parseReport,
                                 meta={"page": page}, headers=self.headers)

    def parseReport(self, response):
        """Extract title and body from an article page (markup differs per
        site section) and write the text to ``output/<page>/<title>``."""
        page = response.meta['page']
        url = response.url
        # The path segment right after the domain identifies the section
        # and therefore the page layout.
        attr = url.split("abc.edu.cn/")[1].split("/")[0]
        if attr == 'xxy' or attr == 'zhaoshengxx':
            articleDiv = response.xpath("//div[@class='article']")[0]
            header = articleDiv.xpath("h1")[0].xpath('string(.)').get()
            article = articleDiv.xpath("div[@class='section']")[0].xpath("string(.)").get()
        elif attr == 'jwc':
            articleDiv = response.xpath("//div[@class='service-content']")[0]
            header = articleDiv.xpath("h3/text()").get()
            article = articleDiv.xpath("string(.)").get()
        elif attr == 'jiuye':
            header = response.xpath("//div[@class='title']/h3/text()").get()
            article = response.xpath("//div[@id='xwcontentdisplay']")[0].xpath("string(.)").get()
        elif attr == 'imc':
            # Information Management Center
            header = response.xpath("//h3[@class='education-title']/text()").get()
            article = response.xpath("//div[@class='education-content']")[0].xpath("string(.)").get()
        elif attr == 'tuanwei':
            # Youth League Committee
            header = response.xpath("//div[@id='title']/text()").get()
            article = response.xpath("//div[@id='newscontent']")[0].xpath("string(.)").get()
        else:
            # Unknown section: bail out instead of falling through to an
            # unbound-name crash on header/article (bug in the original).
            self.logger.warning("unhandled section in url: %s", url)
            return
        if header is None or article is None:
            # .get() may return None when the selector matched nothing.
            self.logger.warning("missing title or body for: %s", url)
            return
        # Slashes in the title would be treated as path separators.
        path = "output/{}/{}".format(page, header.replace("/", "、"))
        with open(path, mode="w", encoding='utf-8') as f:
            # Normalize non-breaking spaces before saving.
            f.write(article.replace(u'\xa0', u' ').strip())
