import re

import scrapy


class FynuscrapySpider(scrapy.Spider):
    """Spider for the notice board (tzgg) of www.fynu.edu.cn.

    Crawls the first three listing pages, follows each announcement link
    (skipping links to jyzd.fynu.edu.cn), and saves every announcement's
    body text to ``data_dir`` as ``<title>.txt``.
    """

    name = 'FynuScrapy'
    # allowed_domains = ['www.fynu.edu.cn/']
    url_format = "http://www.fynu.edu.cn/ch2009/article/tzgg/list_2_{}.html"

    # Listing pages 1-3:
    #   list_2_1.html, list_2_2.html, list_2_3.html
    # NOTE(review): an earlier comment also listed list_2_4.html; if page 4
    # is wanted, change range(1, 4) to range(1, 5).
    # map/list instead of a class-body comprehension: a comprehension cannot
    # see class-level names such as url_format (Python 3 scoping rule).
    start_urls = list(map(url_format.format, range(1, 4)))

    # Directory where announcement text files are written.
    # NOTE(review): hard-coded absolute Windows path — consider moving this
    # into Scrapy settings so the spider is portable.
    data_dir = r"C:\Users\zzk10\JingDong\JingDong\data"

    # Custom request headers so the crawler looks more like a real browser.
    # NOTE(review): the Cookie value is a captured session id and is almost
    # certainly stale — verify whether the site actually requires it.
    header = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Cookie": "ASPSESSIONIDAASQQCAD=BBGAKGCCOEIJLBHDHHAFEFEC; PHPSESSID=e4ea6ad2cad2f8742ecc233011dab3be",
        "Host": "www.fynu.edu.cn",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36"
    }

    def start_requests(self):
        """Issue the initial listing requests with the browser-like headers.

        Fix: previously only the follow-up detail requests carried
        ``self.header``; the listing pages were fetched with Scrapy's
        default headers, defeating the point of the custom header dict.
        """
        for url in self.start_urls:
            yield scrapy.Request(url=url, headers=self.header, callback=self.parse)

    def parse(self, response):
        """Extract announcement links from a listing page and follow them."""
        # Step 1: pull the <a> elements that hold the announcement links.
        alist = response.xpath("//div[@class='cat_box_1']/table[last()]/tr/td/a")
        for each_a in alist:
            link = response.urljoin(each_a.attrib.get("href"))
            # Step 2: request each announcement page, skipping links that
            # point at the separate jyzd.fynu.edu.cn site.
            if not link.startswith("http://jyzd.fynu.edu.cn/"):
                yield scrapy.Request(url=link, callback=self.newParse, headers=self.header)

    @staticmethod
    def _safe_filename(title):
        """Return *title* stripped and with Windows-illegal filename
        characters (\\ / : * ? " < > |) replaced by underscores.

        Fix: the raw title was used directly in ``open()``, so any title
        containing e.g. ':' or '?' crashed with OSError on Windows.
        """
        return re.sub(r'[\\/:*?"<>|]', "_", title.strip())

    def newParse(self, response):
        """Save one announcement's body text to ``data_dir/<title>.txt``."""
        # Use the announcement title as the file name.
        title = response.xpath("//h1/font[@color='#FF6600']/text()").get()
        if title is not None:
            path = r"{}\{}.txt".format(self.data_dir, self._safe_filename(title))
            with open(path, mode="w", encoding="utf-8") as f1:
                div_list = response.xpath("//div[@class='content_text']")
                for each_div in div_list:
                    # replace() filters the non-breaking spaces (&nbsp;)
                    # that pepper the page markup.
                    f1.write(each_div.xpath("string(.)").get().replace(u'\xa0', u' '))
