import random

import scrapy
import requests
from lxml import etree
import re
from school.items import SchoolItem


class JiangsuSpider(scrapy.Spider):
    """Crawl the "综合新闻" (general news) section of Jiangnan University.

    Walks the paginated listing at news.jiangnan.edu.cn in descending page
    order, fetches each article's detail page, and yields one ``SchoolItem``
    per article (title, date, URL, body text, source, plus synthetic
    heat/view counts).
    """

    name = "jiangsu"
    # allowed_domains = ["www.school.com"]
    start_urls = ["https://news.jiangnan.edu.cn/yw.htm"]
    # URL template for listing pages; the page number is %-interpolated.
    url = "https://news.jiangnan.edu.cn/yw/%d.htm"
    # Next listing page to request; decremented after each page is queued.
    Page_num = 281
    # UA-spoofing headers for the article-detail fetch.  Hoisted to a class
    # constant: the dict is loop-invariant, no need to rebuild it per article.
    HEADERS = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }

    def parse(self, response):
        """Parse one listing page: yield an item per article, then queue the
        next listing page.

        NOTE(review): fetching the article body with blocking ``requests.get``
        inside a Scrapy callback stalls the reactor; yielding a
        ``scrapy.Request`` per article with a detail callback would be the
        idiomatic fix.  Kept as-is to preserve the per-listing-page item flow.
        """
        new_list = response.xpath("//*[@class='listpage_list']//dt")
        for dt in new_list:
            # .get() returns None instead of raising: the original used
            # extract()[0], which raises IndexError on any malformed <dt>
            # that lacks an <a> or <span> child.
            title = dt.xpath("./a/text()").get()
            date = dt.xpath("./span/text()").get()
            href = dt.xpath("./a/@href").get()
            if title is None or href is None:
                # Skip malformed listing entries instead of crashing the page.
                continue
            # Build the absolute article URL from the site root + relative href.
            handle_url = "https://news.jiangnan.edu.cn/" + href
            page_text = requests.get(url=handle_url, headers=self.HEADERS).content
            html = etree.HTML(page_text)
            # Full article body text (all text nodes joined).
            content = ''.join(html.xpath("//*[@class='v_news_content']//text()"))
            # "来源" (source) field.  Guard both lookups: the original indexed
            # Provenance[0] and called match.group(1) unconditionally, raising
            # IndexError / AttributeError whenever the span is missing or the
            # pattern does not match.
            provenance = html.xpath("//*[@class='con_title']/span/text()")
            match = re.search(r'来源：([^ ]*)', provenance[0]) if provenance else None
            source = match.group(1) if match else ""

            item = SchoolItem()
            item["school"] = "江南大学"
            item["Col"] = "综合新闻"
            item["Heat"] = random.randint(500, 1000)
            item["FWLCount"] = random.randint(100, 2000)

            item["Time"] = date
            item["URL"] = handle_url
            item["Title"] = title
            item["Text"] = content
            item["Provenance"] = source
            yield item

        # Pagination runs in descending order.  The >= 280 cutoff stops after
        # pages 281 and 280 -- presumably a deliberate test limit; lower the
        # bound (e.g. >= 1) to crawl the full archive.  TODO confirm intent.
        if self.Page_num >= 280:
            # The original wrapped this in format(), which is a no-op here.
            next_url = self.url % self.Page_num
            self.Page_num -= 1
            # Use the spider's logger instead of print for crawl diagnostics.
            self.logger.info("下一页：%s", next_url)
            yield scrapy.Request(url=next_url, callback=self.parse)

