import time

from lxml import etree

import scrapy
from selenium import webdriver

class ScujwcSpider(scrapy.Spider):
    """Spider for the SCU Academic Affairs Office site (jwc.scu.edu.cn).

    Crawls the notice ("tzgg") and news ("xwbd") listing sections, follows
    their pagination, and yields one item dict per article with keys:
    title, date, content, pic, url, source.
    """

    name = 'scujwc_spider'
    allowed_domains = ['jwc.scu.edu.cn']
    start_urls = ['https://jwc.scu.edu.cn/']

    def parse(self, response):
        """Entry point: queue the two listing sections."""
        # Academic Affairs notices
        yield scrapy.Request('https://jwc.scu.edu.cn/tzgg.htm', callback=self.parse_jwctz)
        # Academic Affairs news
        yield scrapy.Request('https://jwc.scu.edu.cn/xwbd/xwbd.htm', callback=self.parse_jwcxw)

    def parse_jwctz(self, response):
        """Parse one page of the notices (tzgg) listing."""
        yield from self._parse_listing(response, 'tzgg/', self.parse_jwctz)

    def parse_jwcxw(self, response):
        """Parse one page of the news (xwbd) listing."""
        yield from self._parse_listing(response, 'xwbd/', self.parse_jwcxw)

    def _parse_listing(self, response, section, callback):
        """Yield article requests from a listing page, then follow pagination.

        section  -- URL path prefix of this listing ('tzgg/' or 'xwbd/'),
                    used to resolve the relative next-page link.
        callback -- the listing callback to reuse for the next page.
        """
        base = self.start_urls[0]
        # Article links: strip a leading '../' so they resolve from the site root.
        for href in response.xpath('//ul[@class="list-d-list"]//li//a/@href').extract():
            if href.startswith('../'):
                href = href[3:]
            yield scrapy.Request(base + href, callback=self.parse_article)
        # Pagination: the next-page href is sometimes already prefixed with the
        # section path and sometimes relative to it — normalize before joining.
        next_url = response.xpath('//span[@class="p_next p_fun"]/a/@href').extract_first()
        if next_url is not None:
            if not next_url.startswith(section):
                next_url = section + next_url
            yield scrapy.Request(base + next_url, callback=callback)

    def parse_article(self, response):
        """Extract title, date, body text and first image from an article page."""
        title = response.xpath('//h3[@class="page-title"]//text()').extract_first()
        # The date span carries a fixed 5-character label prefix; drop it.
        # Guard against a missing node (the original crashed with TypeError).
        raw_date = response.xpath('//p[@class ="page-date"]/span[1]/text()').extract_first()
        date = raw_date[5:] if raw_date else ''
        # Some articles wrap the body in div.Section1, others in div.page-content.
        content_parts = response.xpath('//div[@class="Section1"]//text()').extract()
        if not content_parts:
            content_parts = response.xpath('//div[@class="page-content"]//text()').extract()
        content = "".join(content_parts)
        # First image (if any), made absolute against the site root. The original
        # retried a second xpath that was identical except for whitespace in the
        # predicate, so a single lookup is equivalent.
        pic = response.xpath('//div[@class="page-content"]//img/@src').extract_first()
        pic = self.start_urls[0] + pic if pic else ''
        yield {
            'title': title,
            'date': date,
            'content': content,
            'pic': pic,
            'url': response.url,
            'source': '教务处',
        }








