import scrapy
from scrapy.http import Request
from urllib import parse

from SearchEngines.sites.IT.ITItem import ITItem, ITItemLoader

from SearchEngines.utils.common import get_md5


class ITSpider(scrapy.Spider):
    """Spider for cec.jmu.edu.cn: follows article links found on listing
    pages and parses each article detail page into an ``ITItem``."""

    name = "IT"
    allowed_domains = ["cec.jmu.edu.cn"]
    start_urls = ['http://cec.jmu.edu.cn/']

    def parse(self, response):
        """Parse a listing page.

        1. Extract every href on the page and resolve it against the
           current page URL (handles relative links).
        2. Schedule article detail pages for download; the downloaded
           response is handed to ``parse_content``.
        """
        post_urls = response.xpath("//@href").extract()
        for post_url in post_urls:
            # Relative links are common on this site; join against the
            # page URL to get an absolute URL.
            url = parse.urljoin(response.url, post_url)
            # Article detail pages all live under /info on this host.
            if url.startswith("http://cec.jmu.edu.cn/info"):
                yield Request(url, callback=self.parse_content)

    @staticmethod
    def parse_content(response):
        """Parse an article detail page and yield a populated ``ITItem``.

        Uses an ItemLoader so that the per-field input/output processors
        declared on ``ITItemLoader`` are applied.
        """
        item_loader = ITItemLoader(item=ITItem(), response=response)

        # XPath rules target the site's fixed, table-based article layout
        # (class names like titlestyle124904 are site-generated).
        item_loader.add_xpath("title", "//td[@class='titlestyle124904']/span/text()")
        item_loader.add_value("url", response.url)
        # Stable primary key derived from the URL.
        item_loader.add_value("url_object_id", get_md5(response.url))
        item_loader.add_xpath("content", "//td[@class='contentstyle124904']")

        # Resolve all the rules registered above into an item instance,
        # then hand it to the item pipelines.
        yield item_loader.load_item()

