#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File    :   unsi.py    
@Contact :   291622538@qq.com

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2021/4/11 14:43   fan        1.0         None
"""
from abc import ABC
from scrapy import Spider, Request

from website.items import UniversityItem
import tldextract


class UniversityScrapy(Spider, ABC):
    """Scrape the Times 2021 world university ranking.

    Crawls the paginated result list on universityrankings.ch, follows each
    university's detail page, and yields ``UniversityItem`` objects containing
    the world ranking, university name, country, homepage URL, and the
    registered domain name extracted from that URL.
    """

    # Spider name used by `scrapy crawl university_spider`.
    name = "university_spider"
    # Route items through the dedicated university pipeline only.
    custom_settings = {
        'ITEM_PIPELINES': {'website.pipelines.SaveUniversityPipeline': 600},
    }

    def start_requests(self):
        """Entry point: request every result page (50 rows per page, offsets 0..1000)."""
        url_template = (
            "https://www.universityrankings.ch/results/Times/2021"
            "?ranking=Times&year=2021&region=&q=&s=%s"
        )
        # The listing is paginated via the `s` query parameter (row offset).
        for offset in range(0, 1001, 50):
            self.logger.debug("requesting ranking page at offset %d", offset)
            # Duplicate filtering is left at Scrapy's default behavior.
            yield Request(url_template % offset, callback=self.university_page_parse)

    def university_page_parse(self, response):
        """Parse one ranking results page; yield one request per university detail page."""
        # Rank cell text (e.g. "1", "= 5"); remove all embedded spaces.
        rank_list = [
            rank.replace(' ', '')
            for rank in response.xpath(
                '//*[@id="RankingResults"]/tbody/tr/td[1]/div/span/text()'
            ).extract()
        ]
        # NOTE(review): this selector is broader than the rank selector above
        # (td[2] appears to be the intended name column) — confirm against the
        # live page that rank_list and href_list stay aligned one-to-one.
        href_list = response.xpath('//tbody/tr/td/div/a/@href').extract()

        for rank, href in zip(rank_list, href_list):
            item = UniversityItem()
            item["world_ranking"] = rank
            # Carry the partially-filled item to the detail-page callback via meta.
            yield Request(
                response.urljoin(href),
                meta={"item": item},
                callback=self.university_detailed_page_parse,
            )

    def university_detailed_page_parse(self, response):
        """Fill name, country, URL and domain on the item from meta, then yield it."""
        item = response.meta["item"]
        item["university_name"] = "".join(response.xpath('//h1/text()').extract())
        item["country"] = "".join(
            response.xpath('//*[@id="middle"]/div[1]/div[2]/div[1]/a[1]/text()').extract()
        )
        url = "".join(
            response.xpath('//*[@id="middle"]/div[1]/div[2]/div[1]/a[2]/@href').extract()
        )
        item["url"] = url
        # tldextract splits the host; `suffix` is the public suffix
        # (e.g. "edu", "ac.uk"), `domain` the registrable label before it.
        parts = tldextract.extract(url)
        item["domain_name"] = parts.domain + "." + parts.suffix

        self.logger.debug("scraped item: %r", item)
        yield item
