import scrapy

from qianmu.items import UniversityItem


class UsnewsSpider(scrapy.Spider):
    """Spider that crawls the qianmu.org university ranking page and
    scrapes detail data for every university it links to."""

    name = 'usnews'
    # Domains the spider is allowed to crawl
    allowed_domains = ['www.qianmu.org']
    # Entry point: the ranking list page
    start_urls = ['http://www.qianmu.org/ranking/1528.htm']

    def parse(self, response):
        """Called by the framework for each start_urls response.

        Extracts the university detail-page links from the ranking table
        (second cell of every data row, skipping the header row) and
        schedules each one for parse_university.
        """
        university_links = response.xpath(
            '//div[@class="rankItem"]/table//tr[position()>1]/td[2]/a/@href'
        ).getall()
        for url in university_links:
            yield response.follow(url, self.parse_university)

    def parse_university(self, response):
        """Scrape a university detail page: the name plus the key/value
        rows of its infobox table."""
        item = UniversityItem()
        info = {}
        # Page heading holds the university name.
        item['name'] = response.xpath('//div[@id="wikiContent"]/h1/text()').get()

        tables = response.xpath('//div[@id="wikiContent"]/div[@class="infobox"]/table')
        if tables:
            infobox = tables[0]
            # First column: attribute labels.
            labels = infobox.xpath('.//td[1]/p/text()').getall()
            # Second column: values. A cell may contain several <p> nodes,
            # so join all of its text and drop tab characters.
            value_cells = infobox.xpath('.//td[2]')
            values = []
            for cell in value_cells:
                joined = ''.join(cell.xpath('.//text()').getall())
                values.append(joined.replace('\t', ''))
            # Only map labels to values when the two columns line up;
            # otherwise the item carries just the name.
            if len(labels) == len(values):
                info.update(zip(labels, values))
                item['rank'] = info.get('排名')
                item['country'] = info.get('国家')
                item['state'] = info.get('州省')
                item['city'] = info.get('城市')
                item['undergraduate_count'] = info.get('本科生人数')
                item['postgraduate_count'] = info.get('研究生人数')
                item['website'] = info.get('网址')
        yield item
