from multiprocessing import Pool
import time

import requests
from lxml import etree

# NOTE(review): `threads` and `thread_num` are never used below — they look
# like leftovers from an earlier thread-based version; confirm and remove.
threads = []
thread_num = 10
# Count of pages fetched by fetch().
# NOTE(review): with multiprocessing.Pool each worker process gets its own
# copy of this global, so increments made in workers never reach the parent.
download_pages = 0


def fetch(url):
    """Fetch *url* and return the response body as text.

    Also increments the module-level ``download_pages`` counter
    (only meaningful within the calling process).

    Raises:
        requests.HTTPError: if the server responds with a 4xx/5xx status.
        requests.Timeout: if the server does not respond within 10 seconds.
    """
    # A timeout prevents a pool worker from hanging forever on a dead server.
    r = requests.get(url, timeout=10)
    # raise_for_status() is a no-op for success codes and raises for 4xx/5xx,
    # so the previous explicit `status_code != 200` guard was redundant.
    r.raise_for_status()
    global download_pages
    download_pages += 1
    return r.text


def parse_university(url):
    """Fetch a university detail page and return its data as a dict.

    The dict always contains ``'name'`` plus one entry per infobox table
    row. Returns None when the page has no infobox table, or when the
    table's key/value columns do not line up.
    """
    s = etree.HTML(fetch(url))
    # The page heading (h1) is the university name.
    data = {'name': s.xpath('//div[@id="wikiContent"]/h1/text()')[0]}
    # The first table column holds the attribute names.
    table = s.xpath('//div[@id="wikiContent"]/div[@class="infobox"]/table')
    if not table:
        return
    table = table[0]
    keys = table.xpath('.//td[1]/p/text()')
    # The second column holds the values; a cell may contain several <p>
    # elements, so join every text node and drop stray tabs.
    cols = table.xpath('.//td[2]')
    values = [''.join(col.xpath('.//text()')).replace('\t', '') for col in cols]
    # A ragged table cannot be zipped reliably — skip the whole page.
    if len(keys) != len(values):
        return
    data.update(zip(keys, values))
    return data


def process_data(data):
    """Emit one parsed record to stdout; silently skip empty results."""
    if not data:
        return
    print(data)

def download(url):
    """Fetch one university page, parse it, and hand off the result."""
    process_data(parse_university(url))


if __name__ == '__main__':
    start_time = time.time()
    # 1. Fetch the entry (ranking) page.
    selector = etree.HTML(fetch('http://www.qianmu.org/ranking/902.htm'))
    # 2. Extract the per-university links (skip the table header row).
    links = selector.xpath('//div[@class="rankItem"]/table//tr[position()>1]/td[2]/a/@href')

    # 3. Fetch and parse each university page in a process pool.
    pool = Pool(4)
    pool.map(download, links)
    pool.close()
    pool.join()
    # time.time() returns seconds, so the elapsed difference is already in
    # seconds — the old `/ 1000` reported a value 1000x too small.
    cost_time = time.time() - start_time
    # Each Pool worker increments its own copy of download_pages, so the
    # parent only counts its own fetch (the entry page); add the pages the
    # workers fetched, one per link.
    total_pages = download_pages + len(links)
    print('download %s pages, cost %s seconds' % (total_pages, cost_time))
