from lxml import etree
import time
import gevent
from gevent import monkey
# Patch the stdlib (sockets, ssl, ...) to be cooperative BEFORE importing
# requests, so that requests' network I/O yields to other greenlets.
monkey.patch_all()
import requests

# Entry page of the Douban Top 250 movie chart.
url = 'https://movie.douban.com/top250'
# Browser-like User-Agent sent with every request (see fetch_page);
# presumably needed to avoid the site rejecting bare clients — TODO confirm.
headers ={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
}
def fetch_page(url):
    """GET *url* with the shared browser headers and return the Response.

    Raises requests.HTTPError on a non-2xx status so callers don't
    silently parse an error page.
    """
    # timeout keeps a stalled connection from blocking a greenlet forever
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    return response

def fetch_content(url):
    """Download *url* and return the raw response body as bytes."""
    return fetch_page(url).content

def parse(url):
    """Crawl the Douban Top 250 chart starting at *url* and print each title.

    Fetches the first page synchronously, extracts the pagination links,
    downloads the remaining pages concurrently with gevent, then prints
    an enumerated list of every movie title found.
    """
    page = fetch_content(url)
    html = etree.HTML(page)

    xpath_movie = '//*[@id="content"]/div/div[1]/ol/li'
    xpath_title = './/span[@class="title"]'
    xpath_pages = '//*[@id="content"]/div/div[1]/div[2]/a'

    # Movie entries already present on the first page.
    result = list(html.xpath(xpath_movie))

    # Pagination hrefs are relative query strings ('?start=25&filter=');
    # join each with the base url. Use a distinct loop variable so the
    # `url` parameter is not shadowed.
    fetch_list = [url + a.get('href') for a in html.xpath(xpath_pages)]

    # Fetch the remaining pages concurrently, one greenlet per page.
    jobs = [gevent.spawn(fetch_content, page_url) for page_url in fetch_list]
    gevent.joinall(jobs)

    # Parse each downloaded page and accumulate its movie elements.
    for body in (job.value for job in jobs):
        result.extend(etree.HTML(body).xpath(xpath_movie))

    for i, movie in enumerate(result, 1):
        title = movie.find(xpath_title).text
        print(i, title)

if __name__ == '__main__':
    # Time the whole crawl and report the elapsed seconds.
    started = time.time()
    parse(url)
    elapsed = time.time() - started
    print(elapsed)
    print('完成')