import csv
import multiprocessing
import os

import lxml.html
import requests


# Alias lxml's etree submodule (as re-exported via lxml.html) so the parser
# is reachable as etree.HTML() below.
etree = lxml.html.etree
# Paginated Top-250 listing URL; the "start" offset (0, 25, 50, ...) is
# filled in via str.format by the caller.
url = 'https://movie.douban.com/top250?start={}&filter='

def start(url):
    """Fetch one Douban Top-250 listing page and append its movies to a CSV.

    Args:
        url: The listing-page URL to scrape (one paginated Top-250 page).

    Side effects:
        Creates the ``douban_top`` directory if needed and appends one row
        per movie (name, link, score) to ``douban_top/3-6.csv``, writing
        the header row only when the file is new/empty.

    Raises:
        requests.HTTPError: If the server responds with an error status.
    """
    print('正在抓取: ' + url)
    # Send a browser-like User-Agent (Douban rejects the default
    # python-requests UA) and bound the request with a timeout so a hung
    # connection cannot stall the whole scrape.
    response = requests.get(
        url,
        headers={'User-Agent': 'Mozilla/5.0'},
        timeout=10,
    )
    response.raise_for_status()
    response.encoding = 'utf-8'
    html = etree.HTML(response.text)
    hrefs = html.xpath('//div[@class="hd"]/a/@href')
    movies_name = html.xpath('//div[@class="hd"]/a/span[1]/text()')
    score = html.xpath('//div[@class="bd"]//span[2]/text()')

    # BUG FIX: the original path mixed separators ('./douban_top\\3-6.csv'),
    # which on POSIX writes a file literally named 'douban_top\3-6.csv'
    # instead of a file inside the directory. Build the path portably and
    # make sure the directory exists.
    out_dir = 'douban_top'
    os.makedirs(out_dir, exist_ok=True)
    csv_path = os.path.join(out_dir, '3-6.csv')
    # Write the header only when the file is new/empty — the original
    # called writeheader() on every append, duplicating the header row
    # once per scraped page.
    need_header = not os.path.exists(csv_path) or os.path.getsize(csv_path) == 0
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open(csv_path, 'a', encoding='utf-8', newline='') as f:
        w = csv.DictWriter(f, fieldnames=['电影名称', '电影地址', '电影评分'])
        if need_header:
            w.writeheader()
        for movie_name, movie_url, movie_score in zip(movies_name, hrefs, score):
            print('正在保存' + movie_name)
            w.writerow({'电影名称': movie_name, '电影地址': movie_url, '电影评分': movie_score})
        print('保存成功')


if __name__ == '__main__':
    # Scrape all ten listing pages sequentially (25 movies per page).
    for offset in range(0, 250, 25):
        start(url.format(offset))
