import re
import requests
import os
import hashlib
import time
from concurrent.futures import ThreadPoolExecutor
p=ThreadPoolExecutor(30) # shared thread pool; at most 30 worker threads run concurrently

def get_index(url):
    """Fetch the listing page at *url*.

    Returns the page HTML on HTTP 200, otherwise None (implicit).
    """
    response = requests.get(url)
    if response.status_code != 200:
        return None
    return response.text


def parse_index(res):
    """Done-callback for a get_index future: extract detail-page links.

    *res* is a concurrent.futures.Future whose result is the listing-page
    HTML, or None when the index request did not return HTTP 200.
    Each extracted link is handed to the thread pool for detail fetching.
    """
    html = res.result()  # unwrap the future once get_index has finished
    if html is None:
        # get_index returns None on non-200 responses; nothing to parse.
        return
    urls = re.findall(r'class="items".*?href="(.*?)"', html, re.S)
    for url in urls:
        # BUG FIX: the original wrote p.submit(get_detail(url)), which ran
        # get_detail immediately in this callback thread and submitted its
        # None return value. Pass the callable and its argument separately
        # so the pool actually schedules the work.
        p.submit(get_detail, url)

def get_detail(url):
    """Fetch one detail page and pass its first media URL to save()."""
    # Links scraped from the index are site-relative; prepend the host.
    full_url = url if url.startswith('http') else 'http://www.xiaohuar.com%s' % url
    page = requests.get(full_url)
    if page.status_code != 200:
        return
    media_urls = re.findall(r'id="media".*?src="(.*?)"', page.text, re.S)
    if media_urls:
        save(media_urls[0])

def save(url, save_dir='D:\\kk'):
    """Download the media file at *url* into *save_dir*.

    The target file name is taken from the last path component of *url*.
    Fixes two defects in the original:
    - the path template was a RAW string containing doubled backslashes
      (r'D:\\kk\\%s'), so the literal separators were '\\\\' instead of '\\';
    - the download failed with FileNotFoundError when the target directory
      did not exist. The directory is now created on demand, and the
      hard-coded location is generalized into a defaulted parameter.
    """
    video = requests.get(url)
    if video.status_code == 200:
        filename = os.path.basename(url)
        os.makedirs(save_dir, exist_ok=True)  # tolerate a pre-existing dir
        filepath = os.path.join(save_dir, filename)
        with open(filepath, 'wb') as f:
            f.write(video.content)
            print("下载完成：url=" + url)

def main():
    """Queue fetches for the first five listing pages.

    Each index fetch runs on the pool; parse_index fires as a done-callback
    and fans the detail-page downloads back into the same pool.
    """
    for page in range(5):
        index_url = 'http://www.xiaohuar.com/list-3-%s.html' % page
        print(index_url)
        future = p.submit(get_index, index_url)
        future.add_done_callback(parse_index)

if __name__ == '__main__':
    # Script entry point: start the crawl only when run directly.
    main()