import hashlib
import os
import random
import re
import time
from concurrent.futures import ThreadPoolExecutor

import requests
# Shared worker pool: up to 30 listing-page jobs run concurrently (see main()).
p=ThreadPoolExecutor(30)

def get_index(url):
    """Fetch a listing page and return its HTML text, or None on failure.

    Returns None for both non-200 status codes and network-level errors,
    so the caller (runOne) can treat every failure uniformly.
    """
    try:
        response = requests.get(url)
    except requests.RequestException:
        # Previously unhandled: an exception here would be silently
        # swallowed by the thread pool's Future. Map it to the same
        # None contract as a bad status code.
        return None
    if response.status_code == 200:
        return response.text
    return None

def parse_index(res):
    """Extract every '/view...' detail path from a listing page's HTML
    and hand each one to get_detail.
    """
    # re.S makes '.' span newlines so anchors broken across lines still match.
    detail_paths = re.findall(r'<a .*?href="/view(.*?)"', res, re.S)
    for path in detail_paths:
        get_detail(path)


# Seen-URL registry: maps an mp4 URL to 1 once it has been queued for
# download, so the same video found on several detail pages is fetched once.
# NOTE(review): shared by worker threads without a lock — presumably relying
# on the GIL; a rare duplicate download on a race appears acceptable here.
urlDicts = {}

def get_detail(url):
    """Fetch one detail page, extract its first mp4 URL, and download it.

    url: the path fragment captured after '/view' on the listing page.
    Failures are reported to stdout and otherwise ignored (best effort).
    """
    url = r"http://www.369ef.com/view%s" % url
    try:
        result = requests.get(url)
    except requests.RequestException:
        # Was `except TimeoutError` / `except BaseException`: requests raises
        # its own exception hierarchy (requests.Timeout is NOT TimeoutError),
        # and BaseException also swallowed KeyboardInterrupt/SystemExit.
        print(r"%s：失败" % url)
    else:
        if result.status_code == 200:
            # The page's player config embeds the video as ... f:"<mp4 url>"
            mp4_url_list = re.findall(r'.*?f:"(.*?)"', result.text, re.S)
            if mp4_url_list:
                mp4_url = mp4_url_list[0]
                if urlDicts.get(mp4_url) != 1:  # skip already-downloaded URLs
                    urlDicts[mp4_url] = 1
                    save(mp4_url)

def save(url):
    """Download a video URL into downloads/<name>.mp4 (best effort).

    Failures (network errors, non-200, unparseable URL) are reported to
    stdout / skipped rather than raised, matching the rest of the pipeline.
    """
    try:
        video = requests.get(url)
    except requests.RequestException:
        # Narrowed from `except BaseException`, which also caught
        # KeyboardInterrupt/SystemExit; only request failures are expected.
        print(r"%s：失败" % url)
    else:
        if video.status_code == 200:
            print(r"%s：处理成功" % url)
            # Last '/<segment>.' match is the file's base name.
            nameM = re.findall(r'\/(.+?)\.', url)
            if not nameM:
                # URL yields no usable name: skip instead of crashing
                # with IndexError on nameM[-1].
                return
            filename = r'%s.mp4' % nameM[-1]
            # Previously the directory was assumed to exist, so the first
            # run crashed with FileNotFoundError on open().
            os.makedirs('downloads', exist_ok=True)
            filepath = r'downloads/%s' % filename
            with open(filepath, 'wb') as f:
                f.write(video.content)

def runOne(url):
    """Worker job: fetch one listing page and process it; report dead links."""
    html = get_index(url)
    if html is None:
        print(r"异常链接:%s" % url)
        return
    parse_index(html)

def main():
    """Queue listing pages 2..102 onto the shared thread pool."""
    for page in range(2, 103):
        page_url = r'http://www.369ef.com/list/8_%d.html' % page
        p.submit(runOne, page_url)


# Entry point: kick off the crawl when run as a script (the pool's worker
# threads keep the process alive until all submitted jobs finish).
if __name__ == '__main__':
    main()