# 实战（一）之使用自带urllib和re正则表达式获取电影详情页链接
from urllib import request
import re
import random
import time

# Build an opener that mimics a regular browser so the site serves normal pages.
base_url = 'https://www.ygdy8.com'

# Pool of candidate User-Agent header tuples; one is chosen at random when the
# module loads, which makes repeated runs look slightly less uniform.
user_agents = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                              '(KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'), ]
headers = [random.choice(user_agents), ("Connection", "keep-alive")]

# Build the opener and attach the request headers used by every fetch below.
opener = request.build_opener()
opener.addheaders = headers

# The largest page number appears in "list_23_<n>.html" pagination links on the
# index page; the last match is the highest page.
def get_pageNum():
    """Return the largest page number of the movie list, or 0 on failure.

    Fetches the list index page through the module-level ``opener`` and scans
    it for ``list_23_<n>`` pagination links.
    """
    url = 'https://www.ygdy8.com/html/gndy/dyzz/index.html'
    bytes_data = opener.open(url).read()
    # Decode instead of str(bytes): str() would yield the "b'...'" repr with
    # backslash escapes.  The site serves GBK; ignore undecodable bytes.
    data = bytes_data.decode('gbk', errors='ignore')
    result = re.findall(r'list_23_(\d*)', data)
    if not result:
        return 0
    max_page = result[-1]
    # \d* can match an empty string, which would make int() raise.
    if not max_page.isdigit():
        return 0
    print('最大的页码数：', max_page)
    return int(max_page)


# Crawl one page of the movie list and collect the detail-page links on it.
def find_by_page(page=1):
    """Return a list of absolute detail-page URLs found on list page *page*.

    Prints a progress line per page and per link.  Sleeps 1-3 seconds every
    10th page to reduce the risk of an IP ban.
    """
    if page % 10 == 0:
        # Throttle every 10 pages so the crawl does not hammer the server.
        time.sleep(random.randint(1, 3))
    url = 'https://www.ygdy8.com/html/gndy/dyzz/list_23_{}.html'.format(page)
    bytes_data = opener.open(url).read()
    # Decode instead of str(bytes): str() would yield the "b'...'" repr.
    # The site serves GBK; ignore undecodable bytes.
    data = bytes_data.decode('gbk', errors='ignore')
    print('爬取第%s页：' % page)
    # Narrow the search to the content list first; guard against a missing
    # match instead of letting [0] raise IndexError on an unexpected layout.
    sections = re.findall(r'class="co_content8"[\s\S]*</ul>', data)
    if not sections:
        return []
    paths = re.findall(r'/html/gndy/dyzz/\d{8}/\d{5}\.html', sections[0])
    urls = [base_url + path for path in paths]
    for i, information_url in enumerate(urls, start=1):
        print('爬取第%s页，第%s条：' % (page, i) + information_url)
    return urls


# Aggregate the per-page link lists produced by find_by_page() into one
# de-duplicated collection.
def get_urls(pages=1):
    """Crawl list pages 1..*pages* and return a de-duplicated set of URLs.

    Returns a ``set`` (same as the original contract), so iteration order is
    unspecified.
    """
    # Accumulate directly into a set: de-duplicates as we go instead of
    # building an intermediate list and converting afterwards.
    all_url = set()
    for page in range(1, pages + 1):
        all_url.update(find_by_page(page))
    print('获取到%s条链接' % len(all_url))
    return all_url

if __name__ == '__main__':
    # BUG FIX: time.clock() was removed in Python 3.8 (deprecated since 3.3);
    # perf_counter() is the documented replacement for interval timing.
    start = time.perf_counter()
    urls = get_urls(get_pageNum())
    end = time.perf_counter()
    print('一共运行了{}秒'.format(end - start))
    # Build the output with join: repeated str += is quadratic in CPython's
    # worst case and non-portable across interpreters.
    out = ''.join(str(url) + '\n' for url in urls)
    # Explicit encoding so the result does not depend on the platform default.
    with open('all_urls.txt', 'w', encoding='utf-8') as f:
        f.write(out)

