import os
import re
import time
from urllib import request
from urllib.parse import urljoin

# Set of category links already visited in this run (session-level dedup).
url_id = set()

# Parse a detail page, extract the image link, and download the image.
def parse_detail(response, title):
    """Download the first matching image from a gallery detail page.

    Args:
        response: an open HTTP response (file-like; ``.read()`` returns bytes).
        title: category title, used as the sub-folder name under ``./weiyi/``.

    Side effects: creates ``./weiyi/<title>/`` if missing and writes the
    downloaded image there; prints a message when no image address is found.
    """
    # Create the target folder for this category.
    path = os.path.join('./weiyi/', title)
    if not os.path.exists(path):
        os.makedirs(path)

    # Pages are GBK-encoded; replace undecodable bytes instead of crashing
    # on the occasional malformed page (strict decode aborted the crawl).
    html = response.read().decode('gbk', errors='replace')
    # Regex for the image address on the detail page.
    img_pat = re.compile(r'align="center".+?src="(.+?)"', re.S)
    img_url = img_pat.search(html)
    if img_url is not None:
        img_url = img_url.group(1)
        fname = img_url.split('/')[-1]
        # Download the image into the category folder.
        request.urlretrieve(img_url, os.path.join(path, fname))
    else:
        print('未找到图片地址')

# Issue the paginated requests for one category listing.
def parse_list(response, title):
    """Extract the total page count from a listing page, then fetch and
    parse every page of that listing.

    Args:
        response: open HTTP response for page 1 of the listing
                  (must expose ``.read()`` and ``.url``).
        title: category title, forwarded to parse_detail().
    """
    # Pages are GBK-encoded; tolerate stray bytes rather than crash.
    html = response.read().decode('gbk', errors='replace')
    # The total page count is the first number after class="pages".
    total_pat = re.compile(r'class="pages">.+?(\d+)', re.S)
    res = total_pat.search(html)
    if res is None:
        # Report instead of silently doing nothing (matches parse_detail).
        print('未找到总页数')
        return

    total = int(res.group(1))
    base_url = response.url
    for i in range(1, total + 1):
        if i == 1:
            fullurl = base_url
        else:
            # Page N of ".../123.html" is ".../123_N.html".
            stem, ext = base_url.rsplit('.', 1)
            fullurl = '{}_{}.{}'.format(stem, i, ext)
        print(fullurl)

        # Use a distinct name: don't clobber the caller's response object.
        page_resp = request.urlopen(fullurl)
        parse_detail(page_resp, title)
        # Throttle between requests so we don't hammer the server.
        time.sleep(0.5)


# Collect category links from the index page and crawl each unseen one.
def get_cat():
    """Scrape the category index page and crawl every not-yet-visited
    category listing.

    Side effects: issues HTTP requests, records visited links in the
    module-level ``url_id`` set, and prints progress messages.
    """
    base_url = 'http://www.mmonly.cc/mmtp/'
    response = request.urlopen(base_url)

    # Index page is GBK-encoded; tolerate stray bytes rather than crash.
    html = response.read().decode('gbk', errors='replace')
    # Raw string: \w, \d and \. are regex escapes, not string escapes
    # (non-raw form emits DeprecationWarning / SyntaxWarning on 3.12+).
    url_pat = re.compile(r'(/mmtp/\w+/\d+\.html).+?<span><a.+?>(.+?)</a>', re.S)
    url_list = url_pat.findall(html)
    for url in url_list:
        if url[0] not in url_id:
            # NOTE(review): hard-coded id filter limits the crawl to one
            # category (looks like a testing leftover) — remove to crawl all.
            if '244877' in url[0]:
                print(url)
                link = url[0]  # first tuple item is the relative link
                # urljoin is public API of urllib.parse; the original relied
                # on a private re-export inside urllib.request.
                fullurl = urljoin(base_url, link)
                response = request.urlopen(fullurl)
                parse_list(response, url[1])
                # Brief pause between categories.
                time.sleep(0.5)
                url_id.add(url[0])
        else:
            print('爬过了')

if __name__ == '__main__':
    # Entry point: start the crawl from the category index page.
    get_cat()
