# -*- coding: utf-8 -*-

"""
meitulu 美图录爬虫

用别人代码改的
原始文件：https://github.com/GuoBinxs/MeituluSpider/blob/0fb943b2c17c8455bfc0d51e7518d9f5c6e5ef1a/meituluspider.py

❤ ❤ ❤ Evelyn艾莉 ❤ ❤ ❤
"""

import codecs
import hashlib
import itertools
import json
import logging
import os
import random
import re
import time
from bs4 import BeautifulSoup
from multiprocessing import Pool

import requests
from pyquery import PyQuery
from requests.exceptions import RequestException

# Module-level setup: logging, HTTP session, and output directories.
# NOTE: the original format string contained literal NUL bytes ('\0')
# between fields — almost certainly a copy/encoding accident; replaced
# with plain spaces.
logging.basicConfig(level=logging.WARNING,
                    format='%(asctime)s [%(levelname)s] [%(process)d] %(filename)s[%(lineno)d] %(message)s')

BASE_URL = 'https://www.meitulu.com'
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/59.0.3071.115 '
                  'Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
}

# One shared session so keep-alive connections are reused across requests.
SESSION = requests.Session()

DIR_CURRENT = os.path.dirname(os.path.abspath(__file__))
DIR_IMAGES = os.path.join(DIR_CURRENT, 'meitulu')
# The '_cache' subdirectory is used by get_page/parse_model_page but was
# never created anywhere — create the whole tree up front.
DIR_CACHE = os.path.join(DIR_IMAGES, '_cache')
if not os.path.isdir(DIR_CACHE):
    os.makedirs(DIR_CACHE)


def md5sum(s):
    """Return the hex MD5 digest of *s*.

    Accepts text or bytes; text is UTF-8 encoded first.  The original
    checked ``isinstance(s, unicode)``, which raises NameError on
    Python 3 — checking for *bytes* instead works on both versions.
    """
    if not isinstance(s, bytes):
        s = s.encode('utf-8')
    return hashlib.md5(s).hexdigest()


def get_page(url):
    """Fetch *url*, caching the HTML on disk; return the text or None.

    The cache file is keyed by the MD5 of the URL.  On a cache miss the
    page is fetched, stored, and a random 1-5s delay is applied to be
    polite to the server.  The original fell through and tried to open
    the cache file even when the fetch had failed (non-200 status or a
    request exception), raising IOError — now returns None instead.
    """
    cache_dir = os.path.join(DIR_IMAGES, '_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)  # original assumed this dir already existed
    cache_path = os.path.join(cache_dir, 'page_%s.html' % md5sum(url))
    logging.debug(u'cache path: %s', cache_path)

    if not os.path.exists(cache_path):
        try:
            response = SESSION.get(url, headers=HEADERS)  # download the page
            if response.status_code == 200:
                response.encoding = 'utf-8'
                with codecs.open(cache_path, 'w', encoding='utf-8') as fp:
                    fp.write(response.text)
        except RequestException as error:
            logging.error(u'get_page %s error: %s', url, error)
        finally:
            # Throttle even on failure so retries don't hammer the site.
            seconds = random.randint(1, 5)
            logging.debug('sleep %s seconds', seconds)
            time.sleep(seconds)

    if not os.path.exists(cache_path):
        logging.error(u'Get nothing from %s', url)
        return None

    # Subsequent image downloads send this page as the Referer.
    HEADERS['Referer'] = url
    with codecs.open(cache_path, 'r', encoding='utf-8') as fp:
        return fp.read()


def download(url, save_path):
    """Download one image from *url* to *save_path*.

    Existing files are skipped; suspiciously small existing files
    (likely truncated or error pages) are logged as a warning.
    """
    logging.info(u'准备下载：%s', url)

    if os.path.exists(save_path):
        kb = os.path.getsize(save_path) >> 10
        logging.info(u'文件已经存在（%sKB），跳过。。。', kb)
        if kb < 100:
            logging.warning(u'疑，文件怎么小于 100K(%s)? %s', kb, save_path)
            if kb < 50:
                # Really tiny — pause so the anomaly is noticeable in the log.
                time.sleep(3)
        return

    response = SESSION.get(url, headers=HEADERS)
    logging.info(u'正在下载：%s', url)

    if response.status_code != 200:
        logging.warning(u'-' * 50)
        logging.warning(u'status code: %s', response.status_code)
        logging.warning(u'    content: %s', response.content)
        logging.warning(u'-' * 50)
        return

    with open(save_path, 'wb') as f:
        f.write(response.content)
    logging.info(u'下载完成：%s', url)


def parse_set_page(html):
    """Extract (album_url, title) pairs from a listing page.

    Returns a list of 2-tuples, or None when nothing matched.
    """
    items = PyQuery(html)(u'.boxs li')
    matches = re.findall(u'<a href="(https://www.meitulu.com/item/\d+.*?)".*?alt="(.*?)".*?</a>', str(items))
    return matches if matches else None


def get_model_urls(url):
    """Return all page URLs of one album, first page included.

    Pagination pages are named ``item/<id>_<n>.html``.  The original
    returned None for single-page albums (no pagination link found),
    which made the caller iterate over None and crash — now the album
    URL itself is always returned.
    """
    html = get_page(url)
    # Total page count, scraped from the pagination footer.
    maxpage = re.search(r'html">(\d+)</a> <a class', html)
    item_id = re.search(r'/item/(\d+).html', url).group(1)
    urls = [url]
    if maxpage:
        urls.extend('https://www.meitulu.com/item/%s_%d.html' % (item_id, num)
                    for num in range(2, int(maxpage.group(1)) + 1))
    return urls


def parse_model_page(html):
    """Extract (alt, src) image tuples from an album page, with a JSON cache.

    The cache file is keyed by the MD5 of the page HTML.  Fixes from the
    original: the ``or True`` that made the cache check dead code (every
    call re-parsed the page), and the ``open(..., 'wb')`` handle that was
    never closed and used binary mode with ``ensure_ascii=False`` output.
    """
    cache_dir = os.path.join(DIR_IMAGES, '_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    json_path = os.path.join(cache_dir, 'mode_path_%s.json' % md5sum(html))
    if not os.path.exists(json_path):
        soup = BeautifulSoup(html, 'lxml')
        img_html = soup.find_all('center')[0]
        img_urls = re.findall('.*?alt="(.*?)".*?src="(.*?)"', str(img_html))
        with codecs.open(json_path, 'w', encoding='utf-8') as fp:
            json.dump({'url': '', 'images': img_urls}, fp=fp, indent=4,
                      ensure_ascii=False)
    with codecs.open(json_path, encoding='utf-8') as fp:
        return json.load(fp=fp)['images']


def download_set(urls, set_dir):
    """Download every image referenced by the album pages in *urls* into *set_dir*."""
    for page_url in urls:
        page_html = get_page(page_url)
        for _alt, img_url in parse_model_page(page_html):
            target = os.path.join(set_dir, os.path.basename(img_url))
            download(img_url, target)


def start(page_no=1, subject=None):
    """Crawl one listing page and download every album on it.

    :param page_no: listing page number (1-based; >1 appends '<n>.html').
    :param subject: optional tag slug, producing '/t/<subject>/' URLs.
    """
    url = BASE_URL
    if subject:
        url += '/t/%s/' % subject
    if page_no > 1:
        url += '%d.html' % page_no

    html_home_page = get_page(url)
    html_model_pages = parse_set_page(html_home_page)
    if not html_model_pages:
        logging.warning(u'No albums found on %s', url)
        return

    # Album titles come HTML-escaped (&amp; etc.); unescape them.
    # Python 3 has html.unescape(); fall back to Py2's HTMLParser.
    try:
        from html import unescape
    except ImportError:
        from HTMLParser import HTMLParser
        unescape = HTMLParser().unescape
    html_model_pages = [[page_url, unescape(name)]
                        for page_url, name in html_model_pages]

    # The original never closed the pool; close/join releases the workers.
    pool = Pool(3)
    try:
        pool.map(_download_set, html_model_pages)
    finally:
        pool.close()
        pool.join()


def _download_set(page):
    """Pool worker: download one album.  *page* is ``[url, title]``.

    Fixes from the original: the split pattern contained ``\\|`` (an
    escaped pipe) where a backslash was intended, so backslashes were
    never stripped from the directory name; and ``pic_num.group(1)``
    raised AttributeError when the title had no trailing count.
    """
    page_url, title = page[0], page[1]
    # Strip characters that are illegal in Windows file names (plus spaces).
    set_name = re.sub(r'[/\\|:<>?"* ]', '', title)
    logging.info(u'合集名称： %s, URL: %s', set_name, page_url)
    set_dir = os.path.join(DIR_IMAGES, set_name)
    model_urls = get_model_urls(page_url)
    if not os.path.exists(set_dir):
        os.mkdir(set_dir)

    download_set(model_urls, set_dir)

    # Titles usually end with '[<count>]'; warn when we got well under
    # 70% of the advertised image count.
    pic_num = re.search(r'.*?(\d+?)]', set_name)
    if pic_num and len(os.listdir(set_dir)) < int(pic_num.group(1)) * 7 // 10:
        logging.warning(u'Not Finished: %s', page_url)


def main():
    """Script entry point: crawl page 1 of the 'evelyn-aili' tag."""
    start(page_no=1, subject='evelyn-aili')


if __name__ == '__main__':
    main()
