#!/usr/bin/env python
# -*- coding: utf-8 -*-

import bs4
import json
import logging
import os
import sys
import time

import requests

# import codecs
# sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

# Python 2 hack: re-expose sys.setdefaultencoding (hidden at interpreter
# startup) so implicit str<->unicode conversions use UTF-8 instead of ASCII.
# NOTE(review): Python 2 only -- reload()/setdefaultencoding do not exist in
# Python 3; porting this script would remove both lines.
reload(sys)
sys.setdefaultencoding('utf-8')

# Bug fix: '%%(levelname)s' escaped the percent sign, so the format rendered a
# literal '%(levelname)s' instead of the level name.  (This default is shadowed
# by the explicit format= passed to basicConfig below anyway.)
logging.BASIC_FORMAT = '%(levelname)s - %(filename)s[%(lineno)d]- %(message)s'
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# Surface urllib3's DEBUG logs through the root handler configured above.
REQUESTS_LOGGER = logging.getLogger("requests.packages.urllib3")
REQUESTS_LOGGER.setLevel(logging.DEBUG)
REQUESTS_LOGGER.propagate = True
LOG = logging.getLogger(__name__)

# Browser-like headers so jandan.net does not reject the scraper outright.
HEADERS = {
    'referer': 'http://jandan.net/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0'
}
URL_TMPL = 'http://jandan.net/ooxx/page-{}#comments'
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SAVE_DIR = os.path.join(CURRENT_DIR, 'jiandan')    # download target directory
METADATA = os.path.join(SAVE_DIR, 'metadata.json')  # [url, filename, status] records
IMG_EXTS = '.jpg', '.gif', '.png',                  # extensions worth saving
SESSION = requests.session()                        # one connection pool for all requests


def mkdir(path):
    """Create *path* (including parent directories) if it does not exist.

    EAFP version: attempting ``os.makedirs`` and tolerating "already exists"
    avoids the check-then-act race of the previous isdir/makedirs code.

    :param path: directory path to create.
    :raises OSError: if creation fails for any reason other than the
        directory already being present.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Swallow only the "directory already exists" case; re-raise real
        # failures (permissions, a regular file in the way, ...).
        if not os.path.isdir(path):
            raise


def read_metadata(clear=False):
    """Load the download-history list from METADATA.

    :param clear: when True, discard any existing history and start empty.
    :return: list of [img_url, filename, status] records (possibly empty).

    The metadata file is (re)initialised to '[]' whenever it is missing,
    unreadable, or does not contain a JSON list.  The previous version used a
    bare ``except:`` (which also swallowed KeyboardInterrupt/SystemExit) and
    never closed its file handles; both are fixed here.
    """
    if not clear and os.path.isfile(METADATA):
        try:
            with open(METADATA) as fp:
                metadata = json.load(fp)
            if isinstance(metadata, list):
                return metadata
        except (IOError, OSError, ValueError):
            # Corrupt or unreadable file: fall through and reset it.
            pass
    with open(METADATA, 'w') as fp:
        fp.write('[]')
    return []


def write_metadata(data):
    """Persist *data* (list of [img_url, filename, status]) to METADATA.

    Fixed: uses a context manager so the file is flushed and closed even if
    ``json.dump`` raises; the original relied on GC to close the handle.

    :param data: JSON-serialisable list of download records.
    """
    with open(METADATA, 'w') as fp:
        json.dump(data, fp, indent=2)


def ignore_it(img_url):
    """Return True when *img_url* is an ad banner or a known placeholder."""
    # Exact URLs of the lazy-load placeholder image.
    blacklist = (
        'http://img.jandan.net/img/blank.gif',
        '//img.jandan.net/img/blank.gif',
    )
    # Path fragments that identify ad assets.
    ad_markers = ('cdn.jandan.net/static/gg', 's.jandan.com/static/gg')

    if img_url in blacklist:
        return True
    return any(marker in img_url for marker in ad_markers)


def main():
    mkdir(SAVE_DIR)
    metadata = read_metadata()
    img_urls = [i[0] for i in metadata if i[2] == 'ok']

    start_page = 290
    while start_page < 3000:
        url = URL_TMPL.format(start_page)
        req = SESSION.get(url, headers=HEADERS)
        html = bs4.BeautifulSoup(req.text, 'lxml')

        print u'开始爬第 %d 页数据~ %s' % (start_page, url)
        # for index, ele in enumerate(html.select('img')):
        for ele in html.select('img'):
            img_url = ele.attrs['src']
            # img_filename = os.path.basename(img_url)
            img_ext = os.path.splitext(img_url)[1]

            if ignore_it(img_url):
                print img_url, '广告或忽略文件，跳过...'
                continue

            if img_ext not in IMG_EXTS:
                print img_url, '后缀不对(%s)，跳过...' % img_ext
                continue

            # 处理 URL 协议
            if img_url.startswith('http'):
                pass
            elif img_url.startswith('//'):
                img_url = 'http:' + img_url
            else:
                img_url = 'http://' + img_url

            # filename=time.strftime('%Y%m%d%H%I%S',time.localtime(time.time()))+str(random.randint(12, 1000))
            # filename = '%03d_%03d%s' % (start_page, index, img_ext)
            filename = img_url.replace('/', '-').encode(sys.getfilesystemencoding())
            if img_ext == '.gif':
                filepath = os.path.join(SAVE_DIR, 'gif', filename)
            else:
                filepath = os.path.join(SAVE_DIR, filename)

            if img_url in img_urls:
                if os.path.isfile(filepath):
                    print img_url, '已下载（文件存在），跳过...'
                    continue
                else:
                    print img_url, '已下载（文件不存在），重新下载...'

            if os.path.isfile(filepath):
                print img_url, '=>', '文件已存在(%s)，跳过...' % filename
                continue

            status = 'unknown'
            try:
                try:
                    img_obj = SESSION.get(img_url, stream=True, timeout=5).content
                    if img_obj is None:
                        print u'%s 下载不成功' % img_url
                        status = 'null'
                        continue
                    if sys.getsizeof(img_obj) < 10240:
                        print u'%s 文件大小不符合，舍弃~' % img_url
                        status = 'small'
                        continue
                    open(filepath, 'wb').write(img_obj)
                    print u'%s 下载成功,文件名为: %s' % (img_url, filename)
                    status = 'ok'
                except Exception as error:
                    print u'%s 下载不成功，异常信息为 %s' % (img_url, error.message)
                    status = 'failed'
            except Exception as error:
                print u'出现异常，%s' % error.message
                status = 'expection'
            finally:
                metadata = read_metadata()
                metadata.append([img_url, filename, status])
                write_metadata(metadata)
                img_urls.append(img_url)

            time.sleep(0.5)

        if not html.select('.next-comment-page'):
            print html.contents
            break

        time.sleep(1)
        start_page += 1


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
