# -*- coding:utf-8 -*-

__author__ = 'Administrator'

import requests
from pyquery import PyQuery
import pymongo
import hashlib

# Connection to the album store (database "img_mmonly"); crawled albums and
# tags are written here (db.album, db.tag).
db = pymongo.MongoClient('mongodb://127.0.0.1/img_mmonly').get_default_database()
# Separate "common" database holding the shared id-generator counter
# collection used by gen_album_id().
db_common = pymongo.MongoClient('mongodb://127.0.0.1/common').get_default_database()

# count is declared but never updated anywhere in this file; img_count
# accumulates the total number of photo URLs collected across all albums.
count = 0
img_count = 0

def md5(strr):
    """Return the hex MD5 digest of *strr*.

    A ``str`` argument is UTF-8 encoded first; ``bytes`` are hashed as-is.
    """
    data = strr.encode('utf8') if isinstance(strr, str) else strr
    return hashlib.md5(data).hexdigest()

def get_html(url):
    """Fetch *url* and return the response body as text, or '' on any failure.

    Sends browser-like headers (the Referer mimics www.5442.com) so the
    target site serves normal pages.  Failures — network errors, timeouts,
    and now also HTTP error statuses — are logged and swallowed; callers
    treat an empty string as "page unavailable".
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;",
        "Accept-Encoding": "gzip",
        "Accept-Language": "zh-CN,zh;q=0.8",
        "Referer": "http://www.5442.com/",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
    }
    try:
        r = requests.get(url, headers=headers, timeout=30)
        # Treat HTTP errors (404/500/...) like network failures so callers
        # never parse an error page as if it were real content.
        r.raise_for_status()
        return r.text
    except requests.RequestException as ex:
        print(ex)
        return ''

def crawl_thumblist_page(tag_name, thumb_list_url):
    """Scrape one thumbnail-list page and return {album_link_url: album_info}.

    Each album_info dict carries the tag name, the album title (the img
    ``alt`` text), the album page URL, and the cover image URL.  Entries
    without a link URL are skipped.
    """
    item_list = {}

    txt = get_html(thumb_list_url)

    jq = PyQuery(txt)
    items = jq('div.ABox')

    for i in items:
        ele_div_img = items(i)
        ele_a = ele_div_img('a')
        ele_img = ele_div_img('img')
        link_url = ele_a.attr('href')

        # An ABox without an href cannot be crawled; skipping it explicitly
        # (instead of the original bare `except: pass`) also avoids several
        # broken entries collapsing onto the single dict key None.
        if not link_url:
            continue

        item_list[link_url] = {'tag_name': tag_name,
                               'name': ele_img.attr('alt'),
                               'link_url': link_url,
                               'cover_url': ele_img.attr('src')}

    return item_list

def get_thumblist_pages(cat_start_url, max_page=88):
    """Return the paginated thumbnail-list URLs, pages 1..*max_page*.

    NOTE(review): *cat_start_url* is currently ignored — the listing address
    is hard-coded to http://www.mmonly.cc/mmtp/qcmn/list_16_<page>.html and
    the page count defaults to 88; confirm both against the live site before
    reusing this for another category.
    """
    return ['http://www.mmonly.cc/mmtp/qcmn/list_16_%s.html' % page
            for page in range(1, max_page + 1)]

def craw_one_cat_albums(tag_name, cat_start_url):
    """Crawl every album under *tag_name* and store new ones in db.album.

    Albums already stored (matched by link_url, possibly under another tag)
    and albums with fewer than 3 photos are skipped.  Adds each stored
    album's photo count to the module-level img_count total.
    """
    global img_count
    album_list = {}
    urls = get_thumblist_pages(cat_start_url)
    for url in urls:
        print(url)

    for url in urls:
        album_list.update(crawl_thumblist_page(tag_name, url))

    print(u'%s下累计相册%s' % (tag_name, len(album_list)))

    # The dead `if True:` scaffolding from the original is removed; each
    # album is still processed best-effort so one failure cannot abort the
    # whole category.
    for i, (link_url, album) in enumerate(album_list.items()):
        try:
            # Duplicate check: has any tag already stored this link_url?
            exist_list = []
            cond = {'link_url': album['link_url']}
            if u'album' in db.collection_names():
                exist_list = list(db.album.find(cond))
                print('==========cnt:', len(exist_list))

            if len(exist_list) != 0:
                print(u'标签[%s]下相册[%s]已存在于标签[%s]下，不再重复抓取' % (album['tag_name'],album['name'], exist_list[0]['tag_name']))
                continue

            photo_list = craw_photos_by_album_linkurl(album['link_url'])

            img_count += len(photo_list)

            print(u'标签:%s,相册:%s %s/%s,相册图片张数:%s, %s' % \
                  (album['tag_name'],album['name'], i+1, len(album_list), len(photo_list), album['link_url']))
            if len(photo_list) < 3:
                print(u'图片少于3张，不抓取')
                continue

            # Store photo URLs ordered by the synthetic page/position key.
            photo_list = sorted(photo_list, key=lambda p: p['no'])
            album['photos'] = [photo['photo_url'] for photo in photo_list]

            album['_id'] = int(gen_album_id())
            db.album.save(album)

            print('===================================')
        except Exception as ex:
            # Best-effort: log and move on to the next album.
            print(ex)

def craw_photos_by_album_linkurl(start_url):
    """Collect every photo URL of the album whose first page is *start_url*.

    Returns a list of {'photo_url': ..., 'no': ...} dicts where 'no' encodes
    page order (page_index*10000 + position) so callers can sort photos.
    """
    # Keep album pages in discovery order.  The original used a set here,
    # whose arbitrary iteration order made page_index — and therefore the
    # photo sort keys — non-deterministic across runs.
    url_list = [start_url]
    seen = {start_url}

    html = get_html(start_url)

    jq = PyQuery(html)
    item = jq('div.pages > ul > li > a')

    # Pagination hrefs are relative to the album page's directory.
    base = '/'.join(start_url.split('/')[:-1])
    for i in item:
        href = item(i).attr('href')
        # '#' entries are prev/next placeholders, not real pages.
        if not href or '#' in href:
            continue
        page_url = base + '/' + href
        if page_url not in seen:
            seen.add(page_url)
            url_list.append(page_url)

    imgs = []
    for page_index, url in enumerate(url_list):
        imgs.extend(craw_photos(page_index + 1, url))

    return [{'photo_url': img[0], 'no': img[1]} for img in imgs]

def craw_photos(page_index, url):
    """Scrape one album page and return [[img_src, sort_no], ...].

    sort_no is page_index*10000 + position-within-page, giving a single key
    that sorts photos globally by page then by position.
    """
    urls = []
    html = get_html(url)

    jq = PyQuery(html)
    item = jq('#big-pic > p > a > img')

    index = 1
    for i in item:
        img_src = item(i).attr('src')
        # Guard against <img> tags without a src attribute — the original
        # crashed calling .lower() on None.
        if not img_src:
            continue
        src_lower = img_src.lower()
        if any(ext in src_lower for ext in ('.jpg', '.jpeg', '.gif', '.png')):
            urls.append([img_src, page_index * 10000 + index])
            index += 1

    return urls

def test():
    """Ad-hoc smoke test: print the generated listing URLs and their count."""
    pages = get_thumblist_pages('http://www.5442.com/tag/zipai.html')
    for page_url in pages:
        print(page_url)
    print(len(pages))

def gen_album_id():
    """Fetch the next album id from the shared counter in db_common.id_gen.

    Atomically increments the 'id' field of the {'table': 'album'} counter
    document and returns an int id for use as album['_id'].

    NOTE(review): pymongo's find_and_modify returns the document *before*
    the $inc by default, and returns None when no counter document matches —
    confirm the counter row is seeded before first use, or the int(cur["id"])
    below raises TypeError.
    """
    cond = {'table':'album'}
    cur =  db_common.id_gen.find_and_modify(update={'$inc':{'id':1}},query=cond, fields={'_id':0, 'id': 1})
    return int(cur["id"])

def is_crawl_tag(tag_name):
    """Placeholder filter: every tag is currently eligible for crawling."""
    return True

if __name__ == '__main__':
    # test()
    # exit(1)
    # print gen_album_id()
    # test()
    # exit(0)
    #
    # html = get_html('http://www.5442.com/tag/zipai.html')
    # print type(html)

    # Category map: tag display name -> start URL.  The URL is unused here —
    # get_thumblist_pages() hard-codes the listing address (see its note).
    cats = {u'清纯美女':''}

    # First pass: register every tag in db.tag (upsert keyed by tag name).
    for tag in cats:
        if is_crawl_tag(tag):
            res = {'_id':tag, 'url':cats[tag]}
            print(res)
            db.tag.save(res)
            print(res)

    # Second pass: crawl all albums for each tag.
    for cat_name in cats:
        #print key, cats[key]

        #cat_name = u'Beautyleg'

        #print '=========='
        #print cat_name,  type(cat_name),  cats[cat_name]

        if is_crawl_tag(cat_name):
            craw_one_cat_albums(cat_name, cats[cat_name])

    # Grand total of photo URLs collected this run (see craw_one_cat_albums).
    print('total_img' , img_count)

