# encoding:utf-8

"""
@version: python2.7
@author : 'l00383533'
@file   : pictureman.py
@time   : 2017/3/24 17:05
@todo: 抓取新浪图片
"""
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')


import requests
import json
import time

class PictureMan:
    """Crawl picture posts from the Sina Weibo mobile container API and
    download every image into a local directory.

    Typical use::

        PictureMan(api_url, img_dir).parser()
    """

    def __init__(self, url, path):
        """
        :param url: container API url; ``&page=N`` is appended per request
        :param path: local directory where downloaded images are stored
        """
        self.apiUrl = url
        self.imgPath = path

        # Session cookie captured from a logged-in browser.
        # NOTE(review): currently unused — requests below are sent without it;
        # confirm whether the API actually requires authentication.
        self.cookie = '_s_tentry=tech.lmtw.com; Apache=6294640651155.032.1489660914736; SINAGLOBAL=6294640651155.032.1489660914736; ULV=1489660914746:1:1:1:6294640651155.032.1489660914736:; YF-Page-G0=35f114bf8cf2597e9ccbae650418772f; login_sid_t=5c198cd0ab37594c3eee4fb17fdc2262; YF-Ugrow-G0=b02489d329584fca03ad6347fc915997; YF-V5-G0=c37fc61749949aeb7f71c3016675ad75; TC-Ugrow-G0=e66b2e50a7e7f417f6cc12eec600f517; TC-V5-G0=ffc89a27ffa5c92ffdaf08972449df02; TC-Page-G0=2b304d86df6cbca200a4b69b18c732c4; SCF=Ar6_aD_5DV43aJ4JJjbXqAXZHBKVKy9535MGKrrB68XqsJ0rt_s_FjZX3hiN4YMRkZed5Sto2ydqlRxPTWSh3Dg.; SUB=_2A2510JCyDeRxGeVO71QY9S_NwjuIHXVWp4V6rDV8PUNbmtBeLVOgkW8h-ccRlLLEDm3UD8fvm7Iz7vEKIg..; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W5_vlE1nucMxu-AVAKY2exL5JpX5K2hUgL.Foe7Shq4SK2p1KM2dJLoI0YLxKqL1KnLB-qLxKML1KBL1-eLxKML1KBL1-eLxKML1-2L1hBLxKnL1hBL1K2LxKqLB--L12zLxKMLBozL1h.t; SUHB=0KAxANlfdSk0ZT; ALF=1521882208; SSOLoginState=1490346210; wvr=6; UOR=tech.lmtw.com,widget.weibo.com,cuiqingcai.com'
        # Corporate proxy; the xxx:xxx placeholders are user:password.
        self.proxy_info = {
            'http': 'http://xxx:xxx@proxycn2.huawei.com:8080',
            'https': 'https://xxx:xxx@proxycn2.huawei.com:8080'
        }

    def getImgUrlsInPage(self, page):
        """
        Fetch one API page and extract picture info from every card.

        Each returned card dict has the shape::

            {
              'scheme': 'http://...',      # link to the weibo post
              'text'  : '...',             # post text
              'pics'  : ['http://...', ...]  # large-image urls
            }

        :param page: 1-based page number
        :return: ``(cards, has_next_page)`` — cards is a (possibly empty)
                 list of dicts; has_next_page tells the caller to keep going
        """
        # Whether another page remains after this one.
        has_next_page = False
        # Parsed cards collected from this page.
        cards = []
        try:
            r = requests.get("%s&page=%d" % (self.apiUrl, page), proxies=self.proxy_info)
            if r.status_code != 200:
                print("error: %s" % r)
                # BUGFIX: the original did a bare `return` (None) here, which
                # crashed the caller's 2-tuple unpacking. Always return the pair.
                return cards, has_next_page
            # Parse the response body as a JSON object.
            _json = json.loads(r.content, encoding="utf-8")
            # Total number of cards across all pages.
            total_size = _json['cardlistInfo']['total']
            # Number of cards on this page.
            page_size = len(_json['cards'])
            # Cards still remaining (assumes 10 cards per page).
            left_num = int(total_size) - 10 * int(page - 1) - page_size
            if left_num > 0:
                has_next_page = True
            # Pull the key fields out of each card.
            for _card in _json['cards']:
                # Keep only picture weibos: card_type 9 with a 'pics' field.
                if _card['card_type'] != 9 or 'pics' not in _card['mblog']:
                    print('invalid card and skipped.')
                    continue
                card = {
                    'scheme': _card['scheme'],
                    'text': _card['mblog']['text'],
                    'pics': [pic['large']['url'] for pic in _card['mblog']['pics']],
                }
                cards.append(card)
        except Exception as e:
            # Best-effort crawl: report and return what we have so far.
            print(e)

        return cards, has_next_page

    def downloadImg(self, url, text):
        """
        Download a single image into ``self.imgPath``.

        :param url: image url
        :param text: post text (used only in the log line)
        """
        print("开始下载标题为[ %s ]下的图片: %s" % (text.encode('utf-8'), url))
        try:
            pic = requests.get(url, proxies=self.proxy_info, timeout=10)
            # ROBUSTNESS: don't save an error page as a .jpg.
            if pic.status_code != 200:
                print('下载出错: %s' % pic)
                return
            if not os.path.exists(self.imgPath):
                # makedirs also creates missing parent directories.
                os.makedirs(self.imgPath)
            # Timestamp-based filename; collisions are unlikely at this rate.
            path = str(self.imgPath) + '/' + str(time.time()) + '.jpg'
            # with-statement guarantees the handle is closed even on error
            # (the original leaked it if write() raised).
            with open(path, 'wb') as fp:
                fp.write(pic.content)
            print("下载完成，存储在 %s" % path)
        except Exception as e:
            print('下载出错: %s' % e)

    def parser(self):
        """
        Walk every page of the API, collect all picture cards, then
        download each image.
        """
        all_cards = []
        page = 1
        _next = True
        while _next:  # loop until the API reports no further page
            cards, _next = self.getImgUrlsInPage(page)
            all_cards.extend(cards)
            page += 1
            time.sleep(2)  # throttle: wait 2 seconds between page fetches
        # All pages collected — start downloading.
        for _card in all_cards:
            for pic in _card['pics']:
                self.downloadImg(pic, _card['text'])
        print("总共发现 【%d】个图片微博信息." % len(all_cards))

if __name__ == "__main__":
    # 热门家装api
    url = 'http://m.weibo.cn/container/getIndex?jumpfrom=weibocom&containerid=1076035948336939'
    picture = PictureMan(url, u'F:\Python\imgs')
    picture.parser()
