﻿
# Filename: Dimage.py
# -*- coding: utf-8 -*-
'''
Used to download images from Baidu Tieba.
'''
import sys
import re
import urllib
import os
import shutil
import time
import threading

# Crawl images from a Baidu Tieba thread
def tieba():
    '''Interactively download all .jpg images from a Baidu Tieba thread.

    Prompts for a thread URL, walks every page of the thread, and saves
    the images into an ``Image`` directory under the current working
    directory (wiping and recreating it if it already exists).
    '''
    # Dimage: image downloader bound to one thread URL.
    class Dimage:

        # Remember the thread URL and (re)create the Image directory.
        def __init__(self, url):
            print '==============INIT=============='
            self.originalurl = url          # original thread URL
            self.imagepath = os.getcwd() + os.sep + 'Image'
            self.imageindex = 0
            if 'Image' not in os.listdir(os.getcwd()):
                os.mkdir(self.imagepath)
            else:
                # Directory already exists: wipe it and recreate it empty.
                shutil.rmtree(self.imagepath)
                time.sleep(1)
                os.mkdir(self.imagepath)
        # Fetch a URL and return its raw, undecoded HTML.
        def __gethtml(self, url):
            page = urllib.urlopen(url)                              # open the remote URL
            html = page.read()                                      # read the whole response body
            page.close()
            return html
        # Extract the charset declared in the HTML; default to utf8.
        def __getcharset(self,html):                               
            charset_re = '<meta charset="(\S*?)">'
            charset_cre = re.compile(charset_re)
            charsetmatch = charset_cre.search(html)
            if charsetmatch:
                charset = charsetmatch.groups(0)[0]
            else:
                charset = 'utf8'                           # default when no <meta charset> tag is found
            return charset
            
        # Collect every image source URL (http://imgsrc...jpg) in the page.
        def __getsoureurl(self, html):    
            reg_re = 'src="(http://imgsrc.*?\.jpg)"'                # non-greedy: shortest match per src attribute
            picture = re.compile(reg_re)                            # compile once so the pattern is reused efficiently
            htmlurl = picture.findall(html)                         # all matching image URLs, returned as a list
            return htmlurl
        # Find the thread's total page count from the "last page" link.
        def __getendpage(self, html):                              
            charset = self.__getcharset(html)
            end_re = u'href="/p/[0-9]*?\?pn=([0-9]*?)">尾页'          # "尾页" is the "last page" link text
            endpage = re.compile(end_re)
            endpagenum = endpage.search(html.decode(charset))           # decode to unicode using the page's declared charset
            if endpagenum:
                return int(endpagenum.groups(0)[0])
            else:
                return 1                                            # no "last page" link found: treat as a single page
      
        # Download every image in htmlurl into the Image directory.
        def __getimage(self, htmlurl):
            if 0 == len(htmlurl):
                print 'No image url!!!'
                return 0
            for imageurl in htmlurl:                                # iterate over the image source URLs
                filename = "%d.jpg" % self.imageindex               # name files by running index
                print "Download No.%d image " % (self.imageindex) + "Save to " + filename     # announce the download
                print 'finished ' ,
                urllib.urlretrieve(imageurl,filename,self.__report_hook)               # fetch the image bytes
                shutil.move(filename, self.imagepath)
                time.sleep(1)
                self.imageindex += 1 
            
            
        # urlretrieve callback: print the download progress in place.
        def __report_hook(self, count, block_size,file_size):
            '''count: blocks transferred so far
            block_size: size of one block in bytes
            file_size: total file size in bytes
            NOTE(review): urlretrieve passes -1 here when the server does
            not report a total size, which would make the percentage
            negative -- confirm whether that can happen for these URLs.'''
            per = 100.0 * count * block_size / file_size        # percentage transferred
            if per >= 100:
                per = 100
            print '%6.2f%%' % per ,                         # fixed width: '100.00%' is at most 7 characters
            sys.stdout.write('\b\b\b\b\b\b\b')              # backspaces overwrite the figure on the same line
            if 100 == per:          # finished: move to a new line
                print ('\n')
        # Walk every page of the thread and download its images.
        def download(self):
            html = self.__gethtml(self.originalurl)         # HTML of the thread's first page
            endpagenum = self.__getendpage(html)            # total number of pages
            if 0 > endpagenum:                              # defensive; __getendpage never returns less than 1
                print ('No finded image page....')
                time.sleep(3)
                sys.exit()
            else:
                print '==============TOTAL PAGE %d==============' % endpagenum
                for pagenumber in range(1, endpagenum + 1):
                    pageurl = self.originalurl + '?pn=' + str(pagenumber)           # URL of this result page
                    print '----------- Download No.%d page!------------' % pagenumber
                    print 'No.%d page url: ' %(pagenumber) + pageurl
                    sourehtml = self.__gethtml(pageurl)                             # HTML of this result page
                    htmlurl = self.__getsoureurl(sourehtml)                         # image URLs found on this page
                    self.__getimage(htmlurl)                                        # download them
                    print '-------- Download No.%d page Done! --------' % pagenumber
            print '======================== ALL DONE ========================'
            
    url = raw_input("Input an url:")    # the thread URL to crawl
    print ('Get page ') + ('"') + url + ('"') + (' data')       # echo the URL back to the user
    image = Dimage(url)
    print ('Now,downloading...')
    # th = threading.Thread(target = image.download, args = ())     # threading disabled for now
    # th.start() 
    image.download()                 # start downloading


def report_hook(count, block_size, file_size):       #用一下烂英语
    '''count: block numbers
    block_size: block size
    total_size: file total size'''
    per = 100.0 * count * block_size / file_size        #换算下载百分比
    if per >= 100:
        per = 100
    print '%6.2f%%' % per ,                         #浮点数宽度 最大为100.00% 6位     '.'前面的数表面数据的总宽度，后面的说明精度
    sys.stdout.write('\b\b\b\b\b\b\b')              #回退  在本行更新进度数据  在加一个空格‘ ’7位  其实就是覆盖
    if 100 == per:          #下载完成 换行
        print ('\n')
# Crawl pictures from Baidu Image
def image():
    '''Interactively crawl pictures from http://image.baidu.com.

    Lets the user pick a main category and a sub-category, then loops
    forever downloading that category's images into an ``Image``
    directory, prompting every 1000 images whether to continue.
    '''
    # Baidu pages are served as utf-8 by default.
    url = 'http://image.baidu.com'
    print ('\nPlease wait... ')
    # Earlier attempts at the category-link pattern, kept for reference:
    # maintag_key_string = u'data-nsclick="p=index&event_type=nav.col.click&col=(.*?)" >(\S*?)</a> </li>'
    # maintag_key_string = u'data-nsclick="p=index&event_type=nav.col.click&col=(\S*?)"\s?>(\S*?)</a>\s{1,2}</li>'
    maintag_key_string = u'a href="(\S*?)" hidefocus\s*?data-nsclick="p=index&event_type=nav.col.click&col=(\S*?)"\s?>(\S*?)</a>\s{1,2}</li>'
    mainhtml = urllib.urlopen(url).read()
    maintag_re = re.compile(maintag_key_string)
    maintag = maintag_re.findall(mainhtml)   # list of (href, col, label) main-category tuples
    # print maintag
    # print urllib.unquote(maintag[0][0]).decode("utf-8").encode('gbk')
    count = 0
    if 0 == len(maintag):
        print 'No find tags....'
        time.sleep(3)
        sys.exit()
    else:   
        while True:
            if len(maintag) == count:
                break
            # NOTE(review): .encode('gbk') assumes a GBK console (Chinese Windows) -- confirm.
            print ('%d.%s ') % (count+1, maintag[count][2].decode("utf-8").encode('gbk')) ,
            count = count+1
    print ('0.exit')
    # WARNING: Python 2 input() evaluates the typed text as an expression;
    # raw_input() plus int() would be safer.
    option_maintag = input('\nPlease input index of option:')       # choose a main category
    if 0 == option_maintag:
        print 'Good bye!!!'
        time.sleep(3)
        sys.exit()
    maintag_url = url+maintag[option_maintag-1][0]  # URL of the chosen main category
    print ('\nPlease wait... ')
    # print maintag_url
    subsidiary_key_str = u'p=channel&event_type=nav.tag.click&col=' + u'%s' % (maintag[option_maintag-1][1]) + u'&tag=(\S*?)"  title="\S*?>(\S*?)"'
    # print subsidiary_key_str
    # Sample of the markup this pattern matches:
    # <a data-nsclick="p=channel&event_type=nav.tag.click&col=%E7%BE%8E%E5%A5%B3&tag=%E5%94%AF%E7%BE%8E"  title="美女>唯美"
    subsidiaryhtml = urllib.urlopen(maintag_url).read()
    subsidiary_re = re.compile(subsidiary_key_str)
    subsidiarytag = subsidiary_re.findall(subsidiaryhtml)   # list of (tag, label) sub-category tuples
    # print subsidiarytag
    count = 0
    if 0 == len(subsidiarytag):
        print 'No find subsidiary tag...'
        time.sleep(3)
        sys.exit()
    else:
        while True:
            if len(subsidiarytag) == count:
                break
            print ('%d.%s ') % (count+1, subsidiarytag[count][1].decode("utf-8").encode('gbk')) ,
            count = count+1
    print '0.exit'
    option_subsidiarytag = input('\nPlease input index of option:')         # choose a sub-category
    if 0 == option_subsidiarytag:
        print 'Good bye!!!'
        time.sleep(3)
        sys.exit()
    # Prepare the download directory (wipe it if it already exists).
    imagepath = os.getcwd() + os.sep + 'Image'
    if 'Image' not in os.listdir(os.getcwd()):
        os.mkdir(imagepath)
    else:
        shutil.rmtree(imagepath)
        time.sleep(1)
        os.mkdir(imagepath)

    pn = 0              # result offset for the listjson feed
    rn = 30             # page size: 30 on the first request, 60 afterwards
    image_index = 1     # sequential file-name counter
    while True:

        subsidiary_url = url + u'/channel/listjson?fr=channel&' + 'tag1=' + maintag[option_maintag-1][1] + '&' \
                        + 'tag2=' + subsidiarytag[option_subsidiarytag-1][0] + '&' + 'sorttype=0&' \
                        + 'pn=' + str(pn) + '&' + 'rn=' + str(rn) + '&' + 'ie=utf8&oe=utf8&' + str( int(time.time()*1000) )
        # pn = pn + rn
        # rn = 60
        first_js_string = urllib.urlopen(subsidiary_url).read() # response text whose literal form is a Python dict
        # SECURITY: eval() on data fetched from the network executes arbitrary
        # code -- json.loads() should be used instead.
        first_js = eval(first_js_string)        # convert the string to a dict
        # print (type(first_js))
        # NOTE(review): the -1 bound skips the last entry of 'data'; possibly the
        # feed's final element is a placeholder -- confirm before changing.
        for i in range( 0, len(first_js['data'])-1 ):
            first_id = first_js['data'][i]['id']
            first_url = first_js['data'][i]['image_url']
            encode_first_url = urllib.quote_plus(first_url)
            download_url = url + '/channel/listdownload?word=download&fr=channel&ie=utf8&countop=0&' + 'url=' + encode_first_url + '&image_id=' + first_id
            # print download_url
            filename = '%d.jpg' % image_index
            print "Download No.%d image " % (image_index) + "Save to " + filename
            print 'finished    ' , 
            urllib.urlretrieve(download_url,filename,report_hook)   # fetch the image bytes
            shutil.move(filename, imagepath)
            time.sleep(0.5)
            if 0 == (image_index % 1000):
                choice = raw_input('continue?[Y/N]')
                if choice in ['n','N']:
                    # NOTE(review): missing "% image_index" -- prints the literal format string.
                    print 'Download %d images'
                    print 'Good bye'
                    time.sleep(3)
                    sys.exit()
                elif choice in ['y','Y']:
                    # Roll over into a new Image<N> directory every 1000 images.
                    imagepath = os.getcwd() + os.sep + 'Image%d' % (image_index / 1000)
                    if 'Image%d' % (image_index / 1000) not in os.listdir(os.getcwd()):
                        os.mkdir(imagepath)
                    else:
                        shutil.rmtree(imagepath)
                        time.sleep(1)
                        os.mkdir(imagepath)
                        # NOTE(review): this reset only runs when the directory already
                        # existed -- it probably belongs one indent level out.
                        image_index = 0
                else:
                    print 'Input error!!!'
                    # NOTE(review): missing "% image_index" here as well.
                    print 'Download %d images'
                    print 'Good bye'
                    time.sleep(3)
                    sys.exit()

            image_index = image_index + 1
        pn = pn + rn
        rn = 60
def search():
    print 'search'

if __name__ == '__main__':
    print '==============MENU=============='
    print
    print '1.Baidu Tieba'
    print '2.Baidu Image'
    print '3.Image Search'
    print '0.exit'
    print '================================'
    print
    option = raw_input('Please input index of option:')
    if '1' == option :
        tieba()
    elif '2' == option:
        image()
    elif '3' == option:
        search()
    elif '0' == option:
        print 'God bye !!!'
        time.sleep(3)
        sys.exit()
    else:
        print "Input Error!!!"