# -*- coding: cp936 -*-
import re,urllib2,urllib
import os,cookielib,sys
from urllib import unquote #post数据编码

# Fetch a page's raw source over HTTP (HTML / image / CSS)
def gethtml(url,ht=0):
    """Fetch *url* and return the raw response body.

    ht selects the Accept header: 0 = HTML page, 1 = image, 2 = CSS.
    """
    accept_str={0:'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                1:'image/png,image/*;q=0.8,*/*;q=0.5',
                2:'text/css,*/*;q=0.1'}
    req=urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 5.1; rv:17.0) Gecko/20100101 Firefox/17.0')
    req.add_header('Accept',accept_str[ht])
    req.add_header('Accept-Language','zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3')
    # BUG FIX: the original advertised 'deflate' but never decompressed the
    # body, so a compressed reply would be written out verbatim (corrupt
    # images/text). Ask for the identity encoding instead.
    req.add_header('Accept-Encoding','identity')
    resp=urllib2.urlopen(req)
    try:
        return resp.read()
    finally:
        resp.close()  # BUG FIX: original leaked the response object

# Photo-gallery downloader
class loaddownXZ:
    """Downloads every image of a zngirls.com photo gallery into a local folder."""

    def __init__(self):
        self.baseURL='http://www.zngirls.com'   # site root
        self.PATH=os.getcwd()+'\\spider\\'      # local output root

    def saveImg(self,url,fd):
        """Download image *url* and store it inside folder *fd*, keeping its file name."""
        fname=url.split('/')[-1]    # last path component = file name
        data=gethtml(url,1)         # ht=1 -> image Accept header
        # BUG FIX: with-statement closes the handle even if write() raises
        # (the original left the file open on error).
        with open(fd + fname,'wb') as f:
            f.write(data)
        print(url +' 下载成功')

    def getImgList(self,url):
        """Return every .jpg URL found inside the <ul id="hgallery"> block of *url*."""
        code=gethtml(url)
        gallery=''.join(re.findall(re.compile('<ul id="hgallery">(.*?)</ul>',re.S),code))
        # BUG FIX: the original pattern "[h-t]+://[^\s]+.jpg" accepted any run
        # of the letters h..t before "://" and treated '.' as a wildcard;
        # anchor to http/https, escape the dot, and match non-greedily up to
        # the first literal ".jpg".
        return re.findall(re.compile(r'https?://\S+?\.jpg',re.S),gallery)

    def ldmain(self,d_url,s_name):
        """Download a whole gallery.

        d_url  -- gallery path of the form /g/XXXXX/
        s_name -- target folder name under self.PATH

        Pages are fetched as 1.html, 2.html, ... until a page yields no images.
        """
        folder=self.PATH + s_name + '\\'
        page=1
        while True:
            print('写真下载页面: ' + str(page))
            m_url=self.baseURL + d_url + str(page) + '.html'
            print('loaddownxz ldmain debug:'+ m_url)
            imgs=self.getImgList(m_url)
            print('ldmain image list')
            print(imgs)
            if not imgs:   # empty page -> we ran past the last gallery page
                break
            for img_url in imgs:
                self.saveImg(img_url,folder)
            page+=1
            
# Main crawler
class spider:
    """Crawler for zngirls.com: saves each model's profile text and photo galleries."""

    def __init__(self):
        self.URL='http://www.zngirls.com/rank/aiwei/'
        self.baseURL='http://www.zngirls.com'
        self.PATH=os.getcwd()+'\\spider\\'  # local root folder for all output
        self.loadXZ=loaddownXZ()            # gallery downloader helper

    def mkdir(self,fp):
        """Create folder *fp* when missing; return True once it exists.

        BUG FIX: the original had an unreachable exit() after `return True`
        and a dead `else` branch that could never execute.
        """
        if not os.path.exists(fp):
            os.makedirs(fp)
        return True

    def savetext(self,s_name,data):
        """Append *data* to <PATH>/<s_name>/list.txt."""
        fp=self.PATH + s_name + '\\list.txt'
        # BUG FIX: with-statement closes the file even if write() raises.
        with open(fp,'a') as f:
            f.write(data)

    def getpagehtml(self,ipage):
        """POST the search form and return the HTML of result page *ipage*."""
        uc='%E8%89%BE%E8%96%87%E5%A5%B3%E4%BC%98'  # URL-encoded search keyword
        # The server expects the value decoded twice -- this mimics how the
        # browser posts it; a single decode is rejected by the site.
        prof=unquote(unquote(uc))
        url='http://www.zngirls.com/ajax/girl_query_total.ashx'  # form target
        headers={'host':'www.zngirls.com',
                 'User-Agent':'Mozilla/5.0 (Windows NT 5.1; rv:17.0) Gecko/20100101 Firefox/17.0',
                 'Accept':'*/*',
                 'Referer':'http://www.zngirls.com/find/'}
        post_data={'professional':prof,
                   'age':'20-50',
                   'cup':'B-K',
                   'curpage':ipage,
                   'pagesize':'20' }
        req=urllib2.Request(url,urllib.urlencode(post_data),headers)
        rs=urllib2.urlopen(req)
        try:
            return rs.read()
        finally:
            rs.close()  # BUG FIX: the original never closed the response

    def getURLS(self,ipage):
        """Return the unique /girl/XXXXX/ paths found on result page *ipage*."""
        html=self.getpagehtml(ipage)
        return list(set(re.findall(re.compile(r"/girl/[\d]{5}/",re.S),html)))

    def addrXZ(self,url):
        """Return the unique /g/XXXXX/ gallery paths linked from the page at *url*."""
        html=gethtml(url)
        block=''.join(re.findall(
            re.compile("class='photo_ul'>(.*?)class='igalleryli_title'>",re.S),html))
        return list(set(re.findall(re.compile(r'/g/[\d]{5}/',re.S),block)))

    #girl page helpers =======================================
    def imgLink(self,html):
        """Extract the profile-photo URL from a girl page."""
        pat=re.compile("class='imglink' href='(.*?)'><img",re.I)
        return ''.join(re.findall(pat,html))

    def girlInfo(self,html):
        """Return the plain text of the profile <table> (one table row per line)."""
        tmp=''.join(re.findall(re.compile('<table>(.*?)</table><div',re.I),html))
        tmp=re.sub('</tr>','\n',tmp)      # row end -> newline
        return re.sub('<[^>]+>','',tmp)   # strip the remaining HTML tags

    def girlTitle(self,html):
        """Return the girl's name as a unicode string (Python 2 `unicode`)."""
        gn=re.findall(re.compile('style="font-size: 15px">(.*?)</h1><button',re.S),html)
        return unicode(''.join(gn),'utf-8')

    def InfoContent(self,html):
        """Return the detailed-profile text, with &nbsp; entities and outer whitespace removed."""
        t=re.compile('"utf-8"></script></div>(.*?)</div></div><div class="clear">',re.S)
        info=''.join(re.findall(t,html))
        return info.replace('&nbsp;','').strip()
    #==================================================

    def xz_PageType(self,url):
        """Return True when the page at baseURL+url contains a pager div."""
        html = gethtml(self.baseURL+url)
        # BUG FIX: the original pattern '(,*?)<div id="pages">' contained a
        # ',' typo for '.' and only worked by accident; a case-insensitive
        # presence test is all that is needed.
        return re.search('<div id="pages">',html,re.I) is not None

    def girlinfo(self,ipage):
        """Process every girl on result page *ipage*: save profile text and galleries."""
        for item in self.getURLS(ipage):
            addr='http://www.zngirls.com'+item
            html = gethtml(addr)

            g_name = self.girlTitle(html)   # name, used as the folder name
            infoA = self.InfoContent(html)  # detailed profile text

            fp=self.PATH + g_name +'\\'
            self.mkdir(fp)
            # list.txt marks an already-processed folder: write the profile once.
            if not os.path.exists(fp + 'list.txt'):
                self.savetext(g_name,infoA)
            else:
                print('#folder exist ==========@@@')

            print('save girl information===== ok')
            xzurl=self.addrXZ(addr)  # gallery paths found on the profile page
            print(xzurl)
            if len(xzurl)>=1:
                print('发现写真集:')
                for xurl in xzurl:
                    self.loadXZ.ldmain(xurl,g_name)

    def main(self,start,end):
        """Crawl result pages *start* .. *end* inclusive."""
        for i in range(start ,end+1):
            print('正在爬第:' + str(i) +' page')
            self.girlinfo(str(i))
               
# Script entry point: crawl result pages 1..60.
# BUG FIX: guard execution so merely importing this module does not start the
# crawl, and stop rebinding the class name `spider` with an instance.
if __name__ == '__main__':
    crawler = spider()
    crawler.main(1, 60)

