#!/usr/bin/env python  
# encoding: utf-8   

""" 
@version: v1.0 
@author: null 
@software: PyCharm 
@file: main.py 
@time: 2017.3.19 10:33 
"""
import urllib.request
from lxml import etree
import os
import time
import sqlite3
import threading
import requests

# Default request headers shared by the page-fetching helpers below.
header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
    "Connection": "keep-alive",
}
# Path of the sqlite database holding the download queue (table `grils`).
# Raw string: the original "F:\Python\..." only worked because \P, \d and \g
# are not recognized escapes — that emits warnings on modern Python.
database = r"F:\Python\data\grilsinfo.db"


def getHtmlPath(url):
    """Fetch *url* with the module-level headers and return the parsed lxml tree.

    The original leaked the HTTP response object; `with` closes the
    connection deterministically.
    """
    req = urllib.request.Request(url, headers=header)
    with urllib.request.urlopen(req) as resp:
        return etree.HTML(resp.read())

def mmRankSum():
    """Crawl every page of the overall popularity ranking."""
    print('开始抓取MM')
    tree = getHtmlPath('http://www.zngirls.com/rank/sum/')

    page_links = tree.xpath('//div[@class="pagesYY"]/div/a/@href')

    # The last two anchors in the pager are skipped, as in the original
    # range(len(pages) - 2) loop — presumably next/last style controls.
    for link in page_links[:-2]:
        page_url = "http://www.zngirls.com/rank/sum/" + link
        print(page_url)
        mmRankitem(page_url)

def mmRankitem(pagesurl):
    """Scrape one ranking page and visit each girl's album index.

    The per-girl folder name is the rank number concatenated with the name.
    """
    tree = getHtmlPath(pagesurl)

    profile_links = tree.xpath('//div[@class="rankli_imgdiv"]/a/@href')
    rank_nums = tree.xpath('//div[@class="rankli_div"]/span[@class="rank_num"]')
    rank_names = tree.xpath("//div[@class='rankli_div']/span[@class='rank_name']/a/font")

    for idx, link in enumerate(profile_links):
        album_index_url = "http://www.zngirls.com" + link + "album/"
        print(album_index_url)
        getAlbums(album_index_url, rank_nums[idx].text + rank_names[idx].text)

def getAlbums(url, rootfolder):
    """Visit every album listed on a girl's album-index page.

    *rootfolder* is the per-girl directory name; each album's img @title
    becomes the sub-folder name.
    """
    tree = getHtmlPath(url)

    album_links = tree.xpath('//div[@class="igalleryli_div"]/a/@href')
    album_titles = tree.xpath('//div[@class="igalleryli_div"]/a/img/@title')

    for idx, link in enumerate(album_links):
        album_url = "http://www.zngirls.com/" + link
        print(album_url)
        getPagePicturess(album_url, rootfolder, album_titles[idx])

def getPagePicturess(url, rootfolder, albumfolder):
    """Follow every pagination link of one album and queue its pictures."""
    tree = getHtmlPath(url)
    for href in tree.xpath('//div[@id="pages"]/a/@href'):
        page_url = "http://www.zngirls.com" + href
        print(page_url + "=========")
        savePic(page_url, rootfolder, albumfolder)

def savePic(url, rootfolder, albumfolder):
    """Parse one gallery page and insert each image URL into the queue table.

    Rows land in table `grils` with status 0 (pending); `downloadPic` picks
    them up later. On a fetch/parse failure the function sleeps and returns —
    NOTE(review): 10000 seconds (~2.8 h) looks like a crude back-off; confirm.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
        "Connection": "keep-alive",
        # NOTE(review): this Referer value looks like a mangled Accept
        # string, not a URL — confirm it is intentional before changing.
        "Referer": "image / webp, image / *, * / *;q = 0.8",
        "Accept": "image/webp,image/*,*/*;q=0.8",
    }
    try:
        req = urllib.request.Request(url, headers=headers)
        with urllib.request.urlopen(req) as resp:
            tree = etree.HTML(resp.read())
        imgsrc = tree.xpath('//div[@class="gallery_wrapper"]/ul/img/@src')
        names = tree.xpath('//div[@class="gallery_wrapper"]/ul/img/@alt')
        print(imgsrc)
    except Exception:
        print('throw exception')
        time.sleep(10000)
        return

    # Open the DB only after a successful fetch, and always close it —
    # the original leaked one connection per call.
    con = sqlite3.connect(database)
    try:
        cu = con.cursor()
        folderpath = './' + rootfolder + '/' + albumfolder
        for src, name in zip(imgsrc, names):
            cu.execute(
                "INSERT INTO grils(url,filename,folderpath,status) VALUES (?,?,?,?)",
                (src, name, folderpath, 0),
            )
        con.commit()
    finally:
        con.close()
    print('完成一组专辑入库')

def sqlQueryUtil(sql):
    """Run a SELECT against the girls DB and return all rows.

    Returns None (after printing a message) on failure — callers treat this
    as best-effort, so errors are deliberately not re-raised. The original
    leaked the connection whenever execute() raised; `finally` fixes that.
    """
    try:
        sqlcon = sqlite3.connect(database)
        try:
            return sqlcon.cursor().execute(sql).fetchall()
        finally:
            sqlcon.close()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C still works.
        print('查询出错')

def sqlUpateUtil(sql):
    """Execute a single modifying statement against the girls DB and commit.

    (Name keeps the original misspelling — callers use it.) Failures are
    printed and swallowed, matching the original best-effort contract; the
    connection is now always closed, even when execute() raises.
    """
    try:
        sqlcon = sqlite3.connect(database)
        try:
            sqlcon.cursor().execute(sql)
            sqlcon.commit()
        finally:
            sqlcon.close()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C still works.
        print('更新出错')

def downloadPic():
    """Spawn one worker thread per 2000 pending rows and download them all."""
    print('开始下载MM')
    num = sqlQueryUtil("select count(*) from grils where status=0")
    total = int(num[0][0])
    # Ceiling division: the original int(total/2000) floored, so the final
    # partial batch (up to 1999 rows) was never handed to a worker.
    page = (total + 1999) // 2000
    print("page:" + str(page))

    threads = []
    for i in range(page):
        datas = sqlQueryUtil("select * FROM grils WHERE status=0 LIMIT " + str(i * 2000) + ",2000")
        threads.append(threading.Thread(target=multiDownload, args=(datas,)))

    print("threadnum:" + str(len(threads)))
    for t in threads:
        t.daemon = True  # modern spelling of the deprecated setDaemon(True)
        t.start()
        print("thread" + str(t) + "启动")

    # Join EVERY worker: the original joined only the last one started, so
    # main could exit (killing the daemon threads) while others still ran;
    # it also raised NameError when there were zero threads.
    for t in threads:
        t.join()

def _save_row(data, headers):
    """Fetch data[1] (url), write it to data[3]/data[2].jpg, mark the row done."""
    req = urllib.request.Request(data[1], headers=headers)
    with urllib.request.urlopen(req) as resp:
        content = resp.read()

    if not os.path.exists(data[3]):
        os.makedirs(data[3])

    # `with` guarantees the file handle is closed even on a write error.
    with open(data[3] + '/%s.jpg' % (data[2]), 'wb') as binfile:
        binfile.write(content)

    sqlUpateUtil("UPDATE grils SET status=1 WHERE id=" + str(data[0]))


def multiDownload(datas):
    """Download every queued row in *datas* (id, url, filename, folderpath, ...).

    On a direct-download failure, fetch a proxy from the local proxy-pool
    service and retry once through it. The retry path is unguarded, as in
    the original: a second failure aborts the remaining rows of this batch.
    """
    for data in datas:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
            "Connection": "keep-alive",
            # NOTE(review): Referer is the image URL itself — confirm intent.
            "Referer": data[1],
        }
        try:
            _save_row(data, headers)
            print("成功下载" + data[2])
        except Exception:
            print('savefile throw exception')
            print('开始代理下载')
            # Ask the local proxy-pool service for fresh domestic proxies.
            response = requests.get(url='http://127.0.0.1:8765/?types=0&count=100&country=%E5%9B%BD%E5%86%85')
            proxydata = response.json()
            print(str(proxydata[0][0]) + ":" + str(proxydata[0][1]))
            proxyhttp = urllib.request.ProxyHandler({'http': str(proxydata[0][0]) + ":" + str(proxydata[0][1])})
            # NOTE(review): install_opener mutates GLOBAL urllib state shared
            # by all download threads — confirm this is acceptable.
            urllib.request.install_opener(urllib.request.build_opener(proxyhttp))
            _save_row(data, headers)
            print("代理---成功下载" + data[2])



if __name__ == "__main__":

    '''threads=[]
    urlthread=threading.Thread(target=mmRankSum)
    threads.append(urlthread)
    downloadthread=threading.Thread(target=downloadPic)
    threads.append(downloadthread)
    for t in threads:
        t.setDaemon(True)
        t.start()
        time.sleep(10)
    t.join()'''

    downloadPic()
    pass  
