# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import os
import time
import re
from pymongo import MongoClient
import datetime


def CrawlerMz_2017(url,folderPath):        ## BeautifulSoup-based implementation
    """Crawl one mzitu.com listing page and download every gallery on it.

    url        -- listing page URL (e.g. http://www.mzitu.com/page/1)
    folderPath -- base directory; one sub-folder is created per gallery
                  (makDirFile also chdir()s into it, so images land there)

    Side-effect only: writes .jpg files to disk, returns None.
    """
    headers={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"}
    start_html=requests.get(url,headers=headers)

    Soup=BeautifulSoup(start_html.text,'lxml')

    # Only the <ul id="pins"> list holds this page's gallery entries.
    all_li=Soup.find('ul',id="pins").find_all('li')
    for li in all_li:
        all_a=li.find('span').find_all('a')
        for a in all_a:
            title=a.get_text()
            filename = title.strip()            # drop leading/trailing spaces
            filename=filename.replace('?',"")   # '?' is illegal in Windows paths
            print(filename)

            # makDirFile returns False when the folder already exists;
            # treat that as "already crawled" and skip the gallery.
            blnFile=makDirFile(folderPath,filename)
            href = a['href']
            if not blnFile:
                continue

            html=requests.get(href,headers=headers)
            html_soup=BeautifulSoup(html.text,'lxml')
            # The second-to-last <span> of the pager holds the page count.
            max_span=html_soup.find('div',class_='pagenavi').find_all('span')[-2].get_text()

            for page in range(1,int(max_span)+1):
                page_url=href+'/'+str(page)

                img_html=requests.get(page_url,headers=headers)
                img_soup=BeautifulSoup(img_html.text,'lxml')
                img_url=img_soup.find('div',class_='main-image').find('img')['src']
                name=img_url[-9:-4]     # file stem taken from the image URL
                img=requests.get(img_url,headers=headers)
                # 'wb', not 'ab': appending on a repeated download would
                # concatenate two JPEGs and corrupt the file. 'with' also
                # guarantees the handle is closed if write() raises.
                with open(name+'.jpg','wb') as f:
                    f.write(img.content)
                time.sleep(0.3)     # throttle to be polite to the server

def get_headers(url):
    """Build request headers for *url*: a desktop Chrome User-Agent plus a
    Referer header, which the image host checks for hot-link protection."""
    ua = ("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
          "(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36")
    return {'User-Agent': ua, 'Referer': url}

def CrawlerMz_Reg(url,folderPath,db_collection):     ## regex-based implementation
    """Crawl one mzitu.com listing page, tracking progress in MongoDB so
    already-completed galleries are not crawled twice.

    url           -- listing page URL
    folderPath    -- base directory; one sub-folder is created per gallery
    db_collection -- pymongo collection; one document per gallery with keys
                     标题 / 主题页面 / 获取时间 / 已爬取页数 / 总页数

    Side-effect only: writes .jpg files and DB records, returns None.
    """
    headers=get_headers(url)
    ## fetch the listing page
    start_html = requests.get(url, headers=headers)

    # Grab the <ul id="pins"> block that contains all gallery entries.
    content_list=re.findall(u"<ul id=\"pins\">.*?</ul>",start_html.content.decode("utf-8"),re.S)
    # Each entry looks like:
    #   <span><a href="http://www.mzitu.com/NNNNN" target="_blank">title</a></span>
    imge_list=re.findall("<span><a href=\"(.*?)\" target=\"_blank\">(.*?)</a></span>",content_list[0],re.S)
    for imge in imge_list:      ## (gallery URL, title) pairs
        filename=imge[1].strip().replace("?","")    # '?' is illegal in Windows paths
        href=imge[0]
        makDirFile(folderPath,filename)        # create gallery folder (also chdir()s into it)

        cursor = db_collection.find_one({"主题页面": href})

        if cursor:      # gallery already recorded in the DB
            page_after = cursor[u"已爬取页数"]  # pages already downloaded
            page_max = cursor[u"总页数"]        # total pages in the gallery
            # '!=' replaces the legacy '<>' operator (removed in Python 3).
            # Re-download when the gallery was left incomplete.
            if page_after!=page_max:
                download_image(href, page_max, db_collection, headers)
        else:
            html = requests.get(href, headers=headers)
            html_soup = BeautifulSoup(html.text, 'lxml')
            # The second-to-last <span> of the pager holds the total page count.
            max_span = html_soup.find('div', class_='pagenavi').find_all('span')[-2].get_text()

            post={
                "标题":filename,
                "主题页面":href,
                "获取时间":datetime.datetime.now(),
                "已爬取页数":0,
                "总页数":int(max_span)
            }
            # NOTE(review): Collection.save() is deprecated in pymongo 3.x and
            # removed in 4.x; insert_one() is the modern equivalent — confirm
            # the pinned pymongo version before migrating.
            db_collection.save(post)

            download_image(href,max_span,db_collection,headers)


def download_image(href,max_span,db_collection,headers):
    """Download pages 1..max_span of one gallery into the current working dir.

    href          -- gallery base URL; page N lives at href + '/' + N
    max_span      -- total number of pages (str or int)
    db_collection -- pymongo collection used to record crawl progress
    headers       -- initial request headers (rebuilt per page, see below)
    """
    for page in range(1,int(max_span)+1):
        page_url=href+'/'+str(page)         ## URL of the page holding one image
        # Record progress before downloading so an interrupted run can resume.
        db_collection.update({u"主题页面": href}, {"$set": {u"已爬取页数": page}})
        # Rebuild headers per page so the Referer matches the page being
        # fetched (intentionally shadows the parameter).
        headers=get_headers(page_url)
        img_html = requests.get(page_url, headers=headers)
        img_soup = BeautifulSoup(img_html.text, 'lxml')
        img_url = img_soup.find('div', class_='main-image').find('img')['src']
        name = img_url[-9:-4]  ## file stem taken from the image URL
        img = requests.get(img_url, headers=headers)
        # 'wb', not 'ab': this function is also called to RE-download
        # incomplete galleries, and appending to an existing file would
        # concatenate two JPEGs and corrupt it. 'with' closes the handle
        # even if write() raises.
        with open(name + '.jpg', 'wb') as f:
            f.write(img.content)


def makDirFile(path,file):
    """Create the directory path + sanitized(file) and chdir into it.

    path -- base directory string, expected to end with a path separator
    file -- raw folder name; surrounding whitespace and '?' are stripped
            (both are problematic in Windows paths)

    Returns True if the directory was newly created, False if mkdir failed
    (treated as "already exists"). The process working directory is ALWAYS
    changed to the target folder, so subsequent downloads land inside it.
    """
    filename = file.strip()  # drop leading/trailing spaces
    filename = filename.replace('?', "")  # '?' is illegal in Windows paths
    print(u"创建文件夹："+filename)
    folderPath =path + filename
    try:
        os.mkdir(folderPath)  # create the folder
        return True
    except OSError:  # portable: WindowsError exists only on Windows (and is an OSError subclass there)
        print(u"文件夹已经存在！")
        return False
    finally:
        os.chdir(folderPath)  # make it the current working directory


if __name__=="__main__":
    ## Two crawler implementations exist: CrawlerMz_Reg (regex) and
    ## CrawlerMz_2017 (BeautifulSoup); the regex one is used here because
    ## it records progress in MongoDB.
    client = MongoClient("mongodb://localhost:27017")
    collection = client["test"]["meizitu"]
    # Per-gallery document fields: 标题 (title), 主题页面 (gallery URL),
    # 获取时间 (fetch time), 已爬取页数 / 总页数 (crawled / total pages).

    for page in range(1, 10):
        page_url = "http://www.mzitu.com/page/" + str(page)

        folder_name = u"第" + str(page) + u"页"
        folder_path = u"E:\\meizitu\\" + folder_name + u"\\"
        makDirFile(u"E:\\meizitu\\", folder_name)

        CrawlerMz_Reg(page_url, folder_path, collection)