from bs4 import BeautifulSoup
import os
from Download import request
import datetime
from pymongo import MongoClient

class mzitu():
	"""Scraper for mzitu.com.

	Walks the archive page, downloads every image of each gallery into
	``D:\\Mzitu\\<title>``, and records each finished gallery in MongoDB
	(db ``meinvxiezhenji``, collection ``meizitu``) so already-crawled
	galleries are skipped on subsequent runs.
	"""

	# Root directory for all downloaded galleries, defined once.
	# NOTE: the original literal "D:\Mzitu" relied on the invalid escape
	# sequence \M; the backslash is now escaped explicitly.
	BASE_DIR = "D:\\Mzitu"

	def __init__(self):
		# BUG FIX: the constructor was misspelled `_init__`, so it was
		# never invoked and none of these attributes existed at runtime.
		client = MongoClient()
		db = client['meinvxiezhenji']
		self.meizitu_collection = db['meizitu']
		self.title = ''     # title of the gallery currently being crawled
		self.url = ''       # URL of that gallery's theme page
		self.img_urls = []  # image URLs collected for the current gallery

	def all_url(self, url):
		"""Crawl the archive page `url` and process every gallery linked there.

		Skips galleries whose theme-page URL is already recorded in MongoDB.
		"""
		html = request.get(url, 3)
		all_a = BeautifulSoup(html.text, 'lxml').find('div', class_='all').find_all('a')
		for a in all_a:
			title = a.get_text()
			self.title = title
			# BUG FIX: reset per gallery — previously URLs from every
			# earlier gallery leaked into each new MongoDB document.
			self.img_urls = []
			print(u'开始保存:', title)
			# '?' is illegal in Windows directory names.
			path = str(title).replace('?', '_')
			self.mkdir(path)
			os.chdir(os.path.join(self.BASE_DIR, path))
			href = a['href']
			self.url = href
			if self.meizitu_collection.find_one({'主题页面': href}):
				print(u'这个页面已经爬取过了')
			else:
				self.html(href)

	def html(self, href):
		"""Fetch the gallery page `href`, read its page count, and visit every page."""
		html = request.get(href, 3)
		# The 11th <span> on the gallery page holds the number of the last
		# page — fragile, but that is how the site's markup is laid out.
		max_span = BeautifulSoup(html.text, 'lxml').find_all('span')[10].get_text()
		for page in range(1, int(max_span) + 1):
			# `page` itself is the running counter the old code duplicated
			# in a separate `page_num` variable.
			self.img(href + '/' + str(page), max_span, page)

	def img(self, page_url, max_span, page_num):
		"""Download the single image on `page_url`.

		After the last page (`page_num == max_span`) the collected gallery
		metadata is persisted to MongoDB.
		"""
		img_html = request.get(page_url, 3)
		img_url = BeautifulSoup(img_html.text, 'lxml').find('div', class_='main-image').find('img')['src']
		self.img_urls.append(img_url)
		# Both branches of the original called save(); do it once.
		self.save(img_url, page_url)
		if int(max_span) == page_num:  # last page — gallery complete
			post = {
				'标题': self.title,
				'主题页面': self.url,
				'图片地址': self.img_urls,
				'获取时间': datetime.datetime.now(),
			}
			# BUG FIX: Collection.save() was deprecated and removed in
			# pymongo 4; insert_one() is the supported replacement for
			# inserting a new document.
			self.meizitu_collection.insert_one(post)
			print(u'插入数据库成功')

	def save(self, img_url, page_url):
		"""Write the image at `img_url` into the current working directory."""
		name = img_url[-9:-4]  # file name: the 5 chars before the '.jpg' suffix
		print(u'开始保存', img_url)
		img = request.getimg(img_url, page_url, 3)
		# BUG FIX: 'wb' instead of 'ab' — appending to a leftover file from
		# a previous run corrupts the JPEG; `with` guarantees the handle
		# is closed even if write() raises.
		with open(name + '.jpg', 'wb') as f:
			f.write(img.content)

	def mkdir(self, path):
		"""Create ``BASE_DIR/<path>`` if needed.

		Returns True when the directory was created, False if it existed.
		"""
		path = path.strip()
		target = os.path.join(self.BASE_DIR, path)
		if os.path.exists(target):
			print(u'名字叫做', path, u'已经存在')
			return False
		print(u'建立一个名字叫做，', path, u'文件夹')
		os.makedirs(target)
		return True
if __name__ == "__main__":
	# Guard the entry point so importing this module does not immediately
	# start a full network crawl as a side effect.
	Mzitu = mzitu()
	Mzitu.all_url('http://www.mzitu.com/all')