import requests
from bs4 import BeautifulSoup
import random
import os
import time
from pymongo import MongoClient
import datetime
class jiandan():
    """Scraper for the jandan.net 'ooxx' image board.

    On construction it harvests a list of 'host:port' HTTP proxies from
    xicidaili.com and opens a local MongoDB collection ('jiandan'/'jd')
    used to de-duplicate image downloads across runs.
    """

    def __init__(self):
        # Local MongoDB; each document records one already-fetched image URL.
        client = MongoClient()
        db = client['jiandan']
        self.jiandan_collection = db['jd']
        self.imgurl = []   # image URLs collected during this run
        self.iplist = []   # proxy candidates as 'host:port' strings
        headers = {'referer': 'http://www.xicidaili.com/nn/1', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0'}
        html = requests.get('http://www.xicidaili.com/nn/1', headers=headers)
        ips = BeautifulSoup(html.text, 'lxml').find_all('tr')
        # Skip index 0: the first <tr> is the table header row.
        for row in ips[1:]:
            tds = row.find_all('td')
            # Column 1 = IP, column 2 = port.
            self.iplist.append(tds[1].contents[0] + ':' + tds[2].contents[0])
        self.user_agent_list = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]

    def get(self, url, timeout, proxy=None, num_retries=6):
        """Fetch *url* with a random User-Agent.

        Retries direct requests up to *num_retries* times, then falls back
        to a random proxy from self.iplist; when the proxies also fail it
        drops back to a direct request. Returns a requests.Response.
        """
        print(u'开始获取:', url)
        UA = random.choice(self.user_agent_list)
        headers = {'User-Agent': UA, 'referer': 'http://jandan.net/'}
        if proxy is None:
            try:
                return requests.get(url, headers=headers, timeout=timeout)
            except requests.exceptions.RequestException:
                if num_retries > 0:
                    time.sleep(3)
                    print(u'获取网页出错，3s后将获取倒数第:', num_retries, u'次')
                    # BUG FIX: the retry counter must go in the 4th slot;
                    # the original passed it as the proxy argument.
                    return self.get(url, timeout, None, num_retries - 1)
                else:
                    print(u'开始使用代理:')
                    time.sleep(3)
                    IP = random.choice(self.iplist).strip()
                    proxy = {'http': IP}
                    return self.get(url, timeout, proxy)
        else:
            try:
                IP = random.choice(self.iplist).strip()
                proxy = {'http': IP}
                return requests.get(url, headers=headers, proxies=proxy, timeout=timeout)
            except requests.exceptions.RequestException:
                if num_retries > 0:
                    time.sleep(10)
                    IP = random.choice(self.iplist).strip()
                    proxy = {'http': IP}
                    print(u'正在更换代理，3s后将重新获取倒数第', num_retries, u'次')
                    print(u'当前代理是：', proxy)
                    return self.get(url, timeout, proxy, num_retries - 1)
                else:
                    print(u'代理也不好使，取消代理')
                    return self.get(url, 3)

    def page(self, url):
        """Read the board's max page number, then scrape every listing page."""
        html = self.get(url, 3)
        # The first <a> inside the pager holds the current max page number.
        m_page = BeautifulSoup(html.text, 'lxml').find('div', class_='cp-pagenavi').find('a').get_text().strip()
        max_page = int(m_page) + 1
        # Hoisted out of the loop: the download directory never changes.
        # Raw string has the identical value to the original "D:\Mzitu\\JIANDAN".
        os.chdir(r"D:\Mzitu\JIANDAN")
        for page in range(1, max_page + 1):
            page_url = 'http://jandan.net/ooxx/page-' + str(page) + '#comments'
            self.img(page_url)

    def img(self, url):
        """Collect the full-size image links on one listing page and save
        every image not yet recorded in MongoDB."""
        html = self.get(url, 3)
        links = BeautifulSoup(html.text, 'lxml').find_all('a', class_='view_img_link')
        for link in links:
            # hrefs are protocol-relative ('//...'); prefix the scheme.
            img_url = 'http:' + link['href']
            if self.jiandan_collection.find_one({'图片网址': img_url}):
                print(u'这张图片已经爬取过了')
            else:
                self.imgurl.append(img_url)
                # BUG FIX: store this single URL — the original stored the
                # whole (growing) self.imgurl list in every document.
                # insert_one replaces the deprecated Collection.save().
                post = {
                    '图片网址': img_url,
                    '获取时间': datetime.datetime.now()
                }
                self.jiandan_collection.insert_one(post)
                self.save(img_url)

    def save(self, url):
        """Download one image and write it as <last-5-chars>.jpg in the CWD.

        Returns False when the local write fails; otherwise None.
        """
        print(u'开始保存:', url)
        name = url[-9:-4]
        try:
            img = self.get(url, 3)
            # 'with' guarantees the handle is closed even if the write fails.
            with open(name + '.jpg', 'ab') as f:
                f.write(img.content)
        except OSError:
            # BUG FIX: the original printed an undefined name 'img_url' here,
            # turning any file error into a NameError.
            print(u'图片不存在已跳过：', url)
            return False
 
if __name__ == "__main__":
    # Guarded entry point: importing this module must not trigger the
    # network/MongoDB side effects of running the scraper.
    jd = jiandan()
    jd.page('http://jandan.net/ooxx')
	
		
		
			