# -*- coding:utf-8 -*-
import requests
from requests.exceptions import ConnectionError
from bs4 import BeautifulSoup


# Desktop Chrome User-Agent so Sogou serves the normal (non-mobile) page.
headers = {
	'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
}

# URL of the local proxy-pool service.
# BUG FIX: the scheme was missing ('127.0.0.1:8888/get'), which makes
# requests.get() raise MissingSchema instead of contacting the pool.
proxy_pool_url = 'http://127.0.0.1:8888/get'
max_count = 5  # maximum attempts per detail page before giving up
proxy = None  # currently active proxy as 'host:port'; shared module state


def get_proxy():
	"""Fetch one proxy address from the local proxy-pool service.

	Returns:
		The response body (a proxy in 'host:port' form) when the pool
		answers with HTTP 200; None on any other status code or when
		the pool service is unreachable.
	"""
	try:
		resp = requests.get(proxy_pool_url)
	except ConnectionError:
		# Pool service is down — caller treats None as "no proxy available".
		return None
	return resp.text if resp.status_code == 200 else None


def get_detail_url(start_url):
	"""Query the Sogou WeChat search page and extract article detail URLs.

	Args:
		start_url: base URL of the Sogou WeChat search endpoint.

	Returns:
		A list of detail-page URLs found in the result list (empty when
		the request does not return HTTP 200). Previously always returned
		None; the list is a backward-compatible generalization since the
		caller ignores the return value.
	"""
	params = {
		'query': '风景',
		'type': '2',
	}
	detail_urls = []
	req_doc = requests.get(start_url, headers=headers, params=params)
	if req_doc.status_code == 200:
		soup = BeautifulSoup(req_doc.text, 'lxml')
		url_data = soup.select('ul.news-list li div.txt-box h3 a')
		for url in url_data:
			detail_url = url.get('href')
			# BUG FIX: was the Python 2 statement `print detail_url`,
			# a SyntaxError under Python 3 (the rest of the file uses
			# print() calls).
			print(detail_url)
			detail_urls.append(detail_url)
	return detail_urls


def get_detail_doc(detail_url, count=1):
	"""Fetch a detail page, rotating proxies on 302 anti-crawler redirects.

	Args:
		detail_url: URL of the article detail page.
		count: 1-based attempt number; incremented on each retry.

	Returns:
		The page HTML on HTTP 200, or None when max_count attempts are
		exhausted, no proxy can be obtained, or an unexpected status
		code is returned.
	"""
	print('Crawling', detail_url)
	print('Trying Count', count)  # fixed 'Tring' typo in the log message
	global proxy
	if count >= max_count:  # retry budget exhausted
		print('Tried Too Many Counts')
		return None
	try:
		if proxy:
			proxies = {
				'http': 'http://' + proxy
			}
			# allow_redirects=False so a 302 (anti-crawler ban) surfaces
			# as a status code instead of being followed automatically.
			req_doc = requests.get(detail_url, headers=headers, proxies=proxies, allow_redirects=False)
		else:
			req_doc = requests.get(detail_url, headers=headers, allow_redirects=False)
		if req_doc.status_code == 200:
			return req_doc.text
		if req_doc.status_code == 302:
			print('302')
			proxy = get_proxy()
			if proxy:
				print('Using Proxy:', proxy)
				# BUG FIX: pass the incremented attempt count — the
				# original called get_detail_doc(detail_url), resetting
				# count to 1 and recursing forever on persistent 302s.
				return get_detail_doc(detail_url, count + 1)
			else:
				print('Get Proxy Failed')
				return None
	except ConnectionError as e:
		print('Error Occurred', e.args)
		proxy = get_proxy()
		count += 1
		return get_detail_doc(detail_url, count)
	# Any other status code (e.g. 403/404): treat as failure explicitly
	# rather than falling off the end of the function.
	return None


def get_detail_data(detail_doc):
	"""Parse article data out of a detail-page HTML document.

	Not implemented yet — placeholder for the next stage of the
	scraping pipeline (detail_doc is the HTML text returned by
	get_detail_doc).
	"""
	pass


if __name__ == '__main__':
	# Script entry point: crawl the Sogou WeChat search results page
	# and print the detail-page URLs it links to.
	search_endpoint = 'http://weixin.sogou.com/weixin'
	get_detail_url(search_endpoint)
