# -*- coding:utf-8 -*-
import time
import requests
from bs4 import BeautifulSoup
import pymongo


# Browser-like User-Agent so the target site does not reject the scraper.
headers = {
	'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
}

# Authenticated connection to a local MongoDB instance; both collections live
# in the 'admin' database.  NOTE(review): credentials are hard-coded — consider
# moving them to environment variables or a config file.
client = pymongo.MongoClient('127.0.0.1',27017,username='admin',password='@admin521')
db_admin = client.admin
tb_detail_urls = db_admin.tb_detail_urls  # stores listing-page item URLs (spider 1)
tb_detail_info = db_admin.tb_detail_info  # stores scraped item fields (spider 2)
# Uncomment to reset the URL collection before a fresh crawl:
# db_admin.tb_detail_urls.drop()


# spider1
def get_detail_urls(page_url,pages,who_sells=0):
	"""Scrape one listing page and store each item's detail-page URL in MongoDB.

	page_url  -- base listing URL; '<who_sells>/pn<pages>/' is appended to it
	pages     -- page number to fetch (used in the 'pn{}' path segment)
	who_sells -- seller-type path segment (defaults to 0, per the site's URL scheme)

	Side effects: one document {'detail_url': ...} per link is inserted into
	the module-level `tb_detail_urls` collection; URLs are echoed to stdout.
	"""
	# str.format handles ints directly; no need to pre-convert with str().
	pg_url = '{}{}/pn{}/'.format(page_url, who_sells, pages)
	doc_data = requests.get(pg_url, headers=headers)
	time.sleep(1)  # throttle between requests to avoid being blocked
	soup = BeautifulSoup(doc_data.text, 'lxml')
	# A 'td.t' cell only exists when the page actually lists items.
	if soup.find('td', 't'):
		for anchor in soup.select('td.t a.t'):
			detail_url = anchor.get('href')
			# insert_one replaces the deprecated Collection.insert
			# (removed in PyMongo 4.x).
			tb_detail_urls.insert_one({'detail_url': detail_url})
			# Single-argument print(...) is valid in both Python 2 and 3.
			print(detail_url)
	else:
		print('No information was found on page {}'.format(pages))


def get_detail_info(detail_url):
	"""Fetch one item detail page, extract its fields and store them in MongoDB.

	detail_url -- absolute URL of an item page collected by get_detail_urls.

	Extracts title, price, posting date and area.  Any field whose element is
	missing from the page is stored as None rather than raising.

	Side effects: inserts one document into the module-level `tb_detail_info`
	collection and prints it to stdout.
	"""
	doc_data = requests.get(detail_url, headers=headers)
	soup = BeautifulSoup(doc_data.text, 'lxml')
	# BUG FIX: each guard now tests the *same* selector that is indexed below.
	# The original checked unrelated tag names (e.g. find_all('price') for a
	# <span class="price ..."> element), so price was always None, and the
	# area guard (find_all('span', 'c_25d')) did not guarantee '.c_25d a' was
	# non-empty, which could raise IndexError.
	title = soup.title.text if soup.title else None
	price_tags = soup.select('span.price.c_f50')
	price = price_tags[0].text if price_tags else None
	date_tags = soup.select('.time')
	date = date_tags[0].text if date_tags else None
	area_tags = soup.select('.c_25d a')
	area = list(area_tags[0].stripped_strings) if area_tags else None
	record = {'title': title, 'price': price, 'date': date, 'area': area}
	# insert_one replaces the deprecated Collection.insert (removed in PyMongo 4.x).
	tb_detail_info.insert_one(record)
	print(record)





