# coding=utf-8
from bs4 import BeautifulSoup
import requests
from time import sleep

# Target city code; used as the fang.com subdomain (e.g. 'qd' = Qingdao).
City_Name = 'qd'
# Index page listing all new-property projects for the configured city.
page = 'http://newhouse.{0}.fang.com/house/s'.format(City_Name)


# 定义Download_Newitem_List()函数是为了提取指定城市楼盘列表的链接，并存放到指定的文件中
def Download_Newitem_List(url, try_num=2):
	"""Download the new-property index page and write the paged listing URLs.

	Fetches *url*, reads the total project count from the ``allUrl`` anchor,
	computes how many 20-item result pages exist, and writes one page URL per
	line to ``<City_Name>_list_link.txt`` (overwriting any previous file).

	Args:
		url: Listing-index URL to download.
		try_num: Remaining retry attempts for server-side (5xx) failures.
	"""
	global City_Name
	print('正在下载:', url)
	try:
		all_html = requests.get(url, timeout=10)
	except Exception as e:
		# BUG FIX: the original printed e.reason, which plain exceptions do
		# not have, raising AttributeError inside the handler.
		print('下载错误：', e)
		# Retry only on server-side (5xx) errors, while attempts remain.
		if try_num > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
			return Download_Newitem_List(url, try_num - 1)
		# BUG FIX: without a response there is nothing to parse; the
		# original fell through and dereferenced all_html = None.
		return
	all_html.encoding = "gb18030"
	soup = BeautifulSoup(all_html.text, "html5lib")
	# Total project count, rendered as "(123)" inside the span.
	Item_Total = soup.find('a', id="allUrl").find('span').text.replace('(', '').replace(')', '')
	# Ceiling division: 20 projects per page, partial last page counts too.
	Page_Num = -(-int(Item_Total) // 20)
	with open('{0}_list_link.txt'.format(City_Name), 'w', encoding='utf-8') as f:
		for i in range(1, Page_Num + 1):
			New_Page_Link = 'http://newhouse.{0}.fang.com/house/s/b9{1}'.format(City_Name, i)
			print(New_Page_Link)
			print(New_Page_Link, file=f)


# 定义Download_item_link(City)函数是为了提取指定城市列表的链接中每一个开盘项目的链接，并存放到指定的文件中。
def Download_item_link(City):
	"""Collect every project's link from the city's listing pages.

	Reads page URLs (one per line) from ``<City>_list_link.txt``, scrapes
	each page for project anchors inside ``div.nlcd_name``, and appends the
	hrefs to ``<City>_Newall_link.txt``.

	Args:
		City: City code used in the input/output file names.
	"""
	import os
	# Start from a clean output file each run; incremental updating of an
	# existing file is a TODO rather than deleting it outright.
	if os.path.exists('{0}_Newall_link.txt'.format(City)):
		os.remove('{0}_Newall_link.txt'.format(City))
	with open('{0}_list_link.txt'.format(City), 'r', encoding='utf-8') as f:
		# Iterate the file lazily instead of materializing readlines().
		for line in f:
			print('正在读取:', line)
			sleep(2)  # throttle requests to be polite to the server
			try:
				all_html = requests.get(line.strip(), timeout=10)
				all_html.encoding = "gb18030"
			except Exception as e:
				print('下载错误：', e)
				# BUG FIX: the original fell through and parsed a stale
				# (or, on the first iteration, undefined) response.
				continue
			soup = BeautifulSoup(all_html.text, "html5lib")
			master_html = soup.find_all('div', class_='nlcd_name')
			with open('{0}_Newall_link.txt'.format(City), 'a', encoding='utf-8') as d:
				for link in master_html:
					print(link.a['href'].rstrip(), file=d)


def Download_item_info(City_Name):
	"""Print key facts for every project URL in ``<City_Name>_Newall_link.txt``.

	For each project page this prints: name, review score, average price,
	main unit layouts, upcoming launch date, the all-layouts link, and the
	detail-page link; it then fetches the detail page (parsing of which is
	still TODO).

	Args:
		City_Name: City code used in the input file name (intentionally
			shadows the module-level constant so callers can pass any city).
	"""
	with open('{0}_Newall_link.txt'.format(City_Name), 'r', encoding='utf-8') as f:
		for url in f:
			print(url.rstrip())
			# Fetch the project's overview page.
			try:
				all_html = requests.get(url.rstrip(), timeout=10)
				all_html.encoding = "gb18030"
			except Exception as e:
				print('下载错误：', e)
				# BUG FIX: original fell through and parsed a stale or
				# undefined response; skip this project instead.
				continue
			soup = BeautifulSoup(all_html.text, "html5lib")
			# Project name (note: the trailing space in 'inf_left1 ' matches
			# the site's actual class attribute).
			Sale_item_name = soup.find('div', class_='inf_left1 ').find('strong').get_text()
			print(Sale_item_name)
			# Review score
			Sale_item_score = soup.find('div', class_="tit").find('a').get_text().strip()
			print(Sale_item_score)
			# Average price
			Sale_item_price = soup.find('span', class_="prib cn_ff").get_text()
			print(Sale_item_price)
			# Main unit layouts, joined as "a-b-c"; empty anchors are dropped.
			Sale_item_master_temp = soup.find('div', class_='fl zlhx').findAll('a')
			Sale_item_master = [i.get_text() for i in Sale_item_master_temp if len(i.get_text()) > 0]
			print('-'.join(Sale_item_master))
			# Upcoming launch date
			Sale_item_startdate = soup.find('a', class_="kaipan").get_text()
			print(Sale_item_startdate)
			# Link to the page listing all unit layouts
			Sale_item_model_link = soup.find('a', class_='allhxt fl').get('href')
			print(Sale_item_model_link)
			# Detail page: second anchor of the navigation box — presumably
			# stable site layout; verify if the scrape breaks.
			Sale_item_info_link_temp = soup.find('div', id="orginalNaviBox").findAll('a')
			Sale_item_info_list = [i['href'] for i in Sale_item_info_link_temp]
			Sale_item_info_link = Sale_item_info_list[1]
			print(Sale_item_info_link)
			# Fetch the detail page for the (not yet implemented) parsing below.
			try:
				Sale_item_info_html = requests.get(Sale_item_info_link, timeout=10)
				Sale_item_info_html.encoding = "gb18030"
			except Exception as e:
				print('下载错误：', e)
				# BUG FIX: same stale-response hazard as above.
				continue
			Sale_item_info_soup = BeautifulSoup(Sale_item_info_html.text, "html5lib")
			# TODO: extract from Sale_item_info_soup — basic info (property
			# category, building type, tenure years), sales info, nearby
			# facilities, community plan, and developer. The previous draft
			# selectors were untested and have been removed.


#
##项目特色
# Item_feature=Sale_item_info_soup.find(string="项目特色：").find_parents("span").text
####楼盘价格走势
# Sale_item_trend=soup.find('div', id="orginalNaviBox").find('a',id='xfdsxq_B03_11').get('href')
####二手房链接
# Sale_item_ershou=soup.find('div', id="orginalNaviBox").find('a',id='xfdsxq_B03_13').get('href')
# 提取页面中所需的关键信息，并进行定义


# soup.find('a', id="allUrl")

# Pipeline stages — uncomment to (re)build the intermediate link files first:
# Download_Newitem_List(page)
# Download_item_link('qd')
# NOTE(review): this runs at import time; consider moving it under an
# `if __name__ == "__main__":` guard so importing the module has no side effects.
Download_item_info(City_Name)
