import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import os
from view import areaPieView, floorView, directionView, doubleView, houseBarView


def getUrls(start=1, end=3):
	"""Build the Anjuke listing-page URLs to scrape.

	Args:
		start: first page number, inclusive (default 1, matching the
			original hard-coded range).
		end: last page number, inclusive (default 3).

	Returns:
		list[str]: one URL per listing page, e.g. ".../sale/p1/".
	"""
	baseUrl = "https://gy.anjuke.com/sale/p"
	# Page numbers are 1-based on the site; range() end is exclusive, hence +1.
	return ["{}{}/".format(baseUrl, i) for i in range(start, end + 1)]


# Desktop Chrome User-Agent sent with every request — presumably so the site
# serves normal HTML instead of blocking the default python-requests UA
# (NOTE(review): anti-bot behavior not verifiable from this file).
headers = {
	"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
}


def getHtml(url):
	"""Fetch *url* and return its HTML text, or "" on any request failure.

	Args:
		url: the page URL to download.

	Returns:
		str: the response body, or "" when the request fails or the
		server answers with a 4xx/5xx status (callers treat "" as
		"no page" — see ParsePage).
	"""
	try:
		# timeout prevents a stalled connection from hanging the scraper.
		r = requests.get(url, headers=headers, timeout=10)
		r.raise_for_status()  # turn 4xx/5xx into an exception
		return r.text
	# Catch only requests errors: a bare `except:` would also swallow
	# KeyboardInterrupt/SystemExit and hide programming errors.
	except requests.RequestException:
		return ""


def ParsePage(html):
	"""Parse one Anjuke listing page into rows of property data.

	Args:
		html: raw HTML of a listing page; "" means the fetch failed.

	Returns:
		list[list[str]]: one row per property card, ordered as
		[title, area, house_modal, extent, direct, ceng, create_time,
		total_price, price]; None when *html* is empty.
	"""
	if html == '':
		return
	soup = BeautifulSoup(html, features="html.parser")
	# CSV column order: "标题"(title), "地址"(address), "户型"(layout), "面积"(area),
	# "朝向"(orientation), "楼层"(floor), "时间"(year), "总价"(total), "单价"(unit price)
	info = soup.select("div .property-content")
	# Accumulates one row per listing card on this page.
	data = []
	# NOTE(review): the positional .contents[...] chains below are tightly
	# coupled to Anjuke's current markup; any site layout change breaks them.
	for item in info:
		title = item.contents[0].contents[0].text
		# Layout string (e.g. "3室2厅") with spaces stripped.
		house_modal = item.contents[0].contents[2].contents[0].contents[0].text.replace(" ", "")
		# Floor area with the "㎡" unit and whitespace removed.
		extent = item.contents[0].contents[2].contents[0].contents[2].text.replace("㎡", "").replace("\n", "").replace(" ", "")
		# Orientation (e.g. "南北").
		direct = item.contents[0].contents[2].contents[0].contents[4].text
		# Floor label; the "(...)" suffix (e.g. total floors) is dropped.
		ceng = item.contents[0].contents[2].contents[0].contents[6].text.replace("\n", "").replace(" ", "").split("(")[0]
		# Build year; everything after "年" is dropped.
		create_time = item.contents[0].contents[2].contents[0].contents[8].text.replace("\n", "").replace(" ", "").split("年")[0]
		area = item.contents[0].contents[2].contents[2].contents[0].text
		# Unit price with the "元/㎡" unit removed.
		price = item.contents[2].contents[2].text.replace("元/㎡", "")
		# Total price with the " 万" (10k CNY) unit removed.
		total_price = item.contents[2].contents[0].text.replace(" 万", "")
		data.append([title, area, house_modal, extent, direct, ceng, create_time, total_price, price])
	return data


def getData():
	"""Scrape all listing pages and write the rows to data.csv.

	Recreates data.csv from scratch: header row first, then each page's
	parsed rows appended in order, pausing 2s between requests.
	"""
	if os.path.exists('data.csv'):
		os.remove('data.csv')
	columns = ["标题", "地址", "户型", "面积", "朝向", "楼层", "时间", "总价", "单价"]
	# utf-8-sig writes the BOM once at the start of the file so Excel
	# auto-detects UTF-8. (The original wrote the header as plain utf-8 but
	# appended every page with utf-8_sig, scattering BOMs mid-file.)
	df = pd.DataFrame([], columns=columns)
	df.to_csv("data.csv", mode="a", header=True, index=False, encoding="utf-8-sig")
	for page, url in enumerate(getUrls(), start=1):
		print(url)
		html = getHtml(url)
		data = ParsePage(html)
		# ParsePage returns None for a failed fetch; skip instead of
		# appending an empty frame.
		if not data:
			print("第{}页无数据，跳过".format(page))
			continue
		# Appended chunks must NOT carry their own BOM — plain utf-8 here.
		pd.DataFrame(data).to_csv("data.csv", mode="a", index=False, header=False, encoding="utf-8")
		print("第{}页写入完毕！".format(page))
		time.sleep(2)  # be polite to the server: wait 2s between requests


def draw():
	"""Load data.csv (when present), clean it, and render every chart."""
	if not os.path.exists('data.csv'):
		return
	frame = pd.read_csv("data.csv")
	# Remove incomplete and duplicated listings before charting.
	frame = frame.dropna().drop_duplicates()
	for render in (floorView, areaPieView, houseBarView, directionView, doubleView):
		render(frame)


if __name__ == "__main__":
	# Scrape first, then chart the collected CSV.
	getData()
	draw()
