# -*- coding: utf-8 -*-

import requests
from bs4 import BeautifulSoup
import csv
import logging

# URL pieces for Xi'an rental listings on fang.com; pages are assembled as
# Url_head + <district area code> + "/i3" + <page number> + Url_tail.
Url_head = "https://xian.zu.fang.com/house-"
Url_tail = "/?rfss=1-c38a909d0ab73949ee-4c"
Num = 0  # NOTE(review): appears unused anywhere in this file — candidate for removal
Filename = "rent.csv"  # output CSV path; appended to by write_csv() and run()

# Configure logging output format and level
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def write_csv(msg_list):
    """Append a single row to the output CSV file (module-level ``Filename``).

    Args:
        msg_list: Iterable of field values written as one CSV row.
    """
    # Context manager guarantees the file handle is closed even if
    # writerow raises (the original leaked the handle on error).
    with open(Filename, 'a', newline='', encoding='utf-8') as out:
        csv.writer(out, dialect='excel').writerow(msg_list)

def acc_page_msg(page_url):
    """Fetch one listing page and append every rental entry found to the CSV.

    Args:
        page_url: URL of one fang.com rental listing page.

    Raises:
        requests.RequestException: If the request fails, times out, or the
            server returns an HTTP error status.
    """
    # Timeout keeps the crawler from hanging forever on a dead connection;
    # raise_for_status surfaces HTTP errors instead of parsing an error page.
    response = requests.get(page_url, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')

    # Each element with class "list hiddenMap rel" holds one rental listing.
    for dl in soup.find_all(attrs="list hiddenMap rel"):
        info_rel = dl.dd
        rent_address = rent_type = rent_house_type = ""
        rent_floor_area = rent_room_orientation = ""

        # Detail line — presumably "type|house_type|<area><unit>|orientation";
        # only fully-formed 4-part entries are unpacked.
        detail = info_rel.find(attrs="font15 mt12 bold").get_text().strip().split("|")
        if len(detail) == 4:
            rent_type = detail[0]
            rent_house_type = detail[1]
            rent_floor_area = detail[2][:-2]  # drop the trailing 2-char area unit
            rent_room_orientation = detail[3]

        # Location line — "area-district" or "area-district-street".
        data = info_rel.find(attrs="gray6 mt12").get_text().strip().split('-')
        rent_area = data[0]
        if len(data) == 3:
            rent_address = data[1] + "-" + data[2]
        elif len(data) == 2:
            rent_address = data[1]

        # Price sits in <... class="moreInfo"><p><span>; guard against a
        # missing tag so one malformed listing doesn't abort the whole page.
        tag2 = info_rel.find(attrs="moreInfo")
        rent_price = tag2.p.span.string if tag2 is not None else ""

        # Append this listing as one CSV row.
        write_csv((rent_address, rent_area, rent_price, rent_type,
                   rent_house_type, rent_floor_area, rent_room_orientation))


def get_pages_urls():
    """Build the full list of listing-page URLs to crawl.

    Returns:
        list[str]: URLs in district order, pages 1..N within each district,
        assembled as Url_head + code + "/i3" + page + Url_tail.
    """
    # (district area code, number of accessible pages); order defines crawl order.
    districts = [
        ("a016698", 1),   # 1. Yanta — NOTE(review): original comment said 6
                          #    accessible pages but only 1 was crawled; confirm intent
        ("a016699", 3),   # 2. Beilin
        ("a016701", 7),   # 3. Lianhu
        ("a016703", 11),  # 4. Weiyang
        ("a016700", 1),   # 5. Xincheng
        ("a0483", 4),     # 6. Chang'an
        ("a0924", 1),     # 7. Baqiao
        ("a0482", 11),    # 8. Gaoxin
        ("a014681", 3),   # 9. Qujiang New District
        ("a011882", 2),   # 10. Chanba
        ("a011880", 6),   # 11. Jingkai
        ("a016702", 1),   # 12. Hangtian New Town
        ("a011881", 1),   # 13. Xixian New District
    ]
    # One comprehension replaces the original 13 copy-pasted loops.
    return [
        Url_head + code + "/i3" + str(page) + Url_tail
        for code, pages in districts
        for page in range(1, pages + 1)
    ]


def run():
    """Entry point: write the CSV header row, then crawl every listing page."""
    logging.info("开始爬虫")
    # encoding='utf-8' matches write_csv (the original omitted it here, which
    # can mojibake the header on platforms with a non-UTF-8 default encoding);
    # the context manager closes the file even if writerow raises.
    with open(Filename, 'a', newline='', encoding='utf-8') as out:
        csv.writer(out, dialect='excel').writerow(
            ("address", "area", "price", "type", "house_type", "floor_area", "room_orientation"))

    # Crawl each page and log progress; lazy %-args avoid eager formatting.
    for url in get_pages_urls():
        acc_page_msg(url)
        logging.info("url:%s", url)
    logging.info("结束爬虫")

if __name__ == '__main__':
    # Run the crawler only when executed as a script, not on import.
    run()
