import csv
import os

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from lxml import etree

# URL slugs for Beijing districts/areas on newhouse.fang.com; each one is
# appended to the search URL to fetch that area's new-house listings.
districts = [
    'chaoyang', 'haidian', 'fengtai', 'xicheng', 'dongcheng',
    'changping', 'daxing', 'tongzhou', 'fangshan', 'shunyi',
    'shijingshan', 'miyun', 'mentougou', 'huairou', 'yanqing',
    'pinggu', 'yanjiao2', 'beijingzhoubian2', 'lvyoudichan1',
    'xionganxinqu1',
]


def save_excel(tree=None, path=None, index=None, writer=None):
    """Extract one listing's name, layout and address and write a CSV row.

    Originally this relied entirely on module globals; the parameters make it
    testable and reusable while remaining backward compatible — calling
    ``save_excel()`` with no arguments still reads the globals ``e``,
    ``pPath``, ``i`` and ``csvwriter`` exactly as before.

    Args:
        tree: parsed lxml tree exposing ``.xpath`` (defaults to global ``e``).
        path: XPath prefix selecting the listing <li> elements
            (defaults to global ``pPath``).
        index: zero-based listing index on the page (defaults to global ``i``).
        writer: ``csv.writer``-like object receiving the row
            (defaults to global ``csvwriter``).

    Side effects:
        Sets the module globals ``name``, ``layout`` and ``address`` (kept for
        compatibility with the original script) and writes one row via
        ``writer.writerow``.
    """
    global name, layout, address
    tree = e if tree is None else tree
    path = pPath if path is None else path
    index = i if index is None else index
    writer = csvwriter if writer is None else writer

    # XPath is 1-indexed, hence index + 1; build the row prefix once instead
    # of repeating the concatenation three times.
    row = path + '[' + str(index + 1) + ']'
    name = tree.xpath(row + '/div/div[2]/div[1]/div[1]/a/text()')
    layout = tree.xpath(row + '/div/div[2]/div[2]/a/text()')
    address = tree.xpath(row + '/div/div[2]/div[3]/div/a/text()')
    writer.writerow([name, layout, address])


# Output directory for the per-district CSV files; create it up front so the
# open() below cannot fail with FileNotFoundError. (Needs `import os`.)
os.makedirs('csv', exist_ok=True)

for item in districts:
    # One CSV file per district; `with` guarantees the handle is closed
    # (the original never closed it).
    with open('csv/' + item + '.csv', 'w', newline='', encoding='utf-8') as file:
        csvwriter = csv.writer(file)
        csvwriter.writerow(['名称', '户型', '位置'])

        print(item)

        base_url = 'https://newhouse.fang.com/house/s/' + item
        headers = {"User-Agent": UserAgent().random}
        # BUG FIX: headers must be passed via the `headers=` keyword; the
        # second positional argument of requests.get is `params`, so the
        # original never actually sent the User-Agent header.
        response = requests.get(base_url, headers=headers)
        response.encoding = response.apparent_encoding

        e = etree.HTML(response.text)

        # Pagination links; empty when the district fits on a single page.
        # NOTE(review): absolute XPath is fragile against layout changes —
        # verify it still matches the current page structure.
        pages = e.xpath('/html/body/div[9]/div/div[1]/div[2]/ul/li[2]/a')
        print(pages)

        # Prefix selecting every listing <li> on a result page.
        pPath = '//div[@class="nl_con clearfix"]/ul/li'

        if not pages:
            lis = e.xpath(pPath)
            for i in range(len(lis)):
                row = pPath + '[' + str(i + 1) + ']'  # XPath is 1-indexed
                name = e.xpath(row + '/div/div[2]/div[1]/div[1]/a/text()')
                layout = e.xpath(row + '/div/div[2]/div[2]/a/text()')
                address = e.xpath(row + '/div/div[2]/div[3]/div/a/text()')
                csvwriter.writerow([name, layout, address])
        else:
            for page in range(len(pages) - 2):
                # BUG FIX: build each page URL from the base URL; the
                # original appended to the previous iteration's URL,
                # producing .../b91/b92/b93...
                page_url = base_url + '/b9' + str(page + 1)
                headers_page = {"User-Agent": UserAgent().random}
                response_page = requests.get(page_url, headers=headers_page)
                response_page.encoding = response_page.apparent_encoding

                e_page = etree.HTML(response_page.text)

                lis = e_page.xpath(pPath)

                for j in range(len(lis)):
                    row = pPath + '[' + str(j + 1) + ']'
                    # BUG FIX: query the current page's tree (e_page); the
                    # original queried `e` and re-scraped page 1 repeatedly.
                    name = e_page.xpath(row + '/div/div[2]/div[1]/div[1]/a/text()')
                    layout = e_page.xpath(row + '/div/div[2]/div[2]/a/text()')
                    address = e_page.xpath(row + '/div/div[2]/div[3]/div/a/text()')
                    csvwriter.writerow([name, layout, address])


