import csv
from html.parser import HTMLParser
from lxml.html import fromstring, tostring
import requests
import time
from lxml import etree


def get_url(url, pages=100):
    """Build the listing-page URLs pg1 .. pgN under *url*.

    Parameters
    ----------
    url : str
        Base listing URL, expected to end with '/'.
    pages : int, optional
        Number of pages to generate (default 100, matching the
        original hard-coded range).

    Returns
    -------
    list[str]
        ['<url>pg1/', '<url>pg2/', ..., '<url>pg<pages>/']
    """
    all_url = [url + 'pg' + str(i) + '/' for i in range(1, pages + 1)]
    # Print all generated URLs (kept from original for progress visibility).
    print(all_url)
    return all_url


# Collect the detail-page URL of every house on each listing page.
def get_house_url(all_url, headers):
    """Fetch each listing page and feed the per-house detail URLs to analysis_html.

    Fixes over the previous version: no longer dumps the entire page HTML
    to stdout (debug leftover), reports the page NUMBER (not the URL) in
    the progress message, and uses a request timeout so a stalled server
    cannot hang the whole crawl.

    Parameters
    ----------
    all_url : list[str]
        Listing-page URLs as produced by get_url().
    headers : dict
        HTTP headers to send with every request.
    """
    for num, page_url in enumerate(all_url, start=1):
        r = requests.get(page_url, headers=headers, timeout=10)
        html = etree.HTML(r.text)

        # Detail-page link of every house on this listing page.
        url_ls = html.xpath("//ul[@class='listContent']/li/a/@href")
        print(url_ls)

        analysis_html(url_ls, headers)
        # Be polite to the server between listing pages.
        time.sleep(4)
        print("第%s页爬完了" % num)


# Extract the detail information of each house.
def analysis_html(url_ls, headers):
    """Fetch every house detail page and save name/price/district/attributes.

    Pages whose structure does not match the expected xpath layout
    (e.g. removed listings or anti-bot pages) are skipped with a message
    instead of crashing the whole crawl with an IndexError.

    Parameters
    ----------
    url_ls : list[str]
        Detail-page URLs collected from one listing page.
    headers : dict
        HTTP headers to send with every request.
    """
    for detail_url in url_ls:
        r = requests.get(detail_url, headers=headers, timeout=10)
        html = etree.HTML(r.text)

        # House/compound name.
        name_ls = html.xpath("//div[@class = 'title']/a/text()")
        # Total price text.
        money = html.xpath("//div[@class = 'totalPrice']/text()")
        # Breadcrumb; third entry is the district.
        area_ls = html.xpath("//div[@class = 'deal-bread']/a/text()")
        # Basic house attributes.
        data = html.xpath("//div[@class = 'content']/ul/li/text()")

        # Guard against missing/changed page layout instead of raising IndexError.
        if not name_ls or len(area_ls) < 3:
            print("页面结构异常，跳过：%s" % detail_url)
            continue

        save_data(name_ls[0].split(), money, area_ls[2], data)


# Append one scraped record to the CSV file.
def save_data(name, money, area, data):
    """Append a single house record as one CSV row.

    Parameters
    ----------
    name : list[str]
        House-name tokens; only the first token is written.
    money : list[str]
        Total-price text (e.g. ['123万']).
    area : str
        District name.
    data : list[str]
        Basic attribute values, written as the trailing columns.
    """
    result = [name[0]] + money + [area] + data
    print(result)

    # utf_8_sig writes a BOM so Excel opens the Chinese CSV correctly.
    # 'with' closes the file automatically — the old explicit f.close()
    # inside the with-block was redundant and has been dropped.
    with open(r'新的二手房房价2.csv', 'a', encoding='utf_8_sig', newline='') as f:
        csv.writer(f).writerow(result)
        print("已写入")


if __name__ == '__main__':
    # Lianjia (Xiamen) sold-listings index page.
    url = 'https://xm.lianjia.com/chengjiao/'
    headers = {
        # Fixed header name: the real HTTP header is plural
        # ("Upgrade-Insecure-Requests"), the old singular form was
        # silently ignored by servers.
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
    }

    all_url = get_url(url)

    # CSV header row, written once before scraping starts.
    table_label = [
        '小区名', '价格/万', '地区', '房屋户型', '所在楼层', '建筑面积', '户型结构',
        '套内面积', '建筑类型', '房屋朝向', '建成年代', '装修情况', '建筑结构',
        '供暖方式', '梯户比例', '产权年限', '配备电梯', '链家编号', '交易权属',
        '挂牌时间', '房屋用途', '房屋年限', '房权所属',
    ]
    with open(r"新的二手房房价2.csv", 'a', encoding='utf_8_sig', newline='') as f:
        csv.writer(f).writerow(table_label)

    get_house_url(all_url, headers)
