import urllib3
import requests
import pandas as pd
from lxml import etree
from bs4 import BeautifulSoup
from requests_html import UserAgent

# Module-level request state shared by get_info() and the __main__ driver.
payload = {}                     # empty request body placeholder passed to requests.request
headers = {'User-Agent': ''}     # default headers; get_info() builds a randomized copy per call
count = 1                        # running counter of scraped listings (mutated via `global`)

# Suppress the InsecureRequestWarning urllib3 emits for unverified HTTPS requests.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def _join_or(parts, suffix=''):
  """Join an xpath text() result list into one string, appending *suffix*.

  Returns '无' ("none") when nothing was matched, so missing fields are
  recorded consistently.  This fixes the original bug where
  `("".join(price) + "万") or "无"` could never evaluate to "无" because
  the joined string always ended in "万".
  """
  text = "".join(parts)
  return text + suffix if text else "无"


def get_info(url):
  """Scrape one listing page of bj.esf.fang.com and append rows to the CSV.

  For each listing on the page, fetches its detail page, extracts price,
  layout, area, floor, orientation, decoration level, district and the
  extra attribute panel, prints a progress line, and appends every row
  (without header) to the house-info CSV.

  :param url: absolute URL of one search-result page.
  """
  global count  # running listing counter shared across calls

  user_agent = UserAgent().random  # random User-Agent per page to reduce blocking
  headers = {"User-Agent": user_agent}

  # Fetch the result page and parse it with BeautifulSoup's built-in parser.
  response = requests.request("GET", url, headers=headers, data=payload)
  soup = BeautifulSoup(response.text, 'html.parser')
  # Each <dl class="clearfix" dataflag="bg"> is one listing entry.
  all_dl = soup.find_all('dl', attrs={'class': 'clearfix', 'dataflag': 'bg'})

  infoo_list = []

  for dl in all_dl:
    print(f' 正在爬取第{count}条信息...')

    # Build the absolute detail-page URL from the listing's <a href>.
    href = 'https://bj.esf.fang.com' + dl.find('h4', class_='clearfix').find('a').get('href')
    response1 = requests.request("GET", href, headers=headers, data=payload)
    html = etree.HTML(response1.text)
    soup1 = BeautifulSoup(response1.text, 'html.parser')

    # Fields in the right-hand summary panel of the detail page.
    price = html.xpath('//div[@class="tab-cont-right"]/div[1]/div[1]/div[1]/i/text()')
    house_type = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[1]/div[1]/text()')
    house_area = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[2]/div[1]/text()')
    price_area = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[3]/div[1]/text()')
    house_aspect = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[1]/div[1]/text()')

    # Floor info appears either as plain text or wrapped in an <a> tag.
    house_floot = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[2]/div[1]/text()')
    if not house_floot:
      house_floot = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[2]/div[1]/a[1]/text()')

    # Decoration level and district link.
    house_level = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[3]/div[1]/a[1]/text()')
    local = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line"]/div[2]/div[2]/a[1]/text()')

    # Extra-attribute panel: paired <span class="lab"> (label) /
    # <span class="rcont"> (value) entries.
    house_info_list = soup1.find('div', class_='cont clearfix')
    sub_title = [info.text for info in house_info_list.find_all('span', class_='lab')]
    sub_content = [info.text for info in house_info_list.find_all('span', class_='rcont')]

    # Normalize every field: joined text, or '无' when the xpath matched nothing.
    price = _join_or(price, "万")
    house_type = _join_or(house_type)
    house_area = _join_or(house_area)
    price_area = _join_or(price_area)
    house_aspect = _join_or(house_aspect)
    house_floot = _join_or(house_floot)
    house_level = _join_or(house_level)
    local = _join_or(local)

    # Pick out only the attributes we keep; anything absent stays '无'.
    dictt = {'建筑年代': '无', '有无电梯': '无', '产权性质': '无', '住宅类别': '无', '挂牌时间': '无', '建筑形式': '无', '车库数量': '无', '车位数量': '无', '建筑结构': '无'}
    for title, content in zip(sub_title, sub_content):
      if title in dictt:
        dictt[title] = content

    print(href, price, house_area, house_type, price_area, house_floot, house_aspect, house_level, local,
          dictt['建筑年代'], dictt['有无电梯'], dictt['产权性质'], dictt['住宅类别'], dictt['建筑结构'], dictt['挂牌时间'], dictt['建筑形式'],
          dictt['车库数量'], dictt['车位数量'])

    infoo_list.append([href, price, house_area, house_type, price_area, house_floot, house_aspect, house_level, local,
          dictt['建筑年代'], dictt['有无电梯'], dictt['产权性质'], dictt['住宅类别'], dictt['建筑结构'], dictt['挂牌时间'], dictt['建筑形式'],
          dictt['车库数量'], dictt['车位数量']])
    count += 1

  # Append this page's rows (no header) to the accumulating CSV; the
  # __main__ block writes the header once at the end.
  df = pd.DataFrame(infoo_list,
                      columns=['网址', '价格', '建筑面积', '户型', '单价', '楼层', '朝向', '装修程度', "地区", '建筑年代',
                               '有无电梯', '产权性质', '住宅类别', '建筑结构', '挂牌时间', '建筑形式', '车库数量', '车位数量'])
  df.to_csv(R'D:\PYCHARM文件\zwz050418\main\house-info.csv', mode='a+', index=False, header=False, encoding='utf_8_sig')


if __name__ == '__main__':
    # Crawl districts a01..a04, pages 1..100 each.
    for j in range(1, 5):
        for i in range(1, 101):
            url = f'https://bj.esf.fang.com/house-a0{j}/i3{i}/'
            # Try each page at most twice.  The original handler retried
            # get_info(url) UNGUARDED, so a second failure on the same
            # page aborted the whole crawl with an unhandled exception.
            for _attempt in range(2):
                try:
                    get_info(url)
                except Exception as e:
                    print(e)
                else:
                    break
    print("爬取结束!!!")  # fixed typo: 爬起 -> 爬取

    # get_info() appended rows without a header, so read with header=None;
    # the original read consumed the first scraped row as a header and
    # then discarded it when df.columns was reassigned.
    df = pd.read_csv(R'D:\PYCHARM文件\zwz050418\main\house-info.csv', encoding='utf-8-sig', header=None)
    df.columns = ['网址', '总价', '建筑面积', '户型', '单价', '楼层', '朝向', '装修程度', "地区", '建筑年代', '有无电梯',
                  '产权性质', '住宅类别', '建筑结构', '挂牌时间', '建筑形式', '车库数量', '车位数量']
    # Rewrite the file once, now with proper column names.
    df.to_csv(R'D:\PYCHARM文件\zwz050418\main\house-info.csv', encoding='utf-8-sig', index=False)
    print("文件已保存csv!!!")