import urllib3
import requests
import pandas as pd
from lxml import etree
from bs4 import BeautifulSoup

# Silence urllib3's InsecureRequestWarning (emitted for unverified HTTPS).
# NOTE(review): no request in this file passes verify=False, so this may be
# unnecessary — confirm before removing.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


# headers = {
#     'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
#     'accept-language': 'zh-CN,zh;q=0.9',
#     'cache-control': 'max-age=0',
#     'cookie': 'global_cookie=xzhufzmfe5m344apv27uvbe9o1yly6wqwlk; __utmz=147393320.1720075849.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); g_sourcepage=esf_fy%5Elb_pc; otherid=8d1d74b7d661d2371182acfc686afb73; __utma=147393320.1817530191.1720075849.1720075849.1720093116.2; __utmc=147393320; __utmt_t0=1; __utmt_t1=1; __utmt_t2=1; city=www; city.sig=OGYSb1kOr8YVFH0wBEXukpoi1DeOqwvdseB7aTrJ-zE; csrfToken=lZfo5bPxAeO8YlXkvqVwgbQ8; __utmb=147393320.6.10.1720093116; unique_cookie=U_5hufnio6py8zd6667nka28rgo1lly7710o9*2',
#     'priority': 'u=0, i',
#     'referer': 'https://lz.esf.fang.com/',
#     'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
#     'sec-ch-ua-mobile': '?0',
#     'sec-ch-ua-platform': '"macOS"',
#     'sec-fetch-dest': 'document',
#     'sec-fetch-mode': 'navigate',
#     'sec-fetch-site': 'same-site',
#     'sec-fetch-user': '?1',
#     'upgrade-insecure-requests': '1',
#     'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
# }

# Request header information.
# Empty request body passed as ``data=`` with every GET (effectively unused).
payload = {}
# Cookie and user-agent copied from a live browser session on fang.com.
# NOTE(review): the cookie values are session-specific and presumably expire —
# confirm they are still valid before running the scraper.
headers2 = {'cookie': 'global_cookie=xzhufzmfe5m344apv27uvbe9o1yly6wqwlk; __utmz=147393320.1720075849.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); g_sourcepage=esf_fy%5Elb_pc; otherid=8d1d74b7d661d2371182acfc686afb73; __utma=147393320.1817530191.1720075849.1720075849.1720093116.2; __utmc=147393320; __utmt_t0=1; __utmt_t1=1; __utmt_t2=1; city=www; city.sig=OGYSb1kOr8YVFH0wBEXukpoi1DeOqwvdseB7aTrJ-zE; csrfToken=lZfo5bPxAeO8YlXkvqVwgbQ8; __utmb=147393320.6.10.1720093116; unique_cookie=U_5hufnio6py8zd6667nka28rgo1lly7710o9*2',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'}
# Global 1-based counter of listings scraped so far; incremented in get_info().
count=1

def cut(items):
  """Return the first element of *items*, or '' when it is empty.

  Collapses the (possibly empty) list of text nodes returned by a
  BeautifulSoup/xpath query into a plain string.

  Note: the parameter was renamed from ``list`` (which shadowed the
  builtin); every call site in this file passes it positionally.
  """
  return items[0] if items else ''


def get_info(url):
  """Scrape one listing page, visit every house's detail page, and append
  the extracted rows (header-less) to the output CSV.

  Args:
      url: A listing-page URL such as
           ``https://bj.esf.fang.com/house-a01/i31/``.

  Side effects:
      - Increments the module-level ``count`` counter per listing.
      - Appends one CSV row per house to the hard-coded output path.
      - Prints progress and each extracted row.

  Raises:
      requests.RequestException on network failure (handled by the caller).
  """
  global count

  # Fetch the listing page. A timeout keeps a dead connection from hanging
  # the scraper forever (the original had none).
  response = requests.request("GET", url, headers=headers2, data=payload, timeout=30)
  soup = BeautifulSoup(response.text, 'html.parser')

  # Every house card on the listing page is a <dl class="clearfix" dataflag="bg">.
  all_dl = soup.find_all('dl', attrs={'class': 'clearfix', 'dataflag': 'bg'})

  infoo_list = []

  for dl in all_dl:
    print(f' 正在爬取第{count}条信息...')

    # Build the absolute detail-page URL from the <a href> inside the card.
    h4 = dl.find('h4', class_='clearfix')
    anchor = h4.find('a') if h4 is not None else None
    if anchor is None or anchor.get('href') is None:
      # Malformed card (e.g. an ad slot) — skip it instead of crashing.
      continue
    href = 'https://bj.esf.fang.com' + anchor.get('href')

    response1 = requests.request("GET", href, headers=headers2, data=payload, timeout=30)
    html = etree.HTML(response1.text)
    soup1 = BeautifulSoup(response1.text, 'html.parser')

    # Each xpath returns a (possibly empty) list of text nodes from the
    # detail page's right-hand summary panel.
    price = html.xpath('//div[@class="tab-cont-right"]/div[1]/div[1]/div[1]/i/text()')

    house_type = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[1]/div[1]/text()')

    house_area = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[2]/div[1]/text()')

    price_area = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[3]/div[1]/text()')

    house_aspect = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[1]/div[1]/text()')

    # The floor field appears in two different markups (plain text vs. link).
    house_floot = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[2]/div[1]/text()')
    if len(house_floot) == 0:
      house_floot = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[2]/div[1]/a[1]/text()')

    # Decoration level.
    house_level = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[3]/div[1]/a[1]/text()')

    local = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line"]/div[2]/div[2]/a[1]/text()')

    # The "house info" panel holds <span class="lab"> (label) /
    # <span class="rcont"> (value) pairs. Guard against a missing panel.
    house_info_list = soup1.find('div', class_='cont clearfix')
    if house_info_list is not None:
      sub_title = [info.text for info in house_info_list.find_all('span', class_='lab')]
      sub_content = [info.text for info in house_info_list.find_all('span', class_='rcont')]
    else:
      sub_title, sub_content = [], []

    # Collapse each xpath result list into a display string, falling back to
    # "无" (none) when the field is missing. BUGFIX: the original computed
    # ("".join(price) + "万") or "无", which is always truthy because of the
    # appended "万", so a missing price was rendered as "万" instead of "无".
    price = ("".join(price) + "万") if price else "无"
    house_type = "".join(house_type) or "无"
    house_area = "".join(house_area) or "无"
    price_area = "".join(price_area) or "无"
    house_aspect = "".join(house_aspect) or "无"
    house_floot = "".join(house_floot) or "无"
    house_level = "".join(house_level) or "无"
    local = "".join(local) or "无"

    # Defaults for the label/value panel; only keys present in this dict are
    # kept from the page.
    dictt = {'建筑年代':'无', '有无电梯':'无', '产权性质':'无' ,'住宅类别':'无' ,'挂牌时间':'无', '建筑形式':'无', '车库数量': '无','车位数量': '无','建筑结构':'无'}
    # zip() tolerates unequal label/value lists (the original indexed
    # sub_content by sub_title's index and could raise IndexError).
    for title, content in zip(sub_title, sub_content):
      if title in dictt:
        dictt[title] = content

    print(href,price,house_area,house_type,price_area,house_floot,house_aspect,house_level,local,
          dictt['建筑年代'],dictt['有无电梯'],dictt['产权性质'],dictt['住宅类别'], dictt['建筑结构'],dictt['挂牌时间'],dictt['建筑形式'],
          dictt['车库数量'],dictt['车位数量'])

    infoo_list.append([href,price,house_area,house_type,price_area,house_floot,house_aspect,house_level,local,
          dictt['建筑年代'],dictt['有无电梯'],dictt['产权性质'],dictt['住宅类别'], dictt['建筑结构'],dictt['挂牌时间'],dictt['建筑形式'],
          dictt['车库数量'],dictt['车位数量']])
    count += 1

  # Append this page's rows without a header; the __main__ block rewrites the
  # file once at the end with proper column names.
  df = pd.DataFrame(infoo_list,
                      columns=['网址', '价格', '建筑面积', '户型', '单价', '楼层', '朝向', '装修程度', "地区", '建筑年代',
                               '有无电梯', '产权性质', '住宅类别', '建筑结构', '挂牌时间', '建筑形式', '车库数量', '车位数量'])
  df.to_csv(R'D:\PYCHARM文件\zwz050418\main\house-info.csv', mode='a+', index=False, header=False, encoding='utf_8_sig')


if __name__ == '__main__':
    # a01..a04 are district codes; i31..i3100 are listing-page numbers 1-100.
    for j in range(1, 5):
        for i in range(1, 101):
          url = f'https://bj.esf.fang.com/house-a0{j}/i3{i}/'
          # BUGFIX: the original except-handler called get_info(url) again
          # with no try around it, so a second failure crashed the whole run.
          # Retry each page a bounded number of times, then move on.
          for attempt in range(2):
            try:
              get_info(url)
              break
            except Exception as e:
              print(e)
    # (typo fix: was "爬起结束")
    print("爬取结束!!!")

    # The per-page appends were written header-less; read the accumulated CSV
    # back and rewrite it once with the real column names.
    df = pd.read_csv(R'D:\PYCHARM文件\zwz050418\main\house-info.csv', encoding='utf-8-sig')
    df.columns=['网址', '总价', '建筑面积', '户型', '单价', '楼层', '朝向', '装修程度', "地区", '建筑年代','有无电梯',
                '产权性质', '住宅类别', '建筑结构', '挂牌时间', '建筑形式', '车库数量', '车位数量']
    df.to_csv(R'D:\PYCHARM文件\zwz050418\main\house-info.csv', encoding='utf-8-sig', index=False)
    print("文件已保存csv!!!")