"""
- 日期: 2024/07/05
- 人员: 罗建明
- 内容:
    内容暂定，后续会持续更新
"""
from urllib.parse import urljoin

import pandas as pd
import requests
from bs4 import BeautifulSoup

def extract_numbers(s):
    """Return all digit characters of *s* concatenated into one string.

    Non-digit characters are discarded; returns "" when *s* contains
    no digits (or is empty).
    """
    # join + generator is O(n); the original char-by-char += was quadratic.
    return "".join(ch for ch in s if ch.isdigit())
# Browser-like User-Agent so the site serves regular HTML to the crawler.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'}
# Listing index pages to crawl (page 1 and page 2 of the resale list).
urls = ["https://lz.esf.fang.com", "https://lz.esf.fang.com/house/i32"]
# Detail-page URLs discovered while crawling the index pages.
url_all = []
# Per-listing accumulator columns; index i across all of these lists
# refers to the same property. Created as 18 distinct fresh lists.
(Building_Total, Building_Height, Building_Interior_Layout,
 Building_Area, Building_Floor, Building_Orientation,
 Building_Price_Right, Building_Resource, Building_Price,
 Building_Decoration, Building_Property_Type, Building_Type,
 Building_Residential_Category, Building_Architectural_Structure,
 Building_Category, Building_Elevator, Building_Age,
 Building_Region) = ([] for _ in range(18))
# --- Pass 1: crawl the listing index pages and collect summary columns ---
for url in urls:
    content = requests.get(url, headers=headers).text
    soup = BeautifulSoup(content, "html.parser")
    all_span = soup.find_all('span', attrs={'class': 'red'})
    all_h4 = soup.find_all('h4')
    all_dl = soup.find_all('dl')
    all_dd = soup.find_all('dd')
    all_dd_price = soup.find_all('dd', attrs={'class': 'price_right'})
    # Total price: bold text inside each red span.
    for span in all_span:
        Building_Price.append(span.b.string)
    # Unit price: second <span> in the price block. Guard the index so a
    # listing missing it records a placeholder instead of crashing.
    for price in all_dd_price:
        spans = price.find_all('span')
        if len(spans) >= 2:
            Building_Price_Right.append(spans[1].string)
        else:
            Building_Price_Right.append('数据缺失')
    for h4 in all_h4:
        a = h4.find('a')
        # BUG FIX: the original reused a stale `a` from a previous h4 when
        # the current one had no anchor (NameError on first miss, duplicated
        # rows afterwards). Skip such h4 elements instead.
        if a is None:
            continue
        # BUG FIX: plain `url + href` built broken detail URLs when the index
        # URL already carries a path (page 2 is ".../house/i32"); urljoin
        # resolves relative hrefs against the page correctly.
        url_all.append(urljoin(url, a['href']))
        for span in a.find_all('span', attrs={'class': 'tit_shop'}):
            Building_Total.append(span.contents)  # listing headline/overview
    for dd in all_dd:
        p = dd.find_all('p')
        # BUG FIX: original fell through to a stale `dd_p0` when this dd had
        # no <p>, misaligning the layout/area/floor/orientation columns.
        if not p:
            continue
        dd_p0 = p[0]
        texts = [s for s in dd_p0.contents if isinstance(s, str)]
        # Expect at least layout / area / floor / orientation text nodes;
        # guard so a short row cannot raise IndexError.
        if len(texts) >= 4:
            Building_Interior_Layout.append(texts[0])
            Building_Area.append(texts[1])
            Building_Floor.append(extract_numbers(texts[2]))
            Building_Orientation.append(texts[3])
        for link in dd_p0.find_all('a', attrs={'class': 'link_rk'}):
            Building_Height.append(link.string)  # floor description
    for dl in all_dl:
        dd = dl.find_all('dd')
        if dd:
            ps = dd[0].find_all('p')
            # Agent/source name is the anchor in the second <p>; the original
            # indexed ps[1] after only checking ps was non-empty (IndexError
            # when exactly one <p> exists) and assumed ps[1].a was present.
            if len(ps) >= 2 and ps[1].a is not None:
                Building_Resource.append(ps[1].a.string)
            else:
                Building_Resource.append('数据缺失')
def _pro_label(items, idx):
    """Label text ('有无电梯', '产权性质', ...) of items[idx], or None if absent."""
    try:
        return items[idx].contents[0].string
    except (IndexError, AttributeError):
        return None

def _pro_value(items, idx):
    """Value text of items[idx] (second child), or None if absent/empty."""
    try:
        return items[idx].contents[1].string
    except (IndexError, AttributeError):
        return None

# --- Pass 2: visit up to 60 detail pages and collect per-property fields ---
for detail_url in url_all[0:60]:
    content = requests.get(detail_url, headers=headers).text
    soup = BeautifulSoup(content, "html.parser")
    print(detail_url)
    all_div = soup.find_all('div', attrs={'class': 'trl-item1 w132'})
    all_reg = soup.find_all('div', attrs={'class': 'trl-item2 clearfix'})
    div_div_pro = soup.find_all('div', attrs={'class': 'text-item clearfix'})
    # District/region: anchor inside the 'rcont' container.
    for reg in all_reg:
        rcont = reg.find('div', attrs={'class': 'rcont'})
        # BUG FIX: original called len() on the find() result, which raises
        # TypeError when the container is absent (find() returns None).
        if rcont is not None and rcont.a is not None:
            Building_Region.append(rcont.a.string)
        else:
            Building_Region.append('数据缺失')
    # Some pages lead with a construction-year item, shifting every later
    # field by one; detect it by the presence of digits in the first value.
    x = 0
    first_value = _pro_value(div_div_pro, 0)
    if first_value and extract_numbers(first_value):
        Building_Age.append(first_value)
        x = 1
    else:
        # BUG FIX: the original appended nothing to Building_Age when
        # div_div_pro was empty, silently misaligning every column after it.
        Building_Age.append('数据缺失')
    if _pro_label(div_div_pro, x) == '有无电梯':
        Building_Elevator.append(_pro_value(div_div_pro, x) or '数据缺失')
    else:
        Building_Elevator.append('数据缺失')
    if _pro_label(div_div_pro, x + 1) == '产权性质':
        Building_Property_Type.append(_pro_value(div_div_pro, x + 1) or '数据缺失')
    else:
        Building_Property_Type.append('数据缺失')
    Building_Residential_Category.append(_pro_value(div_div_pro, x + 2) or '数据缺失')
    Building_Architectural_Structure.append(_pro_value(div_div_pro, x + 3) or '数据缺失')
    Building_Category.append(_pro_value(div_div_pro, x + 4) or '数据缺失')
    # Decoration status: anchor inside the nested div; the original crashed
    # with AttributeError when the nested div was missing.
    for item in all_div:
        inner = item.div
        if inner is not None and inner.a is not None:
            Building_Decoration.append(inner.a.string)
        else:
            Building_Decoration.append('数据缺失')
# --- Assemble the scraped columns into one table and export it to CSV ---
# Cap every column at 60 rows, matching the 60 detail pages visited above.
_raw_columns = {
    '房源': Building_Resource[0:60],
    '区域': Building_Region[0:60],
    '建筑年代': Building_Age[0:60],
    '电梯': Building_Elevator[0:60],
    '产权性质': Building_Property_Type[0:60],
    '住宅类别': Building_Residential_Category[0:60],
    '建筑结构': Building_Architectural_Structure[0:60],
    '建筑类别': Building_Category[0:60],
    '楼层': Building_Height[0:60],
    '布局': Building_Interior_Layout[0:60],
    '面积': Building_Area[0:60],
    '层数': Building_Floor[0:60],
    '朝向': Building_Orientation[0:60],
    '装修': Building_Decoration[0:60],
    '单价(元/m²)': Building_Price_Right[0:60],
    '总价（万元）': Building_Price[0:60]
}
# BUG FIX: pd.DataFrame raises ValueError when columns have unequal lengths,
# which happens whenever any page was missing a field. Pad short columns to
# the longest observed length; equal-length input passes through unchanged.
_n_rows = max((len(col) for col in _raw_columns.values()), default=0)
data = {name: col + ['数据缺失'] * (_n_rows - len(col))
        for name, col in _raw_columns.items()}
df = pd.DataFrame(data)
# utf_8_sig writes a BOM so Excel detects the encoding of Chinese headers.
df.to_csv("Buildings_finall.csv", encoding='utf_8_sig', index=False)
print(df)