import csv
import urllib3
import requests
import pandas as pd
from lxml import etree

# All requests below use verify=False (see get_info); silence the
# InsecureRequestWarning urllib3 would otherwise emit on every call.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def get_info(url):
    """Scrape one Fang.com listing detail page and append it to the CSV.

    Fetches *url*, extracts price / layout / building attributes via XPath,
    prints the resulting record and hands it to to_csv().

    Parameters
    ----------
    url : str
        Absolute URL of a single second-hand-house detail page.
    """

    # NOTE(review): hard-coded session cookie — expires eventually; refresh
    # it from a logged-in browser session when requests start failing.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
        "Cookie": "global_cookie=3sxhzc5oindnuxjqz0uj2cdiopyly58xin1; sfut=FF04A7B17F57FA05B47C03DA7B4425EC8244CF29AF40F029908498D0361670E8F4393E9FD1FA0AF31EF410D8EA5E8F1C8E34926D8794047B2BE0673C01C44B316C81BF0DA8F2EFF2F729CB05626F715F3D8FDE2674FA9F3555C7E68AB75D5239; city.sig=OGYSb1kOr8YVFH0wBEXukpoi1DeOqwvdseB7aTrJ-zE; __utmz=147393320.1720069309.10.5.utmcsr=lz.esf.fang.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utma=147393320.1708965444.1719975381.1720080721.1720097525.12; otherid=7b595a6d98b490f5f503c8c898b5cc80; __utmc=147393320; city=www; csrfToken=upPEX8LsZPzGFCHhq_Vc4hfB; resourceDetail=1; g_sourcepage=esf_fy%5Elb_pc; __utmb=147393320.75.10.1720097525; unique_cookie=U_n3w41yz88apheu7ohofrp5rjb1gly7acpe5*7; new_loginid=129783847; login_username=fang44946405260"}

    r = requests.get(url, headers=headers, verify=False, timeout=60)
    html = etree.HTML(r.text)

    def _text(xp):
        # Join every text node matched by the XPath into one string
        # ('' when the element is absent on this page).
        return "".join(html.xpath(xp))

    total_price = _text('//div[@class="tab-cont-right"]/div[1]/div[1]/div[1]/i/text()')
    style = _text('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[1]/div[1]/text()')
    area = _text('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[2]/div[1]/text()')
    unit_price = _text('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[3]/div[1]/text()')
    direction = _text('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[1]/div[1]/text()')

    # The floor field appears in two markups: plain text, or wrapped in a link.
    floor = _text('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[2]/div[1]/text()')
    if not floor:
        floor = _text('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[2]/div[1]/a[1]/text()')

    # Decoration status is always link-wrapped.
    decoration = _text('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[3]/div[1]/a[1]/text()')
    local = _text('//div[@class="tab-cont-right"]/div[@class="tr-line"]/div[2]/div[2]/a[1]/text()')

    # School: binary flag — 1 when the listing page shows a school column.
    school = 1 if html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line"]/div[3]') else 0

    # Construction year
    Construction_Year = _text('//div[@class="cont clearfix"]/div[1]/span[2]/text()')
    # Elevator (yes/no)
    Elevator = _text('//div[@class="cont clearfix"]/div[2]/span[2]/text()')
    # Property-rights type
    Property_Rights = _text('//div[@class="cont clearfix"]/div[3]/span[2]/a[1]/text()')
    # Residence category
    Residence_Type = _text('//div[@class="cont clearfix"]/div[4]/span[2]/a[1]/text()')
    # Building structure
    Building_Structure = _text('//div[@class="cont clearfix"]/div[5]/span[2]/a[1]/text()')
    # Building category
    Building_Category = _text('//div[@class="cont clearfix"]/div[6]/span[2]/a[1]/text()')
    # Listing date
    Time = _text('//div[@class="cont clearfix"]/div[7]/span[2]/text()')

    data = {'总价': total_price,
            '户型': style,
            '建筑面积': area,
            '单价': unit_price,
            '朝向': direction,
            '楼层': floor,
            '装修': decoration,
            '区域': local,
            '学校': school,
            '建筑年代': Construction_Year,
            '有无电梯': Elevator,
            '产权性质': Property_Rights,
            '住宅类别': Residence_Type,
            '建筑结构': Building_Structure,
            '建筑类别': Building_Category,
            '挂牌时间': Time}

    print(data)
    to_csv(data)

def to_csv(data):
    """Append one listing record to house-test1.csv (UTF-8, no header row)."""

    fieldnames = ['户型', '建筑面积', '朝向', '楼层', '装修',
                  '建筑年代', '有无电梯', '产权性质', '住宅类别',
                  '建筑结构', '建筑类别', '挂牌时间', '区域',
                  '学校', '总价', '单价']
    with open('house-test1.csv', 'a+', encoding='utf-8', newline='') as f:
        # DictWriter emits the values in fieldnames order — identical layout
        # to writing the 16 values positionally.
        csv.DictWriter(f, fieldnames=fieldnames).writerow(data)

if __name__ == '__main__':

    # Load target URLs (one per line) and de-duplicate them.
    # Fill in your own path here.
    with open(R"D:\PYCHARM文件\zwz050418\main\urls2.txt") as f:
        house = list({line.rstrip() for line in f})

    for i, url in enumerate(house, start=1):
        print(f' 正在爬取第{i}条信息...目标链接为:{url}')
        get_info(url)

    print("-" * 50)
    print('爬取结束!!')

    # The scrape phase wrote the CSV without a header row, so read it with
    # header=None and supply the column names explicitly.  (Relying on the
    # default header=0 would silently consume the first data record as a
    # header before renaming the columns.)
    columns = ['户型', '建筑面积', '朝向', '楼层', '装修', '建筑年代', '有无电梯',
               '产权性质', '住宅类别', '建筑结构', '建筑类别', '挂牌时间', '区域',
               '学校', '总价', '单价']
    df = pd.read_csv(r'D:\PYCHARM文件\zwz050418\main\house-test1.csv',
                     encoding='utf-8', header=None, names=columns)
    df.to_csv('house-test1.csv', encoding="utf-8", index=False)

    print("-" * 50)
    print('全部数据已经保存到csv文件中!!')