# coding=utf-8
import requests
import xlwt

from bs4 import BeautifulSoup
from lxml import etree


def test_data():
    """Crawl project codes (projcode) for Shanghai second-hand listings
    from fang.com's map-search AJAX API and save them to an Excel file.

    Side effects: performs up to 149 HTTP requests and writes
    'fangtianxia2.xls' in the current working directory.
    """
    loupan_ids = []
    # Pages 1..149; 'pagingNum' is the only query parameter that varies.
    for page in range(1, 150):
        url = "https://map.fang.com/ajaxSearch.html?city=sh&type=esf&xfPrice=&esfPrice=&zfPrice=&room=&area=&purpose=0&keyword=&zoom=4&newcodeType=&newcode=&pagingType=&pagingNum=%d&x1=-91.76571662190372&x2=91.76571662190372&y1=-3.480091278123224&y2=3.480091278123224&subwayline=&subwaystation=&issamllcity=0" % (
            page)
        response = requests.get(url, timeout=100)
        # Navigate the JSON payload down to the per-page hit list.
        hits = response.json()['data']['esflist']['fangyuanList']['hit']
        # NOTE: the original reused the outer loop variable `i` here,
        # shadowing the page counter — renamed to avoid the trap.
        for hit in hits:
            loupan_ids.append(hit['projcode'])
    print(len(loupan_ids))
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('ID')
    # enumerate replaces the hand-rolled row counter `m`.
    for row, code in enumerate(loupan_ids):
        sheet.write(row, 0, code)
    workbook.save('fangtianxia2.xls')

def get_url(url):
    """Fetch a web page and return its decoded source text.

    :param url: page URL to request
    :return: the page source as a string, or an empty string if the
        request fails.

    Bug fixed: the original returned the caught exception object on
    failure, which crashes downstream parsers (e.g. BeautifulSoup in
    getinfo); its own comment promised an empty string instead.
    """
    # NOTE(review): 'chorme' typo preserved — it is an arbitrary
    # User-Agent value the server sees; changing it could alter responses.
    hd = {'user-agent': 'chorme/10'}
    try:
        r = requests.get(url, timeout=30, headers=hd)
        r.raise_for_status()  # raise on non-2xx status codes
        # Use the encoding detected from the response body instead of
        # the header default (often wrongly assumed to be ISO-8859-1).
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Best-effort fetch: hand parsers an empty page, never an exception.
        return ""

def getinfo():
    """Scrape one listing-detail page and print extracted fragments.

    Fetches the page once via get_url, prints the text of one field
    located by an absolute XPath, then dumps every <p> tag found by
    BeautifulSoup. Output goes to stdout only.
    """
    url = "https://sh.esf.fang.com/loupan/1210012000/housedetail.htm"
    # Fetch once and reuse for both parsers — the original issued two
    # HTTP requests for the same page (one raw, without headers or
    # timeout, and one through get_url). Also removed five unused
    # sample-string locals (loupanname, district, address, ...).
    html = get_url(url)
    etree_html = etree.HTML(html)
    # Absolute XPath to one detail field; brittle — breaks if the page
    # layout changes. TODO confirm which field li[16] actually holds.
    aa = etree_html.xpath("/html/body/div[3]/div[4]/div[1]/div[1]/div/ul/li[16]/p/a/text()")
    print(aa)
    loupan_soup = BeautifulSoup(html, 'lxml')
    infolist = loupan_soup.find_all('p')
    print(infolist)


if __name__ == '__main__':
    getinfo()
