import requests
# import the pool of User-Agent headers
import random
import re
from pythonRobots.randomHeaders import user_agent_list
from bs4 import BeautifulSoup
from openpyxl import workbook

# detail-page links
links=[]
# listing titles
titles=[]
# listing prices
prices=[]
# extra info fields (the "des" section of each listing)
dess=[]
# titles already written to the workbook (duplicate-detection record)
pastTitle=[]

def get_data(url):
    """Fetch *url* with a random User-Agent and return the parsed page.

    Returns a BeautifulSoup tree built with the lxml parser.
    Raises requests.RequestException on network failure or timeout.
    """
    headers = {"user-agent": random.choice(user_agent_list)}
    # timeout guards against a hung connection; the original call had none
    # and could block forever
    data = requests.get(url, headers=headers, timeout=10)
    soup = BeautifulSoup(data.text, 'lxml')
    return soup


def prase_data(soup):
    """Extract title/link, price and extra-info fields from one listing page.

    Results are appended to the module-level parallel lists (titles, links,
    prices, dess), which save_data later zips together row by row.  Because
    the lists must stay aligned, a failed regex match appends an empty-string
    placeholder instead of skipping (the original raised IndexError there).
    Finally persists everything via save_data.
    """
    # title + detail link
    for item in soup.find_all(class_='twoline'):
        titles.append(item.get_text().strip())
        html = str(item)
        linkorign = re.findall('href=\"(.*?)\"', html, re.S)
        # guard: a missing href would otherwise raise IndexError
        if linkorign:
            links.append('https://hf.lianjia.com/zufang/' + linkorign[0])
        else:
            links.append('')

    # price: "<em>amount</em> unit"
    for item in soup.find_all(class_='content__list--item-price'):
        html = str(item)
        priceorign = re.findall('<em>(.*?)</em>(.*?)<', html)
        if priceorign:
            prices.append(priceorign[0][0] + priceorign[0][1])
        else:
            prices.append('')

    # extra info: three "/"-separated fields; some listings lack the
    # trailing hidden span, hence the fallback pattern
    for item in soup.find_all(class_='content__list--item--des'):
        html = str(item)
        pattern = '<i>/</i>(.*?)<i>/</i>(.*?)<i>/</i>(.*?)<span\sclass="hide">.*?<i>/</i>'
        desorign = re.findall(pattern, html, re.S)
        if desorign == []:
            pattern = '<i>/</i>(.*?)<i>/</i>(.*?)<i>/</i>(.*?)</p>'
            desorign = re.findall(pattern, html, re.S)
        if desorign:
            dess.append(' '.join(part.strip() for part in desorign[0]))
        else:
            # neither pattern matched: keep the lists aligned
            dess.append('')
    save_data(titles, links, prices, dess)
# for title1,link1,price1,des1 in zip(titles,links,prices,dess):
#     print(title1,link1,price1,des1)
def check_information(title1):
    """Return 1 if *title1* has not been saved yet, 0 if it is a duplicate.

    Checks against the module-level pastTitle list of already-written titles.
    """
    # membership test replaces the original hand-rolled linear-scan loop
    return 0 if title1 in pastTitle else 1

def save_data(titles,links,prices,dess):
    """Write one row per listing to the worksheet, skipping duplicates.

    Rows are the lockstep zip of the four parallel lists.  A row is written
    only when check_information says its title is new; written titles are
    recorded in the module-level pastTitle list.  Saves the workbook at the
    end of every call.
    """
    for row in zip(titles, links, prices, dess):
        title1 = row[0]
        if check_information(title1) != 1:
            continue
        ws.append(list(row))
        # remember this title so the same listing is never written twice
        pastTitle.append(title1)
    wb.save('链家出租房信息表.xlsx')

if __name__=='__main__':
    # create the workbook and the header row
    wb = workbook.Workbook()
    ws = wb.active  # activate the default worksheet
    ws.append(['标题', '链接', '价格', '额外信息'])

    base_url = 'https://hf.lianjia.com/zufang/'
    # scrape the first 10 listing pages
    for i in range(1, 11):
        # BUG FIX: the original did url = url + 'pg' + str(i), so page 2
        # requested .../zufang/pg1pg2, page 3 .../zufang/pg1pg2pg3, etc.
        # Build each page URL from the unchanged base instead.
        page_url = base_url + 'pg' + str(i)
        soup = get_data(page_url)
        prase_data(soup)

