import requests
from bs4 import BeautifulSoup
import csv
import time
import random
from lxml import html
etree = html.etree

def get_html(url):
    """Fetch the raw HTML of ``url`` through a proxy and return it as text.

    Returns None when the request fails so the caller can skip the page
    instead of aborting the whole crawl.
    """
    # Free proxy IP; if it gets banned, pick a new ip:port from
    # http://www.xicidaili.com/
    proxy_addr = {'http': '220.168.52.245:55255'}
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    try:
        # timeout keeps a dead proxy from hanging the crawl forever;
        # a distinct local name avoids shadowing the imported lxml `html`.
        response = requests.get(url, headers=headers, proxies=proxy_addr,
                                timeout=10)
        return response.text
    except requests.RequestException:
        # Only network/HTTP failures are expected here; anything else
        # (e.g. KeyboardInterrupt, which BaseException used to swallow)
        # should propagate as a real error.
        print('request error')
        return None


def RoomInfo(html):
    """Parse one listing page and return a list of rental-info dicts.

    Each page carries up to 30 listings. For the i-th listing the CSS
    selectors / XPath expressions below (derived from the page source)
    extract district, sub-district, title, layout, area and monthly rent.

    Returns a list of dicts with keys
    '地段', '户型', '面积（平方）', '房租（元/月）'.
    """
    soup = BeautifulSoup(html, 'lxml')
    tree = etree.HTML(html)  # parse once; reused by every XPath query below
    data = []  # one dict per listing on this page
    for i in range(1, 31):  # 30 listings per page
        idx = str(i)
        c1 = '#content > div.content__article > div.content__list > div:nth-child(' + idx + ') > div > p.content__list--item--des > a:nth-child(1)'
        c2 = '#content > div.content__article > div.content__list > div:nth-child(' + idx + ') > div > p.content__list--item--des > a:nth-child(2)'
        c3 = '#content > div.content__article > div.content__list > div:nth-child(' + idx + ') > div > p.content__list--item--title.twoline > a'
        c4 = '//*[@id="content"]/div[1]/div[1]/div[' + idx + ']/div/p[2]/text()[4]'
        c5 = '//*[@id="content"]/div[1]/div[1]/div[' + idx + ']/div/p[2]/text()[6]'
        c6 = '#content > div.content__article > div.content__list > div:nth-child(' + idx + ') > div > span'
        # Each selector targets exactly one node for listing i, so
        # select_one replaces the accidental print-and-keep-last loops.
        district = soup.select_one(c1)
        subdistrict = soup.select_one(c2)
        title_tag = soup.select_one(c3)
        squares = tree.xpath(c4)   # area text nodes
        styles = tree.xpath(c5)    # layout text nodes
        price_tag = soup.select_one(c6)
        # Skip incomplete entries (short pages, ads, layout changes).
        if not (district and subdistrict and title_tag
                and squares and styles and price_tag):
            continue
        infos = {}
        # Rental title: three fields joined with '-'.
        first_word = title_tag.get_text().strip().split()[0]
        infos['地段'] = '%s-%s-%s' % (district.get_text().strip(),
                                      subdistrict.get_text().strip(),
                                      first_word)
        infos['户型'] = styles[0].strip()           # layout (e.g. 2室1厅)
        infos['面积（平方）'] = squares[0].strip()   # area in square meters
        infos['房租（元/月）'] = price_tag.get_text().strip()  # monthly rent
        data.append(infos)
    return data


def write2csv(url, data, out_dir='E:\\zufang'):
    """Append scraped listings to ``<out_dir>/<area>.csv``.

    url: page URL; the area pinyin (third-from-last path segment) names
         the output file.
    data: list of dicts as produced by RoomInfo.
    out_dir: destination directory; default preserves the original
             hard-coded 'E:\\zufang' path.
    """
    import os  # local import keeps the file's top-level import block untouched
    name = url.split('/')[-3]  # area pinyin segment of the URL
    print('正在把数据写入{}文件'.format(name))
    os.makedirs(out_dir, exist_ok=True)  # robustness: create dir if missing
    path = os.path.join(out_dir, '{}.csv'.format(name))
    # utf-8-sig adds a BOM so Excel renders the Chinese columns correctly.
    with open(path, 'a', newline='', encoding='utf-8-sig') as f:
        fieldnames = ['地段', '户型', '面积（平方）', '房租（元/月）']  # column order
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        # writer.writeheader()  # intentionally omitted: file is appended per page
        writer.writerows(data)
        print("写入成功")

# The region list can be changed as needed. E.g. to crawl Shanghai, change
# 'cq' to 'sh' in the URL below and replace the list with Shanghai district
# pinyin names (see the Lianjia site). Further Chongqing districts:
# 'dadukou', 'jiangjing', 'dianjiangxian', 'wuxixian',
# 'pengshuimiaozutujiazuzizhixian', 'youyangtujiazumiaozuzizhixian',
# 'zhongxian', 'kaizhouqu', 'fengjiexian', 'rongchangqu', 'wushanxian1',
# 'xiushantujiazumiaozuzizhixian', 'chengkouxian', 'all'
for area in ['jiangbei', 'yubei', 'nanan', 'banan', 'shapingba', 'jiulongpo', 'yuzhong']:
    for page in range(1, 91):  # up to 90 pages per district
        url = 'https://cq.lianjia.com/zufang/{}/pg{}/'.format(area, page)
        html = get_html(url)
        if not html:
            # Request failed: skip this page instead of crashing RoomInfo.
            continue
        data = RoomInfo(html)
        write2csv(url, data)
        # Random 0-5 s pause between pages to look less like a bot.
        time.sleep(random.randint(0, 5))