import requests,re,openpyxl,os
# Shared request headers: a desktop Chrome User-Agent string —
# presumably so 58.com serves the normal page rather than filtering
# the default `requests` UA (TODO confirm against the site's behavior).
headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
	}

#保存数据文件
#保存数据文件
def save_file(data):
    """Write the scraped listing rows into an .xlsx workbook.

    Args:
        data: sequence of rows, each a (title, image link, layout,
              price) tuple as produced by str_content().

    Side effects:
        Creates/overwrites ./58同城房屋出租信息.xlsx in the working
        directory.
    """
    wb = openpyxl.Workbook()
    ws = wb.active
    # Header row: title / image link / layout / price (in Chinese).
    ws.append(['标题','图片连接','户型','价格'])
    # enumerate instead of range(len(...)): same order, clearer intent.
    for row_no, row in enumerate(data, start=1):
        print('正在保存第%d条数据...' % row_no)
        ws.append(row)
    wb.save('./58同城房屋出租信息.xlsx')
#保存图片
def save_img(data):
    """Download every listing's image into ./58同城图片/<title>.png.

    Args:
        data: sequence of rows where row[0] is the listing title and
              row[1] is the image URL.

    A failure on one image is printed and skipped so a single bad URL
    cannot abort the whole batch.
    """
    # Characters that are illegal in Windows file names; one
    # str.translate pass replaces the per-character replace() loop.
    illegal = str.maketrans('', '', '|:*?/\\<>"')
    folder = './58同城图片'
    if not os.path.exists(folder):
        os.makedirs(folder)
    for item, row in enumerate(data):
        try:
            # timeout keeps one stalled download from hanging the run
            res = requests.get(row[1], headers=headers, timeout=10)
            # Strip file-name characters the OS would reject.
            title = row[0].translate(illegal)
            with open(folder + '/' + title + '.png', 'wb') as f:
                print('正在保存第%d条图片...' % (item+1))
                f.write(res.content)
        except Exception as err:
            # Best-effort download: report and continue with the rest.
            print('第%d条图片无法保存原因是:%s' % (item+1, err))
#提取html中的需要信息
def str_content(res):
    """Extract listing fields from a 58.com rental-listings HTML page.

    Args:
        res: page HTML as a str.

    Returns:
        list of 4-tuples: (title, image URL, 'rooms:area',
        '<price>元/月'), one per listing found on the page.
    """
    title_pat = '<h2>.*?<a.*?tongji_label="listclick".*?>(.*?)                    </a>'
    img_pat = 'lazy_src="(.*?)"'
    room_pat = '<p class="room">(.*?)                    &nbsp;&nbsp;&nbsp;&nbsp;(.*?)</p>'
    price_pat = '<div class="money">.*?<b>(.*?)</b>'
    titles = re.compile(title_pat, re.S).findall(res)
    imgs = re.compile(img_pat, re.S).findall(res)
    rooms = re.compile(room_pat, re.S).findall(res)
    prices = re.compile(price_pat, re.S).findall(res)
    # zip keeps the four field lists aligned per listing and truncates
    # to the shortest, instead of raising IndexError as the old
    # index-based loop did when a field failed to match for some row.
    return [
        (title.strip(), img, room[0] + ':' + room[1], price + '元/月')
        for title, img, room, price in zip(titles, imgs, rooms, prices)
    ]



#打开爬取信息的地址
def open_url(page):
    """Crawl listing pages 1..page-1, then save the data and images.

    Args:
        page: exclusive upper bound of the page range (the __main__
              block passes the user's desired page count + 1).

    Side effects:
        Network requests to bj.58.com, an .xlsx file via save_file()
        and image downloads via save_img().
    """
    total_list = []
    # Distinct loop name: the original reused `page` as the loop
    # variable, shadowing the parameter that bounds the range.
    for page_no in range(1, page):
        print('正在爬取第%d页信息...' % page_no)
        url = 'http://bj.58.com/dashanzi/chuzu/pn%d/?ClickID=1' % page_no
        # timeout keeps one unresponsive page from hanging the crawl
        res = requests.get(url, headers=headers, timeout=10)
        html = res.content.decode('utf-8')
        total_list.extend(str_content(html))
    print('='*10+'正在保存文件...'+'='*10)
    save_file(total_list)
    print('='*10+'文件保存完毕'+'='*10)
    print('='*10+'正在保存图片...'+'='*10)
    save_img(total_list)
    print('='*10+'图片保存完毕'+'='*10)


if __name__ == '__main__':
    # open_url iterates range(1, page), so pass the requested page
    # count plus one to cover all of pages 1..N inclusive.
    pages = int(input('请输出需要爬取的页数:'))
    open_url(pages + 1)
