import requests
from bs4 import BeautifulSoup
from pprint import pprint
import time
import random
import xlsxwriter
import os
import shutil
# pip install fake_useragent
from fake_useragent import UserAgent

class SpiderLianJiaZuFangData():
    """Scraper for rental listings on LianJia (xm.lianjia.com).

    Crawls one or more listing pages, parses every non-advertisement
    entry, downloads the cover images, and exports everything into a
    styled Excel workbook.
    """

    def __init__(self):
        # Base URL of the Xiamen rental listing section.
        self.url = 'https://xm.lianjia.com/zufang/'
        # Browser-like headers; a randomized Chrome User-Agent lowers
        # the chance of being blocked as a bot.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Host': 'xm.lianjia.com',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': UserAgent().chrome
        }
        # NOTE(review): currently unused — the export path is hard-coded
        # in __export_excel__; kept for backward compatibility.
        self.exportPath = './document/excel/'

    def __main__(self, page_num):
        """Entry point: crawl ``page_num`` listing pages and export to Excel.

        Args:
            page_num (int): number of pages to crawl; values <= 1 fetch
                only the first page.
        """
        print('链家网租房数据爬虫程序开始执行，请稍后。。。')
        house_list = []
        if page_num <= 1:
            house_list.extend(self.__get_one__(self.url))
        else:
            # BUG FIX: LianJia paginates as pg1, pg2, ...; the original
            # started at pg0, which just redirects to the first page.
            for page in range(1, page_num + 1):
                url = self.url + 'pg{}/#contentList'.format(page)
                house_list.extend(self.__get_one__(url))
                # Random pause between requests to avoid throttling.
                time.sleep(random.randint(3, 5))
        self.__export_excel__(house_list)

    def __get_one__(self, url):
        """Fetch one listing page and return its houses as dicts.

        Args:
            url (str): full URL of the listing page to fetch.

        Returns:
            list[dict]: one dict per listing, with keys title, cover,
            address, space, orientation, layout, floor, labels.
        """
        # BUG FIX: the original called requests.get(self.url, self.headers),
        # which ignored the ``url`` argument (so pagination always fetched
        # page 1) and sent the headers as query parameters, not HTTP headers.
        response = requests.get(url, headers=self.headers, timeout=30)
        bs = BeautifulSoup(response.content.decode(errors='ignore'), 'lxml')
        house_element_list = bs.select('#content > .content__article > .content__list > .content__list--item')
        house_list = []
        for house_element in house_element_list:
            # Entries carrying this aside <p> are advertisements — skip them.
            if house_element.select_one('div > .content__list--item--aside > p') is not None:
                continue
            title = house_element.select('div > div > p')[0].select_one('p > a').text
            title = title.replace('\n', '').strip()
            cover = house_element.select_one('div > .content__list--item--aside > img')['data-src']
            des = house_element.select_one('div > div > .content__list--item--des').text
            des_arry = des.replace(' ', '').replace('\n', '').split('/')
            if len(des_arry) < 5:
                # Malformed description — skip rather than crash on indexing.
                continue
            # Optional feature tags are <i> elements on the third <p>.
            labels = [label_element.text for label_element in
                      house_element.select('div > div > p')[2].select('p > i')]
            house_list.append({
                'title': title,
                'cover': cover,
                'address': des_arry[0],
                'space': des_arry[1],
                'orientation': des_arry[2],
                'layout': des_arry[3],
                'floor': des_arry[4],
                'labels': labels,
            })
        return house_list

    def __export_excel__(self, data_list):
        """Download cover images, write the Excel workbook, clean up.

        Args:
            data_list (list[dict]): house dicts produced by __get_one__.
        """
        # Covers are downloaded to a temp dir, embedded into the workbook,
        # then the whole dir is removed at the end.
        img_download_path = './images/temp/'
        # Directory creation is loop-invariant — hoisted out of the loop.
        self.__create_dir__(img_download_path, './images/')
        for data in data_list:
            image_res = requests.get(data['cover'], timeout=30)
            # .ok already implies status < 400; checking 200 alone suffices.
            if image_res.status_code == 200:
                # Strip LianJia's resize suffix (!...) and any query string.
                # BUG FIX: the original wrote the file under the name with
                # '&...' stripped but recorded the unstripped name, so
                # insert_image could point at a file that does not exist.
                image_name = data['cover'].split('!')[0].split('/')[-1]
                image_name = image_name.split('?')[0].split('&')[0]
                with open(img_download_path + image_name, 'wb') as file:
                    file.write(image_res.content)
                data['image_name'] = image_name
                # Be polite between image downloads.
                time.sleep(2)
        excel_download_path = './document/lianjia/'
        self.__create_dir__(excel_download_path, './document/')
        wb = xlsxwriter.Workbook(excel_download_path + '链家网-租房信息.xlsx')
        ws = wb.add_worksheet('租房信息')
        head_styles = wb.add_format({
            'font_size': 14,
            'bold': True,
            'color': 'white',
            'align': 'center',
            'valign': 'vcenter',
            'text_wrap': False,
            'border': 1,
            'bg_color': '#33ccff',
        })
        # Header row.
        ws.set_row(0, height=40)
        ws.write_row(0, 0, ['标题', '封面', '地址', '面积', '朝向', '布局', '楼层', '标签'], head_styles)
        content_styles = wb.add_format({
            'font_size': 10,
            'bold': False,
            'color': 'black',
            'align': 'center',
            'valign': 'vcenter',
            # Wrap long cell text automatically.
            'text_wrap': True,
            'border': 1,
        })
        row = 0
        for data in data_list:
            # Join tags with the Chinese enumeration comma; '' when empty.
            lable_info = '、'.join(data['labels'])
            row += 1
            ws.set_row(row, height=100)
            ws.write_row(row, 0, [data['title'], None, data['address'], data['space'], data['orientation'], data['layout'], data['floor'], lable_info], content_styles)
            # BUG FIX: embed the cover only if its download succeeded;
            # the original raised KeyError on a failed download.
            image_name = data.get('image_name')
            if image_name:
                ws.insert_image(row, 1, img_download_path + image_name, options={'x_scale': 0.72, 'y_scale': 0.73})
        # Column widths must be set after inserting data to take effect.
        # BUG FIX: set_column takes (first_col, last_col); the original
        # passed the row count as last_col, widening unrelated columns.
        ws.set_column(0, 0, width=30)
        ws.set_column(1, 1, width=25)
        ws.set_column(2, 2, width=30)
        ws.set_column(3, 3, width=10)
        ws.set_column(4, 4, width=8)
        ws.set_column(5, 5, width=12)
        ws.set_column(6, 6, width=14)
        ws.set_column(7, 7, width=30)
        wb.close()
        # Remove the temporary image directory and its contents.
        shutil.rmtree(img_download_path, ignore_errors=True)

    def __create_dir__(self, dir, first_dir):
        """Ensure both ``first_dir`` and ``dir`` exist, creating if needed.

        Args:
            dir (str): target directory (nested under first_dir at all
                current call sites).
            first_dir (str): parent directory, created first.
        """
        # makedirs(..., exist_ok=True) is idempotent and replaces the
        # original racy exists()/mkdir() two-step.
        os.makedirs(first_dir, exist_ok=True)
        os.makedirs(dir, exist_ok=True)

if __name__ == '__main__':
    # Crawl a single page of listings and export the result to Excel.
    SpiderLianJiaZuFangData().__main__(1)
