import requests, time, re
from lxml import etree
from concurrent.futures import ThreadPoolExecutor
import pandas as pd

class LjSpider:
    """Scraper for Lianjia (Qingdao) second-hand housing listings.

    Workflow: get_block_basic() collects district links, get_block_page()
    expands each district into per-page URLs, then req_page/req_href/
    each_house fetch and parse listing details.
    """

    def __init__(self):
        # BUG FIX: the HTTP header name must be 'User-Agent' (hyphenated).
        # The original key 'UserAgent' is not a valid header name, so every
        # request went out with the default python-requests UA instead.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.16 Safari/537.36'}
        # Detail-page URLs whose parsing failed (see each_house). The
        # original appended to an undefined global name -> NameError.
        self.url_fail = []

    # Collect the name and relative link of every district block.
    def get_block_basic(self):
        response = requests.get('https://qd.lianjia.com/ershoufang/',
                                headers=self.headers, timeout=10)
        xml = etree.HTML(response.text)
        block_name = xml.xpath('//div[@data-role="ershoufang"]/div/a/text()')
        block_url = xml.xpath('//div[@data-role="ershoufang"]/div/a/@href')
        temp_ls = [{'地区': n, '链接': u} for n, u in zip(block_name, block_url)]
        self.get_block_page(temp_ls)
        return temp_ls

    # Visit each district's first page and expand its link into the full
    # list of paginated result-page URLs (mutates temp_ls in place).
    def get_block_page(self, temp_ls):
        for dic in temp_ls:
            url_ls = []  # per-page URLs for this district
            init_url = 'https://qd.lianjia.com' + dic['链接'] + 'co32/'
            response = requests.get(init_url, headers=self.headers, timeout=10)
            xml = etree.HTML(response.text)
            total_num = xml.xpath('//h2[@class="total fl"]/span/text()')[0]
            # 30 listings per page. BUG FIX: use ceiling division — the
            # original floor division dropped the partial last page and
            # produced 0 pages for districts with fewer than 30 listings.
            # Lianjia caps pagination at 100 pages.
            total_page = min(-(-int(total_num) // 30), 100)
            dic['有效页数'] = total_page
            for i in range(total_page):
                if i > 0:
                    url_ls.append(init_url.replace('co32', f'pg{i+1}co32'))
                else:
                    url_ls.append(init_url)
            dic['链接'] = url_ls

    # Fetch one results page, logging which page number is being crawled.
    def req_page(self, url):
        page = re.findall(r'pg(\d*?)co32', url)
        if not page:
            print('在爬第1页')
        else:
            print(f'在爬第{page[0]}页')
        response = requests.get(url, headers=self.headers, timeout=10)
        return response.text

    # Parse a results page and return the listing detail-page URLs on it.
    def req_href(self, text):
        xml = etree.HTML(text)
        href = xml.xpath('//div[@class="title"]/a/@href')
        return href

    # Fetch one listing's detail page and extract its fields.
    # Returns a dict of fields, or None on failure (the failing URL is
    # recorded in self.url_fail for later inspection).
    def each_house(self, url):
        response = requests.get(url, headers=self.headers, timeout=10)
        xml = etree.HTML(response.text)
        try:
            price = ''.join(xml.xpath('//span[@class="total"]/text()'))
            square = ''.join(xml.xpath('//div[@class="area"]/div[1]/text()'))
            cx = ''.join(xml.xpath('//div[@class="type"]/div[1]/text()'))
            hx = ''.join(xml.xpath('//div[@class="room"]/div[1]/text()'))
            xq = ''.join(xml.xpath('//div[@class="communityName"]/a[@class="info "]/text()'))
            jq = ''.join(xml.xpath('//div[@class="areaName"]/span[@class="info"]/a/text()'))
            return {'总价': price, '面积': square, '户型': hx, '朝向': cx, '小区': xq, '街区': jq}
        except Exception as e:
            # BUG FIX: the original appended to a bare name `url_fail` that
            # only existed as a local inside task() -> NameError in threads.
            print(e)
            self.url_fail.append(url)
            return None

def task():
    """Crawl every district, write one '<district>.csv' per district, and
    return the list of district names (consumed by combine_csv)."""
    # BUG FIX: each_house looks up `url_fail` as a global; the original made
    # it a *local* of task(), so any parse failure raised NameError. The
    # `global` declaration makes that lookup resolve.
    global url_fail
    ljspider = LjSpider()
    info_ls = ljspider.get_block_basic()  # 理论上100页3000条
    t0 = time.time()
    block_ls = []
    # Context manager guarantees the pool is shut down (the original pool was
    # never closed); 64 workers is ample for I/O-bound scraping — 1000 threads
    # mostly added scheduling overhead.
    with ThreadPoolExecutor(max_workers=64) as executor:
        for dic in info_ls:
            url_fail = []
            block = dic['地区']
            block_ls.append(block)
            print('开始爬取' + block)
            t1 = time.time()
            # Round 1: fetch every results page of this district concurrently.
            response_ls = list(executor.map(ljspider.req_page, dic['链接']))
            print(f'    已获得{len(response_ls)}页响应')
            t2 = time.time()
            print(f'    请求每页url共耗时{round(t2 - t1, 2)}秒')
            # Round 2: parse listing links out of each page concurrently.
            each_house_href_ls = list(executor.map(ljspider.req_href, response_ls))
            t3 = time.time()
            print(f'    解析每页房源链接共耗时{round(t3 - t2, 2)}秒')
            # Round 3: fetch every listing's detail page concurrently.
            each_detail_ls = []
            for url_li in each_house_href_ls:
                # each_house may yield None for listings that failed to
                # parse; drop those before tagging rows with the district.
                each_detail_ls += [d for d in executor.map(ljspider.each_house, url_li)
                                   if d is not None]
            t4 = time.time()
            for i in each_detail_ls:
                i['地区'] = block
            print(f'    请求每页房源耗时{round(t4 - t3, 2)}秒')
            # Save. BUG FIX: only dict rows go into the DataFrame — the
            # original concatenated raw failed-URL strings into the row
            # list, which broke DataFrame construction / column selection.
            print(f'    开始存储数据，数据量={len(each_detail_ls)}')
            # Report failures from either the local list or, if the spider
            # tracks them on itself, its own record (getattr keeps this
            # working with either version of LjSpider).
            print(url_fail + getattr(ljspider, 'url_fail', []))
            df = pd.DataFrame(each_detail_ls)
            # reindex tolerates missing columns (e.g. when every request
            # failed), unlike plain df[[...]] which raises KeyError.
            df = df.reindex(columns=['地区', '街区', '小区', '总价', '面积', '户型', '朝向'])
            df.fillna('', inplace=True)
            df.to_csv(f'{block}.csv')
            print()
    print(f'爬取总时间={round((time.time() - t0) / 60, 2)}分')
    return block_ls

def combine_csv(block_ls):
    """Read every '<district>.csv' produced by task() and stack them into a
    single DataFrame with a fresh 0..n-1 integer index."""
    # Load all per-district frames first, then concatenate once; the leading
    # empty frame keeps the empty-input case identical to a seeded concat.
    frames = [pd.read_csv(f'{block}.csv') for block in block_ls]
    combined = pd.concat([pd.DataFrame([])] + frames, axis=0)
    combined.index = range(len(combined))
    return combined

if __name__ == '__main__':
    # Run the full crawl, then merge the per-district CSVs and show the result.
    block_ls = task()
    print(combine_csv(block_ls))