import asyncio
import aiohttp
from lxml import etree
from pymongo import MongoClient


def _first(nodes, default=''):
    # Return the first XPath result or a default — avoids bare `except:`
    # around every positional index.
    return nodes[0] if nodes else default


def download_completed_callback(task_obj):
    """Parse the downloaded listing pages and store each house in MongoDB.

    Registered as the done-callback of the spider task, so
    ``task_obj.result()`` is the list of HTML page strings returned by
    ``spider``.

    Args:
        task_obj: the finished asyncio.Task wrapping ``spider``.
    """
    # Authenticate through the connection URI: ``Database.authenticate()``
    # was removed in pymongo 4.x, and the original code built this URI but
    # never used it.
    client = MongoClient(
        'mongodb://python:python@localhost:27017/database'
        '?authSource=admin&authMechanism=SCRAM-SHA-1'
    )
    collection = client["test"]["t3"]
    print("下载的内容为:", task_obj.result())
    for page in task_obj.result():
        html = etree.HTML(page)
        for item in html.xpath("//li[@class='items clearfix']"):
            ctt = item.xpath(".//div[@class='list-main fl']")
            if not ctt:
                # Skip malformed list entries instead of crashing on ctt[0].
                continue
            info = ctt[0]
            metas = ".//div[@class='house-metas clearfix']//p[@class='meta-items']"
            # House name
            name = _first(info.xpath(".//div[@class='list-main-header clearfix']//a/text()"))
            # Layout (户型)
            hx = _first(info.xpath(metas + "[1]/text()"))
            # Area (面积)
            mj = _first(info.xpath(metas + "[2]/text()"))
            # Floor (楼层)
            lc = _first(info.xpath(metas + "[4]/text()"))
            # Orientation (朝面)
            cm = _first(info.xpath(metas + "[5]/text()"))
            # Year built (年份) — not present on every listing
            nf = _first(info.xpath(metas + "[6]/text()"))
            # Address fragments, joined to a single string (the original
            # built the joined string but inserted the raw list instead)
            dz = ''.join(info.xpath(".//div[@class='text fl']//a[@class='link']/text()"))
            # Feature tag from the school badge — optional
            ts = _first(info.xpath(".//a[@class='school fl']//span/text()"))
            # Remaining feature tags; combined with the school tag
            ts2 = info.xpath(".//div[@class='house-tags clearfix']//p/text()")
            tsb = ' '.join(t for t in [ts, *ts2] if t)
            # Price: integer part + unit span
            mny = info.xpath("..//div[@class='list-price']/p/span/text()")
            mny = ''.join(mny[:2])
            # Price per square meter
            cms = _first(info.xpath("..//div[@class='list-price']//p[@class='smaller']/text()"))
            info_list = {"房名": name, "户型": hx, "面积": mj, "楼层": lc,
                         "朝面": cm, "年份": nf, "特色": tsb, "地址": dz,
                         "价格": mny, "平米价格": cms}
            # Collection.insert() was removed from pymongo; insert_one is
            # the supported single-document API.
            collection.insert_one(info_list)


async def spider(num):
    """Download *num* pages of sale listings and return their HTML.

    Args:
        num: number of result pages to fetch (pages 1 through num).

    Returns:
        list[str]: the raw HTML body of each page, in page order.
    """
    pages = []
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/86.0.4240.193 Safari/537.36'}
    # One session (and connection pool) for all requests — the original
    # opened a fresh ClientSession per page.
    async with aiohttp.ClientSession() as session:
        # range(1, num + 1): the original range(1, num + 2) fetched one
        # page more than requested.
        for page_no in range(1, num + 1):
            url = "https://nanjing.qfang.com/sale/f%s" % page_no
            async with session.get(url=url, headers=headers) as r:
                pages.append(await r.text())
            # Pause between requests to reduce the chance of being blocked.
            await asyncio.sleep(1)
            print(page_no)
    return pages


async def main():
    """Kick off the page download and attach the parse-and-store callback."""
    # spider(<number of pages to fetch>)
    download_task = asyncio.create_task(spider(10))
    # The callback parses the pages and writes them to MongoDB once the
    # download task completes.
    download_task.add_done_callback(download_completed_callback)
    # Block until the download finishes.
    await asyncio.wait([download_task])


if __name__ == '__main__':
    asyncio.run(main())