import asyncio
import random
import traceback
import requests as requests
from pyppeteer import launch, connection
from lxml import etree
from fake_useragent import UserAgent

# Proxy credentials and provider endpoint (placeholders; only consulted
# when open_proxy below is True).
username = "username"
password = "password"
api_url = "api_url"

# Crawler configuration.
# spider_tasks is the list the __main__ entry point actually iterates;
# spider_tasks2 holds the full city/category matrix (not referenced in this file).
spider_tasks = [{"url": "https://cc.58.com/shengyizr/", "city": "长春", "type": "生意转让"}]
spider_tasks2 = [{"url": "https://cc.58.com/shengyizr/", "city": "长春", "type": "生意转让"},
                 {"url": "https://hrb.58.com/shengyizr/", "city": "哈尔滨", "type": "生意转让"},
                 {"url": "https://sy.58.com/shengyizr/", "city": "沈阳", "type": "生意转让"},
                 {"url": "https://cc.58.com/shangpucz/", "city": "长春", "type": "商铺出租"},
                 {"url": "https://hrb.58.com/shangpucz", "city": "哈尔滨", "type": "商铺出租"},
                 {"url": "https://sy.58.com/shangpucz/", "city": "沈阳", "type": "商铺出租"},
                 {"url": "https://cc.58.com/shangpucs/", "city": "长春", "type": "商铺出售"},
                 {"url": "https://hrb.58.com/shangpucs/", "city": "哈尔滨", "type": "商铺出售"},
                 {"url": "https://sy.58.com/shangpucs/", "city": "沈阳", "type": "商铺出售"}]
# Maximum number of listing pages to crawl per task.
page_number = 1
# When True, route the browser through a proxy fetched from api_url.
open_proxy = False
# Lower bound (seconds) of the random pause taken after finishing each page.
one_page_finish_wait_time = 20

# Launched pyppeteer browser instances, tracked so they can all be
# closed together by Tc58Spider.close_all_browsers().
browser_instances = []

class Tc58Spider(object):
    """Crawler for 58.com listing pages driven by a headless pyppeteer browser.

    Each task dict ({"url", "city", "type"}) is crawled page by page up to
    the module-level ``page_number`` limit; parsed items accumulate in
    ``self.data_list``.
    """

    def __init__(self):
        # Parsed item dicts collected across all pages of handled tasks.
        self.data_list = list()

    # 异步处理单个任务的函数
    async def handler_one_task(self, spider_task):
        """Crawl up to ``page_number`` listing pages for one task.

        :param spider_task: dict with keys "url" (start page), "city", "type".
        """
        print("开始执行任务：{}".format(spider_task))
        index = 0                 # pages successfully processed so far
        url = spider_task["url"]  # URL of the page to crawl next
        failures = 0              # consecutive failures on the current page

        while True:
            try:
                # Stop once the configured page budget is reached.
                if index >= page_number:
                    break
                # FIX: the original `continue`d on every exception with url and
                # index unchanged, so a persistently failing page spun this loop
                # forever. Give up after a few consecutive failures instead.
                if failures > 3:
                    break

                # Start from a clean browser for every page.
                await self.close_all_browsers()
                browser, page = await self.openBrowser(url)

                # Grab the rendered HTML and count the listing rows.
                page_content = await page.content()
                html = etree.HTML(page_content)
                li_list = html.xpath('//ul[@class="infoList"]/li')
                print("第 {} 页, 抓取到{}个数据".format(index + 1, len(li_list)))

                # An empty page means we ran out of listings: stop this task.
                if len(li_list) == 0:
                    await self.close_all_browsers()
                    await asyncio.sleep(random.uniform(2, 3))
                    break

                # Parse the current page and keep the results (the original
                # discarded item_list; data_list was initialized but never fed).
                item_list = await self.crawlerItemObj(page_content, spider_task)
                self.data_list.extend(item_list)
                index = index + 1
                failures = 0
                print(
                    "执行结果：任务{}.执行完毕，当前第{}页，处理{}条数据".format(spider_task, index, len(li_list)))

                # FIX: the original indexed [0] unconditionally, which raised
                # IndexError on the last page (no "next" link) and re-crawled
                # that page until interrupted. Stop cleanly instead.
                next_links = html.xpath('//a[@class="next "]/@href')
                if not next_links:
                    break
                url = next_links[0]
            except Exception:
                failures += 1
                print(traceback.format_exc())
                continue
            finally:
                # Always release the browser and pause before the next page.
                await self.close_all_browsers()
                await asyncio.sleep(random.uniform(one_page_finish_wait_time, one_page_finish_wait_time + 10))

    async def crawlerItemObj(self, page_content, spider_task):
        """Parse every listing row on a page into a dict, skipping bad rows.

        :returns: list of item dicts (possibly empty).
        """
        data = []
        html = etree.HTML(page_content)
        items = html.xpath('//ul[@class="infoList"]/li')
        for index, li_html in enumerate(items):
            # Small random jitter between rows.
            await asyncio.sleep(random.uniform(0, 1))
            try:
                detail_obj = await self.parseToObj(spider_task, li_html)
                print(detail_obj)
                data.append(detail_obj)
            except Exception as a:
                print(a)
                # FIX: print_stack() shows the current call stack, not the
                # caught exception; print_exc() reports what actually failed.
                traceback.print_exc()
                continue
        return data

    async def parseToObj(self, spider_task, li_html):
        """Build one item dict from a listing <li> element.

        Optional fields (area/mark/price) are omitted when absent; any
        unexpected error is logged and the partial dict is returned
        (best-effort, matching the original behavior).
        """
        item_dic = {}
        try:
            item_dic['title'] = li_html.xpath('.//p[@class="title"]/text()')[0]
            item_dic['place'] = li_html.xpath('.//p[@class="place"]/text()')[0]
            item_dic['tags'] = self.clean_field(', '.join(li_html.xpath('.//p[@class="tips"]/span/text()')))
            area_elements = li_html.xpath('.//p[@class="price"]/span[@class="area"]/text()')
            if area_elements:
                item_dic['area'] = area_elements[0]
            mark_elements = li_html.xpath('.//p[@class="price"]/span[@class="mark"]/text()')
            if mark_elements:
                item_dic['mark'] = mark_elements[0]
            price_nums = li_html.xpath('.//p[@class="price"]/b/text()')
            if price_nums:
                price_num = self.clean_field(price_nums[0])
                # FIX: guard the [1] index — a row without a unit text node
                # used to raise IndexError and drop the city/type fields too.
                price_texts = li_html.xpath('.//p[@class="price"]/text()')
                price_unit = self.clean_field(price_texts[1]) if len(price_texts) > 1 else ""
                item_dic['price'] = price_num + price_unit
            item_dic['cc_city'] = spider_task["city"]
            item_dic['cc_post_classification'] = spider_task["type"]
        except Exception as a:
            print(a)
            # FIX: print_exc(), not print_stack() (see crawlerItemObj).
            traceback.print_exc()
        return item_dic

    async def openBrowser(self, url):
        """Launch headless Chromium, navigate to ``url``, return (browser, page)."""
        launch_args = ['--disable-infobars', '--window-size=1366,768', '--no-sandbox']
        if open_proxy:
            print("开启代理")
            proxy = self._get_proxy_ip()
            launch_args.append('--proxy-server=' + proxy)
        else:
            print("无需代理")
        browser = await launch(headless=True,
                               userDataDir="./config",
                               args=launch_args)
        # Track the instance so close_all_browsers() can clean it up.
        browser_instances.append(browser)
        # Randomize the user agent on every page.
        page = await browser.newPage()
        user_agent = UserAgent().random
        await page.setUserAgent(user_agent)
        # FIX: only send proxy credentials when the proxy is actually enabled;
        # the placeholder username/password are meaningless otherwise.
        if open_proxy:
            await page.authenticate({"username": username, "password": password})
        width, height = 1366, 768
        await page.setViewport({'width': width, 'height': height})
        # Hide the headless fingerprint (navigator.webdriver).
        await page.evaluateOnNewDocument(
            '''() =>{ Object.defineProperties(navigator, { webdriver: { get: () => false } }) }''')
        await page.goto(url)
        await asyncio.sleep(random.uniform(2, 3))
        return browser, page

    def clean_field(self, field):
        """Normalize a scraped field: strip whitespace/NBSP/newlines and the
        "['...']" artifacts left over from stringified xpath result lists."""
        if isinstance(field, list):
            field = field[0].strip() if field else ""
        elif isinstance(field, str):
            field = field.strip()
        return field.replace('\xa0', '').replace('\n', '').replace(' ', '').replace('[]', '').removeprefix(
            "['").removesuffix("']")

    def _get_proxy_ip(self):
        """Fetch a fresh proxy address from the provider, as an http:// URL."""
        # FIX: add a timeout so a dead provider cannot hang the crawl forever;
        # strip() in case the API response carries a trailing newline.
        proxy_ip = requests.get(api_url, timeout=10).text.strip()
        proxy = "http://" + proxy_ip
        print("代理IP：{}".format(proxy))
        return proxy

    # 关闭浏览器
    async def close_all_browsers(self):
        """Close every tracked browser; one failing close must not leak the rest."""
        if browser_instances:
            for browser in browser_instances:
                try:
                    await browser.close()
                except Exception:
                    # Best-effort: a browser that already died still gets
                    # dropped from the tracking list below.
                    print(traceback.format_exc())
            browser_instances.clear()
            print("All browsers closed.")


# Script entry point: run every configured task sequentially on one event loop.
if __name__ == '__main__':
    print("开始执行任务")
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    # A fresh spider instance handles each task, one after another.
    for task in spider_tasks:
        event_loop.run_until_complete(Tc58Spider().handler_one_task(task))
    event_loop.close()
    print("结束执行任务")
