# coding=utf-8
import random
import time
import os
from DrissionPage import Chromium
from DrissionPage.items import MixTab
from config import cookies as ck
import json
from loguru import logger
import pandas as pd
from openpyxl import Workbook
import threading
import base64
import requests
from urllib.parse import unquote


class AiQiChaSpider:
    """Scraper for aiqicha.baidu.com (爱企查) company listings.

    Drives a local Chromium browser through DrissionPage, walks the
    per-city company list pages, opens every company detail page and
    records the company name plus its website URL.  Results are written
    to one .xlsx workbook per province, one sheet per city/district.
    """

    def __init__(self):
        # Attach to (or start) the local Chromium instance and install the
        # login cookies imported from config.
        self.browser = Chromium()
        self.browser.set.cookies(ck)
        self.root_path = os.path.dirname(os.path.abspath(__file__))
        self.output_path = os.path.join(self.root_path, "output")
        # Ensure the output directory exists so Workbook().save() and
        # pd.ExcelWriter() cannot fail with a missing-directory error.
        os.makedirs(self.output_path, exist_ok=True)
        # Shared result buffer for the multi-threaded collector.  Defined
        # here so open_web() cannot hit an AttributeError if it runs before
        # multi_thread(); list.append is atomic under the GIL, so the
        # worker threads need no extra locking.
        self.company_data = []

    def read_city_file(self):
        """Load the province/city URL map written by save_city_data_to_json().

        :return: list of ``{"province": ..., "province_url": ..., "citys": [...]}``
        """
        city_file_path = os.path.join(self.root_path, "city.json")
        with open(city_file_path, "rt", encoding="utf-8") as f:
            city_data = json.load(f)

        return city_data

    def save_city_data_to_json(self):
        """Scrape the home page's province/city navigation into city.json."""
        tab = self.browser.latest_tab
        tab.get("https://aiqicha.baidu.com/")
        # Scroll so the lazily rendered region list appears in the DOM.
        tab.scroll.to_half()
        tab.scroll.down(30)

        eles = tab.eles("@class=qy-info")

        data = []
        for item in eles:
            province = item.child("@tag()=a").text
            province_url = item.child("@tag()=a").attr("href")
            citys = item.ele("@class=qy-city-item-wrap").eles("@@tag()=a@@class=qy-city-item")

            temp = {"province": province, "province_url": province_url, "citys": []}
            for city in citys:
                temp['citys'].append({"name": city.text, "url": city.attr("href")})

            data.append(temp)
            logger.success(temp)

        # Write next to this script: read_city_file() loads from
        # self.root_path, so writing to the current working directory (as
        # before) broke the round-trip whenever cwd differed.
        with open(os.path.join(self.root_path, "city.json"), "wt", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def main(self):
        """Collect every province and city in one run (can take very long)."""
        citys = self.read_city_file()

        cur_tab = self.browser.latest_tab  # iterates the city list pages
        sub_tab = self.browser.new_tab()   # iterates the company detail pages

        # Outer loop: one workbook per province.
        for provinces in citys:
            province_name = provinces["province"]
            workbook_path = os.path.join(self.output_path, f'{province_name}.xlsx')
            if not os.path.exists(workbook_path):
                Workbook().save(workbook_path)  # create the workbook once

            # Inner loop: one sheet per city/district.
            for city in provinces["citys"]:
                city_name = city["name"]
                city_url = city["url"]

                cur_tab.get(city_url)
                # All company names/websites for this city: [{}, {}, ...]
                city_data = []
                while True:
                    next_btn = cur_tab.ele("@@tag()=li@@title=下一页")
                    cur_tab.scroll.to_see(next_btn)

                    # Stop paging once "next page" is disabled or covered.
                    if next_btn.states.is_clickable and not next_btn.states.is_covered:
                        # Build the detail-page URL of every company listed
                        # on the current page, then visit each one.
                        h3_btn = cur_tab.ele('@class=wrap').eles('@@tag()=h3@@class=title')
                        next_urls = ["https://aiqicha.baidu.com/company_detail_" + item.ele('@tag()=a').attr('data-log-title').split("-")[1] for item in h3_btn]

                        for url in next_urls:
                            try:
                                sub_tab.get(url, timeout=15, retry=2, interval=3)

                                # Anchor on the "网址" (website) label; when the
                                # next sibling is still a <span>, the company
                                # has no website on record.
                                span_label = sub_tab.ele('@@tag()=span@@class=label@@text():网址')
                                next_node = span_label.next(1)
                                if next_node.tag == "span":
                                    logger.info('暂无网址')
                                    time.sleep(random.randint(2, 4))
                                    continue

                                site_name = sub_tab.ele('@@tag()=h1@@class=name').text.strip()
                                site_url = sub_tab.ele('@@tag()=a@@class=website').text
                                city_data.append({"title": site_name, "url": site_url})

                                logger.success(f'{site_name} - {site_url}')
                                # Throttle to avoid Baidu's spin captcha.
                                time.sleep(random.randint(4, 6))

                            except Exception as e:
                                logger.error(f'{url} - {e}')
                                continue

                        time.sleep(random.randint(4, 8))
                        # Advance to the next list page.
                        next_btn.click(timeout=2)

                    else:
                        # Not clickable or covered: no more pages.
                        break

                try:
                    # Append this city's rows as a new sheet in the workbook.
                    with pd.ExcelWriter(workbook_path, mode='a') as writer:
                        pd.DataFrame(city_data).to_excel(writer, sheet_name=city_name, index=False)
                        logger.success(f"{province_name}.xlsx 保存成功")

                except Exception as e:
                    # A save failure indicates a code bug -- stop and inspect.
                    logger.error(e)
                    return

    def beta(self, prov: dict):
        """Beta collector: pass one province dict in manually.

        The district index below must be changed by hand between runs to
        avoid re-collecting the same area.
        """
        cur_tab = self.browser.latest_tab  # iterates the city list pages
        sub_tab = self.browser.new_tab()   # iterates the company detail pages

        prov_name = prov["province"]
        workbook_path = os.path.join(self.output_path, f'{prov_name}.xlsx')
        if not os.path.exists(workbook_path):
            Workbook().save(workbook_path)  # create the workbook once

        # Manually switch the target district here.
        cur_city = prov["citys"][4]  # currently collecting: 河北区
        city_name = cur_city["name"]
        city_url = cur_city["url"]
        logger.info(f"当前采集区域:{prov_name} - {city_name}")

        cur_tab.get(city_url, timeout=15, retry=2, interval=3)
        city_data = []  # company names/websites: [{}, {}, ...]
        while True:
            next_btn = cur_tab.ele("@@tag()=li@@title=下一页")
            cur_tab.scroll.to_see(next_btn)

            # Stop paging once "next page" is disabled or covered.
            if next_btn.states.is_clickable and not next_btn.states.is_covered:
                # Build the detail-page URL of every company on this page.
                h3_btn = cur_tab.ele('@class=wrap').eles('@@tag()=h3@@class=title')
                next_urls = ["https://aiqicha.baidu.com/company_detail_" + item.ele('@tag()=a').attr('data-log-title').split("-")[1] for item in h3_btn]

                for url in next_urls:
                    try:
                        # Single-threaded: the outer while loop pauses while
                        # we walk the detail pages.  A captcha may pop up, so
                        # the timeout is generous enough to solve it by hand.
                        sub_tab.get(url, timeout=15, retry=2, interval=3)

                        # Anchor on the "网址" label; a <span> sibling means
                        # the company has no website on record.
                        span_label = sub_tab.ele('@@tag()=span@@class=label@@text():网址')
                        next_node = span_label.next(1)
                        if next_node.tag == "span":
                            logger.info('暂无网址')
                            time.sleep(random.randint(2, 4))
                            continue

                        qy_name = sub_tab.ele('@@tag()=h1@@class=name').text.strip()
                        qy_url = sub_tab.ele('@@tag()=a@@class=website').text
                        city_data.append({"name": qy_name, "url": qy_url})

                        logger.success(f'{qy_name} - {qy_url}')
                        # Throttle to avoid Baidu's spin captcha.
                        time.sleep(random.randint(4, 6))

                    except Exception as e:
                        logger.error(f'{url} - {e}')
                        continue

                time.sleep(random.randint(4, 6))
                # Advance to the next list page.
                next_btn.click(timeout=3)

            else:
                # Not clickable or covered: no more pages.
                break

        try:
            # Append this district's rows as a new sheet in the workbook.
            with pd.ExcelWriter(workbook_path, mode='a') as writer:
                pd.DataFrame(city_data).to_excel(writer, sheet_name=city_name, index=False)
                logger.success(f"{prov_name}.xlsx 保存成功")
                logger.info(f"采集完成:{prov_name} - {city_name}")

        except Exception as e:
            # A save failure indicates a code bug -- stop and inspect.
            logger.error(e)
            return

    def verify(self, img_url: str):
        """Resolve the spin captcha's rotation via the 云码 coding platform.

        :param img_url: URL of the rotated captcha background image
        :return: slider drag distance in pixels (int), or ``False`` on failure
        """
        try:
            # SECURITY: the API token is hard-coded here -- move it into
            # config or an environment variable before sharing this file.
            json_data = {
                "token": "nukHZ6acJSEEkEG6RLP3p1ySuchLQTcH-Y-0bDNlJA8",
                "type": "900011",
                "image": "",
            }

            # Download the captcha image ourselves so it can be base64-posted.
            _headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36', "Referer": img_url}
            resp = requests.get(url=img_url, headers=_headers, cookies=ck, timeout=10)
            resp.raise_for_status()

            # Submit the image to the coding-platform API.
            json_data.update({"image": base64.b64encode(resp.content).decode('utf-8')})
            response = requests.post(url="http://api.jfbym.com/api/YmServer/customApi", headers={"Content-Type": "application/json"}, json=json_data)
            response.raise_for_status()

            response_data = response.json()  # platform response (JSON)
            if response_data.get("msg") == "识别成功":
                data = int(response_data.get("data").get("data"))
                # `data` is the reported rotation angle in degrees; a negative
                # value is the counter-clockwise reading, equivalent to a
                # clockwise angle of 360 + data.  The slider track maps a full
                # 360° turn onto 238 px.  (The previous `(360 - data) // 360`
                # floor division could only yield 0 or 238 px and overshot the
                # track for negative angles.)
                angle = data + 360 if data < 0 else data
                return round((angle / 360) * 238)

            else:
                logger.info(response_data)
                return False

        except Exception as e:
            logger.error(e)
            return False

    def verify_spin_captcha(self, tab: MixTab):
        """Solve the Baidu spin captcha currently displayed on *tab*.

        :param tab: tab showing the captcha overlay
        :return: the tab's URL after the drag (str), or ``False`` on failure
        """
        try:
            spin_wrapper = tab.ele('@id=spin-0')  # captcha overlay
            img_url = spin_wrapper.ele('@class=passMod_spin-background').attr('src')  # background image
            btn_slider = spin_wrapper.ele("@class=passMod_slide-btn  passMod_slide-btn-loading")  # slider handle
            # Ask the coding platform for the drag distance.
            distance = self.verify(img_url)

            # Only `False` marks a failure: a distance of 0 px is a valid
            # answer (image already upright), so plain truthiness was wrong.
            # Return False (not bare None) so callers can test the result.
            if distance is False:
                logger.error("图片请求或识别可能存在问题")
                return False

            # Press the slider, drag it by the computed distance, release.
            tab.actions.move_to(btn_slider).hold()
            tab.actions.right(distance)
            tab.actions.release()

            time.sleep(random.randint(2, 4))
            return tab.url

        except Exception as e:
            logger.error(e)
            return False

    def multi_thread(self, prov: dict):
        """Multi-threaded collector: gather all detail URLs first, then fetch
        them three at a time through three dedicated tabs."""
        cur_tab = self.browser.latest_tab

        prov_name = prov["province"]
        workbook_path = os.path.join(self.output_path, f'{prov_name}.xlsx')
        if not os.path.exists(workbook_path):
            Workbook().save(workbook_path)  # create the workbook once

        # Manually switch the target district here.
        cur_city = prov["citys"][15]  # currently collecting: 蓟州区
        city_name = cur_city["name"]
        city_url = cur_city["url"]
        logger.info(f"当前采集区域:{prov_name} - {city_name}")

        self.company_data = []  # company names/websites: [{}, {}, ...]
        company_detail_urls = []  # every company detail-page URL

        cur_tab.get(city_url, timeout=15, retry=2, interval=3)

        while True:
            try:
                next_btn = cur_tab.ele("@@tag()=li@@title=下一页")
                cur_tab.scroll.to_see(next_btn)

                # Stop paging once "next page" is disabled or covered.
                if next_btn.states.is_clickable and not next_btn.states.is_covered:
                    # Collect the detail-page URL of every company listed on
                    # this page; the pages themselves are fetched later.
                    h3_labels = cur_tab.ele('@class=wrap').eles('@@tag()=h3@@class=title')

                    for label in h3_labels:
                        detail_url = "https://aiqicha.baidu.com/company_detail_" + label.ele('@tag()=a').attr('data-log-title').split("-")[1]
                        company_detail_urls.append(detail_url)

                    time.sleep(random.randint(4, 6))
                    # Advance to the next list page.
                    next_btn.click(timeout=3)

                else:
                    # Not clickable or covered: no more pages.
                    break

            except Exception:
                # An exception here usually means the tab was redirected to a
                # captcha/forbidden page.  Loop until we are back on the list
                # page, in case the captcha is not solved on the first try.
                while True:
                    this_url = unquote(cur_tab.url)  # decode the current URL

                    if this_url == city_url:
                        break

                    elif "https://wappass.baidu.com/static/captcha/" in this_url:
                        logger.error("页面弹出了旋转验证码")
                        ret_url = self.verify_spin_captcha(cur_tab)
                        # verify_spin_captcha returns False on failure; guard
                        # before substring-testing.  (The previous code also
                        # compared against the `cur_city` dict, which could
                        # never match a URL string.)
                        if ret_url and city_url in ret_url:
                            break

                    elif 'https://aiqicha.baidu.com/fs/forbidden' in this_url:
                        logger.error('访问频率过高')
                        time.sleep(random.randint(2, 3))
                        cur_tab.get(city_url, timeout=15, retry=2, interval=3)
                        break

        worker_tabs = [self.browser.new_tab() for _ in range(3)]

        for i in range(0, len(company_detail_urls), 3):
            # Dispatch up to three detail pages in parallel, one per tab.
            # Slicing (instead of indexing i+1 / i+2 unconditionally) avoids
            # the IndexError the old code raised whenever the URL count was
            # not a multiple of 3.
            batch = company_detail_urls[i:i + 3]
            threads = [
                threading.Thread(target=self.open_web, args=(url, tab))
                for url, tab in zip(batch, worker_tabs)
            ]
            for t in threads:
                t.start()
            # Wait for the batch: loading too many pages at once stalls Chromium.
            for t in threads:
                t.join()

        try:
            # Append this district's rows as a new sheet in the workbook.
            with pd.ExcelWriter(workbook_path, mode='a') as writer:
                pd.DataFrame(self.company_data).to_excel(writer, sheet_name=city_name, index=False)
                logger.success(f"{prov_name}.xlsx 保存成功")
                logger.info(f"采集完成:{prov_name} - {city_name}")

        except Exception as e:
            # A save failure indicates a code bug -- stop and inspect.
            logger.error(e)
            return

        # Release all browser resources.
        self.browser.clear_cache()
        self.browser.close_tabs(worker_tabs + [cur_tab])
        self.browser.quit()

    def open_web(self, url: str, tab: MixTab):
        """Worker: open one company detail page on *tab* and record its
        name/website into self.company_data (appends are GIL-atomic)."""
        # A captcha may appear at any point during navigation.
        tab.get(url, timeout=15, retry=2, interval=3)

        while True:
            try:
                # Anchor on the "网址" label; a <span> sibling means the
                # company has no website on record.
                span_label = tab.ele('@@tag()=span@@class=label@@text():网址')
                next_node = span_label.next(1)
                if next_node.tag == "span":
                    logger.info('暂无网址')
                    time.sleep(random.randint(4, 6))
                    return

                qy_name = tab.ele('@@tag()=h1@@class=name').text.strip()
                qy_url = tab.ele('@@tag()=a@@class=website').text
                self.company_data.append({"name": qy_name, "url": qy_url})

                logger.success(f'{qy_name} - {qy_url}')
                time.sleep(random.randint(4, 7))
                return

            except Exception:
                # An exception here usually means a captcha/forbidden
                # redirect.  Loop until the detail page is back, in case the
                # captcha is not solved on the first try.
                while True:
                    this_url = tab.url

                    if this_url == url:
                        break

                    elif "https://wappass.baidu.com/static/captcha/" in this_url:
                        logger.error('页面弹出了旋转验证码')
                        ret_url = self.verify_spin_captcha(tab)
                        # Guard: verify_spin_captcha returns False on failure,
                        # and `url in False` would raise a TypeError.
                        if ret_url and url in ret_url:
                            break

                    elif 'https://aiqicha.baidu.com/fs/forbidden' in this_url:
                        logger.error('访问频率过高')
                        time.sleep(random.randint(2, 3))
                        tab.get(url, timeout=15, retry=2, interval=3)
                        break


if __name__ == '__main__':
    # Load the province/city map that sits next to this script.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(base_dir, "city.json"), "rt", encoding="utf-8") as f:
        provinces = json.load(f)

    spider = AiQiChaSpider()
    spider.multi_thread(provinces[0])  # currently collecting: 天津
