import os
from ast import literal_eval
from queue import Queue

from DrissionPage._base.chromium import Chromium
from DrissionPage._configs.chromium_options import ChromiumOptions
from DrissionPage._functions.settings import Settings
from DrissionPage.errors import WaitTimeoutError
from fake_useragent import UserAgent
from sqlmodel import select

from config.db import get_session
from constant.crawler_constant import CrawlerId
from crawler_handler.abstract_crawler_handler import AbstractCrawlerHandler
from decorators.exception_decorator import crawler_exception_handler
from entity.model.community_model import CommunityDataCollection
from entity.model.hefei.crawler_record_model import HeFeiDataCollectionBuilding, HeFeiDataCollectionCommunity
# from entity.model.hefei.crawler_record_model import HeFeiDataCollectionBuilding
from entity.schema.crawler_schema import CrawlerHeFeiArgs
from util.crawler_util import get_query_params, get_path_params, recode
from util.data_change_util import change_to_int, is_today, change_to_float
from util.mylog import my_logger
from concurrent.futures import ThreadPoolExecutor, as_completed, wait

class HeFeiHandler(AbstractCrawlerHandler):
    """Crawler for Hefei new-home ("spf") listings on www.hfzfzlw.com.

    Three collection stages, each fanned out over a shared thread pool:

    * :meth:`crawler_project`   -- per-page permit/building rows
      (``HeFeiDataCollectionBuilding``)
    * :meth:`crawler_community` -- per-county community/project details
      (``HeFeiDataCollectionCommunity``)
    * :meth:`crawler_room`      -- per-room sale data for every stored
      building (``CommunityDataCollection``)

    Browser tabs/pages come from the ``AbstractCrawlerHandler``
    ``fetch_tab``/``fetch_page`` context managers; results are persisted
    via ``bulk_save_or_update``.
    """

    # Work is I/O-bound (page fetches), so the pools are oversubscribed
    # relative to CPU count.
    building_executor = ThreadPoolExecutor(max_workers=(os.cpu_count() or 1) * 4, thread_name_prefix='hf-building')
    room_executor = ThreadPoolExecutor(max_workers=(os.cpu_count() or 1) * 4, thread_name_prefix='hf-room')
    data_executor = ThreadPoolExecutor(max_workers=(os.cpu_count() or 1) * 4, thread_name_prefix='hf-data_detail')
    community_executor = ThreadPoolExecutor(max_workers=(os.cpu_count() or 1) * 4, thread_name_prefix='hf-community')

    # URL templates; '{}' placeholders are filled per request.
    community_url = 'https://www.hfzfzlw.com/spf/index?servicetype={}&re={}'
    community_info_url = 'https://www.hfzfzlw.com/spf/item/{}'
    building_detail_url = 'https://www.hfzfzlw.com/spf/details/{}'
    room_rsa_url = 'https://www.hfzfzlw.com/spf/getrsa/{}' if False else 'https://www.hfzfzlw.com/spf/details/getrsa/{}'
    room_detail_info_url = 'https://www.hfzfzlw.com/spf/details/house/{}'
    # Wildcard pattern handed to the tab's network listener to capture the
    # room-detail XHR responses.
    room_detail_data_url = 'https://www.hfzfzlw.com/spf/details/house/*'

    # Districts of Hefei iterated by crawler_community.
    county_list = [
        "蜀山区",
        "高新区",
        "庐阳区",
        "瑶海区",
        "滨湖区",
        "新站区",
        "包河区",
        "政务区",
        "经济区",
    ]

    def __init__(self, browser: Chromium, crawler_args: CrawlerHeFeiArgs):
        """Bind the shared browser and crawl arguments.

        The queues hand work from producer stages to consumer stages
        (currently only ``community_queue_task`` is used).
        """
        super().__init__(browser)
        self.crawler_args = crawler_args
        self.community_queue_task = Queue()
        self.building_queue_task = Queue()
        self.room_detail_queue_task = Queue()

    @staticmethod
    def get_last_page_num(last_str: str) -> str:
        """Extract the page number from the 'last page' pagination link.

        :param last_str: href of the last pagination anchor.
        :raises ValueError: when the link carries no ``p`` query parameter.
        """
        query_params = get_query_params(last_str)
        params = query_params.get("p", None)
        if not params:
            # ValueError (an Exception subclass) keeps existing handlers working.
            raise ValueError("获取页码失败")
        return params[0]

    @staticmethod
    def get_link_id(link: str) -> str:
        """Return the last path segment of *link* (the site's record id)."""
        return get_path_params(link)[-1]

    @crawler_exception_handler
    def collect_community_project(self, page_num: int):
        """Scrape one result page of the permit/building table.

        For every data row, builds a ``HeFeiDataCollectionBuilding`` and
        enriches it from the linked project detail page.

        :returns: ``(True, [models...])`` on success, ``(False, [])`` when
            the table rows cannot be located.
        """
        # NOTE(review): the template is 'servicetype={}&re={}' but this passes
        # (page_num, house_type) — looks like a leftover of an older
        # '?p={}&item=&use={}' URL; confirm the parameter order.
        with self.fetch_tab(HeFeiHandler.community_url.format(page_num, self.crawler_args.house_type)) as tab:
            data_table = tab.ele('.tab_con01')
            # Data rows only: <tr> elements without the header's 'table_bg' class.
            tr_list = data_table.eles('@@tag()=tr@!class=table_bg')
            if not tr_list:
                my_logger.error(f"获取tr数据失败: {page_num}")
                return False, []
            building_model_list = []
            for tr in tr_list:
                td_list = tr.children()
                if not td_list:
                    my_logger.warning(f"获取td数据失败: {tr_list}")
                    continue
                building_model = HeFeiDataCollectionBuilding()
                # Fixed provenance fields for Hefei / Anhui.
                building_model.province = '安徽省'
                building_model.province_id = 34
                building_model.area = '合肥市'
                building_model.area_id = 3401
                building_model.data_source = 1
                # Column 0: presale permit number.
                building_model.certificate_no = td_list[0].child().attr('id')

                # Column 2: building anchor carries number (title) and site id.
                building_a = td_list[2].child()
                building_model.building_number = building_a.attr('title')
                building_model.building_id = building_a.attr('id')
                building_model.building_href = HeFeiHandler.get_link_id(building_a.link)

                # Column 1: project (community) anchor; reurl(this) rewrites the
                # obfuscated href in-page before it is read.
                project_a = td_list[1].child()
                project_a.run_js('reurl(this)')
                building_model.community_id = project_a.attr('id')
                with self.fetch_page(project_a.link) as page:
                    project_info = page.s_ele('.lbox')
                    if not project_info:
                        my_logger.error(f"获取项目信息失败: {project_a.link}")
                        continue
                    building_model.community_href = HeFeiHandler.get_link_id(project_a.link)
                    info_list = project_info.children()
                    # Fixed positions on the detail page: 0=developer,
                    # 8=county, 9=address.
                    for idx, info in enumerate(info_list):
                        texts = info.texts(text_node_only=True)
                        if idx == 0:
                            building_model.developer = texts[0] if texts else None
                        elif idx == 8:
                            building_model.county = texts[0] if texts else None
                        elif idx == 9:
                            building_model.address = texts[0] if texts else None
                building_model_list.append(building_model)
        return True, building_model_list

    def crawler_project(self):
        """Read the pagination, then fan page scraping out over the pool."""
        with self.fetch_page(HeFeiHandler.community_url.format(1, self.crawler_args.house_type)) as page:
            pagination = page.s_ele('.green-black')
            if not pagination:
                my_logger.warning("项目没有页数信息..")
                return
            all_count = change_to_int(pagination.ele('@tag()=em').text)
            # Third-from-last pagination child is the 'last page' link.
            page_count = change_to_int(HeFeiHandler.get_last_page_num(pagination.child(index=-3).link))
            my_logger.debug(f"项目总数：{all_count}, 总页数: {page_count}")
        futures = [HeFeiHandler.community_executor.submit(self.collect_community_project, page_num)
                   for page_num in range(1, page_count + 1)]
        success_task, fail_task = HeFeiHandler.process_futures(futures)
        my_logger.debug(len(success_task))
        if fail_task:
            my_logger.error(f"失败任务: {fail_task}")

    def crawler_community(self):
        """Collect community (project) details for every county.

        Producers (one per county) push ``(id, name, url)`` tuples onto
        ``community_queue_task``; consumers fetch each detail page and build
        ``HeFeiDataCollectionCommunity`` rows, which are upserted in bulk.
        """
        def crawler(idx, county: str):
            # Producer: enumerate every community anchor for one county.
            link = HeFeiHandler.community_url.format(self.crawler_args.house_type, county)
            with self.fetch_page(link) as page:
                community_a_1_list = page.ele('.beian_se2_1_d1').ele('.pleft_1_1').eles('t:a')
                community_a_2_list = page.ele(f'#beian_{idx}').ele('.pleft_1_1').eles('t:a')
                if not community_a_1_list and not community_a_2_list:
                    my_logger.warning(f"没有获取到小区信息: {link}")
                    return
                # Per-page token needed by recode() to rebuild the obfuscated
                # detail URL.
                iptstamp = page.ele('#iptstamp').attr('value')
                if not iptstamp:
                    my_logger.warning(f"没有获取到iptstamp: {link}")
                    return
                for community_a in community_a_1_list + community_a_2_list:
                    community_id = community_a.attr("id")
                    community_name = community_a.attr('title')
                    reurl = recode(community_id, iptstamp)
                    community_info_url = HeFeiHandler.community_info_url.format(reurl)
                    self.community_queue_task.put((community_id, community_name, community_info_url))

        def crawler_community_info(community_id: str, community_name: str, community_info_url: str):
            # Consumer: fetch one detail page, return (ok, payload).
            with self.fetch_page(community_info_url) as page:
                project_info = page.s_ele('.lbox')
                if not project_info:
                    my_logger.error(f"获取项目信息失败: {community_info_url}")
                    return False, community_id
                community_model = HeFeiDataCollectionCommunity()
                # Fixed provenance fields for Hefei / Anhui.
                community_model.province = '安徽省'
                community_model.province_id = 34
                community_model.area = '合肥市'
                community_model.area_id = 3401
                community_model.data_source = 1
                community_model.collect_community_id = community_id
                community_model.community_name = community_name
                info_list = project_info.children()
                # Same fixed layout as collect_community_project:
                # 0=developer, 8=county, 9=address.
                for idx, info in enumerate(info_list):
                    texts = info.texts(text_node_only=True)
                    if idx == 0:
                        community_model.developer = texts[0] if texts else None
                    elif idx == 8:
                        community_model.county = texts[0] if texts else None
                    elif idx == 9:
                        community_model.address = texts[0] if texts else None
                return True, [community_model]

        product_futures = [HeFeiHandler.community_executor.submit(crawler, idx, county)
                           for idx, county in enumerate(HeFeiHandler.county_list, start=1)]
        HeFeiHandler.process_produce_futures(product_futures)
        success_tasks, _ = HeFeiHandler.process_tasks(self.community_queue_task, HeFeiHandler.community_executor, crawler_community_info)
        HeFeiHandler.bulk_save_or_update(HeFeiDataCollectionCommunity, success_tasks, [CrawlerId.COMMUNITY_ID])

    @crawler_exception_handler
    def collect_room_info(self, building_record: HeFeiDataCollectionBuilding):
        """Scrape the room grid of one building and persist residential rooms.

        Clicks every room cell, captures the room-detail XHR via the tab's
        network listener, and bulk-saves ``CommunityDataCollection`` rows
        keyed by room id.

        :returns: ``(True, [building_record])`` so the caller can mark it done.
        """
        with self.fetch_tab(HeFeiHandler.building_detail_url.format(building_record.building_href)) as tab:
            table = tab.s_ele('@tag()=table')
            tr_list = table.eles('@tag()=tr')
            room_model_list = []
            for row_idx, tr in enumerate(tr_list):
                if row_idx == 0:
                    continue  # header row
                td_list = tr.children()
                if not td_list:
                    continue
                floor = None
                for col_idx, td in enumerate(td_list):
                    if col_idx == 0:
                        # First cell carries the floor label ('层：N' or '-').
                        floor = HeFeiHandler.convert_floor(td.text)
                        if floor is None:
                            break
                        continue
                    # Arm the listener before triggering the room's XHR.
                    tab.listen.start(targets=HeFeiHandler.room_detail_data_url, is_regex=True, method='GET')
                    onclick = td.attr('onclick')
                    if onclick == "s('0',1)":
                        continue  # placeholder cell, not a real room
                    model = CommunityDataCollection(**building_record.model_dump(exclude={"id"}))
                    # Up to 4 attempts; DrissionPage raises its own
                    # WaitTimeoutError (imported above), so catch both it and
                    # the builtin TimeoutError.
                    clicked = False
                    for attempt in range(1, 5):
                        try:
                            tab.run_js(onclick)
                            clicked = True
                            break
                        except (TimeoutError, WaitTimeoutError) as e:
                            my_logger.warning(f"{e} 等待 10s 重试...{attempt}")
                            tab.reconnect()
                    if not clicked:
                        # The request was never triggered; listen.wait() below
                        # would block forever, so skip this room.
                        continue
                    # onclick looks like "s('<room_id>',1)"; strip only the
                    # leading 's' — replace('s', '') would also mangle room
                    # ids that contain an 's'.
                    room_id = literal_eval(onclick[1:])[0]
                    model.room_id = room_id
                    res = tab.listen.wait()
                    res_data = res.response.body
                    if res_data['state']:
                        # Sample payload:
                        # {'lbBuildArea': '144.77㎡', 'lbHouseType': '三室户',
                        #  'lbInsideArea': '125.02㎡', 'iPrice': '27079元/㎡',
                        #  'strTitle': '101号', 'lbSellFlag': '可售',
                        #  'lbJoinArea': '19.75㎡', 'lbHouseUsefulness': '住宅',
                        #  'sellflag': 0, 'lbStructure': '钢筋混凝土结构',
                        #  'lbPartNO': '101'}
                        data = res_data['data']
                        model.unit_number = '1'
                        model.max_room_no = 1
                        model.room_number = data['lbPartNO']
                        model.building_area = change_to_float(data['lbBuildArea'].replace('㎡', ''))
                        model.inner_area = change_to_float(data['lbInsideArea'].replace('㎡', ''))
                        model.public_area = change_to_float(data['lbJoinArea'].replace('㎡', ''))
                        model.purpose = data['lbHouseUsefulness']
                        model.sale_status = data['lbSellFlag']
                        model.floor = floor
                        if model.purpose == '住宅':
                            # Keep residential units only.
                            room_model_list.append(model)
            # Save once per building (previously inside the row loop, which
            # re-upserted the whole accumulated list on every row; the unique
            # keys make the final DB state identical).
            self.bulk_save_or_update(CommunityDataCollection, room_model_list, unique_keys=[CrawlerId.ROOM_ID])
        return True, [building_record]

    def crawler_room(self):
        """Fan room scraping out over every stored building record."""
        with get_session() as session:
            stat = select(HeFeiDataCollectionBuilding)
            # The list comprehension drains the generator while the session
            # is still open.
            building_record_generator = session.exec(stat).yield_per(1000)
            futures = [HeFeiHandler.room_executor.submit(self.collect_room_info, building_record)
                       for building_record in building_record_generator]
        self.process_futures(futures)

    @staticmethod
    def convert_floor(floor_str: str):
        """Parse the '层：N' floor label; '-' means no floor (returns None)."""
        if floor_str == '-':
            return None
        floor = floor_str.replace('层：', '')
        return change_to_int(floor)

    @staticmethod
    def convert_room(room_str: str):
        """Parse the '房间号: X' room label; '-' means no room (returns None)."""
        if room_str == '-':
            return None
        room = room_str.replace('房间号: ', '')
        return room

    def test(self):
        """Manual smoke test: open page 1 and log total count / page count."""
        tab = self.browser.new_tab()
        tab.get(HeFeiHandler.community_url.format(1, self.crawler_args.house_type))
        pagination = tab.ele('.green-black')
        if not pagination:
            return
        all_count = change_to_int(pagination.ele('@tag()=em').text)
        page_count = HeFeiHandler.get_last_page_num(pagination.child(index=-3).link)
        my_logger.info(f"项目总数：{all_count}, 总页数: {page_count}")

if __name__ == '__main__':
    # Headless Chromium on a fixed debug port: eager load mode, a couple of
    # retries with a long wait, a random desktop user agent, and an incognito
    # profile that ignores TLS certificate errors.
    opts = ChromiumOptions().headless()
    opts.set_local_port(9600)
    opts.set_retry(2, 30)
    opts.set_load_mode('eager')
    opts.set_user_agent(UserAgent().random)
    opts.incognito()
    opts.ignore_certificate_errors()

    chromium = Chromium(opts)
    chromium.set.auto_handle_alert()

    # Entry point: crawl community details for the configured district.
    HeFeiHandler(chromium, CrawlerHeFeiArgs(area='蜀山区')).crawler_community()