from .base_spider import xml_parser, BaseSpider
import re
from bs4 import BeautifulSoup
from playwright.sync_api import sync_playwright

# from http.client import HTTPConnection
# import logging
# HTTPConnection.debuglevel = 1
# logging.basicConfig()  # initialize logging, otherwise no requests output is shown.
# logging.getLogger().setLevel(logging.DEBUG)
# requests_log = logging.getLogger("requests.packages.urllib3")
# requests_log.setLevel(logging.DEBUG)
# requests_log.propagate = True


class WiWjSpider(BaseSpider):
    """Spider for the 5i5j (我爱我家) second-hand housing website.

    The first listing page is rendered with a headless browser because the
    site sets anti-bot cookies via JavaScript; the captured cookies are then
    reused for plain HTTP requests on subsequent pages and detail pages.
    """

    # Cookies captured from the Playwright page context; reused by
    # ``self.session`` for the non-browser requests.
    cookies = None

    def __init__(self, city):
        """Build the city-specific host, e.g. ``https://bj.5i5j.com``.

        :param city: city name resolved to a subdomain via ``self.city``
                     (inherited from BaseSpider).
        """
        super().__init__()
        self.host = f"https://{self.city(city)}.5i5j.com"

    @xml_parser(lambda soup: None if int(soup.find("div", class_="total-box fl").find("span").text) != 1 else {
        "wiwj_id": (e := soup.find("i", class_='infoicon i_01').find_next_siblings("span")[1].find_next("a")["href"]) and e.split("/")[2],
        "wiwj_six_month_sell_count": (e := soup.find("i", class_='infoicon i_01').find_next_sibling("span").find_next("a").text) and int(("x"+e).split()[1]),
        "birthday": soup.find("i", class_='infoicon i_02').find_next_sibling(text=True).strip(),
        "traffic": (e := soup.find("i", class_='infoicon i_03').find_next_sibling(text=True)) and (e.split("·")[1] if "·" in e else "")
    })
    def community_basic_info(self, community_name):
        """Fetch a community's basic info: build year and subway distance.

        Returns ``None`` (via the decorator) when the search result is not a
        single unambiguous community.
        """
        return self.get(f"xiaoqu/_{community_name}?zn={community_name}")

    def _extract_house_links(self, soup):
        """Collect absolute detail-page URLs from one parsed listing page."""
        return [
            f"{self.host}{href}"
            for elem in soup.find("ul", class_="pList").find_all("div", class_="listImg")
            # Non-".html" anchors are ads/placeholders, not house detail pages.
            if (href := elem.find("a")["href"]).endswith(".html")
        ]

    def community_house_list(self, community_name: str, conditions=None):
        """Return detail-page URLs of all second-hand houses in a community.

        :param community_name: community identifier used in the listing URL.
        :param conditions: optional filter dict, see ``create_house_condition``.
        :raises Exception: when the first page cannot be parsed or a
                           pagination request returns a non-200 status.
        """
        condition = ""
        with sync_playwright() as p:
            browser = p.chromium.launch()
            page = browser.new_page()
            if conditions:
                condition = self.create_house_condition(conditions)
                url = self._url(f"ershoufang/{condition}/_{community_name}/")
            else:
                url = self._url(f"ershoufang/_{community_name}/")
            # Must wait for network idle so the JS-set cookies are present.
            page.goto(url, wait_until="networkidle")
            self.cookies = {cookie.get('name'): cookie.get('value') for cookie in page.context.cookies()}
            result = page.content()
            browser.close()

        soup = BeautifulSoup(result, "lxml")
        try:
            total = int(soup.find("div", class_="total-box fl").find("span").text)
        except (AttributeError, TypeError, ValueError) as exc:
            # find() returning None / non-numeric text means the page layout
            # changed or we were blocked; surface it with the original message.
            raise Exception("页面解析失败!") from exc

        ret = self._extract_house_links(soup)

        # 30 listings per page; remaining pages are fetched over plain HTTP
        # with the cookies captured above.
        page_no = 1
        while page_no * 30 < total:
            page_no += 1
            result = self.session.get(self._url(f"ershoufang/{condition}n{page_no}/_{community_name}/"),
                                      cookies=self.cookies, headers=self.headers)
            if result.status_code != 200:
                raise Exception("http返回码错误!")
            soup = BeautifulSoup(result.text, "lxml")
            ret.extend(self._extract_house_links(soup))
        return ret

    @xml_parser(lambda soup: {
        "type_graph": (e := soup.find("a", class_="huxingimg jqzoom fancybox-thumbs")) and e["href"],  # photos may not exist yet
        "living_graph": (e := soup.find("a", class_="jqzoom fancybox-thumbs")) and e["href"],  # the first living-room photo is enough
        "price": int(soup.find("div", class_="de-price fl").find("span").text),  # sale price
        "floor": soup.find("label", text='所在楼层').find_next_sibling("span").text.split('/')[0],
        "total_floors": int(soup.find("label", text='所在楼层').find_next_sibling("span").text.split('/')[1][:-1]),
        "decoration": soup.find("label", text='装修情况').find_next_sibling("span").text,
        "type": soup.find("label", text='房屋户型').find_next_sibling("span").text,
        "size": soup.find("label", text='建筑面积').find_next_sibling("span").text[:-2],
        "attribute": soup.find("label", text='规划用途').find_next_sibling('span').text,
        "pre_trading_date": None,
        "age_limit": soup.find("label", text='购房年限').find_next_sibling('span').text,
        "code": (e := soup.find("span", class_="hy_code")) and e.text,
        "focus_count": None  # follower count
    })
    def house_info(self, href: str):
        """Fetch one house's details: price, floor plan / living-room photos,
        floor, decoration, size, ownership years, listing code, followers."""
        return self.session.get(href, cookies=self.cookies, headers=self.headers)

    @staticmethod
    def create_house_condition(conditions: dict[str, list[str] | str]) -> str:
        """Build the second-hand-house URL filter string.

        Only layout (房型), usage (用途) and area (面积) are implemented.

        :param conditions: e.g. ``{"用途": ["普通住宅"], "房型": ["二室"],
                           "面积": "80-100"}``.
        :raises Exception: when the 面积 value is not of the form "min-max".
        """
        # Order matters: the site expects usage codes before layout codes,
        # and within each group the codes must be ascending.
        order_options = {
            "用途": {
                "order": {"普通住宅": 1, "商业类": 31, "别墅": 3, "四合院": 4, "其他": 4, "车位": 7},
                "key": "q"
            },
            "房型": {
                "order": {"一室": 1, "二室": 2, "三室": 3, "四室": 4, "四室以上": "5r9"},
                "key": "r"
            }
        }
        res = ""
        for option, spec in order_options.items():
            if option in conditions:
                order = spec["order"]
                key = spec["key"]
                sorted_condition = sorted(conditions[option], key=lambda x: order[x])
                res += "".join(f"{key}{order[i]}" for i in sorted_condition)

        # Area is given as "min-max", e.g. "80-100" -> "h100l80".
        if conditions.get("面积"):
            try:
                small, large = conditions["面积"].split('-')
            except (ValueError, AttributeError) as exc:
                raise Exception("面积配置错误，请设置如：80-100") from exc
            res += f"h{large}l{small}"

        return res
