# /code/utils.py
import json
import os
import re
import urllib.parse
from lxml import etree
from code.logger_config import logger


def extract_json_from_html(html_content):
    """Extract and normalize JSON data embedded in an HTML page.

    Tries, in order:
      1. Parsing the whole content directly as JSON (fast path).
      2. Parsing as HTML with lxml and pulling JSON out of the <body>
         text, matching a JSON array ``[...]`` first, then an object
         ``{...}``.

    Args:
        html_content: Raw response text — either pure JSON or HTML
            wrapping a JSON payload.

    Returns:
        A list of normalized record dicts (see process_json_data),
        or [] on any failure (errors are logged, never raised).
    """
    try:
        # Fast path: the content may already be pure JSON.
        try:
            data = json.loads(html_content)
            return process_json_data(data)
        except json.JSONDecodeError:
            pass

        # Parse the content as HTML with lxml.
        parser = etree.HTMLParser()
        try:
            tree = etree.fromstring(html_content, parser)
        except etree.XMLSyntaxError:
            logger.error("Invalid HTML content")
            return []

        # Collect the direct text nodes of <body>.
        body_texts = tree.xpath('//body/text()')
        if not body_texts:
            # No body text: retry the whole content as JSON before giving up.
            try:
                data = json.loads(html_content)
                return process_json_data(data)
            except json.JSONDecodeError:
                logger.error("No valid JSON found in content")
                return []

        # NOTE(review): only the first text node is inspected; JSON split
        # across several text nodes would be missed — confirm with callers.
        body_text = body_texts[0].strip()

        # Match a JSON array first, then fall back to a JSON object.
        # BUGFIX: was r'$$.*$$' — '$' is an anchor, so the array branch
        # could never match; the intended pattern is escaped brackets.
        json_match = re.search(r'\[.*\]', body_text, re.DOTALL)
        if not json_match:
            json_match = re.search(r'\{.*\}', body_text, re.DOTALL)
            if not json_match:
                logger.error("No valid JSON found in body text")
                return []

        json_str = json_match.group(0)
        try:
            data = json.loads(json_str)
            return process_json_data(data)
        except json.JSONDecodeError as e:
            logger.error(f"JSON decode error: {str(e)}")
            return []

    except Exception as e:
        logger.error(f"Error processing content: {str(e)}")
        return []


def process_json_data(data):
    """Normalize a raw JSON payload into a list of trimmed record dicts.

    Accepts either a list of records, or a dict that wraps one under
    'children' or 'data' (a bare record dict is treated as a one-item
    list). Each output record keeps only 'title', 'user_id' and
    'online', plus a 'dept_id' taken from the DEPT_ID query parameter
    of the record's 'url', when present.
    """
    # Unwrap a dict container: 'children' takes priority over 'data';
    # anything else is treated as a single record.
    if isinstance(data, dict):
        for wrapper_key in ('children', 'data'):
            if wrapper_key in data and isinstance(data[wrapper_key], list):
                data = data[wrapper_key]
                break
        else:
            data = [data]

    if not isinstance(data, list):
        logger.error(f"Expected list or dict, got {type(data)}")
        return []

    result = []
    for record in data:
        if not isinstance(record, dict):
            logger.warning(f"Skipping non-dict item: {record}")
            continue

        # Copy over the plain pass-through fields that exist.
        entry = {
            key: record[key]
            for key in ('title', 'user_id', 'online')
            if key in record
        }

        # Derive dept_id from the DEPT_ID query parameter, best-effort.
        if 'url' in record:
            try:
                query = urllib.parse.urlparse(record['url']).query
                params = urllib.parse.parse_qs(query)
                if 'DEPT_ID' in params:
                    entry['dept_id'] = params['DEPT_ID'][0]
            except Exception as e:
                logger.warning(f"Failed to parse URL for dept_id: {str(e)}")

        result.append(entry)

    return result


def extract_gender_position(html):
    """Extract gender and position (role) from an HTML profile table.

    Searches for ``<td class="multi-row">label</td><td>value</td>``
    pairs where the label is 角色 (position) or 性别 (gender). Fields
    that are not found default to '未知' (unknown).
    """
    # Field name -> regex capturing the value cell after its label cell.
    patterns = {
        'position': r'<td\s+class="multi-row">角色</td>\s*<td>\s*([^<]+?)\s*</td>',
        'gender': r'<td\s+class="multi-row">性别</td>\s*<td>\s*([^<]+?)\s*</td>',
    }

    result = {'gender': '未知', 'position': '未知'}
    for field, pattern in patterns.items():
        match = re.search(pattern, html)
        if match:
            result[field] = match.group(1).strip()

    return result


async def save_cookie(cookie):
    """Serialize *cookie* to JSON and persist it to cookie.txt.

    Args:
        cookie: Any JSON-serializable object (typically the browser
            cookie list/dict to persist between sessions).
    """
    # ensure_ascii=False keeps non-ASCII cookie values human-readable.
    jsonCookies = json.dumps(cookie, ensure_ascii=False)
    txtFileName = "cookie.txt"
    # 'w' (not 'w+') — nothing is read back here; the with-block closes
    # the file, so the previous explicit file.close() was redundant.
    with open(txtFileName, 'w', encoding="utf-8") as file:
        file.write(jsonCookies)


async def load_cookie():
    """Load and return the cookie object previously saved to cookie.txt.

    Returns:
        The deserialized JSON object stored in cookie.txt.

    Raises:
        FileNotFoundError: If cookie.txt does not exist.
        json.JSONDecodeError: If the file does not contain valid JSON.
    """
    txtFileName = "cookie.txt"
    # The with-block closes the file; the previous explicit file.close()
    # inside it was redundant.
    with open(txtFileName, 'r', encoding="utf-8") as file:
        return json.load(file)
