import re
from datetime import datetime
from typing import List
from urllib.parse import urlparse, urljoin
from os.path import splitext
from parsel import Selector as Locator
from .core.models import SimpleNewsItem, NewsItem
from bs4 import BeautifulSoup


def to_playwright_selector(selector_by: str, selector_value: str) -> str:
    """Translate a Selenium-style (by, value) locator into a Playwright selector string.

    Args:
        selector_by: Selenium locator strategy ("id", "xpath", "css selector", ...).
        selector_value: The locator value for that strategy.

    Returns:
        The equivalent Playwright selector, or "" when either argument is empty.

    Raises:
        ValueError: If ``selector_by`` is not a supported strategy.
    """
    if not (selector_by and selector_value):
        return ""

    conversions = {
        "id": f"#{selector_value}",
        "xpath": selector_value,
        "link text": f'text="{selector_value}"',
        "partial link text": f"text={selector_value}",
        "name": f'[name="{selector_value}"]',
        "tag name": selector_value,
        "class name": f".{selector_value}",
        "css selector": selector_value,
    }

    try:
        return conversions[selector_by]
    except KeyError:
        raise ValueError(f"Unsupported selector type: {selector_by}") from None


def process_datetime_with_regex_and_format(txt, pat, dat_format):
    """Extract (year, month, day, time) groups from *txt* via *pat* and
    normalize to "YYYY-MM-DD HH:MM:SS".

    Args:
        txt: Raw date/time string.
        pat: Regex with exactly four capture groups (year, month, day, optional time).
        dat_format: strptime format matching the rebuilt "Y-M-D time" string.

    Returns:
        Zero-padded "%Y-%m-%d %H:%M:%S" string, or "" when the pattern does
        not match or the captured pieces are not a valid date.
    """
    try:
        match = re.search(pat, txt)
        if match is None:
            # Explicit no-match path (original relied on AttributeError).
            return ""
        groups = list(match.groups())
        # Missing time component -> midnight; "HH:MM" -> "HH:MM:SS".
        if groups[3] is None:
            groups[3] = "00:00:00"
        if len(groups[3]) == 5:
            groups[3] += ":00"
        date_str = f"{groups[0]}-{groups[1]}-{groups[2]} {groups[3]}"
        # Parse once, then re-emit in canonical zero-padded form
        # (original parsed the same string twice).
        return datetime.strptime(date_str, dat_format).strftime("%Y-%m-%d %H:%M:%S")
    except Exception:
        # Deliberate best-effort contract: any malformed input yields "".
        return ""


def process_datetime_format(date_str):
    """Normalize a date/time string to the standard "YYYY-MM-DD HH:mm:ss" format.

    Tries a set of regex/format pairs covering common Chinese and Western
    date layouts ("2024年5月3日", "2024-5-3", "2024/5/3", "2024.5.3", ...).

    Args:
        date_str (str): Raw date string.

    Returns:
        str: Normalized date string, or "" when the input is empty or no
        pattern matches.
    """
    # Empty / whitespace-only input -> "" (original had two redundant checks).
    if not date_str or not date_str.strip():
        return ""

    date_str = date_str.strip()
    regex_datetime_formats = {
        r"(\d{4})年(\d{1,2})月(\d{1,2})日\s*(\d{2}:\d{2}(?::\d{2})?)?": "%Y-%m-%d %H:%M:%S",
        r"(\d{4})-(\d{1,2})-(\d{1,2})\s*(\d{2}:\d{2}(?::\d{2})?)?": "%Y-%m-%d %H:%M:%S",
        r"(\d{4})/(\d{1,2})/(\d{1,2})\s*(\d{2}:\d{2}(?::\d{2})?)?": "%Y-%m-%d %H:%M:%S",
        # BUG FIX: the dot separator was unescaped and matched ANY character,
        # letting strings like "2024a1b2" through as dates.
        r"(\d{4})\.(\d{1,2})\.(\d{1,2})\s*(\d{2}:\d{2}(?::\d{2})?)?": "%Y-%m-%d %H:%M:%S",
        r"-(\d{4})-\s+(\d{2})/(\d{2})\s+(\d{2}:\d{2})": "%Y-%m-%d %H:%M:%S",
    }
    for pat, dat_format in regex_datetime_formats.items():
        ret = process_datetime_with_regex_and_format(date_str, pat, dat_format)
        if ret:
            return ret
    # All conversions failed.
    return ""


def strip_plus(text: str) -> str:
    """Strip surrounding whitespace, then '>' characters, then ':' characters.

    Equivalent to the original strip/rstrip chain: each ``rstrip`` call there
    was a subset of the adjacent two-sided ``strip``, so three calls suffice.
    """
    return text.strip().strip(">").strip(":")


def langchain_text_spilt(input_text, chunk_size=450, chunk_overlap=10) -> List[str]:
    """Split text into chunks on sentence-ending punctuation.

    Uses langchain's RecursiveCharacterTextSplitter, then moves any separator
    punctuation stranded at a chunk's start back onto the previous chunk, and
    drops chunks that are empty or consist of a lone separator.

    Args:
        input_text: Text to split.
        chunk_size: Target maximum chunk length.
        chunk_overlap: Overlap between consecutive chunks.

    Returns:
        List of non-empty text chunks.
    """
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    separators = [
        "\n\n",
        "\n",
        "。”",
        "！”",
        "？”",
        "。“",
        "！“",
        "？“",
        "。",
        "！",
        "？",
        "!",
        " ",
    ]
    text_splitter = RecursiveCharacterTextSplitter(
        separators=separators,
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    texts = text_splitter.split_text(input_text)
    # Re-attach separator punctuation that ended up at a chunk's start
    # (two-char separators like '。”' checked before single-char ones).
    for i, text in enumerate(texts):
        if i > 0 and text[:2] in separators:
            texts[i - 1] += text[:2]
            texts[i] = text[2:]
        elif i > 0 and text[:1] in separators:
            texts[i - 1] += text[:1]
            texts[i] = text[1:]
    # BUG FIX: the original called texts.remove() while iterating texts,
    # which skips the element after each removal. Build a filtered copy.
    return [t for t in texts if t and t not in separators]


def get_url_extension(url: str):
    """Return the file extension of a URL's path, lower-cased and without the dot.

    Query strings and fragments are ignored because only the parsed path
    component is inspected. Returns "" when the path has no extension.
    """
    path = urlparse(url).path
    _, extension = splitext(path)
    return extension.lower().lstrip(".")


def is_css_or_xpath(expression: str) -> str:
    """Heuristically classify an expression as a CSS selector or an XPath.

    Args:
        expression: The string to classify (CSS or XPath expression).

    Returns:
        'css' for a CSS selector, 'xpath' for an XPath expression,
        'unknown' when the type cannot be determined (empty input).
    """
    # Trim whitespace so padding does not affect the checks below.
    expr = expression.strip()
    if not expr:
        return "unknown"

    # Feature 1: XPath commonly starts with / or // (absolute/relative path).
    # A single "/" prefix covers "//" as well (original tested a redundant tuple).
    if expr.startswith("/"):
        return "xpath"

    # Feature 2: XPath uses @ for attributes (e.g. @id, [@class]).
    # NOTE: this also catches CSS at-rules like @media, but those are not
    # selectors, so the misclassification risk is low.
    if re.search(r"[@]\w+", expr) or re.search(r"\[\s*@", expr):
        return "xpath"

    # Feature 3: XPath-specific functions or axes (text(), contains(), ancestor::, ...).
    xpath_specific_patterns = (
        r"text\(\)",
        r"contains\(",
        r"starts-with\(",
        r"ancestor::",
        r"following-sibling::",
        r"preceding-sibling::",
        r"position\(\)",
    )
    if any(re.search(pattern, expr) for pattern in xpath_specific_patterns):
        return "xpath"

    # Feature 4: CSS-specific syntax — leading . or # with no XPath features.
    if re.match(r"^[.#]", expr):
        return "css"

    # Feature 5: CSS attribute selectors have no @ (e.g. [name="x"]),
    # whereas XPath predicates do (e.g. [@name="x"]).
    if "[" in expr and not re.search(r"\[\s*@", expr):
        return "css"

    # Edge case: a bare tag name ('div') is valid in both; CSS is more common.
    return "css"


def simple_news_item_to_news_item(simple: SimpleNewsItem) -> NewsItem:
    """Build a NewsItem from a SimpleNewsItem by extracting plain text from its HTML.

    Pulls all text nodes outside <script>/<style>, strips template
    placeholders, and compacts long whitespace runs, then copies the
    SimpleNewsItem fields plus the derived text and its length.
    """
    selector = Locator(text=simple.content)
    # Every text node except those inside <script> or <style>.
    raw_text = "".join(
        selector.xpath("//text()[not(parent::script) and not(parent::style)]").getall()
    )
    # Remove leftover template placeholders such as {{title}}.
    cleaned = re.sub("{{[a-zA-Z]+}}", "", raw_text)
    # Collapse long tab runs first, then long whitespace runs into paragraph breaks.
    cleaned = re.sub(r"\t{4,}", "\t\t", cleaned)
    cleaned = re.sub(r"\s{4,}", "\n\n", cleaned)

    return NewsItem(**simple.model_dump(), length=len(cleaned), text_content=cleaned)


def match_domain(domain, pat):
    """Check whether *domain* matches a wildcard pattern such as "*.example.com".

    Each "*" in *pat* matches one or more dot-separated labels.

    Args:
        domain: Domain name to test.
        pat: Pattern where "*" is a wildcard and everything else is literal.

    Returns:
        True if the whole domain matches the pattern.
    """
    # BUG FIX: the original left "." unescaped (so it matched ANY character,
    # e.g. "exampleXcom" matched "example.com") and used re.match, which
    # accepted arbitrary trailing text after the pattern.
    literal_parts = [re.escape(part) for part in pat.split("*")]
    regex = r"(\w+(\.)?)+?".join(literal_parts)
    return bool(re.fullmatch(regex, domain))


def replace_relative_urls(html, base_url):
    """
    Replace relative URLs in HTML with absolute URLs.

    Args:
        html: Original HTML string.
        base_url: Base URL used to resolve relative paths
            (e.g. "https://example.com/path/").

    Returns:
        The processed HTML string (contents of <body>, or the whole
        document when lxml produced no <body>).
    """
    # Parse with lxml (swap to 'html.parser' if lxml is unavailable).
    soup = BeautifulSoup(html, "lxml")

    # 1. Rewrite URL-bearing attributes on common tags (extend as needed).
    tags_attrs = [
        ("a", "href"),
        ("img", "src"),
        ("img", "srcset"),  # special case: srcset holds multiple URLs
        ("link", "href"),  # e.g. CSS links
        ("script", "src"),  # e.g. JS links
        ("iframe", "src"),
        ("video", "src"),
        ("audio", "src"),
        ("source", "src"),  # child of video/audio
        ("source", "srcset"),
    ]

    for tag_name, attr_name in tags_attrs:
        for tag in soup.find_all(tag_name):
            if attr_name not in tag.attrs:
                continue  # skip tags without the attribute
            attr_value = tag[attr_name].strip()
            if not attr_value:
                continue  # skip empty values

            # srcset format: "url1 1x, url2 2x" — rewrite each URL part.
            if attr_name == "srcset":
                new_parts = []
                for part in attr_value.split(","):
                    part = part.strip()
                    if not part:
                        continue
                    # Separate URL from its descriptor ("1x", "200w", ...)
                    url_part, *rest = part.split(maxsplit=1)  # split on first space only
                    rest_part = " ".join(rest) if rest else ""
                    absolute_url = urljoin(base_url, url_part)
                    new_parts.append(f"{absolute_url} {rest_part}".strip())
                tag[attr_name] = ", ".join(new_parts)
            else:
                # Regular attribute: resolve directly.
                absolute_url = urljoin(base_url, attr_value)
                tag[attr_name] = absolute_url

    # Matches url(...) with optional single/double quotes; compiled once
    # (the original recompiled it for every <style> tag and style attribute).
    css_url_pattern = re.compile(r'url\((["\']?)(.*?)\1\)')

    def replace_css_url(match):
        # Rebuild url(...) with the resolved absolute URL, keeping the quotes.
        quote = match.group(1)  # quote character (may be empty)
        relative_url = match.group(2)
        absolute_url = urljoin(base_url, relative_url)
        return f"url({quote}{absolute_url}{quote})"

    # 2. Rewrite url() references inside <style> tags (e.g. background-image).
    for style_tag in soup.find_all("style"):
        if not style_tag.string:
            continue  # skip empty style blocks
        style_tag.string = css_url_pattern.sub(replace_css_url, style_tag.string)

    # 3. Rewrite url() in inline style attributes
    #    (e.g. <div style="background: url('img.jpg')">).
    for tag in soup.find_all(style=True):
        tag["style"] = css_url_pattern.sub(replace_css_url, tag["style"])

    # BUG FIX: guard against markup lxml did not wrap in a <body>
    # (the original crashed with AttributeError on find() returning None).
    body = soup.find("body")
    return body.decode_contents() if body is not None else str(soup)


from markdownify import MarkdownConverter


class ImagePreservingConverter(MarkdownConverter):
    """MarkdownConverter subclass that renders <img> tags as Markdown images."""

    # NOTE(review): markdownify's hook signatures vary between versions;
    # this matches the (self, el, text, parent_tags) form — confirm against
    # the pinned markdownify release.
    def convert_img(self, el, text, parent_tags=None):
        # An <img> without a usable src produces no output.
        source = el.get("src", "").strip()
        if not source:
            return ""
        alt_text = el.get("alt", "").strip()
        # Emit the standard Markdown image syntax.
        return f"![{alt_text}]({source})"


# 转换函数
def markdownify_with_images(html):
    return ImagePreservingConverter().convert(html)
