import re
import hashlib
import openpyxl
import requests
from io import BytesIO
from docx import Document
from PyPDF2 import PdfReader
from langdetect import detect
from pptx import Presentation
from bs4 import BeautifulSoup, Comment
from src.common.logger import getLogger
from urllib.parse import urljoin, urlparse

logger = getLogger()

def string_to_hash(text, algorithm = 'md5', length = 32):
    """Hash a string and return a truncated hexadecimal digest.

    :param text: input string to hash
    :param algorithm: hash algorithm name ('md5', 'sha1', 'sha256', 'sha512')
    :param length: number of leading hex characters to keep
    :return: hex digest truncated to *length* characters
    """
    logger.info(f"string_to_hash text: {text}")
    # hashlib.new accepts the initial data directly, so no separate update() call.
    digest = hashlib.new(algorithm, text.encode('utf-8')).hexdigest()
    logger.info(f"string_to_hash full_hash: {digest}")
    return digest[:length]

def extract_document_content(file):
    """Extract plain text from an uploaded document (txt/pdf/docx/pptx/xlsx).

    :param file: uploaded file object exposing ``filename`` and ``read()``
    :return: extracted text; empty string for unsupported extensions
    """
    filename = file.filename
    # Bug fix: the f-string previously logged the literal text "(unknown)".
    logger.info(f"extract_document_content filename: {filename}")
    # Lower-case so "REPORT.PDF" is handled the same as "report.pdf".
    file_type = filename.split(".")[-1].lower()

    content = ""
    file_stream = BytesIO(file.read())
    if file_type == "txt":
        content = file_stream.read().decode("utf-8")
    elif file_type == "pdf":
        file_stream.seek(0)
        pdf_reader = PdfReader(file_stream)
        for page in pdf_reader.pages:
            # extract_text() can return None for image-only pages.
            content += (page.extract_text() or "").strip() + "\n"
    elif file_type == "docx":
        file_stream.seek(0)
        doc = Document(file_stream)
        for paragraph in doc.paragraphs:
            # Separate paragraphs so adjacent words do not run together.
            content += paragraph.text.strip() + "\n"
    elif file_type == "pptx":
        file_stream.seek(0)
        pptx = Presentation(file_stream)
        for slide in pptx.slides:
            for shape in slide.shapes:
                if hasattr(shape, "text"):
                    # Separate shapes so adjacent text blocks do not run together.
                    content += shape.text.strip() + "\n"
    elif file_type == "xlsx":
        xlsx_content = []
        file_stream.seek(0)
        workbook = openpyxl.load_workbook(file_stream, read_only=True)
        for sheetname in workbook.sheetnames:
            sheet = workbook[sheetname]
            sheet_content = [f"--- sheet {sheetname} ---"]
            for row in sheet.iter_rows(values_only=True):
                row_text = " | ".join([str(cell) for cell in row if cell is not None])
                if row_text.strip():
                    sheet_content.append(row_text)
            xlsx_content.append(sheet_content)
        # Join into readable text instead of str() of a nested list (which
        # previously produced a Python repr like "[['--- sheet ... ']]").
        content = "\n".join("\n".join(sheet_lines) for sheet_lines in xlsx_content)
    logger.info(f"extract_document_content content len: {len(content)}")
    return content

def build_tools_description(agent_tools):
    """Render a numbered, human-readable description of the agent's tools.

    :param agent_tools: tools exposing ``name``, ``description`` and a
        pydantic ``args_schema`` whose ``model_fields`` describe the arguments
    :return: newline-prefixed text, one numbered line per tool including its
        argument count and per-argument descriptions
    """
    tool_lines = []
    for tool_idx, tool in enumerate(agent_tools, start=1):
        fields = tool.args_schema.model_fields
        arg_parts = [
            f"{arg_idx} {field_name}: {fields[field_name].description}"
            for arg_idx, field_name in enumerate(fields, start=1)
        ]
        line = f"({tool_idx}) {tool.name}: {tool.description} (参数{len(fields)}个："
        line += ", ".join(arg_parts) + ")"
        tool_lines.append(line)
    response = "\n" + "\n".join(tool_lines)
    logger.info(f"commonUtils build_tools_description response: {response}")
    return response

def robust_detect_language(text):
    """Classify *text* as 'zh', 'en', or 'unknown'.

    Inputs containing only one script are decided with cheap regex scans;
    mixed Chinese/English inputs fall back to langdetect, defaulting to
    'zh' for anything that is neither English nor a Chinese variant.
    """
    logger.info(f"commonUtils robust_detect_language text len: {len(text)}")
    text = text.strip()

    contains_cjk = re.search(r'[\u4e00-\u9fff]', text) is not None
    contains_latin = re.search(r'[a-zA-Z]', text) is not None

    if contains_cjk != contains_latin:
        # Exactly one of the two scripts is present: unambiguous.
        return "zh" if contains_cjk else "en"
    if not contains_cjk:
        # Neither script present (digits, punctuation, other scripts).
        return "unknown"

    # Both scripts present: defer to langdetect.
    detected = detect(text)
    return "en" if detected == "en" else "zh"

def extract_website_content(prefix, pattern, suffix):
    """Collect the visible text of every page under a tutorial section.

    :param prefix: base URL prefix of the site
    :param pattern: tutorial section name appended to the prefix
    :param suffix: filename suffix of the index page
    :return: list of cleaned page texts, one per discovered URL
    """
    logger.info(f"commonUtils extract_website_content prefix: {prefix}, pattern: {pattern}, suffix: {suffix}")
    page_urls = extract_website_url(prefix, pattern, suffix)
    logger.info(f"commonUtils extract_website_content urls len: {len(page_urls)}")

    website_texts = [extract_website_text(page_url) for page_url in page_urls]
    logger.info(f"commonUtils extract_website_content website_texts len: {len(website_texts)}")
    return website_texts

def extract_website_text(url):
    """Download *url* and return its visible text with page chrome removed.

    Scripts, styles, HTML comments, 'sidebar' divs, and header/footer
    elements explicitly tagged as such are stripped before extracting text.

    :param url: page URL to fetch (10s timeout)
    :return: newline-separated visible text
    :raises requests.HTTPError: if the response status is not 2xx
    """
    logger.info(f"commonUtils extract_website_text url: {url}")
    response = requests.get(url, timeout = 10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")

    # Drop executable and styling payloads outright.
    for node in soup.find_all(["script", "style"]):
        node.decompose()

    # Strip HTML comments.
    for comment in soup.find_all(text=lambda t: isinstance(t, Comment)):
        comment.extract()

    # Sidebars are assumed to use the class 'sidebar'; adjust per target site.
    for node in soup.find_all("div", {'class': 'sidebar'}):
        node.decompose()

    # Remove header/footer elements only when their id or class explicitly
    # marks them as page chrome, to avoid dropping content-bearing tags.
    for node in soup.find_all(['header', 'footer']):
        if node.get('id') in ['header', 'footer'] or node.get('class') == ['header', 'footer']:
            node.decompose()

    # Flatten what remains into plain text.
    cleaned_text = soup.get_text(separator = "\n", strip = True)
    logger.info(f"commonUtils extract_website_text cleaned_text len: {len(cleaned_text)}")
    return cleaned_text

def extract_website_url(prefix, pattern, suffix):
    """Fetch a tutorial index page and collect same-site links under it.

    :param prefix: base URL prefix of the site
    :param pattern: tutorial section name appended to the prefix
    :param suffix: filename suffix of the index page (e.g. ".html")
    :return: list of absolute URLs, starting with the index page itself,
             followed by every http(s) link on it that stays on the same
             domain and under the tutorial prefix (duplicates preserved)
    :raises requests.HTTPError: if the index page response is not 2xx
    """
    tutorial_prefix = prefix + pattern
    url = tutorial_prefix + "/" + pattern + suffix
    response = requests.get(url, timeout = 10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")

    # Loop-invariant hoisted: the index page's domain, used to reject
    # external links (previously recomputed on every anchor).
    domain = urlparse(url).netloc

    links = [url]
    for a in soup.find_all('a', href = True):
        full_url = urljoin(url, a['href'])
        parsed = urlparse(full_url)
        if parsed.netloc == domain and parsed.scheme in ("http", "https") and full_url.startswith(tutorial_prefix):
            links.append(full_url)
    return links
