# +----------------------------------------------------------------------
# | ChatWork智能聊天办公系统
# +----------------------------------------------------------------------
# | 软件声明: 本系统并非自由软件,未经授权任何形式的商业使用均属非法。
# | 版权保护: 任何企业和个人不允许对程序代码以任何形式任何目的复制/分发。
# | 授权要求: 如有商业使用需求,请务必先与版权所有者取得联系并获得正式授权。
# +----------------------------------------------------------------------
# | Author: ChatWork Team <2474369941@qq.com>
# +----------------------------------------------------------------------
import os
import re
import time
import random
import string
import base64
import hashlib
import mammoth
import asyncio
import pandas as pd
from typing import List
from datetime import datetime
from bs4 import BeautifulSoup
from markdownify import markdownify as md
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader, PyMuPDFLoader
from config import get_settings


class FilesUtil:
    """Utilities for parsing documents (spreadsheets, Word, PDF, plain text,
    HTML) and converting files to other formats via LibreOffice."""

    @classmethod
    async def parse_raw(cls, path: str) -> str:
        """Parse a file into raw text / Markdown.

        Args:
            path: Path of the file to parse.

        Returns:
            Markdown for spreadsheets and Word documents; concatenated page
            text for PDFs and any other (plain-text) file.

        Raises:
            Exception: If the file does not exist or parsing fails.
        """
        if not os.path.exists(path):
            raise Exception("The file to be parsed does not exist.")

        try:
            extension = os.path.splitext(path)[1][1:].lower()
            if extension in ["csv", "xls", "xlsx"]:
                def sync_conversion():
                    # Fix: pd.ExcelFile cannot open CSV files; use read_csv
                    # for csv and read_excel only for real Excel workbooks.
                    if extension == "csv":
                        df = pd.read_csv(path)
                    else:
                        df = pd.read_excel(pd.ExcelFile(path))
                    return df.to_markdown(index=True)
                # Run the blocking pandas work off the event loop.
                return await asyncio.to_thread(sync_conversion)
            elif extension in ["docx", "doc"]:
                def sync_conversion():
                    with open(path, "rb") as docx_file:
                        result = mammoth.convert_to_html(docx_file)
                        return cls.html_to_md(result.value)
                return await asyncio.to_thread(sync_conversion)
            elif extension in ["pdf"]:
                pages = await PyMuPDFLoader(file_path=path).aload()
                return "".join(page.page_content for page in pages)
            else:
                # Fall back to the plain-text loader for any other extension.
                pages = await TextLoader(path).aload()
                return "".join(page.page_content for page in pages)
        except Exception as e:
            raise Exception(f"parse_raw: {str(e)}") from e

    @classmethod
    async def parse_qa(cls, path: str) -> List[dict]:
        """Parse a two-column Q&A spreadsheet (CSV or Excel).

        The first column is treated as the question and the second as the
        answer; column names come from the header row.

        Args:
            path: Path of the CSV/Excel file.

        Returns:
            A list of ``{"question": ..., "answer": ...}`` dicts. Empty cells
            (pandas NaN) are normalized to ``None``; rows with neither a
            question nor an answer are skipped.

        Raises:
            Exception: If reading or parsing the file fails.
        """
        def sync_conversion():
            ext = os.path.splitext(path)[1][1:].lower()
            if ext == "csv":
                df = pd.read_csv(path)
            else:
                df = pd.read_excel(pd.ExcelFile(path))

            lists = []
            for _, row in df.iterrows():
                record = row.to_dict()
                header = list(record.keys())
                Q = record.get(header[0] if len(header) >= 1 else "")
                A = record.get(header[1] if len(header) >= 2 else "")
                # Fix: NaN (a float) is truthy, so empty cells previously
                # leaked into the result; normalize them to None so rows with
                # no content are skipped and output stays JSON-serializable.
                Q = None if pd.isna(Q) else Q
                A = None if pd.isna(A) else A
                if Q or A:
                    lists.append({"question": Q, "answer": A})
            return lists

        try:
            return await asyncio.to_thread(sync_conversion)
        except Exception as e:
            raise Exception(f"parse_qa: {str(e)}") from e

    @classmethod
    async def parse_pdf(cls, path: str, chunk_size: int = 0):
        """Parse a PDF into per-page records and, optionally, text chunks.

        Args:
            path: Path of the PDF file.
            chunk_size: When > 0, additionally split the full text into
                chunks of roughly this size and map each chunk back to the
                page range it came from.

        Returns:
            ``paging`` (list of per-page dicts) when ``chunk_size`` is 0,
            otherwise the tuple ``(paging, sections)``.
        """
        # Load the PDF; the loader yields one document per page.
        pages = await PyMuPDFLoader(file_path=path).aload()

        # Collect the full text and one record per page
        # (page numbers are 1-based for the caller).
        paging = []
        content = ""
        for page in pages:
            content += page.page_content
            paging.append({
                "page_no": page.metadata["page"] + 1,
                "content": page.page_content,
                "metadata": page.metadata
            })

        # Split the concatenated text and locate each chunk's page span.
        sections = []
        if chunk_size:
            chunks = RecursiveCharacterTextSplitter(chunk_size=chunk_size).split_text(content)
            # 0-based index of the first page the next chunk may start on.
            page_idx = 0
            for index, chunk in enumerate(chunks):
                texts: str = ""
                pp_num: list = []
                # Accumulate consecutive pages until the chunk appears in the
                # accumulated text; those pages are the chunk's span.
                for page in pages[page_idx:]:
                    pp_num.append(str(page.metadata["page"] + 1))
                    texts += page.page_content
                    if chunk in texts:
                        page_idx = page.metadata["page"]
                        sections.append({
                            "page_nv": pp_num,
                            "page_no": int(pp_num[0]),
                            "chunk_index": index + 1,
                            "chunk_texts": chunk
                        })
                        break
                else:
                    # Chunk could not be located (e.g. the splitter trimmed
                    # whitespace across a page boundary): record page 0.
                    sections.append({
                        "page_nv": [str(0)],
                        "page_no": 0,
                        "chunk_index": index + 1,
                        "chunk_texts": chunk
                    })

        return (paging, sections) if chunk_size else paging

    @classmethod
    def html_to_md(cls, html: str, extract_img: bool = False):
        """Convert HTML to Markdown.

        Args:
            html: The HTML content.
            extract_img: When True, decode embedded base64 images to disk and
                return ``(markdown, image_paths)``; otherwise images are
                stripped and only the markdown string is returned.
        """
        soup = BeautifulSoup(html, 'html.parser')

        # Drop tags that carry no useful text.
        for tag in soup(['i', 'script', 'iframe']):
            tag.decompose()

        # Turn <pre> blocks into fenced code blocks, keeping the first CSS
        # class (if any) as a label on the line after the fence.
        for pre in soup.find_all('pre'):
            code_name = pre.get('class', '')[0] if pre.get('class') else ''
            content = pre.get_text().strip()
            pre.replace_with(soup.new_string(f"```\n{code_name}\n{content}\n```"))

        # Extract (or strip) images.
        images = []
        m = hashlib.md5()
        date_str: str = str(datetime.now().strftime("%Y%m%d"))
        for img in soup.find_all('img'):
            if not extract_img:
                img.replace_with(soup.new_string(""))
                continue

            alt = img.get('alt', '')
            src = img.get('src', '')

            # Fix: only base64 data URIs can be decoded. Keep any other src
            # (e.g. an http URL) as a plain markdown image instead of
            # crashing on split()/b64decode.
            if not src.startswith("data:") or "," not in src:
                img.replace_with(soup.new_string(f"![{alt}]({src})"))
                continue

            ext = os.path.splitext(alt)[1][1:].lower()
            if ext not in ["png", "jpg", "jpeg", "gif", "webp"]:
                ext = "png"

            # Derive a collision-resistant file name from alt + randomness.
            random_chars = ''.join(random.choices(string.ascii_letters + string.digits, k=10))
            m.update((alt + random_chars + str(time.time())).encode("utf-8"))
            root_path: str = get_settings().APP_PATH + "/public"
            file_path: str = f"storage/knowing/images/{date_str}"
            file_name: str = f"{m.hexdigest()}.{ext}"
            save_name: str = f"{root_path}/{file_path}/{file_name}"

            # exist_ok avoids a race when concurrent requests create the dir.
            os.makedirs(f"{root_path}/{file_path}", exist_ok=True)

            decoded_bytes = base64.b64decode(src.split(",", 1)[1])
            with open(save_name, "wb") as file:
                file.write(decoded_bytes)

            img.replace_with(soup.new_string(f"![{alt}]({file_path}/{file_name})"))
            images.append(f"{file_path}/{file_name}")

        # Drop explicit <br> tags; markdownify handles paragraph breaks.
        for br in soup.find_all('br'):
            br.replace_with(soup.new_string(""))

        # Convert to Markdown without escaping common characters.
        markdown = md(
            soup.prettify(),
            escape_asterisks=False,
            escape_underscores=False,
            escape_misc=False
        )

        # Collapse runs of blank lines into a single blank line.
        markdown = re.sub(r"\n\n+", "\n\n", markdown)
        return (markdown, images) if extract_img else str(markdown)

    @classmethod
    async def convert_file(cls, path: str, output_path: str, output_ext: str = "pdf"):
        """Convert a file with LibreOffice (``soffice``) to another format.

        Args:
            path: Source file path.
            output_path: Directory the converted file is written to
                (created if missing).
            output_ext: Target extension/format (default ``"pdf"``).

        Raises:
            Exception: If the source is missing, the conversion fails, or it
                times out (120 seconds).
        """
        try:
            # Validate the source file.
            if not os.path.exists(path):
                raise Exception("The source file does not exist")

            # Ensure the output directory exists.
            os.makedirs(output_path, exist_ok=True)

            # Build the LibreOffice headless-conversion command.
            command = [
                'soffice', '--headless', '--norestore',
                '--convert-to', output_ext, path,
                '--outdir', output_path
            ]

            # Launch the conversion as an async subprocess.
            process = await asyncio.create_subprocess_exec(
                *command,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )

            # Wait for completion with a hard timeout.
            try:
                stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=120)
            except asyncio.TimeoutError:
                process.kill()
                await process.wait()
                raise RuntimeError("Conversion timed out after 120 seconds")

            # Fix: judge success by the exit code only. LibreOffice routinely
            # writes warnings to stderr even on successful conversions, so a
            # non-empty stderr alone must not be treated as failure.
            if process.returncode != 0:
                error_msg = stderr.decode("utf-8").strip() if stderr else "Unknown error"
                raise RuntimeError(
                    f"LibreOffice failed with exit code {process.returncode}: {error_msg}"
                )
        except Exception as e:
            raise Exception(str(e)) from e
