"""Utility & helper functions."""

from langchain.chat_models import init_chat_model
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import BaseMessage

from langchain_ollama import ChatOllama
from langchain_ollama import OllamaEmbeddings
from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_nomic import NomicEmbeddings
import os
from typing import List, Optional
import re


def get_message_text(msg: BaseMessage) -> str:
    """Return the plain-text content of a message.

    Handles the three shapes ``msg.content`` may take: a plain string,
    a single dict carrying a ``"text"`` key, or a list of string/dict
    content parts.
    """
    content = msg.content
    if isinstance(content, str):
        return content
    if isinstance(content, dict):
        return content.get("text", "")
    # List of content parts: keep strings as-is, pull "text" out of dicts.
    parts = []
    for part in content:
        if isinstance(part, str):
            parts.append(part)
        else:
            parts.append(part.get("text") or "")
    return "".join(parts).strip()


def image_uri_extract(query: str) -> Optional[str]:
    """Extract the first image path or URL found in a user query.

    Recognized forms, in priority order: a relative path starting with
    './', a Windows absolute path (drive letter), an http(s) URL, and a
    Linux absolute path. Accepted extensions: png, jpg, jpeg, bmp.

    Args:
        query (str): The raw user query text.

    Returns:
        Optional[str]: The matched path/URL, or None if nothing matched.
    """
    # Normalize Windows separators so the '/'-based patterns below work.
    query = query.replace("\\", "/")
    # Relative path rooted at the project directory ('./...').
    re_path_pattern = r'((?:\./)+(?:[^/]+/)*[^/]+\.(?:png|jpg|jpeg|bmp))'
    # Windows absolute path (drive letter prefix).
    win_abs_path_pattern = r'([a-zA-Z]:[\\/](?:[^\\/]+[\\/])*[^\\/]+\.(?:png|jpg|jpeg|bmp))'
    # Linux absolute path.
    linux_abs_path_pattern = r'(/(?:[^/]+/)*[^/]+\.(?:png|jpg|jpeg|bmp))'
    # http/https image URL. The original pattern
    # r'(https?://[^\s]+\.png|\.jpg|\.jpeg)' mis-grouped the alternation:
    # only '.png' was tied to the URL prefix, and bare '.jpg'/'.jpeg'
    # substrings anywhere matched. Use a non-capturing extension group.
    url_pattern = r'(https?://\S+\.(?:png|jpg|jpeg|bmp))'

    # Check URL before the Linux absolute path: a URL contains a
    # '/host/file.ext' tail that the Linux pattern would otherwise match
    # first, returning only the path portion of the URL.
    if (re_path_match := re.search(re_path_pattern, query)):
        return re_path_match.group(0)
    elif (win_path_match := re.search(win_abs_path_pattern, query)):
        return win_path_match.group(0)
    elif (url_match := re.search(url_pattern, query)):
        return url_match.group(0)
    elif (linux_path_match := re.search(linux_abs_path_pattern, query)):
        return linux_path_match.group(0)
    else:
        return None


def load_chat_model(fully_specified_name: str, max_predict: int, context: int) -> BaseChatModel:
    """Build a local Ollama chat model.

    Args:
        fully_specified_name (str): Ollama model name, e.g. ``llama3.1:8b``.
        max_predict (int): Passed to ChatOllama as ``num_predict``.
        context (int): Passed to ChatOllama as ``num_ctx``.

    Returns:
        BaseChatModel: The configured ChatOllama instance.
    """
    # Temperature is pinned to 0 for deterministic output.
    model_config = {
        "model": fully_specified_name,
        "temperature": 0,
        "num_predict": max_predict,
        "num_ctx": context,
    }
    return ChatOllama(**model_config)


def list_txt_text(directory: str) -> List[str]:
    """Collect every .txt file path under *directory*, recursively.

    Args:
        directory (str): Root directory to walk.

    Returns:
        List[str]: Paths of all .txt files, with '/' separators.
    """
    # Normalize to forward slashes so downstream path splitting works
    # the same on Windows and Linux.
    return [
        os.path.join(root, name).replace("\\", "/")
        for root, _, names in os.walk(directory)
        for name in names
        if name.endswith('.txt')
    ]


def load_text_db(db: str):
    """Open (or create) the persistent Chroma text collection.

    Args:
        db (str): Directory where the database is persisted.

    Returns:
        The Chroma vector store, embedding via the local
        ``nomic-embed-text`` Ollama model.
    """
    embedder = OllamaEmbeddings(model="nomic-embed-text")
    return Chroma(
        collection_name="dunhuang_text_db",
        embedding_function=embedder,
        persist_directory=db,
    )


def append_text_db(db: str, text_dir: str):
    """Index every .txt file under *text_dir* into the text vector store.

    Each file is split into overlapping chunks, and each chunk is
    prefixed with its parent-directory name plus 窟 ("cave") and the
    cleaned file name, so provenance survives chunking.

    Args:
        db (str): Directory where the database is persisted.
        text_dir (str): Directory tree containing the .txt files to append.

    Returns:
        The updated Chroma vector store.
    """
    vector_store = load_text_db(db)
    txt_files = list_txt_text(text_dir)
    print(txt_files)

    # Read each file inside a context manager; the previous comprehension
    # called open() without ever closing the file handles.
    docs = []
    for txt_file in txt_files:
        with open(txt_file, "r", encoding='utf-8') as fh:
            docs.append(Document(page_content=fh.read(), metadata={"path": txt_file}))

    # Chunk first, then tag each chunk with its source so a chunk
    # retrieved on its own still carries its context.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
    tagged_splits = []
    for doc in text_splitter.split_documents(docs):
        cave = doc.metadata['path'].split("/")[-2]
        title = os.path.basename(doc.metadata['path']).replace("_", "").replace(".txt", "")
        tagged_splits.append(
            Document(page_content=cave + "窟 " + title + ". " + doc.page_content,
                     metadata=doc.metadata))

    # Index the tagged chunks.
    vector_store.add_documents(tagged_splits)
    print(
        f"Append all txt in {text_dir} to {vector_store._collection_name} done")
    return vector_store


def list_png_images(directory: str) -> List[str]:
    """Collect every .png file path under *directory*, recursively.

    Args:
        directory (str): Root directory to walk.

    Returns:
        List[str]: Paths of all .png files, with '/' separators.
    """
    # Normalize to forward slashes for platform-independent paths.
    return [
        os.path.join(root, name).replace("\\", "/")
        for root, _, names in os.walk(directory)
        for name in names
        if name.endswith('.png')
    ]


def load_img_db(db: str):
    """Open (or create) the persistent Chroma image collection.

    Args:
        db (str): Directory where the database is persisted.

    Returns:
        The Chroma vector store, embedding via Nomic's
        ``nomic-embed-vision-v1.5`` vision model.
    """
    image_embedder = NomicEmbeddings(
        model="_", vision_model="nomic-embed-vision-v1.5")
    return Chroma(
        collection_name="dunhuang_db",
        embedding_function=image_embedder,
        persist_directory=db,
    )


def append_img_db(db: str, img_dir: str):
    """Index every PNG image under *img_dir* into the image vector store.

    Args:
        db (str): Directory where the database is persisted.
        img_dir (str): Directory tree containing the images to append.

    Returns:
        The updated Chroma vector store.

    Note:
        Requires a NOMIC API KEY to work.
    """
    vector_store = load_img_db(db)
    image_paths = list_png_images(img_dir)

    # One metadata entry per image, recording its source path.
    vector_store.add_images(
        uris=image_paths,
        metadatas=[{'path': image} for image in image_paths],
    )
    print(f"Append all images in {img_dir} to {vector_store._collection_name} done")
    return vector_store
