# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import asyncio
import json
import os
from typing import List, Optional, Union

import aiohttp
import asyncpg
import httpx
import tiktoken
from dataprep_utils import (
    create_upload_folder,
    document_loader,
    encode_filename,
    get_separators,
    get_tables_result,
    parse_html_new,
    save_content_to_local_disk,
)
from docarray import BaseDoc
from dotenv import load_dotenv
from fastapi import File, Form, HTTPException, UploadFile
from fastapi.responses import StreamingResponse
from langchain.text_splitter import (
    HTMLHeaderTextSplitter,
    RecursiveCharacterTextSplitter,
)
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

from comps import (
    DocPath,
    ServiceType,
    opea_microservices,
    opea_telemetry,
    register_microservice,
)

# Tokenizer matching OpenAI's o200k_base vocabulary; used by count_tokens().
openai_tokenizer = tiktoken.get_encoding("o200k_base")

# Pull configuration from a local .env file (if present) into the environment.
load_dotenv()
# OpenAI-compatible LLM serving endpoint and model, used by the /chat route.
LLM_ENDPOINT = os.getenv("LLM_ENDPOINT", "http://localhost:8008/v1")
LLM_MODEL = os.getenv("LLM_MODEL", "meta-llama/Meta-Llama-3-8B-Instruct")
# NOTE(review): "enpoint" is a typo, kept as-is because the name is referenced below.
embedding_enpoint = os.getenv("EMBEDDING_ENDPOINT", "")
# PostgreSQL (pgvector) connection settings for the chunk/embedding store.
pg_host = os.getenv("PG_HOST", "")
pg_port = os.getenv("PG_PORT", "5432")
pg_user = os.getenv("PG_USER", "")
pg_pwd = os.getenv("PG_PWD", "")
pg_db = os.getenv("PG_DB", "")
pg_table = os.getenv("PG_TABLE", "iaudit")
# NOTE(review): os.getenv returns a str when the env var is set, so these may be
# str or int; they appear unused in this file (/dataprep takes its own Form params).
chunk_size = os.getenv("CHUNK_SIZE", 300)
chunk_overlap = os.getenv("CHUNK_OVERLAP", 50)

# Downstream audit-agent service that generates compliance reports for /chat.
AUDIT_AGENT_ENDPOINT = os.getenv(
    "AUDIT_AGENT_ENDPOINT", "http://localhost:9095/v1/chat/completions"
)

# Fail fast at import time if required configuration is missing.
# NOTE(review): assert statements are stripped under `python -O`; consider
# raising explicitly if that mode is ever used.
assert embedding_enpoint != "", "EMBEDDING_ENDPOINT is not set"
assert pg_host != "", "PG_HOST is not set"
assert pg_user != "", "PG_USER is not set"
assert pg_pwd != "", "PG_PWD is not set"
assert pg_db != "", "PG_DB is not set"
assert pg_table != "", "PG_TABLE is not set"

# Directory where uploaded files and fetched links are persisted.
upload_folder = "./uploaded_files/"

# In-memory cache: file basename -> list of text chunks produced at ingest time.
CHUNK_STORAGE = dict()


class ChatParams(BaseDoc):
    """Request body for the /chat endpoint."""

    # OpenAI-style chat history: list of {"role": ..., "content": ...} dicts.
    messages: list
    # When True, responses are streamed as server-sent events (always True here).
    streaming: bool = True


@opea_telemetry
def post_process_text(text: str):
    """Encode one streamed text token as a server-sent-event data line.

    Spaces become the "@#$" placeholder and a lone newline becomes "<br/>"
    so the newline-delimited SSE framing is not corrupted; any other
    purely-whitespace token is dropped (None).
    """
    special_tokens = {" ": "data: @#$\n\n", "\n": "data: <br/>\n\n"}
    if text in special_tokens:
        return special_tokens[text]
    if text.isspace():
        return None
    escaped = text.replace(" ", "@#$")
    return f"data: {escaped}\n\n"


def get_doc_content(doc_path: DocPath):
    """Load the raw content of the document at ``doc_path.path``.

    The content is also cached in CHUNK_STORAGE (as a single-element list)
    keyed by the file's basename, and returned to the caller.
    """
    path = doc_path.path

    print(f"[ ingest data ] Parsing document {path}.")

    content = document_loader(path)
    print("[ ingest data ] file content loaded")

    # Cache under the basename so later lookups by filename find it.
    file_name = doc_path.path.split("/")[-1]
    CHUNK_STORAGE[file_name] = [content]
    print(f"chunks storage keys {CHUNK_STORAGE.keys()}")
    return content


def get_chunks(doc_path: DocPath):
    """Load the document at ``doc_path.path`` and split it into text chunks.

    HTML files are split on h1/h2/h3 headers; other formats are split with a
    recursive character splitter using ``doc_path.chunk_size`` and
    ``doc_path.chunk_overlap``. Structured formats (.xlsx, .csv, .json,
    .jsonl) are returned as loaded, without re-splitting. For PDFs, table
    content may be appended as extra chunks when ``doc_path.process_table``
    is set. The chunks are cached in CHUNK_STORAGE keyed by file basename.
    """
    path = doc_path.path

    print(f"[ ingest data ] Parsing document {path}.")

    if path.endswith(".html"):
        headers_to_split_on = [
            ("h1", "Header 1"),
            ("h2", "Header 2"),
            ("h3", "Header 3"),
        ]
        text_splitter = HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
    else:
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=doc_path.chunk_size,
            chunk_overlap=doc_path.chunk_overlap,
            add_start_index=True,
            separators=get_separators(),
        )

    content = document_loader(path)
    print("[ ingest data ] file content loaded")

    # os.path.splitext yields extensions WITH the leading dot, so every entry
    # must include it. (".jsonl" was previously listed as "jsonl" and never
    # matched, causing jsonl files to be character-split by mistake.)
    structured_types = [".xlsx", ".csv", ".json", ".jsonl"]
    _, ext = os.path.splitext(path)

    if ext in structured_types:
        # Structured data is already record-oriented; keep it as loaded.
        chunks = content
    else:
        chunks = text_splitter.split_text(content)

    ### Specially processing for the table content in PDFs
    if doc_path.process_table and path.endswith(".pdf"):
        table_chunks = get_tables_result(path, doc_path.table_strategy)
        chunks = chunks + table_chunks

    print(
        f"[ ingest data ] Done preprocessing. Created {len(chunks)} chunks of the given file."
    )

    file_name = doc_path.path.split("/")[-1]
    CHUNK_STORAGE[file_name] = chunks
    print(f"chunks storage keys {CHUNK_STORAGE.keys()}")
    return chunks


def role_mapping(openai_role):
    """Translate an OpenAI chat role into its LangChain message-type name.

    "assistant" maps to "ai" and "user" maps to "human"; any other role
    (e.g. "system") is passed through unchanged.
    """
    translation = {"assistant": "ai", "user": "human"}
    return translation.get(openai_role, openai_role)


def count_tokens(text):
    """Return the number of o200k_base tokens in ``text``."""
    return len(openai_tokenizer.encode(text))


async def file_exists(encoded_file_name):
    """Return True if ``pg_table`` contains at least one row for this filename.

    Opens a fresh asyncpg connection per call and closes it before returning.
    The table name is interpolated from the PG_TABLE env var (deployment
    config, not user input); the filename itself is a bound parameter.
    """
    conn = await asyncpg.connect(
        user=pg_user, password=pg_pwd, database=pg_db, host=pg_host, port=pg_port
    )

    async with conn.transaction():
        results = await conn.fetch(
            f"""
                     SELECT count(*)
                     FROM {pg_table}
                     WHERE filename = $1
                """,
            encoded_file_name,
        )

    await conn.close()

    return int(results[0]["count"]) > 0


@register_microservice(
    name="opea_service@api",
    service_type=ServiceType.LLM,
    endpoint="/chat",
    host="0.0.0.0",
    port=9000,
)
async def chat(params: ChatParams):
    """Chat endpoint with two paths, both returning an SSE StreamingResponse.

    If the last message contains "Compliance report for", the request is
    forwarded to the audit-agent service for the referenced file (which must
    already be ingested, checked via file_exists); otherwise the full message
    history is streamed through the OpenAI-compatible LLM endpoint.
    """
    llm = ChatOpenAI(
        model=LLM_MODEL,
        openai_api_key="dummy",  # self-hosted OpenAI-compatible endpoint; key unused
        openai_api_base=LLM_ENDPOINT,
        temperature=0.01,
        # max_tokens=1024,
    )

    # Sampling parameters forwarded on every streamed LLM call.
    parameters = {
        "top_p": 1,
        "temperature": 0.01,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }

    # Only the newest message decides the routing below.
    prompt = params.messages[-1].get("content", "")

    just_chat = False
    standard = None
    file_name = None

    # A "<STANDARD> Compliance report for <file>" prompt triggers the audit
    # path; anything else falls through to plain chat.
    if "Compliance report for" in prompt:
        standard = "IATF" if "IATF" in prompt else "ISO"
        file_name = prompt.replace(f"{standard} Compliance report for", "").strip()
    else:
        just_chat = True

    print(f"just_chat: {just_chat}, standard: {standard}, file_name: {file_name}")

    # Convert OpenAI-style roles to LangChain message-type tuples.
    messages = [
        (role_mapping(m.get("role")), m.get("content")) for m in params.messages
    ]

    # No template variables are used, so invoke with an empty input mapping.
    chat_prompt = ChatPromptTemplate(messages=messages).invoke(input={})

    if just_chat:

        async def stream_generator():
            chat_response = ""
            async for chunk in llm.astream(chat_prompt, **parameters):
                text = chunk.content
                # Drop end-of-stream sentinel tokens emitted by some models.
                if text not in ["<|im_end|>", "<|endoftext|>"]:
                    chat_response += text
                    # NOTE(review): this yields the repr of the UTF-8 bytes
                    # (e.g. b'hi') as the SSE payload — presumably the frontend
                    # decodes that form; confirm against the consumer.
                    chunk_repr = repr(text.encode("utf-8"))
                    print(f"chunk: {text}")
                    yield f"data: {chunk_repr}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(stream_generator(), media_type="text/event-stream")
    else:
        encoded_file_name = encode_filename(file_name)
        is_exists = await file_exists(encoded_file_name)

        # The audit agent can only report on files already ingested into the DB.
        if not is_exists:
            raise HTTPException(
                status_code=400,
                detail=f"File {file_name} does not exist or not processed.",
            )

        async def event_stream():
            timeout = httpx.Timeout(30.0, connect=60.0)
            async with httpx.AsyncClient(timeout=timeout) as client:
                # Proxy the audit agent's streamed response through unchanged.
                async with client.stream(
                    "POST",
                    AUDIT_AGENT_ENDPOINT,
                    json={
                        "messages": f"{standard},{encoded_file_name}",
                        "stream": True,
                    },
                ) as response:
                    response.raise_for_status()
                    async for chunk in response.aiter_bytes():
                        yield chunk
                    yield "data: [DONE]\n\n"

        # Return a StreamingResponse with the streamed content
        return StreamingResponse(event_stream(), media_type="text/event-stream")


@register_microservice(
    name="opea_service@api", endpoint="/dataprep", host="0.0.0.0", port=9000
)
async def upload_documents(
    files: Optional[Union[UploadFile, List[UploadFile]]] = File(None),
    link_list: Optional[str] = Form(None),
    chunk_size: int = Form(1500),
    chunk_overlap: int = Form(100),
    process_table: bool = Form(False),
    table_strategy: str = Form("fast"),
):
    """Ingest uploaded files and/or web links into the pgvector store.

    Each file is saved under ``upload_folder`` and its raw content loaded;
    each link is fetched, parsed, and saved as a .txt file. The loaded
    content is split into chunks, each chunk is embedded via the embedding
    endpoint, and (chunk, embedding, filename) rows are inserted into the
    configured ``pg_table`` (created on demand).

    Raises HTTPException(400) when no input is supplied or ``link_list`` is
    not a JSON list; other failures are reported as a ``{"status": 400}``
    payload.
    """
    try:
        print(f"[ upload ] files:{files}")
        print(f"[ upload ] link_list:{link_list}")

        # Guard: without any input, `content` below would be undefined and
        # the broad except would mask the NameError with a cryptic message.
        if not files and not link_list:
            raise HTTPException(
                status_code=400,
                detail="Must provide either files or a link_list.",
            )

        file_name = ""
        content = ""

        if files:
            if not isinstance(files, list):
                files = [files]
            uploaded_files = []

            for file in files:
                encode_file = encode_filename(file.filename)
                file_name = encode_file
                doc_id = "file:" + encode_file
                print(f"[ upload ] processing file {doc_id}")

                save_path = upload_folder + encode_file
                await save_content_to_local_disk(save_path, file)
                # NOTE(review): when several files are uploaded, only the last
                # file's content reaches the chunking/embedding step below —
                # confirm whether multi-file ingestion is intended.
                content = get_doc_content(
                    DocPath(
                        path=save_path,
                        chunk_size=chunk_size,
                        chunk_overlap=chunk_overlap,
                        process_table=process_table,
                        table_strategy=table_strategy,
                    )
                )
                uploaded_files.append(save_path)
                print(f"[ upload ] Successfully saved file {save_path}")

        if link_list:
            link_list = json.loads(link_list)  # Parse JSON string to list
            if not isinstance(link_list, list):
                raise HTTPException(
                    status_code=400, detail=f"Link_list {link_list} should be a list."
                )
            for link in link_list:
                encoded_link = encode_filename(link)
                file_name = encoded_link
                doc_id = "file:" + encoded_link + ".txt"
                print(f"[ upload ] processing link {doc_id}")

                save_path = upload_folder + encoded_link + ".txt"
                content = parse_html_new(
                    [link], chunk_size=chunk_size, chunk_overlap=chunk_overlap
                )
                await save_content_to_local_disk(save_path, content)
            print(f"[ upload ] Successfully saved link list {link_list}")

        # Split the loaded content into overlapping chunks for embedding.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            add_start_index=True,
            separators=get_separators(),
        )
        chunks = text_splitter.split_text(content)
        print(
            f"[ ingest data ] Done preprocessing. Created {len(chunks)} chunks of the given file."
        )

        CHUNK_STORAGE[file_name] = chunks
        print(f"chunks storage keys {CHUNK_STORAGE.keys()}")

        # Request one embedding per chunk, concurrently.
        async with aiohttp.ClientSession() as session:
            tasks = [
                session.post(embedding_enpoint, json={"input": chunk})
                for chunk in chunks
            ]
            responses = await asyncio.gather(*tasks)
            embeddings = [await response.json() for response in responses]

        # Insert embeddings and chunks into vector database. The table name
        # comes from the PG_TABLE env var (deployment config, not user
        # input), so it is interpolated; row values are bound parameters.
        conn = await asyncpg.connect(
            user=pg_user, password=pg_pwd, database=pg_db, host=pg_host, port=pg_port
        )

        await conn.execute(f"""
                    CREATE TABLE IF NOT EXISTS {pg_table} (
                    id SERIAL PRIMARY KEY,
                    chunk TEXT,
                    embedding vector(1024),
                    filename TEXT
                );
            """)

        async with conn.transaction():
            for i, embedding in enumerate(embeddings):
                embedding_vector = embedding["data"][0]["embedding"]
                # pgvector accepts the textual "[x,y,...]" vector form.
                embedding_vector_str = "[" + ",".join(map(str, embedding_vector)) + "]"
                # Insert into the configured table; this was previously
                # hard-coded to "iaudit" and diverged from PG_TABLE.
                await conn.execute(
                    f"""
                    INSERT INTO {pg_table} (chunk, embedding, filename)
                    VALUES($1, $2, $3);
                """,
                    chunks[i],
                    embedding_vector_str,
                    file_name,
                )
        await conn.close()

        return {"status": 200, "message": "Data preparation succeeded"}

    except HTTPException:
        # Client errors must keep their intended status code instead of
        # being flattened into a 200 response with a failure payload.
        raise
    except Exception as e:
        return {"status": 400, "message": f"Data preparation failed: {e}"}


if __name__ == "__main__":
    # Ensure the upload directory exists before /dataprep writes into it.
    create_upload_folder(upload_folder)
    # Start the FastAPI microservice registered above (serves on port 9000).
    opea_microservices["opea_service@api"].start()
