#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"

import json
import logging
import requests
from typing import Any, Dict, Optional, List, TypedDict, Annotated

from fastapi import FastAPI, Request, Header, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field, ConfigDict
from pathlib import Path
import chromadb
from langchain_chroma import Chroma
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_core.documents import Document
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_community.vectorstores.utils import filter_complex_metadata
from langchain_deepseek import ChatDeepSeek
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_core.vectorstores import VectorStoreRetriever
import uvicorn
from contextlib import asynccontextmanager
from fastapi.responses import JSONResponse

import hashlib
import hmac

from langgraph.graph import StateGraph
# from langgraph.graph import MessagesState
from langgraph.graph import START, END
# from langchain_core.messages import AnyMessage

# Configure root logging once at import time; every module shares this format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# GitHub logins whose issues/comments must never be auto-answered
# (presumably team members and bot accounts — confirm with maintainers).
ignore_users = [
    "wayneliu0019",
    "HaiHui886",
    "csgagent",
    "samchen8008",
    "Rader",
    "pulltheflower",
    "phantom-rabbit",
    "MasonXon",
    "ganisback",
    "zhendi",
    "hiveer",
]

# Base directory on the deployment host holding the embedding model and corpora.
root_path = "/root/hhwang"

model_name = f"{root_path}/bge-base-zh-v1.5"  # local BGE embedding model path
md_path = f"{root_path}/opencsg-docs/docs"    # product documentation (markdown)
issue_path = f"{root_path}/issues"            # exported GitHub issues (markdown)
chroma_path = "chroma_langchain_db"           # on-disk Chroma persistence directory
collection_name = "rag-doc"                   # Chroma collection name

# model_name = "/Users/hhwang/models/bge-base-zh-v1.5"
# md_path = "/Users/hhwang/code/jihulab/opencsg/opencsg-docs/docs"
# issue_path = "/Users/hhwang/temp/github_issues/md"
# chroma_path = f"/Users/hhwang/temp/github_issues/{chroma_path}"

# model_name = "BAAI/bge-large-en-v1.5"
# model_kwargs = {'device': 'cuda'}
model_kwargs = {'device': 'cpu'}  # embedding runs on CPU in this deployment
encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity

# API credentials come from the environment; empty string when unset.
github_token = os.getenv("GITHUB_TOKEN", "")
deepseek_token = os.getenv("DEEPSEEK_TOKEN", "")

# System prompt for the answer-generation node.
# Fix: "persion opinion" -> "personal opinion" (typo in LLM-facing text).
answer_system_prompt = """
## Role:
You are a professional OpenCSG Hub issue QA assistant. Please answer the user's question based on the retrieved content below.

## Constraints:
- Answer the user's question directly based on the retrieved content. Do not include any personal opinion or subjective views. Only provide factual information based on the retrieved content.
- If there is no relevant information in the context, clearly state that you cannot answer and do not fabricate information.
- At the end, prompt the user that they can find more information at https://opencsg.com/docs and technical support will get involved.

## Output:
Please provide accurate and concise answers and do not repeat the question in your response.
"""

# System prompt for the relevance-check node (structured True/False output).
# Fix: collapsed a stray double space after "user's".
check_system_prompt = """
## Role:
You are a professional issue QA compare assistant. Please inspect if the retrieved content is relevant to the user's question and if the content can be used to answer the question.

## Constraints:
- Only inspect if the retrieved content is relevant to the user's question and can be used to answer the question.
- Do not answer the user's question or provide any additional information.
- Just return False if the question is a complaint or negative feedback.
- Just return False if the question is a greeting or thank you or farewell message.
- Just return False if the question is about Github repositories star count.
"""

# ai_prompt = ChatPromptTemplate.from_template("Content:\n{context}\n")
# user_prompt = ChatPromptTemplate.from_template("Question:\n{question}\n")

# Module-level singletons populated during FastAPI lifespan startup.
# Fix: annotate as Optional — both start as None until the init functions run.
retriever: Optional[VectorStoreRetriever] = None
dsllm: Optional[ChatDeepSeek] = None

class GraphState(TypedDict, total=False):
    """State flowing through the LangGraph QA pipeline.

    Fix: TypedDict fields do not support ``= ""`` defaults (a type error; the
    values were silently ignored). The graph is also invoked with a partial
    state (only question/repo fields), so ``total=False`` is the honest schema.
    """
    retrieved_context: str  # formatted text of the retrieved documents
    can_answer: bool        # verdict of the relevance-check node
    question: str           # the user's question (issue body or comment)
    answer: str             # LLM-generated answer, empty if none
    repo_owner: str         # GitHub repo owner login
    repo_name: str          # GitHub repo name
    issue_number: str       # issue number as a string
    push_answer: bool       # whether a comment was pushed to GitHub

# NOTE: the class docstring and the Field description are sent to the LLM as
# the structured-output schema by with_structured_output — do not edit casually.
class CheckResult(BaseModel):
    """inspect retrieved content and question"""
    # True when the retrieved content is relevant and sufficient to answer.
    can_answer: bool = Field(    
        description="Inspect if the retrieved content is relevant to the user's question and if the content can be used to answer the question.",
    )

def create_llm():
    """Instantiate the DeepSeek chat model and store it in the module-level ``dsllm``."""
    global dsllm
    print("Load llm ......")
    llm_settings = {
        "model": "deepseek-chat",
        "temperature": 0.5,
        "max_tokens": 4096,
        "timeout": None,
        "max_retries": 2,
        "streaming": False,
        "api_key": deepseek_token,
    }
    dsllm = ChatDeepSeek(**llm_settings)
    print("Load llm done!")

def get_doc_files(search_path: str):
    """Recursively collect markdown file paths under *search_path*.

    Files inside the ``starship`` and ``autohub`` subtrees are skipped.
    Returns a list of path strings in ``Path.rglob`` traversal order.
    """
    excluded_prefixes = (f"{search_path}/starship", f"{search_path}/autohub")
    collected = []
    for candidate in Path(search_path).rglob("*.md"):
        path_text = str(candidate)
        # str.startswith accepts a tuple of prefixes — one call covers both.
        if path_text.startswith(excluded_prefixes):
            continue
        collected.append(path_text)
    return collected

def create_vector_store():
    """Build (or reload) the Chroma vector store and set the module-level ``retriever``.

    If ``chroma_path`` exists on disk the persisted collection is reused;
    otherwise every markdown file under ``md_path`` and ``issue_path`` is
    embedded with the BGE model and persisted to ``chroma_path``.
    """
    global retriever
    print(f"Loading model {model_name} ......")
    embed_model = HuggingFaceEmbeddings(
        model_name=model_name,
        model_kwargs=model_kwargs,
        encode_kwargs=encode_kwargs,
    )
    print(f"Load model {model_name} done!")

    if os.path.exists(chroma_path):
        # Fast path: reuse the collection persisted by a previous run.
        print(f"Load vector store from local {chroma_path} ......")
        vectorstore = Chroma(
            persist_directory=chroma_path,
            embedding_function=embed_model,
            collection_name=collection_name,
            client_settings=chromadb.Settings(anonymized_telemetry=False)
        )
        retriever = vectorstore.as_retriever(
            search_type="similarity",
        )
        print(f"Load vector store from local {chroma_path} done!")
        # return retriever
    else:
        # Slow path: gather all markdown sources and embed them from scratch.
        doc_md_files = get_doc_files(md_path)
        print(f"Find {len(doc_md_files)} markdown files in {md_path}")

        issue_md_files = get_doc_files(issue_path)
        print(f"Find {len(issue_md_files)} markdown files in {issue_path}")

        md_files = []
        md_files.extend(doc_md_files)
        md_files.extend(issue_md_files)

        md_docs: List[Document] = []

        for md_file in md_files:
            print(f"Loading markdown file: {md_file}")
            loader = UnstructuredMarkdownLoader(md_file, remove_hyperlinks=True, remove_images=True)
            documents = loader.load()
            # Chroma only accepts scalar metadata values; drop list/dict entries.
            filtered_documents = filter_complex_metadata(documents)
            if filtered_documents:
                md_docs.extend(filtered_documents)

        print(f"Loaded {len(md_docs)} markdown files.")

        print("Creating vector store ......")
        vectorstore = Chroma.from_documents(
            documents=md_docs,
            collection_name=collection_name,
            embedding=embed_model,
            persist_directory=chroma_path,
            client_settings=chromadb.Settings(anonymized_telemetry=False)
        )
        print("Vector store completed!")

        retriever = vectorstore.as_retriever(
            search_type="similarity",
        )
        # return retriever

def format_docs(docs):
    """Render retrieved documents as one numbered text blob for the prompt."""
    sections = []
    for position, doc in enumerate(docs, start=1):
        sections.append(f"document {position}:\n{doc.page_content}")
    return "\n\n".join(sections)

def retrieve_context(state: GraphState) -> Dict[str, Any]:
    """Graph node: similarity-search the vector store for the question.

    Returns a partial state update with the formatted hits under
    ``retrieved_context``.
    """
    hits = retriever.invoke(state["question"], k=3)
    return {"retrieved_context": format_docs(hits)}

def check_can_answer(state: GraphState) -> Dict[str, Any]:
    """Graph node: ask the LLM whether the retrieved context can answer the question.

    Bug fix: this node previously sent ``answer_system_prompt``; the dedicated
    ``check_system_prompt`` (which was defined but never used anywhere) is
    clearly the intended instruction set for this relevance check.

    Returns a partial state update with the boolean verdict under ``can_answer``.
    """
    question = state["question"]
    context = state["retrieved_context"]
    messages = [
        SystemMessage(content=check_system_prompt),
        AIMessage(content=f"Retrieved Content:\n{context}\n"),
        HumanMessage(content=f"Question:\n{question}\n"),
    ]
    # Force the model to emit a CheckResult-shaped structured response.
    structured_llm = dsllm.with_structured_output(CheckResult)
    result = structured_llm.invoke(messages, config={"streaming": False})
    return {"can_answer": result.can_answer}

def generate_answer(state: GraphState) -> Dict[str, Any]:
    """Graph node: produce the final answer from the retrieved context.

    Returns a partial state update with the LLM reply under ``answer``
    (empty string when the model returned no content).
    """
    context = state["retrieved_context"]
    question = state["question"]
    messages = [
        SystemMessage(content=answer_system_prompt),
        AIMessage(content=f"Retrieved Content:\n{context}\n"),
        HumanMessage(content=f"Question:\n{question}\n"),
    ]
    response = dsllm.invoke(messages, config={"streaming": False})
    answer = response.content if response.content else ""
    return {"answer": answer}

def should_continue(state: GraphState):
    """Conditional edge: continue to answer generation only when the check passed."""
    return "generate_answer" if state["can_answer"] else END

def push_answer(state: GraphState):
    """Graph node: post the generated answer as a GitHub issue comment.

    Returns a partial state update ``{"push_answer": bool}`` indicating
    whether a comment post was attempted.

    Bug fix: the original returned ``{push_answer: False}`` — using the
    *function object* as the dict key instead of the string ``"push_answer"``,
    so the graph state key was never set.
    """
    repo_owner = state["repo_owner"]
    repo_name = state["repo_name"]
    issue_number = state["issue_number"]
    answer = state["answer"]

    # Nothing to push when any required field is missing/empty.
    if not (repo_owner and repo_name and issue_number and answer):
        return {"push_answer": False}

    # https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-28#create-an-issue-comment
    url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/issues/{issue_number}/comments"
    headers = {
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {github_token}",
        "X-GitHub-Api-Version": "2022-11-28",
    }
    json_data = {"body": answer}
    # SECURITY: verify=False disables TLS certificate validation against
    # api.github.com — preserved to avoid breaking a proxied deployment, but
    # it should be removed unless a broken proxy truly requires it.
    # timeout added so a stalled connection cannot hang the graph forever.
    resp = requests.post(url=url, headers=headers, json=json_data, verify=False, timeout=30)
    logger.info(f"commit comment response on {url}, response code: {resp.status_code}, response text: {resp.text}")
    #return resp.json()
    return {"push_answer": True}

# Wire the LangGraph pipeline:
#   retrieve_context -> check_can_answer -> (generate_answer -> push_answer | END)
builder = StateGraph(GraphState)
builder.add_node(retrieve_context)
builder.add_node(check_can_answer)
builder.add_node(generate_answer)
builder.add_node(push_answer)
builder.add_edge(START, "retrieve_context")
builder.add_edge("retrieve_context", "check_can_answer")
builder.add_conditional_edges("check_can_answer", should_continue, ["generate_answer", END])
builder.add_edge("generate_answer", "push_answer")

# Bug fix: the original compiled twice — builder.compile(name="cbg") was
# discarded and an unnamed graph was compiled again. Compile once, keep the name.
graph = builder.compile(name="cbg")

@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: initialize the vector store and LLM before serving.

    Robustness fix: the shutdown log is now in ``finally`` so it is emitted
    even when the serving phase exits via cancellation or an exception.
    """
    logger.info("init components...")
    create_vector_store()
    create_llm()
    logger.info("all components initialized!")
    try:
        yield
    finally:
        logger.info("closing...")

# FastAPI application; component initialization happens in the lifespan hook.
app = FastAPI(
    # Fix: title previously read "GitHub Webhook webhook" (duplicated word).
    title="GitHub Webhook Service",
    description="Receive and handle GitHub webhook request",
    version="1.0.0",
    lifespan=lifespan
)

class GitHubWebhookPayload(BaseModel):
    """Pydantic model for an incoming GitHub webhook request body."""

    # Bug fix: pydantic v2 silently ignores a nested ``class ConfigDict`` —
    # extra fields were NOT being allowed. The v2 way is assigning
    # ``model_config`` with the (previously unused) imported ConfigDict.
    model_config = ConfigDict(extra="allow")

    payload: Dict[str, Any] = Field(default_factory=dict, description="GitHub webhook event data")

def verify_signature(payload_body, secret_token, signature_header):
    """Verify that the payload was sent from GitHub by validating SHA256.

    Raise and return 403 if not authorized.

    Args:
        payload_body: original request body to verify (request.body())
        secret_token: GitHub app webhook token (WEBHOOK_SECRET)
        signature_header: header received from GitHub (x-hub-signature-256)
    """
    if not signature_header:
        raise HTTPException(status_code=403, detail="x-hub-signature-256 header is missing!")
    digest = hmac.new(secret_token.encode('utf-8'), msg=payload_body, digestmod=hashlib.sha256).hexdigest()
    # Constant-time comparison prevents timing attacks on the signature.
    if not hmac.compare_digest(f"sha256={digest}", signature_header):
        raise HTTPException(status_code=403, detail="Request signatures didn't match!")

@app.post("/webhook")
async def github_webhook(request: Request, x_github_event: Optional[str] = Header(None)):
    """Handle GitHub webhook deliveries for "issue opened" and "comment created".

    Fixes: the request body was read twice, and the HMAC signature was only
    verified *after* the untrusted payload had been JSON-parsed — verification
    now happens first, on a single read. The ignore path now uses the module
    logger instead of ``print`` for consistency.
    """
    logger.info(f"Receive GitHub event type: {x_github_event}")

    body = await request.body()
    # Authenticate the delivery before trusting the payload contents.
    # NOTE(review): the webhook secret is hard-coded; move it to an env var.
    verify_signature(body, "csg_issue_agent_token", request.headers.get("x-hub-signature-256"))
    payload = json.loads(body)

    action = payload.get('action')

    is_issue_open = x_github_event == "issues" and action == "opened"
    is_issue_comment_create = x_github_event == "issue_comment" and action == "created"

    if not is_issue_open and not is_issue_comment_create:
        logger.info(f"ignore {x_github_event} - {action} event")
        return JSONResponse(content={"status": "success", "message": f"ignore {x_github_event} - {action} event"})

    issue = payload.get('issue')
    issue_number = issue.get('number')
    issue_title = issue.get('title')
    issue_body = issue.get('body')
    issue_user = issue.get('user').get('login')

    comment_body = ""
    comment_user = ""
    comment_id = ""
    comment = payload.get('comment')
    if comment:
        comment_id = comment.get('id')
        comment_body = comment.get('body')
        comment_user = comment.get('user').get('login')

    repository = payload.get('repository')
    repo_name = repository.get('name')
    repo_owner = repository.get('owner').get('login')

    # Never auto-answer events raised by team members / bot accounts.
    if is_issue_open and issue_user in ignore_users:
        return JSONResponse(content={"status": "success", "message": f"handle {x_github_event} - {action} event"})
    if is_issue_comment_create and comment_user in ignore_users:
        return JSONResponse(content={"status": "success", "message": f"handle {x_github_event} - {action} event"})

    handle_question(
        is_issue_open=is_issue_open,
        issue_title=issue_title,
        issue_body=issue_body,
        is_issue_comment_create=is_issue_comment_create,
        comment_body=comment_body,
        repo_name=repo_name,
        repo_owner=repo_owner,
        issue_number=issue_number,
        issue_user=issue_user,
        comment_user=comment_user,
        comment_id=comment_id,
    )

    return JSONResponse(content={"status": "success", "message": f"handle {x_github_event} - {action} event"})

def handle_question(is_issue_open: bool,
                    issue_title: str,
                    issue_body: str,
                    is_issue_comment_create: bool,
                    comment_body: str,
                    repo_name: str,
                    repo_owner: str,
                    issue_number: str,
                    issue_user: str,
                    comment_user: str,
                    comment_id: str,
    ):
    """Assemble the user question from the event payload and run the QA graph.

    For a newly opened issue the question is title + body; for a new comment
    it is the comment text. Does nothing when neither event applies or the
    question is empty.
    """
    if not (is_issue_open or is_issue_comment_create):
        return

    # Comment text wins when both flags are set (matches original overwrite order).
    if is_issue_comment_create:
        user_question = comment_body
    elif is_issue_open:
        user_question = f"{issue_title}\n\n{issue_body}"
    else:
        user_question = ""

    #if not "auto_reply" in user_question:
    #    return

    if not user_question:
        return

    logger.info(f"repo name: {repo_name}, repo owner: {repo_owner}, issue number: {issue_number}, issue_user: {issue_user}, comment_user: {comment_user}, comment_id: {comment_id}, user_question: {user_question}")

    result = get_answer_by_llm(
        question=user_question, 
        repo_owner=repo_owner, 
        repo_name=repo_name, 
        issue_number=issue_number,
    )
    # answer(repo_owner=repo_owner, repo_name=repo_name, issue_number=issue_number, answer=result)
    if result["can_answer"]:
        logger.info(f"append comment result: {result}")
    else:
        logger.info(f"cannot answer question: {user_question}")

def get_answer_by_llm(question: str, repo_owner: str="", repo_name: str="", issue_number: str=""):
    """Run the compiled QA graph for one question and return the final state.

    The repo/issue fields are optional; when present, the graph's push node
    uses them to comment the answer back onto the GitHub issue.
    """
    initial_state = {
        "question": question,
        "repo_owner": repo_owner,
        "repo_name": repo_name,
        # issue numbers arrive as ints from the webhook payload — normalize.
        "issue_number": str(issue_number),
    }
    return graph.invoke(initial_state)

@app.get("/")
async def root():
    """Health-check endpoint confirming the service is up."""
    status_payload = {"status": "online", "message": "GitHub Webhook is running"}
    return status_payload

@app.get("/ask")
async def ask(question: str):
    """Query the QA graph directly, bypassing GitHub (useful for debugging)."""
    logger.info(f"question: {question}")
    return get_answer_by_llm(question)

# Script entry point: serve the app on all interfaces, port 9090.
if __name__ == "__main__":
    uvicorn.run("webhook:app", host="0.0.0.0", port=9090, reload=False)
    logger.info("server shutdown")
