#!/usr/bin/env python
import ast
import os

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from langchain.chains.llm import LLMChain
from langchain.chains.sequential import SequentialChain
# NOTE(review): duplicate of the langchain_core import below (same re-exported
# class); kept so existing behavior is unchanged.
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnableLambda
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langserve import add_routes
from langserve.schema import CustomUserType

import langchain.globals
langchain.globals.set_debug(True)

from config import config
from utils import CodeBlockOutputParser, CODE_INSTRUCTIONS, create_json_retriver

# FastAPI application that hosts the LangServe code-translation endpoints.
app = FastAPI(
    title="LangChain Server",
    version="1.0",
    description="A simple api server using Langchain's Runnable interfaces",
)

# Allow browser clients from any origin to call the API.
# NOTE(review): wildcard origins combined with allow_credentials=True is very
# permissive — tighten the origin list before exposing this publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    expose_headers=["*"],
)

# SECURITY: the DeepSeek API key was hard-coded in source here.  Prefer the
# DEEPSEEK_API_KEY environment variable; the literal remains only as a
# backward-compatible fallback and should be rotated and removed.
_DEEPSEEK_API_KEY = os.environ.get(
    "DEEPSEEK_API_KEY", "sk-584347f21d944d649af0da3a6f412c1f"
)
_DEEPSEEK_BASE_URL = "https://api.deepseek.com/v1"

# General-purpose chat model used by the "think" (analysis) chain.
chat_llm = ChatOpenAI(
    api_key=_DEEPSEEK_API_KEY,
    base_url=_DEEPSEEK_BASE_URL,
    model_name="deepseek-chat"
)
# Code-specialized model used by the translation / debug chains.
coder_llm = ChatOpenAI(
    api_key=_DEEPSEEK_API_KEY,
    base_url=_DEEPSEEK_BASE_URL,
    model_name="deepseek-coder"
)

# Retriever over previously collected translation examples (JSON file).
retriever = create_json_retriver(config["collect_file"])

def create_code_trans_chain():
    """Build the LLMChain that translates source code into Rust.

    The user prompt embeds pre-collected translation hints from the
    configured thoughts file plus runtime variables: ``language``,
    ``input_code``, ``thoughts`` and a retrieved ``example``.
    Output key: ``output_code`` (parsed out of a ```rust code block).
    """
    # Load the shared translation hints once per chain build.
    with open(config["thoughts_file"], "r") as thoughts_file:
        base_thoughts = "\n".join(line.strip() for line in thoughts_file)

    user_message = (
        "你可以帮我将以下{language}语言代码转换成Rust吗：{input_code}\n"
        + f"对这项代码转换任务已经进行了一些有效的思考：{base_thoughts}\n"
        + "{thoughts}\n"
        + "这份Rust代码或许和你要完成的代码转换任务有关，可以参考：\n{example}"
        + CODE_INSTRUCTIONS["code_trans"]
    )
    prompt = ChatPromptTemplate.from_messages([
        ("system", "你是一名世界级的C、C++、Rust语言专家"),
        ("user", user_message),
    ])

    return LLMChain(
        llm=coder_llm,
        prompt=prompt,
        output_parser=CodeBlockOutputParser(code_type="rust"),
        output_key="output_code",
    )

def create_code_trans_debug_chain():
    """Build the LLMChain that retries a failed Rust translation.

    Extends the base translation prompt with the model's previous Rust
    attempt (``model_response``) and the compiler error output
    (``std_error``) so the model can correct syntax mistakes.
    Output key: ``output_code`` (parsed out of a ```rust code block).
    """
    # BUG FIX: the retry instruction previously asked for "{language}" code
    # (i.e. the *source* language); the translation target is always Rust.
    debug_prompt = "你的初步转换结果为如下的Rust代码：{model_response}\n你给出的代码在编译过程中报错，存在语法上的错误，报错信息如下：{std_error}\n现在请你仔细分析代码转换任务，并重新给出转换后的Rust代码\n"

    # Load the shared translation hints once per chain build.
    with open(config["thoughts_file"], "r") as f:
        base_thoughts = "\n".join(line.strip() for line in f)

    prompt = ChatPromptTemplate.from_messages([
            ("system", "你是一名世界级的C、C++、Rust语言专家"),
            ("user",
             "你可以帮我将以下{language}语言代码转换成Rust吗：{input_code}\n"
             + f"对这项代码转换任务已经进行了一些有效的思考：{base_thoughts}\n"
             + "{thoughts}\n"
             + "这份Rust代码或许和你要完成的代码转换任务有关，可以参考：\n{example}"
             + debug_prompt
             + CODE_INSTRUCTIONS["code_trans"]),
        ])
    output_parser = CodeBlockOutputParser(code_type="rust")

    return LLMChain(llm=coder_llm, prompt=prompt,
                    output_parser=output_parser, output_key="output_code")

def create_code_think_chain():
    """Build the LLMChain that produces analysis ("thoughts") for a
    code-to-Rust translation task.

    Prompt variables: ``language`` and ``input_code``.
    Output key: ``thoughts`` (plain text).
    """
    think_prompt = ChatPromptTemplate.from_messages([
        ("system", "你是一名世界级的C、C++、Rust语言专家"),
        ("user",
         "假如我希望将以下{language}语言代码转换成Rust：{input_code}\n"
         + CODE_INSTRUCTIONS["code_think"]),
    ])

    return LLMChain(
        llm=chat_llm,
        prompt=think_prompt,
        output_parser=StrOutputParser(),
        output_key="thoughts",
    )

class CodeTransParam(CustomUserType):
    """Request payload for /code_trans: the source code to translate to Rust."""
    # Source language of the input code (interpolated into the prompt,
    # e.g. "C" or "C++").
    language: str
    # The source code to translate into Rust.
    input_code: str

class CodeTransDebugParam(CodeTransParam):
    """Request payload for /code_trans_debug: adds context from a failed attempt."""
    # Analysis text previously produced by the "think" chain for this task.
    thoughts: str
    # The Rust code the model produced on the previous attempt.
    model_response: str
    # Compiler error output from building the previous attempt.
    std_error: str

def run_code_trans(param: CodeTransParam) -> dict:
    """Translate ``param.input_code`` into Rust.

    Runs the "think" chain first, then feeds its thoughts plus a retrieved
    reference example into the translation chain.

    Returns a dict with keys ``output_code`` (the generated Rust code) and
    ``thoughts`` (the analysis text).  The annotation was previously
    ``-> str`` but the function has always returned a dict (and the route
    is registered with ``output_type=dict``).
    """
    code_trans_chain = create_code_trans_chain()
    code_think_chain = create_code_think_chain()

    overall_chain = SequentialChain(
        chains=[code_think_chain, code_trans_chain],
        input_variables=["language", "input_code", "example"],
        output_variables=["thoughts", "output_code"],
        verbose=True
    )

    # Fetch the most similar stored translation as a reference example.
    # The stored page_content is a dict literal; parse it with
    # ast.literal_eval instead of eval() so retrieved text can never
    # execute arbitrary code.
    example_doc = retriever.get_relevant_documents(param.input_code)[0]
    example_code = ast.literal_eval(example_doc.page_content)["output_code"]

    chain_result = overall_chain.invoke(
        {"language": param.language, "input_code": param.input_code,
         "example": example_code}
        )

    return {
        "output_code": chain_result["output_code"],
        "thoughts": chain_result["thoughts"]
        }

def run_code_trans_with_debug(param: CodeTransDebugParam) -> str:
    """Retry a failed translation using compiler feedback.

    Re-runs the translation with the previous model response and the
    compiler errors included in the prompt.  Returns only the corrected
    Rust code string.
    """
    code_trans_chain_with_debug = create_code_trans_debug_chain()

    # Parse the retrieved example with ast.literal_eval instead of eval():
    # the stored page_content is a dict literal, and literal_eval cannot
    # execute arbitrary code.
    example_doc = retriever.get_relevant_documents(param.input_code)[0]
    example_code = ast.literal_eval(example_doc.page_content)["output_code"]

    return code_trans_chain_with_debug.invoke({
        "language": param.language,
        "input_code": param.input_code,
        "example": example_code,
        "thoughts": param.thoughts,
        "model_response": param.model_response,
        "std_error": param.std_error
        })["output_code"]

def main():
    """Register the LangServe routes on the FastAPI app."""
    endpoints = [
        ("/code_trans",
         RunnableLambda(run_code_trans).with_types(output_type=dict)),
        ("/code_trans_debug",
         RunnableLambda(run_code_trans_with_debug)),
    ]
    for path, runnable in endpoints:
        add_routes(app, runnable, path=path)

if __name__ == "__main__":
    main()
    import uvicorn
    uvicorn.run(app, host=config["host"], port=config["port"])
