from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
import os
import sys
# --- Environment configuration ----------------------------------------------
# SECURITY: real API credentials are hard-coded below and have been committed
# to source control. FIXME: rotate both keys and load them from the runtime
# environment (or a secrets manager) instead of embedding them here.
# `setdefault` is used so values already present in the caller's environment
# are not silently clobbered by this module being imported.
os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
os.environ.setdefault("LANGCHAIN_PROJECT", "playground")
os.environ.setdefault("LANGCHAIN_API_KEY", "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")
os.environ.setdefault("GOOGLE_API_KEY", "AIzaSyBJoz7BvdFgWTBwzcu-0xWpJKfEJOR6vPM")

# Shared chat-model instance used by every demo function below.
llm = ChatGoogleGenerativeAI(model="models/gemini-1.5-pro-latest", temperature=0.7)

def simple_demo():
    """Ask the model about its version/knowledge cutoff and print the reply text.

    The prompt is Chinese for: "What version are you, and what is your
    knowledge cutoff date?"
    """
    result = llm.invoke("你是什么版本,你的知识截止到什么时间点")
    # Print only the text content, consistent with the other demos in this
    # file, which all print `.content` rather than the full message object.
    print(result.content)


def simple_stream_demo():
    """Stream a limerick request, printing each received chunk on its own line."""
    for piece in llm.stream("Write a limerick about LLMs."):
        print(piece.content)


def simple_stream_demo2():
    """Stream a short poem, emitting chunks inline as they arrive."""
    for piece in llm.stream("Tell me a short poem about snow"):
        # Equivalent to sys.stdout.write(...) + sys.stdout.flush(): no
        # separator, no trailing newline, flushed immediately so the tokens
        # appear in real time.
        print(piece.content, end="", flush=True)


def simple_batch_result_demo():
    """Send a (one-item) batch of prompts and print each reply's text."""
    prompts = ["What's 2+2?"]
    for reply in llm.batch(prompts):
        print(reply.content)

def message_entity_demo1():
    """Invoke the model with typed message objects (system + human roles)."""
    conversation = [
        SystemMessage(content="你是一位出色的律师，你现在在法庭上"),
        HumanMessage(content="我只不过是偷了一辆电瓶车，我有什么罪"),
    ]
    response = llm.invoke(conversation)
    print(response.content)

def message_entity_demo2():
    """Invoke the model with plain strings instead of message objects."""
    response = llm.invoke(
        [
            "你是一位出色的律师，你现在在法庭上",
            "我只不过是偷了一辆电瓶车，我有什么罪",
        ]
    )
    print(response.content)

def message_entity_demo3():
    """Invoke the model with OpenAI-style role/content dictionaries."""
    conversation = [
        {"role": "system", "content": "你是一位出色的律师，你现在在法庭上"},
        {"role": "user", "content": "我只不过是偷了一辆电瓶车，我有什么罪"},
    ]
    print(llm.invoke(conversation).content)

# Image-understanding demo.
def message_entity_read_image():
    """Send a multimodal (text + image URL) human message and print the reply."""
    # Content parts: an optional text prompt plus an image referenced by URL.
    parts = [
        {"type": "text", "text": "这个图片里面是什么"},
        {"type": "image_url", "image_url": "https://picsum.photos/seed/picsum/300/300"},
    ]
    result = llm.invoke([HumanMessage(content=parts)])
    print(type(result))
    print(result.content)


def prompt_template_simple_demo():
    """Pipe a prompt template into the model (LCEL chain) and print the result."""
    prompt = PromptTemplate.from_template(
        """Question: {question}

    Answer: Let's think step by step."""
    )
    chain = prompt | llm
    result = chain.invoke({"question": "why is 2+2 = 4?"})
    # NOTE: prints the full message object, not just its `.content`.
    print(result)

if __name__ == '__main__':
    # Only the basic invoke demo runs by default; the other demo functions
    # in this file are meant to be called manually.
    simple_demo()

