
import os

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from langsmith import Client
from langsmith.evaluation import evaluate

from langsmith import traceable
import google.generativeai as genai
# LangSmith tracing configuration: every chain/LLM call below is traced into
# the "playground" project.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "playground"
# SECURITY(review): live API keys are hardcoded and committed to source.
# Rotate both keys and load them from the environment / a secrets manager
# instead of embedding them here.
os.environ["LANGCHAIN_API_KEY"] = "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"
os.environ["GOOGLE_API_KEY"] = "AIzaSyBJoz7BvdFgWTBwzcu-0xWpJKfEJOR6vPM"

# Shared Gemini chat model used by test1 and test6 below.
llm = ChatGoogleGenerativeAI(model="models/gemini-1.5-pro-latest", temperature=0.7)
def test1():
    """Run a minimal prompt | llm | parser chain and print the generated joke."""
    joke_prompt = ChatPromptTemplate.from_template(
        "给我讲一个关于{topic}的笑话"
    )
    pipeline = joke_prompt | llm | StrOutputParser()
    print(pipeline.invoke({"topic": "躺平"}))

def test2():
    """Create (or reuse) a sample LangSmith dataset and run a toy evaluation.

    The "AI system" under test is a lambda that prepends "Welcome " to the
    input postfix; an exact-match evaluator grades it against the reference
    outputs.

    Returns:
        The experiment results object produced by ``evaluate``.
    """
    client = Client()

    dataset_name = "Sample Dataset"
    # Bug fix: create_dataset raises if the dataset already exists, so a
    # second run of this function used to crash. Reuse the dataset instead.
    if client.has_dataset(dataset_name=dataset_name):
        dataset = client.read_dataset(dataset_name=dataset_name)
    else:
        dataset = client.create_dataset(dataset_name, description="A sample dataset in LangSmith.")
        client.create_examples(
            inputs=[
                {"postfix": "to LangSmith"},
                {"postfix": "to Evaluations in LangSmith"},
            ],
            outputs=[
                {"output": "Welcome to LangSmith"},
                {"output": "Welcome to Evaluations in LangSmith"},
            ],
            dataset_id=dataset.id,
        )

    # Evaluator: score is True/False for an exact string match.
    def exact_match(run, example):
        return {"score": run.outputs["output"] == example.outputs["output"]}

    return evaluate(
        lambda input: "Welcome " + input['postfix'],  # Your AI system goes here
        data=dataset_name,  # The data to predict and grade over
        evaluators=[exact_match],  # The evaluators to score the results
        experiment_prefix="sample-experiment",  # The name of the experiment
        metadata={
            "version": "1.0.0",
            "revision_id": "beta"
        },
    )

def test3():
    """Send one prompt to Gemini through a @traceable wrapper and print it."""
    gemini = genai.GenerativeModel('gemini-pro')

    @traceable  # auto-trace this function into LangSmith
    def pipeline(user_input: str):
        return gemini.generate_content(user_input)

    print(pipeline("给我写篇春天里的文章"))
def test4():
    """Create a LangSmith dataset of animal Q&A pairs, one example per pair."""
    qa_pairs = [
        ("What is the largest mammal?", "The blue whale"),
        ("What do mammals and birds have in common?", "They are both warm-blooded"),
        ("What are reptiles known for?", "Having scales"),
        ("What's the main characteristic of amphibians?", "They live both in water and on land"),
    ]

    ls_client = Client()

    # A shared dataset lets chains and LLMs run over the same examples.
    dataset = ls_client.create_dataset(
        dataset_name="Elementary Animal Questions",
        description="Questions and answers about animal phylogenetics.",
    )
    for question, answer in qa_pairs:
        ls_client.create_example(
            inputs={"question": question},
            outputs={"answer": answer},
            metadata={"source": "Wikipedia"},
            dataset_id=dataset.id,
        )

def test5():
    """Copy successful root runs from the 'evaluators' project into a dataset."""
    ls_client = Client()

    target = ls_client.create_dataset(
        "evaluators Example Dataset", description="An example dataset"
    )

    # Only top-level runs that finished without error are collected.
    successful_roots = ls_client.list_runs(
        project_name="evaluators",
        is_root=True,
        error=False,
    )
    for run in successful_roots:
        ls_client.create_example(
            inputs=run.inputs,
            outputs=run.outputs,
            dataset_id=target.id,
        )


from langsmith.schemas import Example, Run

def correct_label(root_run: Run, example: Example) -> dict:
    """Exact-match evaluator: 1 when the run's "output" equals the example's "label".

    Robustness fix: LangSmith runs that errored can have ``outputs`` set to
    None, which previously raised AttributeError; treat None as empty.
    """
    predicted = (root_run.outputs or {}).get("output")
    expected = (example.outputs or {}).get("label")
    return {"score": int(predicted == expected), "key": "correct_label"}
def test6():
    """Run an LLM toxicity classifier over a LangSmith project, graded by correct_label."""
    toxicity_prompt = ChatPromptTemplate.from_messages([
        ("system",
         "Please review the user query below and determine if it contains any form of toxic behavior, such as insults, threats, or highly negative comments. Respond with 'Toxic' if it does, and 'Not toxic' if it doesn't."),
        ("user", "{input}")
    ])

    classifier = toxicity_prompt | llm | StrOutputParser()

    evaluate(
        classifier.invoke,
        data='first project name',
        evaluators=[correct_label],
        experiment_prefix="Toxic Queries",
    )

import wikipedia as wp
@traceable(run_type="retriever")
def retrieve(query):
    """Search Wikipedia for *query* and return up to 2 document-shaped dicts.

    Each dict carries "page_content" (the page summary), "type": "Document",
    and the page URL under "metadata".

    Bug fix: the original fell off the end of the loop and implicitly returned
    None when fewer than 2 pages resolved (or the search was empty); now a
    (possibly empty) list is always returned. Also catches wp.PageError, which
    wp.page(..., auto_suggest=False) raises for titles with no exact page.
    """
    results = []
    for term in wp.search(query, results=10):
        try:
            page = wp.page(term, auto_suggest=False)
        except (wp.DisambiguationError, wp.PageError):
            # Skip ambiguous or missing pages.
            continue
        results.append({
            "page_content": page.summary,
            "type": "Document",
            "metadata": {"url": page.url}
        })
        if len(results) >= 2:
            break
    return results
def test7():
    """Fetch Wikipedia documents for a sample AI question and print them."""
    documents = retrieve('What is artificial intelligence?')
    print(documents)

# Script entry point: only the Wikipedia retrieval demo runs by default.
if __name__ == "__main__":
    test7()