import json
import logging
import os
import pickle
from getpass import getpass

import dbutils as dbutils
import promptlayer
import requests
from langchain.chains import LLMChain, SimpleSequentialChain, ConversationChain, MapReduceChain
from langchain.llms.ai21 import AI21
from langchain.llms.aleph_alpha import AlephAlpha
from langchain.llms.anyscale import Anyscale
from langchain.llms.bananadev import Banana
from langchain.llms.baseten import Baseten
from langchain.llms.bedrock import Bedrock
from langchain.llms.cerebriumai import CerebriumAI
from langchain.llms.cohere import Cohere
from langchain.llms.ctransformers import CTransformers
from langchain.llms.databricks import Databricks
from langchain.llms.deepinfra import DeepInfra
from langchain.llms.forefrontai import ForefrontAI
from langchain.llms.gooseai import GooseAI
from langchain.llms.gpt4all import GPT4All
from langchain.llms.huggingface_hub import HuggingFaceHub
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.llms.huggingface_text_gen_inference import HuggingFaceTextGenInference
from langchain.llms.llamacpp import LlamaCpp
from langchain.llms.manifest import ManifestWrapper
from langchain.llms.modal import Modal
from langchain.llms.mosaicml import MosaicML
from langchain.llms.nlpcloud import NLPCloud
from langchain.llms.openai import AzureOpenAI, OpenAI
from langchain.llms.openlm import OpenLM
from langchain.llms.petals import Petals
from langchain.llms.pipelineai import PipelineAI
from langchain.llms.predictionguard import PredictionGuard
from langchain.llms.promptlayer_openai import PromptLayerOpenAI
from langchain.llms.replicate import Replicate
from langchain.llms.self_hosted import SelfHostedPipeline
from langchain.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
from langchain.llms.stochasticai import StochasticAI
from langchain.llms.vertexai import VertexAI
from langchain.llms.writer import Writer
from langchain.memory import ConversationBufferMemory
from langchain.model_laboratory import ModelLaboratory
from langchain.prompts import PromptTemplate
from langchain.schema import Document
from langchain.schema.callbacks.manager import CallbackManager
from langchain.schema.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.text_splitter import CharacterTextSplitter
from langchain.tools import tool
from langchain_experimental.llms import RELLM, JsonFormer
from manifest import Manifest
import pipeline


def al21_test():
    """Demo: answer a single question with the AI21 LLM via an LLMChain."""
    # pip install ai21
    # The key can also be read interactively: AI21_API_KEY = getpass()
    qa_template = """Question: {question}
    Answer: Let`s think step by step.
    """
    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    # Explicit key form: AI21(ai21_api_key="xxxxx")
    model = AI21()
    chain = LLMChain(llm=model, prompt=qa_prompt)
    print(chain.run("What NFL team won the Super Bowl in the year Justin Beiber was born?"))


def aleph_alpha_test():
    """Demo: query Aleph Alpha's luminous-extended model through an LLMChain.

    Fixes two keyword typos in the AlephAlpha constructor:
    ``naximum_tokens`` -> ``maximum_tokens`` and ``stop_sequence`` ->
    ``stop_sequences`` (the field names on langchain's AlephAlpha wrapper).
    With the misspelled keywords the token limit and stop tokens never
    applied.
    """
    # pip install aleph-alpha-client
    from getpass import getpass

    ALEPH_ALPHA_API_KEY = getpass()
    template = """Q: {question}

    A:"""

    prompt = PromptTemplate(template=template, input_variables=["question"])

    llm = AlephAlpha(
        model="luminous-extended",
        maximum_tokens=20,  # cap the completion length
        stop_sequences=["Q:"],  # stop before the model invents a next question
        aleph_alpha_api_key=ALEPH_ALPHA_API_KEY,
    )
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    question = "What is AI?"

    result = llm_chain.run(question)
    print(result)


def anyscale_test():
    """Demo: route one question through an Anyscale-hosted service."""
    # Placeholder credentials: each env var is set to its own name.
    for var in ("ANYSCALE_SERVICE_URL", "ANYSCALE_SERVICE_ROUTE", "ANYSCALE_SERVICE_TOKEN"):
        os.environ[var] = var

    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    chain = LLMChain(llm=Anyscale(), prompt=qa_prompt)
    print(chain.run("When was George Washington president?"))


def azure_openai_test():
    """Demo: call an Azure OpenAI deployment directly (no chain)."""
    # Azure needs the API flavour, version, endpoint, and key up front.
    azure_env = {
        "OPENAI_API_TYPE": "azure",
        "OPENAI_API_VERSION": "2023-03-15-preview",
        "OPENAI_API_BASE": "https://your-resource-name.openai.azure.com",
        "OPENAI_API_KEY": "...",
    }
    os.environ.update(azure_env)
    model = AzureOpenAI(deployment_name="td2", model_name="text-davinci-002")
    model("Tell me a joke")


def banana_test():
    """Demo: run one question against a Banana-deployed model."""
    os.environ["BANANA_API_KEY"] = "YOUR_API_KEY"
    # Alternatively prompt for it: BANANA_API_KEY = getpass()
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    chain = LLMChain(llm=Banana(model_key="YOUR_MODEL_KEY"), prompt=qa_prompt)
    chain.run("What NFL team won the Super Bowl in the year Justin Beiber was born?")


def baseten_test():
    """Demo: chain three Baseten-hosted LLM calls into a menu generator."""
    # pip install baseten
    import baseten

    baseten.login("you api key")
    wizardlm = Baseten(model="MODEL_VERSION_ID", verbose=True)
    wizardlm("What is the difference between a Wizard and a Sorcerer?")

    # One prompt per step: entree -> sides -> beverages.
    entree_prompt = PromptTemplate(
        input_variables=["cuisine"],
        template="Name a complex entree for a {cuisine} dinner. Respond with just the name of a single dish.",
    )
    sides_prompt = PromptTemplate(
        input_variables=["entree"],
        template="What are three sides that would go with {entree}. Respond with only a list of the sides.",
    )
    drinks_prompt = PromptTemplate(
        input_variables=["sides"],
        template="What is one alcoholic and one non-alcoholic beverage that would go well with this list of sides: {sides}. Respond with only the names of the beverages.",
    )
    links = [
        LLMChain(llm=wizardlm, prompt=p)
        for p in (entree_prompt, sides_prompt, drinks_prompt)
    ]

    # Feed each step's output into the next and run the whole pipeline.
    menu_maker = SimpleSequentialChain(chains=links, verbose=True)
    menu_maker.run("South Indian")


def beam_test():
    """Demo: deploy a GPT-2 app on Beam and invoke it remotely."""
    # Setup (shell):
    #   curl https://raw.githubusercontent.com/slai-labs/get-beam/main/get-beam.sh -sSfL | sh
    #   pip install beam-sdk
    #   beam configure --clientId=<id> --clientSecret=<secret>
    # (or export BEAM_CLIENT_ID / BEAM_CLIENT_SECRET before configuring)
    from langchain.llms.beam import Beam

    gpt2_app = Beam(
        model_name="gpt2",
        name="langchain-gpt2-test",
        cpu=8,
        memory="32Gi",
        gpu="A10G",
        python_version="python3.8",
        python_packages=[
            "diffusers[torch]>=0.10",
            "transformers",
            "torch",
            "pillow",
            "accelerate",
            "safetensors",
            "xformers",
        ],
        max_length="50",
        verbose=False,
    )

    # NOTE(review): _deploy/_call are the entry points shown in the LangChain
    # Beam docs, despite the leading underscores.
    gpt2_app._deploy()

    print(gpt2_app._call("Running machine learning on a remote GPU"))


def bedrock_test():
    """Demo: hold a short conversation with Amazon Bedrock's Titan model."""
    # pip install boto3
    chat = ConversationChain(
        llm=Bedrock(
            credentials_profile_name="bedrock-admin",
            model_id="amazon.titan-tg1-large",
        ),
        verbose=True,
        memory=ConversationBufferMemory(),
    )
    chat.predict(input="Hi there!")


def cerebrium_ai_test():
    """Demo: answer one question via a CerebriumAI endpoint."""
    # pip install cerebrium
    os.environ["CEREBRIUMAI_API_KEY"] = "YOUR_KEY_HERE"
    model = CerebriumAI(endpoint_url="YOUR ENDPOINT URL HERE")
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    LLMChain(llm=model, prompt=qa_prompt).run(
        "What NFL team won the Super Bowl in the year Justin Beiber was born?"
    )


def cohere_test():
    """Demo: answer one question with Cohere."""
    # pip install cohere
    api_key = getpass()
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    chain = LLMChain(llm=Cohere(cohere_api_key=api_key), prompt=qa_prompt)
    chain.run("What NFL team won the Super Bowl in the year Justin Beiber was born?")


def ctransformers_test():
    """Demo: run GGML GPT-2 locally via ctransformers, with and without streaming."""
    # pip install ctransformers
    basic = CTransformers(model="marella/gpt-2-ggml")
    print(basic("AI is going to"))

    from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

    # Same model, but tokens stream to stdout as they are generated.
    streaming = CTransformers(
        model="marella/gpt-2-ggml", callbacks=[StreamingStdOutCallbackHandler()]
    )
    response = streaming("AI is going to")

    from langchain import PromptTemplate, LLMChain

    qa_template = """Question: {question}

    Answer:"""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    response = LLMChain(prompt=qa_prompt, llm=streaming).run("What is AI?")


def databricks():
    """Demo: query a Databricks model-serving endpoint in several configurations.

    NOTE(review): every other demo in this file is named ``*_test``; this one
    is just ``databricks`` — likely an oversight, left as-is to avoid breaking
    callers.
    """
    # Inside a Databricks notebook, workspace host and token are inferred
    # automatically by the wrapper.
    llm = Databricks(endpoint_name="dolly")
    llm("How are you?")
    llm("How are you?", stop=["."])  # per-call stop sequences are supported
    # Otherwise, you can manually specify the Databricks workspace hostname and personal access token
    # or set `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables, respectively.
    # See https://docs.databricks.com/dev-tools/auth.html#databricks-personal-access-tokens
    # We strongly recommend not exposing the API token explicitly inside a notebook.
    # You can use Databricks secret manager to store your API token securely.
    # See https://docs.databricks.com/dev-tools/databricks-utils.html#secrets-utility-dbutilssecrets

    import os
    # pip install dbutils
    # Pull the token from the Databricks secret store instead of hard-coding it.
    os.environ["DATABRICKS_TOKEN"] = dbutils.secrets.get("myworkspace", "api_token")
    llm = Databricks(host="myworkspace.cloud.databricks.com", endpoint_name="dolly")
    llm("How are you?")
    # If the serving endpoint accepts extra parameters like `temperature`,
    # you can set them in `model_kwargs`.
    llm = Databricks(endpoint_name="dolly", model_kwargs={"temperature": 0.1})

    llm("How are you?")

    # Use `transform_input_fn` and `transform_output_fn` if the serving endpoint
    # expects a different input schema and does not return a JSON string,
    # respectively, or you want to apply a prompt template on top.

    def transform_input(**request):
        # Append a brevity instruction to the prompt before it is sent.
        full_prompt = f"""{request["prompt"]}
        Be Concise.
        """
        request["prompt"] = full_prompt
        return request

    llm = Databricks(endpoint_name="dolly", transform_input_fn=transform_input)

    llm("How are you?")

    # Reference snippet (disabled): local Dolly pipeline with custom stopping
    # criteria; requires transformers + torch.
    # model = "databricks/dolly-v2-3b"
    # tokenizer = AutoTokenizer.from_pretrained(model, padding_side="left")
    # dolly = pipeline(model=model, tokenizer=tokenizer, trust_remote_code=True, device_map="auto")
    # device = dolly.device
    #
    # class CheckStop(StoppingCriteria):
    #     def __init__(self, stop=None):
    #         super().__init__()
    #         self.stop = stop or []
    #         self.matched = ""
    #         self.stop_ids = [tokenizer.encode(s, return_tensors='pt').to(device) for s in self.stop]
    #
    #     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):
    #         for i, s in enumerate(self.stop_ids):
    #             if torch.all((s == input_ids[0][-s.shape[1]:])).item():
    #                 self.matched = self.stop[i]
    #                 return True
    #         return False


def deepinfra_test():
    """Demo: query Dolly-v2-12b hosted on DeepInfra."""
    os.environ["DEEPINFRA_API_TOKEN"] = getpass()
    model = DeepInfra(model_id="databricks/dolly-v2-12b")
    # Sampling parameters are attached after construction.
    model.model_kwargs = {
        "temperature": 0.7,
        "repetition_penalty": 1.2,
        "max_new_tokens": 250,
        "top_p": 0.9,
    }
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    LLMChain(prompt=qa_prompt, llm=model).run("Can penguins reach the North pole?")


def fore_front_ai_test():
    """Demo: answer one question via a ForefrontAI endpoint."""
    os.environ["FOREFRONTAI_API_KEY"] = getpass()
    model = ForefrontAI(endpoint_url="YOUR ENDPOINT URL HERE")
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    LLMChain(llm=model, prompt=qa_prompt).run(
        "What NFL team won the Super Bowl in the year Justin Beiber was born?"
    )


def google_cloud_platform_vertex_ai_palm_test():
    """Demo: answer one question with Vertex AI PaLM."""
    # pip install google-cloud-aiplatform
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    chain = LLMChain(llm=VertexAI(), prompt=qa_prompt)
    chain.run("What NFL team won the Super Bowl in the year Justin Beiber was born?")


def goose_ai_test():
    """Demo: answer one question with GooseAI."""
    # pip3 install openai (GooseAI exposes an OpenAI-compatible API)
    from getpass import getpass

    os.environ["GOOSEAI_API_KEY"] = getpass()
    model = GooseAI()
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    LLMChain(llm=model, prompt=qa_prompt).run(
        "What NFL team won the Super Bowl in the year Justin Beiber was born?"
    )


def gpt4_all_test():
    """Demo: answer one question with a local GPT4All model, streaming tokens.

    Fix: the original constructed ``GPT4All`` twice and discarded the first
    instance, loading the multi-gigabyte model weights from disk twice; only
    the custom-backend instance is created now.
    """
    # pip install gpt4all
    template = """Question: {question}

    Answer: Let's think step by step."""

    prompt = PromptTemplate(template=template, input_variables=["question"])
    local_path = (
        "./models/ggml-gpt4all-l13b-snoozy.bin"  # replace with your desired local file path
    )
    # To download a model, stream it from http://gpt4all.io/models/ with
    # requests (chunked) — see https://github.com/nomic-ai/gpt4all for the
    # latest model list.

    # Callbacks support token-wise streaming; verbose is required to pass to
    # the callback manager.
    callbacks = [StreamingStdOutCallbackHandler()]
    # The backend parameter selects a custom model backend; check
    # https://docs.gpt4all.io/gpt4all_python.html for supported backends.
    llm = GPT4All(model=local_path, backend="gptj", callbacks=callbacks, verbose=True)
    question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
    llm_chain = LLMChain(llm=llm, prompt=prompt)
    llm_chain.run(question)


def hugging_face_hub_test():
    """Demo: answer one question with flan-t5-xl on the Hugging Face Hub."""
    # pip install huggingface_hub
    # Token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token
    from getpass import getpass
    import os

    os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass()

    # Other options: https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads
    model = HuggingFaceHub(
        repo_id="google/flan-t5-xl",
        model_kwargs={"temperature": 0, "max_length": 64},
    )
    qa_template = """Question: {question}

    Answer: Let's think step by step."""
    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    chain = LLMChain(llm=model, prompt=qa_prompt)

    print(chain.run("Who won the FIFA World Cup in the year 1994? "))


def hugging_face_local_pipelines_test():
    """Demo: run bloom-1b7 locally through HuggingFacePipeline."""
    # pip install transformers
    from langchain import HuggingFacePipeline, PromptTemplate, LLMChain

    model = HuggingFacePipeline.from_model_id(
        model_id="bigscience/bloom-1b7",
        task="text-generation",
        model_kwargs={"temperature": 0, "max_length": 64},
    )

    qa_template = """Question: {question}

    Answer: Let's think step by step."""
    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])

    chain = LLMChain(llm=model, prompt=qa_prompt)
    print(chain.run("What is electroencephalography?"))


def hugging_face_text_gen_inference_test():
    """Demo: call a local text-generation-inference server directly."""
    # pip3 install text_generation
    server = HuggingFaceTextGenInference(
        inference_server_url="http://localhost:8010/",
        max_new_tokens=512,
        top_k=10,
        top_p=0.95,
        typical_p=0.95,
        temperature=0.01,
        repetition_penalty=1.03,
    )
    server("What did foo say about bar?")


def json_former_test():
    """Demo: constrain HF text generation to a JSON schema with JsonFormer.

    Fix: the module-level ``import pipeline`` binds an unrelated module named
    ``pipeline``; calling it as ``pipeline("text-generation", ...)`` raises a
    TypeError. The Hugging Face ``transformers.pipeline`` factory is what
    this code needs, so it is imported locally here.
    """
    # !pip install --upgrade jsonformer > /dev/null
    from transformers import pipeline  # shadows the bogus module-level import

    logging.basicConfig(level=logging.ERROR)
    HF_TOKEN = os.environ.get("HUGGINGFACE_API_KEY")

    @tool
    def ask_star_coder(query: str, temperature: float = 1.0, max_new_tokens: float = 250):
        """Query the BigCode StarCoder model about coding questions."""
        url = "https://api-inference.huggingface.co/models/bigcode/starcoder"
        headers = {
            "Authorization": f"Bearer {HF_TOKEN}",
            "content-type": "application/json",
        }
        payload = {
            "inputs": f"{query}\n\nAnswer:",
            "temperature": temperature,
            "max_new_tokens": int(max_new_tokens),
        }
        response = requests.post(url, headers=headers, data=json.dumps(payload))
        response.raise_for_status()
        return json.loads(response.content.decode("utf-8"))

    prompt = """You must respond using JSON format, with a single action and single action input.
    You may 'ask_star_coder' for help on coding problems.

    {arg_schema}

    EXAMPLES
    ----
    Human: "So what's all this about a GIL?"
    AI Assistant:{{
      "action": "ask_star_coder",
      "action_input": {{"query": "What is a GIL?", "temperature": 0.0, "max_new_tokens": 100}}"
    }}
    Observation: "The GIL is python's Global Interpreter Lock"
    Human: "Could you please write a calculator program in LISP?"
    AI Assistant:{{
      "action": "ask_star_coder",
      "action_input": {{"query": "Write a calculator program in LISP", "temperature": 0.0, "max_new_tokens": 250}}
    }}
    Observation: "(defun add (x y) (+ x y))\n(defun sub (x y) (- x y ))"
    Human: "What's the difference between an SVM and an LLM?"
    AI Assistant:{{
      "action": "ask_star_coder",
      "action_input": {{"query": "What's the difference between SGD and an SVM?", "temperature": 1.0, "max_new_tokens": 250}}
    }}
    Observation: "SGD stands for stochastic gradient descent, while an SVM is a Support Vector Machine."

    BEGIN! Answer the Human's question as best as you are able.
    ------
    Human: 'What's the difference between an iterator and an iterable?'
    AI Assistant:""".format(
        arg_schema=ask_star_coder.args
    )
    hf_model = pipeline(
        "text-generation", model="cerebras/Cerebras-GPT-590M", max_new_tokens=200
    )

    original_model = HuggingFacePipeline(pipeline=hf_model)

    # Unconstrained baseline generation, for comparison with JsonFormer below.
    generated = original_model.predict(prompt, stop=["Observation:", "Human:"])
    print(generated)

    # JSON schema the constrained decoder must conform to.
    decoder_schema = {
        "title": "Decoding Schema",
        "type": "object",
        "properties": {
            "action": {"type": "string", "default": ask_star_coder.name},
            "action_input": {
                "type": "object",
                "properties": ask_star_coder.args,
            },
        },
    }

    json_former = JsonFormer(json_schema=decoder_schema, pipeline=hf_model)
    results = json_former.predict(prompt, stop=["Observation:", "Human:"])
    print(results)


def llama_cpp_test():
    """Demo: answer one question with a local llama.cpp model, streaming tokens."""
    # !pip install llama-cpp-python
    qa_template = """Question: {question}

    Answer: Let's work this out in a step by step way to be sure we have the right answer."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    # Streaming tokens go straight to stdout; verbose is required when a
    # callback manager is supplied. Make sure model_path matches your system.
    model = LlamaCpp(
        model_path="./ggml-model-q4_0.bin",
        callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
        verbose=True,
    )
    LLMChain(llm=model, prompt=qa_prompt).run(
        "What NFL team won the Super Bowl in the year Justin Bieber was born?"
    )


def manifest_test():
    """Demo: Manifest-backed LLMs — map-reduce summarization, then a three-endpoint comparison."""
    # pip install manifest-ml
    manifest = Manifest(client_name="huggingface", client_connection="http://127.0.0.1:5000")
    # Sanity check: print the served model's parameters.
    print(manifest.client_pool.get_current_client().get_model_params())
    llm = ManifestWrapper(
        client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 256}
    )
    # Summarization prompt applied to each split chunk of the document.
    _prompt = """Write a concise summary of the following:


    {text}


    CONCISE SUMMARY:"""
    prompt = PromptTemplate(template=_prompt, input_variables=["text"])

    text_splitter = CharacterTextSplitter()

    # Map-reduce: summarize chunks, then summarize the summaries.
    mp_chain = MapReduceChain.from_params(llm, prompt, text_splitter)
    with open("../../../state_of_the_union.txt") as f:
        state_of_the_union = f.read()
    mp_chain.run(state_of_the_union)

    # Compare three Manifest endpoints (ports 5000-5002) on the same prompt.
    manifest1 = ManifestWrapper(
        client=Manifest(
            client_name="huggingface", client_connection="http://127.0.0.1:5000"
        ),
        llm_kwargs={"temperature": 0.01},
    )
    manifest2 = ManifestWrapper(
        client=Manifest(
            client_name="huggingface", client_connection="http://127.0.0.1:5001"
        ),
        llm_kwargs={"temperature": 0.01},
    )
    manifest3 = ManifestWrapper(
        client=Manifest(
            client_name="huggingface", client_connection="http://127.0.0.1:5002"
        ),
        llm_kwargs={"temperature": 0.01},
    )
    llms = [manifest1, manifest2, manifest3]
    model_lab = ModelLaboratory(llms)
    model_lab.compare("What color is a flamingo?")


def modal_test():
    """Demo: answer one question via a Modal web endpoint."""
    # pip install modal-client
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    chain = LLMChain(llm=Modal(endpoint_url="YOUR_ENDPOINT_URL"), prompt=qa_prompt)
    chain.run("What NFL team won the Super Bowl in the year Justin Beiber was born?")


def mosaic_ml_test():
    """Demo: answer one question with MosaicML inference."""
    # Sign up: https://forms.mosaicml.com/demo?utm_source=langchain
    from getpass import getpass

    os.environ["MOSAICML_API_TOKEN"] = getpass()

    qa_prompt = PromptTemplate(
        template="""Question: {question}""", input_variables=["question"]
    )
    model = MosaicML(inject_instruction_format=True, model_kwargs={"do_sample": False})
    LLMChain(llm=model, prompt=qa_prompt).run(
        "What is one good reason why you should train a large language model on domain specific data?"
    )


def npl_cloud_test():
    """Demo: answer one question with NLP Cloud.

    NOTE(review): the name reads ``npl`` rather than ``nlp``; kept as-is to
    avoid breaking callers.
    """
    # pip install nlpcloud
    # Token: https://docs.nlpcloud.com/#authentication
    from getpass import getpass

    os.environ["NLPCLOUD_API_KEY"] = getpass()
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    chain = LLMChain(llm=NLPCloud(), prompt=qa_prompt)
    chain.run("What NFL team won the Super Bowl in the year Justin Beiber was born?")


def openai_test():
    """Demo: answer one question with the default OpenAI completion model."""
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    LLMChain(llm=OpenAI(), prompt=qa_prompt).run(
        "What NFL team won the Super Bowl in the year Justin Beiber was born?"
    )
    # Behind an explicit proxy, route traffic via OPENAI_PROXY:
    # os.environ["OPENAI_PROXY"] = "http://proxy.yourcompany.com:8080"


def openlm_test():
    """Demo: run the same question through OpenAI and a HF Hub model via OpenLM.

    Fix: removed the unused local ``import subprocess``.
    """
    # pip install openlm
    from getpass import getpass
    import os

    # Prompt for credentials only when they are not already in the environment.
    if "OPENAI_API_KEY" not in os.environ:
        print("Enter your OpenAI API key:")
        os.environ["OPENAI_API_KEY"] = getpass()

    if "HF_API_TOKEN" not in os.environ:
        print("Enter your HuggingFace Hub API key:")
        os.environ["HF_API_TOKEN"] = getpass()
    question = "What is the capital of France?"
    template = """Question: {question}

    Answer: Let's think step by step."""

    prompt = PromptTemplate(template=template, input_variables=["question"])

    # Same chain, two backends: OpenAI's completion API and a HF Hub model.
    for model in ["text-davinci-003", "huggingface.co/gpt2"]:
        llm = OpenLM(model=model)
        llm_chain = LLMChain(prompt=prompt, llm=llm)
        result = llm_chain.run(question)
        print(
            """Model: {}
    Result: {}""".format(
                model, result
            )
        )


def petals_test():
    """Demo: answer one question with BLOOM over the Petals swarm."""
    # pip install petals
    os.environ["HUGGINGFACE_API_KEY"] = getpass()

    # Downloading the model shards can take several minutes!
    model = Petals(model_name="bigscience/bloom-petals")
    qa_template = """Question: {question}

    Answer: Let's think step by step."""

    qa_prompt = PromptTemplate(template=qa_template, input_variables=["question"])
    LLMChain(llm=model, prompt=qa_prompt).run(
        "What NFL team won the Super Bowl in the year Justin Beiber was born?"
    )


def pipeline_ai_test():
    """Demo: answer one question through a PipelineAI-hosted pipeline.

    Fix: the original passed ``pipeline_kwargs={...}``, which in Python is a
    *set* literal containing ``Ellipsis`` — not an empty dict — and fails
    validation since pipeline_kwargs must be a mapping. An empty dict is used
    as the placeholder instead; fill in real pipeline kwargs as needed.
    """
    # pip install pipeline-ai
    os.environ["PIPELINE_API_KEY"] = "YOUR_API_KEY_HERE"
    llm = PipelineAI(pipeline_key="YOUR_PIPELINE_KEY", pipeline_kwargs={})
    template = """Question: {question}

    Answer: Let's think step by step."""

    prompt = PromptTemplate(template=template, input_variables=["question"])
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"

    llm_chain.run(question)


def prediction_guard_test():
    """Demo: Prediction Guard — plain calls, categorical output "guarding", and LLMChains."""
    # pip install predictionguard langchain
    # Optional, add your OpenAI API Key. This is optional, as Prediction Guard allows
    # you to access all the latest open access models (see https://docs.predictionguard.com)
    os.environ["OPENAI_API_KEY"] = "<your OpenAI api key>"

    # Your Prediction Guard API key. Get one at predictionguard.com
    os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>"
    pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
    pgllm("Tell me a joke")
    # Classification prompt over a fixed social-media-post context.
    template = """Respond to the following query based on the context.

    Context: EVERY comment, DM + email suggestion has led us to this EXCITING announcement! 🎉 We have officially added TWO new candle subscription box options! 📦
    Exclusive Candle Box - $80 
    Monthly Candle Box - $45 (NEW!)
    Scent of The Month Box - $28 (NEW!)
    Head to stories to get ALLL the deets on each box! 👆 BONUS: Save 50% on your first box with code 50OFF! 🎉

    Query: {query}

    Result: """
    prompt = PromptTemplate(template=template, input_variables=["query"])
    # Without "guarding" or controlling the output of the LLM.
    pgllm(prompt.format(query="What kind of post is this?"))
    # With "guarding" or controlling the output of the LLM. See the
    # Prediction Guard docs (https://docs.predictionguard.com) to learn how to
    # control the output with integer, float, boolean, JSON, and other types and
    # structures.
    pgllm = PredictionGuard(
        model="OpenAI-text-davinci-003",
        output={
            "type": "categorical",
            "categories": ["product announcement", "apology", "relational"],
        },
    )
    pgllm(prompt.format(query="What kind of post is this?"))
    # Fresh unguarded instance for the chain examples below.
    pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
    template = """Question: {question}

    Answer: Let's think step by step."""
    prompt = PromptTemplate(template=template, input_variables=["question"])
    llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)

    question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"

    llm_chain.predict(question=question)
    # Multi-variable prompt example.
    template = """Write a {adjective} poem about {subject}."""
    prompt = PromptTemplate(template=template, input_variables=["adjective", "subject"])
    llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)

    llm_chain.predict(adjective="sad", subject="ducks")


def prompt_layer_openai_test():
    """Demo: log OpenAI requests to PromptLayer and score one of them."""
    # pip install promptlayer
    os.environ["PROMPTLAYER_API_KEY"] = getpass()

    tagged = PromptLayerOpenAI(pl_tags=["langchain"])
    tagged("I am a cat and I want")

    # return_pl_id exposes the PromptLayer request id for later scoring.
    tracked = PromptLayerOpenAI(return_pl_id=True)
    llm_results = tracked.generate(["Tell me a joke"])

    for generation in llm_results.generations:
        request_id = generation[0].generation_info["pl_request_id"]
        promptlayer.track.score(request_id=request_id, score=100)


def rellm_test():
    """Demo: constrain HF text generation to a regex pattern with RELLM.

    Fix: the module-level ``import pipeline`` binds an unrelated module, so
    calling ``pipeline("text-generation", ...)`` raises a TypeError; the
    Hugging Face ``transformers.pipeline`` factory is imported locally
    instead.
    """
    # !pip install rellm > /dev/null
    import logging

    from transformers import pipeline  # shadows the bogus module-level import

    logging.basicConfig(level=logging.ERROR)
    prompt = """Human: "What's the capital of the United States?"
    AI Assistant:{
      "action": "Final Answer",
      "action_input": "The capital of the United States is Washington D.C."
    }
    Human: "What's the capital of Pennsylvania?"
    AI Assistant:{
      "action": "Final Answer",
      "action_input": "The capital of Pennsylvania is Harrisburg."
    }
    Human: "What 2 + 5?"
    AI Assistant:{
      "action": "Final Answer",
      "action_input": "2 + 5 = 7."
    }
    Human: 'What's the capital of Maryland?'
    AI Assistant:"""
    hf_model = pipeline(
        "text-generation", model="cerebras/Cerebras-GPT-590M", max_new_tokens=200
    )

    original_model = HuggingFacePipeline(pipeline=hf_model)

    # Unconstrained baseline generation, for comparison with RELLM below.
    generated = original_model.generate([prompt], stop=["Human:"])
    print(generated)

    import regex  # Note this is the regex library NOT python's re stdlib module

    # We'll choose a regex that matches to a structured json string that looks like:
    # {
    #  "action": "Final Answer",
    # "action_input": string or dict
    # }
    pattern = regex.compile(
        r'\{\s*"action":\s*"Final Answer",\s*"action_input":\s*(\{.*\}|"[^"]*")\s*\}\nHuman:'
    )

    model = RELLM(pipeline=hf_model, regex=pattern, max_new_tokens=200)

    generated = model.predict(prompt, stop=["Human:"])
    print(generated)


def replicate_test():
    """Demo Replicate-hosted models: text LLM, text-to-image, and a 3-step chain.

    Requires: pip install replicate
    """
    # Collect the API token interactively and expose it via the environment.
    os.environ["REPLICATE_API_TOKEN"] = getpass()

    # Constructing the wrapper alone exercises model-ref parsing.
    Replicate(
        model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf",
        input={"image_dimensions": "512x512"},
    )

    # Text LLM (Dolly) answering a step-by-step reasoning question.
    llm = Replicate(
        model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"
    )
    prompt = """
    Answer the following yes/no question by reasoning step by step. 
    Can a dog drive a car?
    """
    print(llm(prompt))

    # Text-to-image model; calling it yields a URL to the generated image.
    text2image = Replicate(
        model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf",
        input={"image_dimensions": "512x512"},
    )
    image_output = text2image("A cat riding a motorcycle by Picasso")
    print(image_output)

    # Download the image bytes and open them with PIL.
    from PIL import Image
    import requests
    from io import BytesIO

    response = requests.get(image_output)
    img = Image.open(BytesIO(response.content))

    # Chain: product -> company name -> logo description -> logo image.
    dolly_llm = Replicate(
        model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"
    )
    text2image = Replicate(
        model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf"
    )

    name_prompt = PromptTemplate(
        input_variables=["product"],
        template="What is a good name for a company that makes {product}?",
    )
    logo_prompt = PromptTemplate(
        input_variables=["company_name"],
        template="Write a description of a logo for this company: {company_name}",
    )
    image_prompt = PromptTemplate(
        input_variables=["company_logo_description"],
        template="{company_logo_description}",
    )

    name_chain = LLMChain(llm=dolly_llm, prompt=name_prompt)
    logo_chain = LLMChain(llm=dolly_llm, prompt=logo_prompt)
    image_chain = LLMChain(llm=text2image, prompt=image_prompt)

    # Run the chain specifying only the input variable for the first chain.
    overall_chain = SimpleSequentialChain(
        chains=[name_chain, logo_chain, image_chain], verbose=True
    )
    catchphrase = overall_chain.run("colorful socks")
    print(catchphrase)


def runhouse_test():
    """Demo self-hosted LLMs on a Runhouse cluster via several load strategies.

    Requires: !pip install runhouse
    NOTE(review): relies on `rh` (runhouse) and `PromptTemplate` being imported
    at module level — confirm against the rest of the file.
    """
    # For an on-demand A100 with GCP, Azure, or Lambda
    gpu = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)

    # For an on-demand A10G with AWS (no single A100s on AWS)
    # gpu = rh.cluster(name='rh-a10x', instance_type='g5.2xlarge', provider='aws')

    # For an existing cluster
    # gpu = rh.cluster(ips=['<ip of the cluster>'],
    #                  ssh_creds={'ssh_user': '...', 'ssh_private_key':'<path_to_key>'},
    #                  name='rh-a10x')

    # Packages the remote cluster must install to run the models.
    # FIX: this was previously referenced at the SelfHostedPipeline call below
    # without ever being defined, which raised NameError.
    model_reqs = ["pip:./", "transformers", "torch"]

    template = """Question: {question}

    Answer: Let's think step by step."""

    prompt = PromptTemplate(template=template, input_variables=["question"])

    # 1) Named HF model loaded remotely by model id.
    llm = SelfHostedHuggingFaceLLM(
        model_id="gpt2", hardware=gpu, model_reqs=model_reqs
    )
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
    print(llm_chain.run(question))

    # 2) A seq2seq model needs its task specified explicitly.
    llm = SelfHostedHuggingFaceLLM(
        model_id="google/flan-t5-small",
        task="text2text-generation",
        hardware=gpu,
    )
    print(llm("What is the capital of Germany?"))

    def load_pipeline():
        """Build a local transformers text-generation pipeline for gpt2."""
        from transformers import (
            AutoModelForCausalLM,
            AutoTokenizer,
            pipeline,
        )  # Need to be inside the fn in notebooks

        model_id = "gpt2"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(model_id)
        pipe = pipeline(
            "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
        )
        return pipe

    def inference_fn(pipeline, prompt, stop=None):
        """Return only the newly generated text (strip the echoed prompt)."""
        return pipeline(prompt)[0]["generated_text"][len(prompt):]

    # 3) Custom load + inference functions executed on the cluster.
    llm = SelfHostedHuggingFaceLLM(
        model_load_fn=load_pipeline, hardware=gpu, inference_fn=inference_fn
    )
    llm("Who is the current US president?")

    # 4) Ship an already-constructed pipeline object to the cluster.
    pipeline = load_pipeline()
    llm = SelfHostedPipeline.from_pipeline(
        pipeline=pipeline, hardware=gpu, model_reqs=model_reqs
    )

    # 5) Persist the pipeline as a pickled blob on the cluster, then load by path.
    rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(
        gpu, path="models"
    )
    llm = SelfHostedPipeline.from_pipeline(pipeline="models/pipeline.pkl", hardware=gpu)


def sage_maker_endpoint_test():
    """Demo question-answering against a SageMaker inference endpoint.

    Requires: !pip3 install langchain boto3
    NOTE(review): relies on `Document` being imported at module level.
    """
    example_doc_1 = """
    Peter and Elizabeth took a taxi to attend the night party in the city. While in the party, Elizabeth collapsed and was rushed to the hospital.
    Since she was diagnosed with a brain injury, the doctor told Peter to stay besides her until she gets well.
    Therefore, Peter stayed with her at the hospital for 3 days without leaving.
    """

    docs = [
        Document(
            page_content=example_doc_1,
        )
    ]
    from typing import Dict

    from langchain import PromptTemplate, SagemakerEndpoint
    from langchain.llms.sagemaker_endpoint import LLMContentHandler
    from langchain.chains.question_answering import load_qa_chain
    import json

    query = """How long was Elizabeth hospitalized?
    """

    prompt_template = """Use the following pieces of context to answer the question at the end.

    {context}

    Question: {question}
    Answer:"""
    PROMPT = PromptTemplate(
        template=prompt_template, input_variables=["context", "question"]
    )

    class ContentHandler(LLMContentHandler):
        """Serialize prompts to / deserialize completions from the endpoint."""

        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
            # FIX: previously {prompt: prompt, ...} — the *variable* was used as
            # the dict key, so the prompt text became its own key. The HF
            # inference payload key is "inputs".
            input_str = json.dumps({"inputs": prompt, **model_kwargs})
            return input_str.encode("utf-8")

        def transform_output(self, output: bytes) -> str:
            # NOTE(review): `output` is annotated bytes but `.read()` implies the
            # runtime object is a botocore StreamingBody — confirm with the
            # SagemakerEndpoint caller before changing.
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json[0]["generated_text"]

    content_handler = ContentHandler()

    # Placeholder endpoint/profile names — replace with real AWS values.
    chain = load_qa_chain(
        llm=SagemakerEndpoint(
            endpoint_name="endpoint-name",
            credentials_profile_name="credentials-profile-name",
            region_name="us-west-2",
            model_kwargs={"temperature": 1e-10},
            content_handler=content_handler,
        ),
        prompt=PROMPT,
    )

    chain({"input_documents": docs, "question": query}, return_only_outputs=True)


def stochastic_ai_test():
    """Demo a StochasticAI-hosted model inside a simple LLMChain."""
    # Collect the API key and endpoint URL interactively.
    os.environ["STOCHASTICAI_API_KEY"] = getpass()
    api_url = getpass()

    template = """Question: {question}

    Answer: Let's think step by step."""

    chain = LLMChain(
        prompt=PromptTemplate(template=template, input_variables=["question"]),
        llm=StochasticAI(api_url=api_url),
    )
    chain.run("What NFL team won the Super Bowl in the year Justin Beiber was born?")


def writer_test():
    """Demo the Writer LLM inside a simple LLMChain."""
    # Collect the API key interactively.
    os.environ["WRITER_API_KEY"] = getpass()

    template = """Question: {question}

    Answer: Let's think step by step."""

    # If you get an error, probably, you need to set up the "base_url" parameter that can be taken from the error log.
    chain = LLMChain(
        prompt=PromptTemplate(template=template, input_variables=["question"]),
        llm=Writer(),
    )
    chain.run("What NFL team won the Super Bowl in the year Justin Beiber was born?")



if __name__ == '__main__':
    sage_maker_endpoint_test()