{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain import HuggingFacePipeline\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n",
    "\n",
    "checkpoint = \"Salesforce/codegen-350M-mono\"\n",
    "model = AutoModelForCausalLM.from_pretrained(checkpoint)\n",
    "tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n",
    "pipe = pipeline(\n",
    "    task=\"text-generation\",\n",
    "    model=model,\n",
    "    tokenizer=tokenizer,\n",
    "    max_new_tokens=500\n",
    ")\n",
    "text = \"\"\"\n",
    "def calculate_primes(n):\n",
    "    \\\"\\\"\\\"Create a list of consecutive integers from 2 up to N.\n",
    "\n",
    "    For example:\n",
    "    >>> calculate_primes(20)\n",
    "    Output: [2, 3, 5, 7, 11, 13, 17, 19]\n",
    "    \\\"\\\"\\\"\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "completion = pipe(text)\n",
    "print(completion[0][\"generated_text\"])"
   ]
  },
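  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A hand-written sieve of Eratosthenes, a reference sketch (not model\n",
    "# output) to sanity-check whatever calculate_primes the model generates\n",
    "def calculate_primes_reference(n):\n",
    "    is_prime = [True] * (n + 1)\n",
    "    is_prime[0] = is_prime[1] = False\n",
    "    for i in range(2, int(n ** 0.5) + 1):\n",
    "        if is_prime[i]:\n",
    "            for j in range(i * i, n + 1, i):\n",
    "                is_prime[j] = False\n",
    "    return [i for i in range(2, n + 1) if is_prime[i]]\n",
    "\n",
    "print(calculate_primes_reference(20))  # [2, 3, 5, 7, 11, 13, 17, 19]"
   ]
  },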
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm = HuggingFacePipeline(pipeline=pipe)\n",
    "print(llm(text))"
   ]
  },
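  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# HuggingFaceHub calls the hosted Inference API and reads a Hugging Face\n",
    "# access token from the environment; prompt for one if it isn't already\n",
    "# set (an assumption about your setup)\n",
    "import os\n",
    "from getpass import getpass\n",
    "\n",
    "if \"HUGGINGFACEHUB_API_TOKEN\" not in os.environ:\n",
    "    os.environ[\"HUGGINGFACEHUB_API_TOKEN\"] = getpass(\"Hugging Face API token: \")"
   ]
  },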
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain import HuggingFaceHub\n",
    "\n",
    "llm = HuggingFaceHub(\n",
    "    task=\"text-generation\",\n",
    "    repo_id=\"HuggingFaceH4/starchat-alpha\",\n",
    "    model_kwargs={\n",
    "        \"temperature\": 0.5,\n",
    "        \"max_length\": 1000\n",
    "    }\n",
    ")\n",
    "print(llm(text))"
   ]
  },
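  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# VertexAI needs the google-cloud-aiplatform package and Google Cloud\n",
    "# application-default credentials; one common setup (assumptions about\n",
    "# your environment) is:\n",
    "#\n",
    "# !pip install google-cloud-aiplatform\n",
    "# !gcloud auth application-default login"
   ]
  },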
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.llms import VertexAI\n",
    "from langchain import PromptTemplate, LLMChain\n",
    "\n",
    "template = \"\"\"Question: {question}\n",
    "Let's think step by step.\n",
    "\n",
    "Answer:\n",
    "\"\"\"\n",
    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
    "llm = VertexAI(\n",
    "    model_name=\"codechat-bison\", max_output_tokens=5000,\n",
    ")\n",
    "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
    "print(llm_chain.run(text))"
   ]
  },
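  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Peek at the exact string the chain sends to the model: the template\n",
    "# with {question} filled in by our code-completion prompt\n",
    "print(prompt.format(question=text))"
   ]
  },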
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.llms.fake import FakeListLLM\n",
    "from langchain.agents import load_tools, initialize_agent, AgentType\n",
    "\n",
    "responses = [\"Action: Python_REPL\\nAction Input: print(2 + 2)\", \"Final Answer: 4\"]\n",
    "llm = FakeListLLM(responses=responses)\n",
    "tools = load_tools([\"python_repl\"])\n",
    "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n",
    "result = agent(\"What is 2 + 2?\")\n",
    "print(result)"
   ]
  },
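  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# The OpenAI LLM reads OPENAI_API_KEY from the environment; prompt for it\n",
    "# if it is missing (an assumption about your setup)\n",
    "import os\n",
    "from getpass import getpass\n",
    "\n",
    "if \"OPENAI_API_KEY\" not in os.environ:\n",
    "    os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI API key: \")"
   ]
  },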
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.llms.openai import OpenAI\n",
    "from langchain.agents import load_tools, initialize_agent, AgentType\n",
    "\n",
    "llm = OpenAI()\n",
    "tools = load_tools([\"python_repl\"])\n",
    "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n",
    "result = agent(\"What are the prime numbers until 20?\")\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.chains"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
