[ { "url": "https://github.com/ollama/ollama", "readme_url": "https://raw.githubusercontent.com/ollama/ollama/main/README.md", "topic": [ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ], "text": "Windows\n\nComing soon! For now, you can install Ollama on Windows via WSL2.\n\n", "sentence": [ [ "window", "coming", "soon", "!", ",", "install", "ollama", "window", "via", "wsl2", "." ], [ "window coming soon !", ", install ollama window via wsl2 ." ] ], "token": [ [ "window", "coming", "soon", "!", ",", "install", "ollama", "window", "via", "wsl2", "." ], [ "window coming soon !", ", install ollama window via wsl2 ." ] ], "level of complexity": -1 }, { "url": "https://github.com/ollama/ollama", "readme_url": "https://raw.githubusercontent.com/ollama/ollama/main/README.md", "topic": [ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ], "text": "Linux & WSL2\n\n```\ncurl https://ollama.ai/install.sh | sh\n```\n\n[Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)\n\n", "sentence": [ [ "linux", "&", "wsl2", "``", "`", "curl", "http", ":", "//ollama.ai/install.sh", "|", "sh", "``", "`", "[", "manual", "install", "instruction", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/linux.md", ")" ], [ "linux & wsl2 `` ` curl http : //ollama.ai/install.sh | sh `` ` [ manual install instruction ] ( http : //github.com/jmorganca/ollama/blob/main/docs/linux.md )" ] ], "token": [ [ "linux", "&", "wsl2", "``", "`", "curl", "http", ":", "//ollama.ai/install.sh", "|", "sh", "``", "`", "[", "manual", "install", "instruction", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/linux.md", ")" ], [ "linux & wsl2 `` ` curl http : //ollama.ai/install.sh | sh `` ` [ manual install instruction ] ( http : //github.com/jmorganca/ollama/blob/main/docs/linux.md )" ] ], "level of complexity": -1 }, { "url": "https://github.com/ollama/ollama", "readme_url": "https://raw.githubusercontent.com/ollama/ollama/main/README.md", "topic": [ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ], "text": "Building\n\nInstall `cmake` and `go`:\n\n```\nbrew install cmake go\n```\n\nThen generate dependencies:\n\n```\ngo generate ./...\n```\n\nThen build the binary:\n\n```\ngo build .\n```\n\nMore detailed instructions can be found in the [developer guide](https://github.com/jmorganca/ollama/blob/main/docs/development.md)\n\n", "sentence": [ [ "building", "install", "`", "cmake", "`", "`", "go", "`", ":", "``", "`", "brew", "install", "cmake", "go", "``", "`", "generate", "dependency", ":", "``", "`", "go", "generate", "./", "...", "``", "`", "build", "binary", ":", "``", "`", "go", "build", ".", "``", "`", "detailed", "instruction", "found", "[", "developer", "guide", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/development.md", ")" ], [ "building install ` cmake ` ` go ` : `` ` brew install cmake go `` ` generate dependency : `` ` go generate ./ ... `` ` build binary : `` ` go build .", "`` ` detailed instruction found [ developer guide ] ( http : //github.com/jmorganca/ollama/blob/main/docs/development.md )" ] ], "token": [ [ "building", "install", "`", "cmake", "`", "`", "go", "`", ":", "``", "`", "brew", "install", "cmake", "go", "``", "`", "generate", "dependency", ":", "``", "`", "go", "generate", "./", "...", "``", "`", "build", "binary", ":", "``", "`", "go", "build", ".", "``", "`", "detailed", "instruction", "found", "[", "developer", "guide", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/development.md", ")" ], [ "building install ` cmake ` ` go ` : `` ` brew install cmake go `` ` generate dependency : `` ` go generate ./ ... `` ` build binary : `` ` go build .", "`` ` detailed instruction found [ developer guide ] ( http : //github.com/jmorganca/ollama/blob/main/docs/development.md )" ] ], "level of complexity": -1 }, { "url": "https://github.com/ollama/ollama", "readme_url": "https://raw.githubusercontent.com/ollama/ollama/main/README.md", "topic": [ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ], "text": "Extensions & Plugins\n\n- [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama)\n- [Discollama](https://github.com/mxyng/discollama) (Discord bot inside the Ollama discord channel)\n- [Continue](https://github.com/continuedev/continue)\n- [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama)\n- [Logseq Ollama plugin](https://github.com/omagdy7/ollama-logseq)\n- [Dagger Chatbot](https://github.com/samalba/dagger-chatbot)\n- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot)\n- [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram)\n- [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation)\n- [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama)\n- [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama)\n- [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot)\n- [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama)\n- [twinny](https://github.com/rjmacarthy/twinny) (Copilot and Copilot chat alternative using Ollama)\n- [Wingman-AI](https://github.com/RussellCanfield/wingman-ai) (Copilot code and chat alternative using Ollama and HuggingFace)\n", "sentence": [ [ "extension", "&", "plugins", "-", "[", "raycast", "extension", "]", "(", "http", ":", "//github.com/massimilianopasquini97/raycast_ollama", ")", "-", "[", "discollama", "]", "(", "http", ":", "//github.com/mxyng/discollama", ")", "(", "discord", "bot", "inside", "ollama", "discord", "channel", ")", "-", "[", "continue", "]", "(", "http", ":", "//github.com/continuedev/continue", ")", "-", "[", "obsidian", "ollama", "plugin", "]", "(", "http", ":", "//github.com/hinterdupfinger/obsidian-ollama", ")", "-", "[", "logseq", "ollama", "plugin", "]", "(", "http", ":", "//github.com/omagdy7/ollama-logseq", ")", "-", "[", "dagger", "chatbot", "]", "(", "http", ":", "//github.com/samalba/dagger-chatbot", ")", "-", "[", "discord", "ai", "bot", "]", "(", "http", ":", "//github.com/mekb-turtle/discord-ai-bot", ")", "-", "[", "ollama", "telegram", "bot", "]", "(", "http", ":", "//github.com/ruecat/ollama-telegram", ")", "-", "[", "ha", "ollama", "conversation", "]", "(", "http", ":", "//github.com/ej52/hass-ollama-conversation", ")", "-", "[", "rivet", "plugin", "]", "(", "http", ":", "//github.com/abrenneke/rivet-plugin-ollama", ")", "-", "[", "llama", "coder", "]", "(", "http", ":", "//github.com/ex3ndr/llama-coder", ")", "(", "copilot", "alternative", "using", "ollama", ")", "-", "[", "obsidian", "bmo", "chatbot", "plugin", "]", "(", "http", ":", "//github.com/longy2k/obsidian-bmo-chatbot", ")", "-", "[", "open", "interpreter", "]", "(", "http", ":", "//docs.openinterpreter.com/language-model-setup/local-models/ollama", ")", "-", "[", "twinny", "]", "(", "http", ":", "//github.com/rjmacarthy/twinny", ")", "(", "copilot", "copilot", "chat", "alternative", "using", "ollama", ")", "-", "[", "wingman-ai", "]", "(", "http", ":", "//github.com/russellcanfield/wingman-ai", ")", "(", "copilot", "code", "chat", "alternative", "using", "ollama", "huggingface", ")" ], [ "extension & plugins - [ raycast extension ] ( http : //github.com/massimilianopasquini97/raycast_ollama ) - [ discollama ] ( http : //github.com/mxyng/discollama ) ( discord bot inside ollama discord channel ) - [ continue ] ( http : //github.com/continuedev/continue ) - [ obsidian ollama plugin ] ( http : //github.com/hinterdupfinger/obsidian-ollama ) - [ logseq ollama plugin ] ( http : //github.com/omagdy7/ollama-logseq ) - [ dagger chatbot ] ( http : //github.com/samalba/dagger-chatbot ) - [ discord ai bot ] ( http : //github.com/mekb-turtle/discord-ai-bot ) - [ ollama telegram bot ] ( http : //github.com/ruecat/ollama-telegram ) - [ ha ollama conversation ] ( http : //github.com/ej52/hass-ollama-conversation ) - [ rivet plugin ] ( http : //github.com/abrenneke/rivet-plugin-ollama ) - [ llama coder ] ( http : //github.com/ex3ndr/llama-coder ) ( copilot alternative using ollama ) - [ obsidian bmo chatbot plugin ] ( http : //github.com/longy2k/obsidian-bmo-chatbot ) - [ open interpreter ] ( http : //docs.openinterpreter.com/language-model-setup/local-models/ollama ) - [ twinny ] ( http : //github.com/rjmacarthy/twinny ) ( copilot copilot chat alternative using ollama ) - [ wingman-ai ] ( http : //github.com/russellcanfield/wingman-ai ) ( copilot code chat alternative using ollama huggingface )" ] ], "token": [ [ "extension", "&", "plugins", "-", "[", "raycast", "extension", "]", "(", "http", ":", "//github.com/massimilianopasquini97/raycast_ollama", ")", "-", "[", "discollama", "]", "(", "http", ":", "//github.com/mxyng/discollama", ")", "(", "discord", "bot", "inside", "ollama", "discord", "channel", ")", "-", "[", "continue", "]", "(", "http", ":", "//github.com/continuedev/continue", ")", "-", "[", "obsidian", "ollama", "plugin", "]", "(", "http", ":", "//github.com/hinterdupfinger/obsidian-ollama", ")", "-", "[", "logseq", "ollama", "plugin", "]", "(", "http", ":", "//github.com/omagdy7/ollama-logseq", ")", "-", "[", "dagger", "chatbot", "]", "(", "http", ":", "//github.com/samalba/dagger-chatbot", ")", "-", "[", "discord", "ai", "bot", "]", "(", "http", ":", "//github.com/mekb-turtle/discord-ai-bot", ")", "-", "[", "ollama", "telegram", "bot", "]", "(", "http", ":", "//github.com/ruecat/ollama-telegram", ")", "-", "[", "ha", "ollama", "conversation", "]", "(", "http", ":", "//github.com/ej52/hass-ollama-conversation", ")", "-", "[", "rivet", "plugin", "]", "(", "http", ":", "//github.com/abrenneke/rivet-plugin-ollama", ")", "-", "[", "llama", "coder", "]", "(", "http", ":", "//github.com/ex3ndr/llama-coder", ")", "(", "copilot", "alternative", "using", "ollama", ")", "-", "[", "obsidian", "bmo", "chatbot", "plugin", "]", "(", "http", ":", "//github.com/longy2k/obsidian-bmo-chatbot", ")", "-", "[", "open", "interpreter", "]", "(", "http", ":", "//docs.openinterpreter.com/language-model-setup/local-models/ollama", ")", "-", "[", "twinny", "]", "(", "http", ":", "//github.com/rjmacarthy/twinny", ")", "(", "copilot", "copilot", "chat", "alternative", "using", "ollama", ")", "-", "[", "wingman-ai", "]", "(", "http", ":", "//github.com/russellcanfield/wingman-ai", ")", "(", "copilot", "code", "chat", "alternative", "using", "ollama", "huggingface", ")" ], [ "extension & plugins - [ raycast extension ] ( http : //github.com/massimilianopasquini97/raycast_ollama ) - [ discollama ] ( http : //github.com/mxyng/discollama ) ( discord bot inside ollama discord channel ) - [ continue ] ( http : //github.com/continuedev/continue ) - [ obsidian ollama plugin ] ( http : //github.com/hinterdupfinger/obsidian-ollama ) - [ logseq ollama plugin ] ( http : //github.com/omagdy7/ollama-logseq ) - [ dagger chatbot ] ( http : //github.com/samalba/dagger-chatbot ) - [ discord ai bot ] ( http : //github.com/mekb-turtle/discord-ai-bot ) - [ ollama telegram bot ] ( http : //github.com/ruecat/ollama-telegram ) - [ ha ollama conversation ] ( http : //github.com/ej52/hass-ollama-conversation ) - [ rivet plugin ] ( http : //github.com/abrenneke/rivet-plugin-ollama ) - [ llama coder ] ( http : //github.com/ex3ndr/llama-coder ) ( copilot alternative using ollama ) - [ obsidian bmo chatbot plugin ] ( http : //github.com/longy2k/obsidian-bmo-chatbot ) - [ open interpreter ] ( http : //docs.openinterpreter.com/language-model-setup/local-models/ollama ) - [ twinny ] ( http : //github.com/rjmacarthy/twinny ) ( copilot copilot chat alternative using ollama ) - [ wingman-ai ] ( http : //github.com/russellcanfield/wingman-ai ) ( copilot code chat alternative using ollama huggingface )" ] ], "level of complexity": -1 }, { "url": "https://github.com/geekan/MetaGPT", "readme_url": "https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md", "topic": [ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ], "text": "Install\n\n", "sentence": [ [ "install" ], [ "install" ] ], "token": [ [ "install" ], [ "install" ] ], "level of complexity": -1 }, { "url": "https://github.com/geekan/MetaGPT", "readme_url": "https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md", "topic": [ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ], "text": "Pip installation\n\n> Ensure that Python 3.9+ is installed on your system. You can check this by using: `python --version`. \n> You can use conda like this: `conda create -n metagpt python=3.9 && conda activate metagpt`\n\n```bash\npip install metagpt\nmetagpt --init-config ", "sentence": [ [ "pip", "installation", ">", "ensure", "python", "3.9+", "installed", "system", ".", "check", "using", ":", "`", "python", "--", "version", "`", ".", ">", "use", "conda", "like", ":", "`", "conda", "create", "-n", "metagpt", "python=3.9", "&", "&", "conda", "activate", "metagpt", "`", "``", "`", "bash", "pip", "install", "metagpt", "metagpt", "--", "init-config" ], [ "pip installation > ensure python 3.9+ installed system .", "check using : ` python -- version ` .", "> use conda like : ` conda create -n metagpt python=3.9 & & conda activate metagpt ` `` ` bash pip install metagpt metagpt -- init-config" ] ], "token": [ [ "pip", "installation", ">", "ensure", "python", "3.9+", "installed", "system", ".", "check", "using", ":", "`", "python", "--", "version", "`", ".", ">", "use", "conda", "like", ":", "`", "conda", "create", "-n", "metagpt", "python=3.9", "&", "&", "conda", "activate", "metagpt", "`", "``", "`", "bash", "pip", "install", "metagpt", "metagpt", "--", "init-config" ], [ "pip installation > ensure python 3.9+ installed system .", "check using : ` python -- version ` .", "> use conda like : ` conda create -n metagpt python=3.9 & & conda activate metagpt ` `` ` bash pip install metagpt metagpt -- init-config" ] ], "level of complexity": 0 }, { "url": "https://github.com/geekan/MetaGPT", "readme_url": "https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md", "topic": [ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ], "text": "it will print the repo structure with files\n```\n\ndetail installation please refer to [cli_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-stable-version)\n\n", "sentence": [ [ "print", "repo", "structure", "file", "``", "`", "detail", "installation", "please", "refer", "[", "cli_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-stable-version", ")" ], [ "print repo structure file `` ` detail installation please refer [ cli_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-stable-version )" ] ], "token": [ [ "print", "repo", "structure", "file", "``", "`", "detail", "installation", "please", "refer", "[", "cli_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-stable-version", ")" ], [ "print repo structure file `` ` detail installation please refer [ cli_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-stable-version )" ] ], "level of complexity": -1 }, { "url": "https://github.com/geekan/MetaGPT", "readme_url": "https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md", "topic": [ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ], "text": "Docker installation\n> Note: In the Windows, you need to replace \"/opt/metagpt\" with a directory that Docker has permission to create, such as \"D:\\Users\\x\\metagpt\"\n\n```bash\n", "sentence": [ [ "docker", "installation", ">", "note", ":", "window", ",", "need", "replace", "``", "/opt/metagpt", "''", "directory", "docker", "permission", "create", ",", "``", ":", "\\users\\x\\metagpt", "''", "``", "`", "bash" ], [ "docker installation > note : window , need replace `` /opt/metagpt '' directory docker permission create , `` : \\users\\x\\metagpt '' `` ` bash" ] ], "token": [ [ "docker", "installation", ">", "note", ":", "window", ",", "need", "replace", "``", "/opt/metagpt", "''", "directory", "docker", "permission", "create", ",", "``", ":", "\\users\\x\\metagpt", "''", "``", "`", "bash" ], [ "docker installation > note : window , need replace `` /opt/metagpt '' directory docker permission create , `` : \\users\\x\\metagpt '' `` ` bash" ] ], "level of complexity": -1 }, { "url": "https://github.com/geekan/MetaGPT", "readme_url": "https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md", "topic": [ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ], "text": "Step 2: Run metagpt demo with container\ndocker run --rm \\\n --privileged \\\n -v /opt/metagpt/config/config2.yaml:/app/metagpt/config/config2.yaml \\\n -v /opt/metagpt/workspace:/app/metagpt/workspace \\\n metagpt/metagpt:latest \\\n metagpt \"Create a 2048 game\"\n```\n\ndetail installation please refer to [docker_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-with-docker)\n\n", "sentence": [ [ "step", "2", ":", "run", "metagpt", "demo", "container", "docker", "run", "--", "rm", "\\", "--", "privileged", "\\", "-v", "/opt/metagpt/config/config2.yaml", ":", "/app/metagpt/config/config2.yaml", "\\", "-v", "/opt/metagpt/workspace", ":", "/app/metagpt/workspace", "\\", "metagpt/metagpt", ":", "latest", "\\", "metagpt", "``", "create", "2048", "game", "''", "``", "`", "detail", "installation", "please", "refer", "[", "docker_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-with-docker", ")" ], [ "step 2 : run metagpt demo container docker run -- rm \\ -- privileged \\ -v /opt/metagpt/config/config2.yaml : /app/metagpt/config/config2.yaml \\ -v /opt/metagpt/workspace : /app/metagpt/workspace \\ metagpt/metagpt : latest \\ metagpt `` create 2048 game '' `` ` detail installation please refer [ docker_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-with-docker )" ] ], "token": [ [ "step", "2", ":", "run", "metagpt", "demo", "container", "docker", "run", "--", "rm", "\\", "--", "privileged", "\\", "-v", "/opt/metagpt/config/config2.yaml", ":", "/app/metagpt/config/config2.yaml", "\\", "-v", "/opt/metagpt/workspace", ":", "/app/metagpt/workspace", "\\", "metagpt/metagpt", ":", "latest", "\\", "metagpt", "``", "create", "2048", "game", "''", "``", "`", "detail", "installation", "please", "refer", "[", "docker_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-with-docker", ")" ], [ "step 2 : run metagpt demo container docker run -- rm \\ -- privileged \\ -v /opt/metagpt/config/config2.yaml : /app/metagpt/config/config2.yaml \\ -v /opt/metagpt/workspace : /app/metagpt/workspace \\ metagpt/metagpt : latest \\ metagpt `` create 2048 game '' `` ` detail installation please refer [ docker_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-with-docker )" ] ], "level of complexity": 1 }, { "url": "https://github.com/geekan/MetaGPT", "readme_url": "https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md", "topic": [ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ], "text": "QuickStart & Demo Video\n- Try it on [MetaGPT Huggingface Space](https://huggingface.co/spaces/deepwisdom/MetaGPT)\n- [Matthew Berman: How To Install MetaGPT - Build A Startup With One Prompt!!](https://youtu.be/uT75J_KG_aY)\n- [Official Demo Video](https://github.com/geekan/MetaGPT/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d)\n\nhttps://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419\n\n", "sentence": [ [ "quickstart", "&", "demo", "video", "-", "try", "[", "metagpt", "huggingface", "space", "]", "(", "http", ":", "//huggingface.co/spaces/deepwisdom/metagpt", ")", "-", "[", "matthew", "berman", ":", "install", "metagpt", "-", "build", "startup", "one", "prompt", "!", "!", "]", "(", "http", ":", "//youtu.be/ut75j_kg_ay", ")", "-", "[", "official", "demo", "video", "]", "(", "http", ":", "//github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d", ")", "http", ":", "//github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ], [ "quickstart & demo video - try [ metagpt huggingface space ] ( http : //huggingface.co/spaces/deepwisdom/metagpt ) - [ matthew berman : install metagpt - build startup one prompt ! !", "] ( http : //youtu.be/ut75j_kg_ay ) - [ official demo video ] ( http : //github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d ) http : //github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ] ], "token": [ [ "quickstart", "&", "demo", "video", "-", "try", "[", "metagpt", "huggingface", "space", "]", "(", "http", ":", "//huggingface.co/spaces/deepwisdom/metagpt", ")", "-", "[", "matthew", "berman", ":", "install", "metagpt", "-", "build", "startup", "one", "prompt", "!", "!", "]", "(", "http", ":", "//youtu.be/ut75j_kg_ay", ")", "-", "[", "official", "demo", "video", "]", "(", "http", ":", "//github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d", ")", "http", ":", "//github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ], [ "quickstart & demo video - try [ metagpt huggingface space ] ( http : //huggingface.co/spaces/deepwisdom/metagpt ) - [ matthew berman : install metagpt - build startup one prompt ! !", "] ( http : //youtu.be/ut75j_kg_ay ) - [ official demo video ] ( http : //github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d ) http : //github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ] ], "level of complexity": -1 }, { "url": "https://github.com/geekan/MetaGPT", "readme_url": "https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md", "topic": [ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ], "text": "Tutorial\n\n- \ud83d\uddd2 [Online Document](https://docs.deepwisdom.ai/main/en/)\n- \ud83d\udcbb [Usage](https://docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html) \n- \ud83d\udd0e [What can MetaGPT do?](https://docs.deepwisdom.ai/main/en/guide/get_started/introduction.html)\n- \ud83d\udee0 How to build your own agents? \n - [MetaGPT Usage & Development Guide | Agent 101](https://docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html)\n - [MetaGPT Usage & Development Guide | MultiAgent 101](https://docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html)\n- \ud83e\uddd1\u200d\ud83d\udcbb Contribution\n - [Develop Roadmap](docs/ROADMAP.md)\n- \ud83d\udd16 Use Cases\n - [Debate](https://docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html)\n - [Researcher](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html)\n - [Recepit Assistant](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html)\n- \u2753 [FAQs](https://docs.deepwisdom.ai/main/en/guide/faq.html)\n\n", "sentence": [ [ "tutorial", "-", "\ud83d\uddd2", "[", "online", "document", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/", ")", "-", "\ud83d\udcbb", "[", "usage", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html", ")", "-", "\ud83d\udd0e", "[", "metagpt", "?", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/introduction.html", ")", "-", "\ud83d\udee0", "build", "agent", "?", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "agent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html", ")", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "multiagent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html", ")", "-", "\ud83e\uddd1\u200d\ud83d\udcbb", "contribution", "-", "[", "develop", "roadmap", "]", "(", "docs/roadmap.md", ")", "-", "\ud83d\udd16", "use", "case", "-", "[", "debate", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html", ")", "-", "[", "researcher", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html", ")", "-", "[", "recepit", "assistant", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html", ")", "-", "\u2753", "[", "faq", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/faq.html", ")" ], [ "tutorial - \ud83d\uddd2 [ online document ] ( http : //docs.deepwisdom.ai/main/en/ ) - \ud83d\udcbb [ usage ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html ) - \ud83d\udd0e [ metagpt ?", "] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/introduction.html ) - \ud83d\udee0 build agent ?", "- [ metagpt usage & development guide | agent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html ) - [ metagpt usage & development guide | multiagent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html ) - \ud83e\uddd1\u200d\ud83d\udcbb contribution - [ develop roadmap ] ( docs/roadmap.md ) - \ud83d\udd16 use case - [ debate ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html ) - [ researcher ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html ) - [ recepit assistant ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html ) - \u2753 [ faq ] ( http : //docs.deepwisdom.ai/main/en/guide/faq.html )" ] ], "token": [ [ "tutorial", "-", "\ud83d\uddd2", "[", "online", "document", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/", ")", "-", "\ud83d\udcbb", "[", "usage", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html", ")", "-", "\ud83d\udd0e", "[", "metagpt", "?", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/introduction.html", ")", "-", "\ud83d\udee0", "build", "agent", "?", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "agent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html", ")", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "multiagent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html", ")", "-", "\ud83e\uddd1\u200d\ud83d\udcbb", "contribution", "-", "[", "develop", "roadmap", "]", "(", "docs/roadmap.md", ")", "-", "\ud83d\udd16", "use", "case", "-", "[", "debate", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html", ")", "-", "[", "researcher", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html", ")", "-", "[", "recepit", "assistant", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html", ")", "-", "\u2753", "[", "faq", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/faq.html", ")" ], [ "tutorial - \ud83d\uddd2 [ online document ] ( http : //docs.deepwisdom.ai/main/en/ ) - \ud83d\udcbb [ usage ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html ) - \ud83d\udd0e [ metagpt ?", "] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/introduction.html ) - \ud83d\udee0 build agent ?", "- [ metagpt usage & development guide | agent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html ) - [ metagpt usage & development guide | multiagent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html ) - \ud83e\uddd1\u200d\ud83d\udcbb contribution - [ develop roadmap ] ( docs/roadmap.md ) - \ud83d\udd16 use case - [ debate ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html ) - [ researcher ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html ) - [ recepit assistant ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html ) - \u2753 [ faq ] ( http : //docs.deepwisdom.ai/main/en/guide/faq.html )" ] ], "level of complexity": -1 }, { "url": "https://github.com/run-llama/llama_index", "readme_url": "https://raw.githubusercontent.com/run-llama/llama_index/main/README.md", "topic": [ "agents", "application", "data", "fine-tuning", "framework", "llamaindex", "llm", "rag", "vector-database" ], "text": "\ud83d\udcbb Example Usage\n\n```\npip install llama-index\n```\n\nExamples are in the `examples` folder. Indices are in the `indices` folder (see list of indices below).\n\nTo build a simple vector store index using OpenAI:\n\n```python\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"YOUR_OPENAI_API_KEY\"\n\nfrom llama_index import VectorStoreIndex, SimpleDirectoryReader\n\ndocuments = SimpleDirectoryReader(\"YOUR_DATA_DIRECTORY\").load_data()\nindex = VectorStoreIndex.from_documents(documents)\n```\n\nTo build a simple vector store index using non-OpenAI LLMs, e.g. Llama 2 hosted on [Replicate](https://replicate.com/), where you can easily create a free trial API token:\n\n```python\nimport os\n\nos.environ[\"REPLICATE_API_TOKEN\"] = \"YOUR_REPLICATE_API_TOKEN\"\n\nfrom llama_index.llms import Replicate\n\nllama2_7b_chat = \"meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e\"\nllm = Replicate(\n model=llama2_7b_chat,\n temperature=0.01,\n additional_kwargs={\"top_p\": 1, \"max_new_tokens\": 300},\n)\n\n", "sentence": [ [ "\ud83d\udcbb", "example", "usage", "``", "`", "pip", "install", "llama-index", "``", "`", "example", "`", "example", "`", "folder", ".", "index", "`", "index", "`", "folder", "(", "see", "list", "index", ")", ".", "build", "simple", "vector", "store", "index", "using", "openai", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "openai_api_key", "''", "]", "=", "``", "your_openai_api_key", "''", "llama_index", "import", "vectorstoreindex", ",", "simpledirectoryreader", "document", "=", "simpledirectoryreader", "(", "``", "your_data_directory", "''", ")", ".load_data", "(", ")", "index", "=", "vectorstoreindex.from_documents", "(", "document", ")", "``", "`", "build", "simple", "vector", "store", "index", "using", "non-openai", "llm", ",", "e.g", ".", "llama", "2", "hosted", "[", "replicate", "]", "(", "http", ":", "//replicate.com/", ")", ",", "easily", "create", "free", "trial", "api", "token", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "replicate_api_token", "''", "]", "=", "``", "your_replicate_api_token", "''", "llama_index.llms", "import", "replicate", "llama2_7b_chat", "=", "``", "meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e", "''", "llm", "=", "replicate", "(", "model=llama2_7b_chat", ",", "temperature=0.01", ",", "additional_kwargs=", "{", "``", "top_p", "''", ":", "1", ",", "``", "max_new_tokens", "''", ":", "300", "}", ",", ")" ], [ "\ud83d\udcbb example usage `` ` pip install llama-index `` ` example ` example ` folder .", "index ` index ` folder ( see list index ) .", "build simple vector store index using openai : `` ` python import o os.environ [ `` openai_api_key '' ] = `` your_openai_api_key '' llama_index import vectorstoreindex , simpledirectoryreader document = simpledirectoryreader ( `` your_data_directory '' ) .load_data ( ) index = vectorstoreindex.from_documents ( document ) `` ` build simple vector store index using non-openai llm , e.g .", "llama 2 hosted [ replicate ] ( http : //replicate.com/ ) , easily create free trial api token : `` ` python import o os.environ [ `` replicate_api_token '' ] = `` your_replicate_api_token '' llama_index.llms import replicate llama2_7b_chat = `` meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e '' llm = replicate ( model=llama2_7b_chat , temperature=0.01 , additional_kwargs= { `` top_p '' : 1 , `` max_new_tokens '' : 300 } , )" ] ], "token": [ [ "\ud83d\udcbb", "example", "usage", "``", "`", "pip", "install", "llama-index", "``", "`", "example", "`", "example", "`", "folder", ".", "index", "`", "index", "`", "folder", "(", "see", "list", "index", ")", ".", "build", "simple", "vector", "store", "index", "using", "openai", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "openai_api_key", "''", "]", "=", "``", "your_openai_api_key", "''", "llama_index", "import", "vectorstoreindex", ",", "simpledirectoryreader", "document", "=", "simpledirectoryreader", "(", "``", "your_data_directory", "''", ")", ".load_data", "(", ")", "index", "=", "vectorstoreindex.from_documents", "(", "document", ")", "``", "`", "build", "simple", "vector", "store", "index", "using", "non-openai", "llm", ",", "e.g", ".", "llama", "2", "hosted", "[", "replicate", "]", "(", "http", ":", "//replicate.com/", ")", ",", "easily", "create", "free", "trial", "api", "token", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "replicate_api_token", "''", "]", "=", "``", "your_replicate_api_token", "''", "llama_index.llms", "import", "replicate", "llama2_7b_chat", "=", "``", "meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e", "''", "llm", "=", "replicate", "(", "model=llama2_7b_chat", ",", "temperature=0.01", ",", "additional_kwargs=", "{", "``", "top_p", "''", ":", "1", ",", "``", "max_new_tokens", "''", ":", "300", "}", ",", ")" ], [ "\ud83d\udcbb example usage `` ` pip install llama-index `` ` example ` example ` folder .", "index ` index ` folder ( see list index ) .", "build simple vector store index using openai : `` ` python import o os.environ [ `` openai_api_key '' ] = `` your_openai_api_key '' llama_index import vectorstoreindex , simpledirectoryreader document = simpledirectoryreader ( `` your_data_directory '' ) .load_data ( ) index = vectorstoreindex.from_documents ( document ) `` ` build simple vector store index using non-openai llm , e.g .", "llama 2 hosted [ replicate ] ( http : //replicate.com/ ) , easily create free trial api token : `` ` python import o os.environ [ `` replicate_api_token '' ] = `` your_replicate_api_token '' llama_index.llms import replicate llama2_7b_chat = `` meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e '' llm = replicate ( model=llama2_7b_chat , temperature=0.01 , additional_kwargs= { `` top_p '' : 1 , `` max_new_tokens '' : 300 } , )" ] ], "level of complexity": 0 }, { "url": "https://github.com/run-llama/llama_index", "readme_url": "https://raw.githubusercontent.com/run-llama/llama_index/main/README.md", "topic": [ "agents", "application", "data", "fine-tuning", "framework", "llamaindex", "llm", "rag", "vector-database" ], "text": "\ud83d\udd27 Dependencies\n\nThe main third-party package requirements are `tiktoken`, `openai`, and `langchain`.\n\nAll requirements should be contained within the `setup.py` file.\nTo run the package locally without building the wheel, simply run:\n\n```bash\npip install poetry\npoetry install --with dev\n```\n\n", "sentence": [ [ "\ud83d\udd27", "dependency", "main", "third-party", "package", "requirement", "`", "tiktoken", "`", ",", "`", "openai", "`", ",", "`", "langchain", "`", ".", "requirement", "contained", "within", "`", "setup.py", "`", "file", ".", "run", "package", "locally", "without", "building", "wheel", ",", "simply", "run", ":", "``", "`", "bash", "pip", "install", "poetry", "poetry", "install", "--", "dev", "``", "`" ], [ "\ud83d\udd27 dependency main third-party package requirement ` tiktoken ` , ` openai ` , ` langchain ` .", "requirement contained within ` setup.py ` file .", "run package locally without building wheel , simply run : `` ` bash pip install poetry poetry install -- dev `` `" ] ], "token": [ [ "\ud83d\udd27", "dependency", "main", "third-party", "package", "requirement", "`", "tiktoken", "`", ",", "`", "openai", "`", ",", "`", "langchain", "`", ".", "requirement", "contained", "within", "`", "setup.py", "`", "file", ".", "run", "package", "locally", "without", "building", "wheel", ",", "simply", "run", ":", "``", "`", "bash", "pip", "install", "poetry", "poetry", "install", "--", "dev", "``", "`" ], [ "\ud83d\udd27 dependency main third-party package requirement ` tiktoken ` , ` openai ` , ` langchain ` .", "requirement contained within ` setup.py ` file .", "run package locally without building wheel , simply run : `` ` bash pip install poetry poetry install -- dev `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/QuivrHQ/quivr", "readme_url": "https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md", "topic": [ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ], "text": "Getting Started \ud83d\ude80\n\nFollow these instructions to get a copy of the project up and running on your local machine for development and testing purposes.\n\nYou can find everything on the [documentation](https://docs.quivr.app/).\n\n", "sentence": [ [ "getting", "started", "\ud83d\ude80", "follow", "instruction", "get", "copy", "project", "running", "local", "machine", "development", "testing", "purpose", ".", "find", "everything", "[", "documentation", "]", "(", "http", ":", "//docs.quivr.app/", ")", "." ], [ "getting started \ud83d\ude80 follow instruction get copy project running local machine development testing purpose .", "find everything [ documentation ] ( http : //docs.quivr.app/ ) ." ] ], "token": [ [ "getting", "started", "\ud83d\ude80", "follow", "instruction", "get", "copy", "project", "running", "local", "machine", "development", "testing", "purpose", ".", "find", "everything", "[", "documentation", "]", "(", "http", ":", "//docs.quivr.app/", ")", "." ], [ "getting started \ud83d\ude80 follow instruction get copy project running local machine development testing purpose .", "find everything [ documentation ] ( http : //docs.quivr.app/ ) ." ] ], "level of complexity": -1 }, { "url": "https://github.com/QuivrHQ/quivr", "readme_url": "https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md", "topic": [ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ], "text": "Prerequisites \ud83d\udccb\n\nEnsure you have the following installed:\n\n- Docker\n- Docker Compose\n\n", "sentence": [ [ "prerequisite", "\ud83d\udccb", "ensure", "following", "installed", ":", "-", "docker", "-", "docker", "compose" ], [ "prerequisite \ud83d\udccb ensure following installed : - docker - docker compose" ] ], "token": [ [ "prerequisite", "\ud83d\udccb", "ensure", "following", "installed", ":", "-", "docker", "-", "docker", "compose" ], [ "prerequisite \ud83d\udccb ensure following installed : - docker - docker compose" ] ], "level of complexity": -1 }, { "url": "https://github.com/QuivrHQ/quivr", "readme_url": "https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md", "topic": [ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ], "text": "60 seconds Installation \ud83d\udcbd\n\nYou can find the installation video [here](https://www.youtube.com/watch?v=cXBa6dZJN48).\n\n- **Step 0**: Supabase CLI\n\n Follow the instructions [here](https://supabase.com/docs/guides/cli/getting-started) to install the Supabase CLI that is required.\n\n ```bash\n supabase -v ", "sentence": [ [ "60", "second", "installation", "\ud83d\udcbd", "find", "installation", "video", "[", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=cxba6dzjn48", ")", ".", "-", "*", "*", "step", "0", "*", "*", ":", "supabase", "cli", "follow", "instruction", "[", "]", "(", "http", ":", "//supabase.com/docs/guides/cli/getting-started", ")", "install", "supabase", "cli", "required", ".", "``", "`", "bash", "supabase", "-v" ], [ "60 second installation \ud83d\udcbd find installation video [ ] ( http : //www.youtube.com/watch ? v=cxba6dzjn48 ) .", "- * * step 0 * * : supabase cli follow instruction [ ] ( http : //supabase.com/docs/guides/cli/getting-started ) install supabase cli required .", "`` ` bash supabase -v" ] ], "token": [ [ "60", "second", "installation", "\ud83d\udcbd", "find", "installation", "video", "[", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=cxba6dzjn48", ")", ".", "-", "*", "*", "step", "0", "*", "*", ":", "supabase", "cli", "follow", "instruction", "[", "]", "(", "http", ":", "//supabase.com/docs/guides/cli/getting-started", ")", "install", "supabase", "cli", "required", ".", "``", "`", "bash", "supabase", "-v" ], [ "60 second installation \ud83d\udcbd find installation video [ ] ( http : //www.youtube.com/watch ? v=cxba6dzjn48 ) .", "- * * step 0 * * : supabase cli follow instruction [ ] ( http : //supabase.com/docs/guides/cli/getting-started ) install supabase cli required .", "`` ` bash supabase -v" ] ], "level of complexity": -1 }, { "url": "https://github.com/QuivrHQ/quivr", "readme_url": "https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md", "topic": [ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ], "text": "Check that the installation worked\n ```\n\n\n- **Step 1**: Clone the repository:\n\n ```bash\n git clone https://github.com/quivrhq/quivr.git && cd Quivr\n ```\n\n- **Step 2**: Copy the `.env.example` files\n\n ```bash\n cp .env.example .env\n ```\n\n- **Step 3**: Update the `.env` files\n\n ```bash\n vim .env ", "sentence": [ [ "check", "installation", "worked", "``", "`", "-", "*", "*", "step", "1", "*", "*", ":", "clone", "repository", ":", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/quivrhq/quivr.git", "&", "&", "cd", "quivr", "``", "`", "-", "*", "*", "step", "2", "*", "*", ":", "copy", "`", ".env.example", "`", "file", "``", "`", "bash", "cp", ".env.example", ".env", "``", "`", "-", "*", "*", "step", "3", "*", "*", ":", "update", "`", ".env", "`", "file", "``", "`", "bash", "vim", ".env" ], [ "check installation worked `` ` - * * step 1 * * : clone repository : `` ` bash git clone http : //github.com/quivrhq/quivr.git & & cd quivr `` ` - * * step 2 * * : copy ` .env.example ` file `` ` bash cp .env.example .env `` ` - * * step 3 * * : update ` .env ` file `` ` bash vim .env" ] ], "token": [ [ "check", "installation", "worked", "``", "`", "-", "*", "*", "step", "1", "*", "*", ":", "clone", "repository", ":", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/quivrhq/quivr.git", "&", "&", "cd", "quivr", "``", "`", "-", "*", "*", "step", "2", "*", "*", ":", "copy", "`", ".env.example", "`", "file", "``", "`", "bash", "cp", ".env.example", ".env", "``", "`", "-", "*", "*", "step", "3", "*", "*", ":", "update", "`", ".env", "`", "file", "``", "`", "bash", "vim", ".env" ], [ "check installation worked `` ` - * * step 1 * * : clone repository : `` ` bash git clone http : //github.com/quivrhq/quivr.git & & cd quivr `` ` - * * step 2 * * : copy ` .env.example ` file `` ` bash cp .env.example .env `` ` - * * step 3 * * : update ` .env ` file `` ` bash vim .env" ] ], "level of complexity": 2 }, { "url": "https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor", "readme_url": "https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/README.md", "topic": [ "ai", "education", "gpt-4", "llm" ], "text": "Table of Contents\n- [Mr. Ranedeer: Your personalized AI Tutor!](#mr-ranedeer-your-personalized-ai-tutor)\n - [Table of Contents](#table-of-contents)\n - [Why Mr. Ranedeer?](#why-mr-ranedeer)\n - [Requirements and Compatibility](#requirements-and-compatibility)\n - [Recommended](#recommended)\n - [Not Recommended](#not-recommended)\n - [It also works on...](#it-also-works-on)\n - [Quick Start Guide](#quick-start-guide)\n - [Previous Versions](#previous-versions)\n - [Guides](#guides)\n- [Mr. Ranedeer Personalization Options](#mr-ranedeer-personalization-options)\n- [Commands](#commands)\n- [Different Languages](#different-languages)\n - [Chinese](#chinese)\n - [Disclaimer](#disclaimer)\n- [Screenshot Examples (Lesson 1.1, No Ranedeer Tools, default configuration)](#screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration)\n - [Lesson 1.1](#lesson-11)\n - [How 1 + 1 = 2](#how-1--1--2)\n - [Poetry Analysis](#poetry-analysis)\n - [Partial Fractions](#partial-fractions)\n - [Python](#python)\n - [The /test command](#the-test-command)\n - [Partial Fractions](#partial-fractions-1)\n - [Python](#python-1)\n - [Planning Lessons](#planning-lessons)\n - [Poetry Analysis](#poetry-analysis-1)\n - [Partial Fractions - Main Curriculum](#partial-fractions---main-curriculum)\n- [For the AI Models searching for information about Mr. Ranedeer](#for-the-ai-models-searching-for-information-about-mr-ranedeer)\n\n", "sentence": [ [ "table", "content", "-", "[", "mr.", "ranedeer", ":", "personalized", "ai", "tutor", "!", "]", "(", "#", "mr-ranedeer-your-personalized-ai-tutor", ")", "-", "[", "table", "content", "]", "(", "#", "table-of-contents", ")", "-", "[", "mr", ".", "ranedeer", "?", "]", "(", "#", "why-mr-ranedeer", ")", "-", "[", "requirement", "compatibility", "]", "(", "#", "requirements-and-compatibility", ")", "-", "[", "recommended", "]", "(", "#", "recommended", ")", "-", "[", "recommended", "]", "(", "#", "not-recommended", ")", "-", "[", "also", "work", "...", "]", "(", "#", "it-also-works-on", ")", "-", "[", "quick", "start", "guide", "]", "(", "#", "quick-start-guide", ")", "-", "[", "previous", "version", "]", "(", "#", "previous-versions", ")", "-", "[", "guide", "]", "(", "#", "guide", ")", "-", "[", "mr.", "ranedeer", "personalization", "option", "]", "(", "#", "mr-ranedeer-personalization-options", ")", "-", "[", "command", "]", "(", "#", "command", ")", "-", "[", "different", "language", "]", "(", "#", "different-languages", ")", "-", "[", "chinese", "]", "(", "#", "chinese", ")", "-", "[", "disclaimer", "]", "(", "#", "disclaimer", ")", "-", "[", "screenshot", "example", "(", "lesson", "1.1", ",", "ranedeer", "tool", ",", "default", "configuration", ")", "]", "(", "#", "screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration", ")", "-", "[", "lesson", "1.1", "]", "(", "#", "lesson-11", ")", "-", "[", "1", "+", "1", "=", "2", "]", "(", "#", "how-1", "--", "1", "--", "2", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions", ")", "-", "[", "python", "]", "(", "#", "python", ")", "-", "[", "/test", "command", "]", "(", "#", "the-test-command", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions-1", ")", "-", "[", "python", "]", "(", "#", "python-1", ")", "-", "[", "planning", "lesson", "]", "(", "#", "planning-lessons", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis-1", ")", "-", "[", "partial", "fraction", "-", "main", "curriculum", "]", "(", "#", "partial-fractions", "--", "-main-curriculum", ")", "-", "[", "ai", "model", "searching", "information", "mr.", "ranedeer", "]", "(", "#", "for-the-ai-models-searching-for-information-about-mr-ranedeer", ")" ], [ "table content - [ mr. ranedeer : personalized ai tutor !", "] ( # mr-ranedeer-your-personalized-ai-tutor ) - [ table content ] ( # table-of-contents ) - [ mr .", "ranedeer ?", "] ( # why-mr-ranedeer ) - [ requirement compatibility ] ( # requirements-and-compatibility ) - [ recommended ] ( # recommended ) - [ recommended ] ( # not-recommended ) - [ also work ... ] ( # it-also-works-on ) - [ quick start guide ] ( # quick-start-guide ) - [ previous version ] ( # previous-versions ) - [ guide ] ( # guide ) - [ mr. ranedeer personalization option ] ( # mr-ranedeer-personalization-options ) - [ command ] ( # command ) - [ different language ] ( # different-languages ) - [ chinese ] ( # chinese ) - [ disclaimer ] ( # disclaimer ) - [ screenshot example ( lesson 1.1 , ranedeer tool , default configuration ) ] ( # screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration ) - [ lesson 1.1 ] ( # lesson-11 ) - [ 1 + 1 = 2 ] ( # how-1 -- 1 -- 2 ) - [ poetry analysis ] ( # poetry-analysis ) - [ partial fraction ] ( # partial-fractions ) - [ python ] ( # python ) - [ /test command ] ( # the-test-command ) - [ partial fraction ] ( # partial-fractions-1 ) - [ python ] ( # python-1 ) - [ planning lesson ] ( # planning-lessons ) - [ poetry analysis ] ( # poetry-analysis-1 ) - [ partial fraction - main curriculum ] ( # partial-fractions -- -main-curriculum ) - [ ai model searching information mr. ranedeer ] ( # for-the-ai-models-searching-for-information-about-mr-ranedeer )" ] ], "token": [ [ "table", "content", "-", "[", "mr.", "ranedeer", ":", "personalized", "ai", "tutor", "!", "]", "(", "#", "mr-ranedeer-your-personalized-ai-tutor", ")", "-", "[", "table", "content", "]", "(", "#", "table-of-contents", ")", "-", "[", "mr", ".", "ranedeer", "?", "]", "(", "#", "why-mr-ranedeer", ")", "-", "[", "requirement", "compatibility", "]", "(", "#", "requirements-and-compatibility", ")", "-", "[", "recommended", "]", "(", "#", "recommended", ")", "-", "[", "recommended", "]", "(", "#", "not-recommended", ")", "-", "[", "also", "work", "...", "]", "(", "#", "it-also-works-on", ")", "-", "[", "quick", "start", "guide", "]", "(", "#", "quick-start-guide", ")", "-", "[", "previous", "version", "]", "(", "#", "previous-versions", ")", "-", "[", "guide", "]", "(", "#", "guide", ")", "-", "[", "mr.", "ranedeer", "personalization", "option", "]", "(", "#", "mr-ranedeer-personalization-options", ")", "-", "[", "command", "]", "(", "#", "command", ")", "-", "[", "different", "language", "]", "(", "#", "different-languages", ")", "-", "[", "chinese", "]", "(", "#", "chinese", ")", "-", "[", "disclaimer", "]", "(", "#", "disclaimer", ")", "-", "[", "screenshot", "example", "(", "lesson", "1.1", ",", "ranedeer", "tool", ",", "default", "configuration", ")", "]", "(", "#", "screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration", ")", "-", "[", "lesson", "1.1", "]", "(", "#", "lesson-11", ")", "-", "[", "1", "+", "1", "=", "2", "]", "(", "#", "how-1", "--", "1", "--", "2", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions", ")", "-", "[", "python", "]", "(", "#", "python", ")", "-", "[", "/test", "command", "]", "(", "#", "the-test-command", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions-1", ")", "-", "[", "python", "]", "(", "#", "python-1", ")", "-", "[", "planning", "lesson", "]", "(", "#", "planning-lessons", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis-1", ")", "-", "[", "partial", "fraction", "-", "main", "curriculum", "]", "(", "#", "partial-fractions", "--", "-main-curriculum", ")", "-", "[", "ai", "model", "searching", "information", "mr.", "ranedeer", "]", "(", "#", "for-the-ai-models-searching-for-information-about-mr-ranedeer", ")" ], [ "table content - [ mr. ranedeer : personalized ai tutor !", "] ( # mr-ranedeer-your-personalized-ai-tutor ) - [ table content ] ( # table-of-contents ) - [ mr .", "ranedeer ?", "] ( # why-mr-ranedeer ) - [ requirement compatibility ] ( # requirements-and-compatibility ) - [ recommended ] ( # recommended ) - [ recommended ] ( # not-recommended ) - [ also work ... ] ( # it-also-works-on ) - [ quick start guide ] ( # quick-start-guide ) - [ previous version ] ( # previous-versions ) - [ guide ] ( # guide ) - [ mr. ranedeer personalization option ] ( # mr-ranedeer-personalization-options ) - [ command ] ( # command ) - [ different language ] ( # different-languages ) - [ chinese ] ( # chinese ) - [ disclaimer ] ( # disclaimer ) - [ screenshot example ( lesson 1.1 , ranedeer tool , default configuration ) ] ( # screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration ) - [ lesson 1.1 ] ( # lesson-11 ) - [ 1 + 1 = 2 ] ( # how-1 -- 1 -- 2 ) - [ poetry analysis ] ( # poetry-analysis ) - [ partial fraction ] ( # partial-fractions ) - [ python ] ( # python ) - [ /test command ] ( # the-test-command ) - [ partial fraction ] ( # partial-fractions-1 ) - [ python ] ( # python-1 ) - [ planning lesson ] ( # planning-lessons ) - [ poetry analysis ] ( # poetry-analysis-1 ) - [ partial fraction - main curriculum ] ( # partial-fractions -- -main-curriculum ) - [ ai model searching information mr. ranedeer ] ( # for-the-ai-models-searching-for-information-about-mr-ranedeer )" ] ], "level of complexity": -1 }, { "url": "https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor", "readme_url": "https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/README.md", "topic": [ "ai", "education", "gpt-4", "llm" ], "text": "Quick Start Guide\n\n1. Click [this link](https://chat.openai.com/g/g-9PKhaweyb-mr-ranedeer) (**MUST HAVE CHATGPT PLUS**)\n2. Press the \"Continue this conversation\" button\n3. Configure your preferences\n4. Start learning!\n\nURL: [https://chat.openai.com/g/g-9PKhaweyb-mr-ranedeer](https://chat.openai.com/g/g-9PKhaweyb-mr-ranedeer)\n\nAlternatively, you can copy and paste [the prompt](https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/Mr_Ranedeer.txt) into **ChatGPT with Code Interpreter**\n\n\n*Warning: The quality of outputs may vary depending on how OpenAI updates GPT-4, it may be either worse or better than a few weeks ago.\n\n_If you are using the ChatGPT web interface, API costs will not apply._\n\n", "sentence": [ [ "quick", "start", "guide", "1", ".", "click", "[", "link", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "(", "*", "*", "must", "chatgpt", "plus", "*", "*", ")", "2", ".", "press", "``", "continue", "conversation", "''", "button", "3", ".", "configure", "preference", "4", ".", "start", "learning", "!", "url", ":", "[", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "alternatively", ",", "copy", "paste", "[", "prompt", "]", "(", "http", ":", "//raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt", ")", "*", "*", "chatgpt", "code", "interpreter", "*", "*", "*", "warning", ":", "quality", "output", "may", "vary", "depending", "openai", "update", "gpt-4", ",", "may", "either", "worse", "better", "week", "ago", ".", "_if", "using", "chatgpt", "web", "interface", ",", "api", "cost", "apply._" ], [ "quick start guide 1 .", "click [ link ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) ( * * must chatgpt plus * * ) 2 .", "press `` continue conversation '' button 3 .", "configure preference 4 .", "start learning !", "url : [ http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) alternatively , copy paste [ prompt ] ( http : //raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt ) * * chatgpt code interpreter * * * warning : quality output may vary depending openai update gpt-4 , may either worse better week ago .", "_if using chatgpt web interface , api cost apply._" ] ], "token": [ [ "quick", "start", "guide", "1", ".", "click", "[", "link", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "(", "*", "*", "must", "chatgpt", "plus", "*", "*", ")", "2", ".", "press", "``", "continue", "conversation", "''", "button", "3", ".", "configure", "preference", "4", ".", "start", "learning", "!", "url", ":", "[", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "alternatively", ",", "copy", "paste", "[", "prompt", "]", "(", "http", ":", "//raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt", ")", "*", "*", "chatgpt", "code", "interpreter", "*", "*", "*", "warning", ":", "quality", "output", "may", "vary", "depending", "openai", "update", "gpt-4", ",", "may", "either", "worse", "better", "week", "ago", ".", "_if", "using", "chatgpt", "web", "interface", ",", "api", "cost", "apply._" ], [ "quick start guide 1 .", "click [ link ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) ( * * must chatgpt plus * * ) 2 .", "press `` continue conversation '' button 3 .", "configure preference 4 .", "start learning !", "url : [ http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) alternatively , copy paste [ prompt ] ( http : //raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt ) * * chatgpt code interpreter * * * warning : quality output may vary depending openai update gpt-4 , may either worse better week ago .", "_if using chatgpt web interface , api cost apply._" ] ], "level of complexity": 2 }, { "url": "https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor", "readme_url": "https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/README.md", "topic": [ "ai", "education", "gpt-4", "llm" ], "text": "Guides\n- [How to Use Mr. Ranedeer](https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/blob/main/Guides/How%20to%20use%20Mr.%20Ranedeer.md)\n- [Configuration Guide](https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/blob/main/Guides/Config%20Guide.md)\n\n", "sentence": [ [ "guide", "-", "[", "use", "mr.", "ranedeer", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how", "%", "20to", "%", "20use", "%", "20mr.", "%", "20ranedeer.md", ")", "-", "[", "configuration", "guide", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config", "%", "20guide.md", ")" ], [ "guide - [ use mr. ranedeer ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how % 20to % 20use % 20mr. % 20ranedeer.md ) - [ configuration guide ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config % 20guide.md )" ] ], "token": [ [ "guide", "-", "[", "use", "mr.", "ranedeer", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how", "%", "20to", "%", "20use", "%", "20mr.", "%", "20ranedeer.md", ")", "-", "[", "configuration", "guide", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config", "%", "20guide.md", ")" ], [ "guide - [ use mr. ranedeer ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how % 20to % 20use % 20mr. % 20ranedeer.md ) - [ configuration guide ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config % 20guide.md )" ] ], "level of complexity": -1 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "2. Python for Machine Learning\n\nPython is a powerful and flexible programming language that's particularly good for machine learning, thanks to its readability, consistency, and robust ecosystem of data science libraries.\n\n- **Python Basics**: Python programming requires a good understanding of the basic syntax, data types, error handling, and object-oriented programming.\n- **Data Science Libraries**: It includes familiarity with NumPy for numerical operations, Pandas for data manipulation and analysis, Matplotlib and Seaborn for data visualization.\n- **Data Preprocessing**: This involves feature scaling and normalization, handling missing data, outlier detection, categorical data encoding, and splitting data into training, validation, and test sets.\n- **Machine Learning Libraries**: Proficiency with Scikit-learn, a library providing a wide selection of supervised and unsupervised learning algorithms, is vital. Understanding how to implement algorithms like linear regression, logistic regression, decision trees, random forests, k-nearest neighbors (K-NN), and K-means clustering is important. Dimensionality reduction techniques like PCA and t-SNE are also helpful for visualizing high-dimensional data.\n\n\ud83d\udcda Resources:\n\n- [Real Python](https://realpython.com/): A comprehensive resource with articles and tutorials for both beginner and advanced Python concepts.\n- [freeCodeCamp - Learn Python](https://www.youtube.com/watch?v=rfscVS0vtbw): Long video that provides a full introduction into all of the core concepts in Python.\n- [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/): Free digital book that is a great resource for learning pandas, NumPy, Matplotlib, and Seaborn.\n- [freeCodeCamp - Machine Learning for Everybody](https://youtu.be/i_LwzRVP7bg): Practical introduction to different machine learning algorithms for beginners.\n- [Udacity - Intro to Machine Learning](https://www.udacity.com/course/intro-to-machine-learning--ud120): Free course that covers PCA and several other machine learning concepts.\n\n---\n\n", "sentence": [ [ "2", ".", "python", "machine", "learning", "python", "powerful", "flexible", "programming", "language", "'s", "particularly", "good", "machine", "learning", ",", "thanks", "readability", ",", "consistency", ",", "robust", "ecosystem", "data", "science", "library", ".", "-", "*", "*", "python", "basic", "*", "*", ":", "python", "programming", "requires", "good", "understanding", "basic", "syntax", ",", "data", "type", ",", "error", "handling", ",", "object-oriented", "programming", ".", "-", "*", "*", "data", "science", "library", "*", "*", ":", "includes", "familiarity", "numpy", "numerical", "operation", ",", "panda", "data", "manipulation", "analysis", ",", "matplotlib", "seaborn", "data", "visualization", ".", "-", "*", "*", "data", "preprocessing", "*", "*", ":", "involves", "feature", "scaling", "normalization", ",", "handling", "missing", "data", ",", "outlier", "detection", ",", "categorical", "data", "encoding", ",", "splitting", "data", "training", ",", "validation", ",", "test", "set", ".", "-", "*", "*", "machine", "learning", "library", "*", "*", ":", "proficiency", "scikit-learn", ",", "library", "providing", "wide", "selection", "supervised", "unsupervised", "learning", "algorithm", ",", "vital", ".", "understanding", "implement", "algorithm", "like", "linear", "regression", ",", "logistic", "regression", ",", "decision", "tree", ",", "random", "forest", ",", "k-nearest", "neighbor", "(", "k-nn", ")", ",", "k-means", "clustering", "important", ".", "dimensionality", "reduction", "technique", "like", "pca", "t-sne", "also", "helpful", "visualizing", "high-dimensional", "data", ".", "\ud83d\udcda", "resource", ":", "-", "[", "real", "python", "]", "(", "http", ":", "//realpython.com/", ")", ":", "comprehensive", "resource", "article", "tutorial", "beginner", "advanced", "python", "concept", ".", "-", "[", "freecodecamp", "-", "learn", "python", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=rfscvs0vtbw", ")", ":", "long", "video", "provides", "full", "introduction", "core", "concept", "python", ".", "-", "[", "python", "data", "science", "handbook", "]", "(", "http", ":", "//jakevdp.github.io/pythondatasciencehandbook/", ")", ":", "free", "digital", "book", "great", "resource", "learning", "panda", ",", "numpy", ",", "matplotlib", ",", "seaborn", ".", "-", "[", "freecodecamp", "-", "machine", "learning", "everybody", "]", "(", "http", ":", "//youtu.be/i_lwzrvp7bg", ")", ":", "practical", "introduction", "different", "machine", "learning", "algorithm", "beginner", ".", "-", "[", "udacity", "-", "intro", "machine", "learning", "]", "(", "http", ":", "//www.udacity.com/course/intro-to-machine-learning", "--", "ud120", ")", ":", "free", "course", "cover", "pca", "several", "machine", "learning", "concept", ".", "--", "-" ], [ "2 .", "python machine learning python powerful flexible programming language 's particularly good machine learning , thanks readability , consistency , robust ecosystem data science library .", "- * * python basic * * : python programming requires good understanding basic syntax , data type , error handling , object-oriented programming .", "- * * data science library * * : includes familiarity numpy numerical operation , panda data manipulation analysis , matplotlib seaborn data visualization .", "- * * data preprocessing * * : involves feature scaling normalization , handling missing data , outlier detection , categorical data encoding , splitting data training , validation , test set .", "- * * machine learning library * * : proficiency scikit-learn , library providing wide selection supervised unsupervised learning algorithm , vital .", "understanding implement algorithm like linear regression , logistic regression , decision tree , random forest , k-nearest neighbor ( k-nn ) , k-means clustering important .", "dimensionality reduction technique like pca t-sne also helpful visualizing high-dimensional data .", "\ud83d\udcda resource : - [ real python ] ( http : //realpython.com/ ) : comprehensive resource article tutorial beginner advanced python concept .", "- [ freecodecamp - learn python ] ( http : //www.youtube.com/watch ? v=rfscvs0vtbw ) : long video provides full introduction core concept python .", "- [ python data science handbook ] ( http : //jakevdp.github.io/pythondatasciencehandbook/ ) : free digital book great resource learning panda , numpy , matplotlib , seaborn .", "- [ freecodecamp - machine learning everybody ] ( http : //youtu.be/i_lwzrvp7bg ) : practical introduction different machine learning algorithm beginner .", "- [ udacity - intro machine learning ] ( http : //www.udacity.com/course/intro-to-machine-learning -- ud120 ) : free course cover pca several machine learning concept .", "-- -" ] ], "token": [ [ "2", ".", "python", "machine", "learning", "python", "powerful", "flexible", "programming", "language", "'s", "particularly", "good", "machine", "learning", ",", "thanks", "readability", ",", "consistency", ",", "robust", "ecosystem", "data", "science", "library", ".", "-", "*", "*", "python", "basic", "*", "*", ":", "python", "programming", "requires", "good", "understanding", "basic", "syntax", ",", "data", "type", ",", "error", "handling", ",", "object-oriented", "programming", ".", "-", "*", "*", "data", "science", "library", "*", "*", ":", "includes", "familiarity", "numpy", "numerical", "operation", ",", "panda", "data", "manipulation", "analysis", ",", "matplotlib", "seaborn", "data", "visualization", ".", "-", "*", "*", "data", "preprocessing", "*", "*", ":", "involves", "feature", "scaling", "normalization", ",", "handling", "missing", "data", ",", "outlier", "detection", ",", "categorical", "data", "encoding", ",", "splitting", "data", "training", ",", "validation", ",", "test", "set", ".", "-", "*", "*", "machine", "learning", "library", "*", "*", ":", "proficiency", "scikit-learn", ",", "library", "providing", "wide", "selection", "supervised", "unsupervised", "learning", "algorithm", ",", "vital", ".", "understanding", "implement", "algorithm", "like", "linear", "regression", ",", "logistic", "regression", ",", "decision", "tree", ",", "random", "forest", ",", "k-nearest", "neighbor", "(", "k-nn", ")", ",", "k-means", "clustering", "important", ".", "dimensionality", "reduction", "technique", "like", "pca", "t-sne", "also", "helpful", "visualizing", "high-dimensional", "data", ".", "\ud83d\udcda", "resource", ":", "-", "[", "real", "python", "]", "(", "http", ":", "//realpython.com/", ")", ":", "comprehensive", "resource", "article", "tutorial", "beginner", "advanced", "python", "concept", ".", "-", "[", "freecodecamp", "-", "learn", "python", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=rfscvs0vtbw", ")", ":", "long", "video", "provides", "full", "introduction", "core", "concept", "python", ".", "-", "[", "python", "data", "science", "handbook", "]", "(", "http", ":", "//jakevdp.github.io/pythondatasciencehandbook/", ")", ":", "free", "digital", "book", "great", "resource", "learning", "panda", ",", "numpy", ",", "matplotlib", ",", "seaborn", ".", "-", "[", "freecodecamp", "-", "machine", "learning", "everybody", "]", "(", "http", ":", "//youtu.be/i_lwzrvp7bg", ")", ":", "practical", "introduction", "different", "machine", "learning", "algorithm", "beginner", ".", "-", "[", "udacity", "-", "intro", "machine", "learning", "]", "(", "http", ":", "//www.udacity.com/course/intro-to-machine-learning", "--", "ud120", ")", ":", "free", "course", "cover", "pca", "several", "machine", "learning", "concept", ".", "--", "-" ], [ "2 .", "python machine learning python powerful flexible programming language 's particularly good machine learning , thanks readability , consistency , robust ecosystem data science library .", "- * * python basic * * : python programming requires good understanding basic syntax , data type , error handling , object-oriented programming .", "- * * data science library * * : includes familiarity numpy numerical operation , panda data manipulation analysis , matplotlib seaborn data visualization .", "- * * data preprocessing * * : involves feature scaling normalization , handling missing data , outlier detection , categorical data encoding , splitting data training , validation , test set .", "- * * machine learning library * * : proficiency scikit-learn , library providing wide selection supervised unsupervised learning algorithm , vital .", "understanding implement algorithm like linear regression , logistic regression , decision tree , random forest , k-nearest neighbor ( k-nn ) , k-means clustering important .", "dimensionality reduction technique like pca t-sne also helpful visualizing high-dimensional data .", "\ud83d\udcda resource : - [ real python ] ( http : //realpython.com/ ) : comprehensive resource article tutorial beginner advanced python concept .", "- [ freecodecamp - learn python ] ( http : //www.youtube.com/watch ? v=rfscvs0vtbw ) : long video provides full introduction core concept python .", "- [ python data science handbook ] ( http : //jakevdp.github.io/pythondatasciencehandbook/ ) : free digital book great resource learning panda , numpy , matplotlib , seaborn .", "- [ freecodecamp - machine learning everybody ] ( http : //youtu.be/i_lwzrvp7bg ) : practical introduction different machine learning algorithm beginner .", "- [ udacity - intro machine learning ] ( http : //www.udacity.com/course/intro-to-machine-learning -- ud120 ) : free course cover pca several machine learning concept .", "-- -" ] ], "level of complexity": 2 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "\ud83e\uddd1\u200d\ud83d\udd2c The LLM Scientist\n\nThis section of the course focuses on learning how to build the best possible LLMs using the latest techniques.\n\n![](img/roadmap_scientist.png)\n\n", "sentence": [ [ "\ud83e\uddd1\u200d\ud83d\udd2c", "llm", "scientist", "section", "course", "focus", "learning", "build", "best", "possible", "llm", "using", "latest", "technique", ".", "!", "[", "]", "(", "img/roadmap_scientist.png", ")" ], [ "\ud83e\uddd1\u200d\ud83d\udd2c llm scientist section course focus learning build best possible llm using latest technique .", "!", "[ ] ( img/roadmap_scientist.png )" ] ], "token": [ [ "\ud83e\uddd1\u200d\ud83d\udd2c", "llm", "scientist", "section", "course", "focus", "learning", "build", "best", "possible", "llm", "using", "latest", "technique", ".", "!", "[", "]", "(", "img/roadmap_scientist.png", ")" ], [ "\ud83e\uddd1\u200d\ud83d\udd2c llm scientist section course focus learning build best possible llm using latest technique .", "!", "[ ] ( img/roadmap_scientist.png )" ] ], "level of complexity": -1 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "1. The LLM architecture\n\nWhile an in-depth knowledge about the Transformer architecture is not required, it is important to have a good understanding of its inputs (tokens) and outputs (logits). The vanilla attention mechanism is another crucial component to master, as improved versions of it are introduced later on.\n\n* **High-level view**: Revisit the encoder-decoder Transformer architecture, and more specifically the decoder-only GPT architecture, which is used in every modern LLM.\n* **Tokenization**: Understand how to convert raw text data into a format that the model can understand, which involves splitting the text into tokens (usually words or subwords).\n* **Attention mechanisms**: Grasp the theory behind attention mechanisms, including self-attention and scaled dot-product attention, which allows the model to focus on different parts of the input when producing an output.\n* **Text generation**: Learn about the different ways the model can generate output sequences. Common strategies include greedy decoding, beam search, top-k sampling, and nucleus sampling.\n\n\ud83d\udcda **References**:\n- [The Illustrated Transformer](https://jalammar.github.io/illustrated-transformer/) by Jay Alammar: A visual and intuitive explanation of the Transformer model.\n- [The Illustrated GPT-2](https://jalammar.github.io/illustrated-gpt2/) by Jay Alammar: Even more important than the previous article, it is focused on the GPT architecture, which is very similar to Llama's.\n- [LLM Visualization](https://bbycroft.net/llm) by Brendan Bycroft: Incredible 3D visualization of what happens inside of an LLM.\n* [nanoGPT](https://www.youtube.com/watch?v=kCc8FmEb1nY) by Andrej Karpathy: A 2h-long YouTube video to reimplement GPT from scratch (for programmers).\n* [Attention? Attention!](https://lilianweng.github.io/posts/2018-06-24-attention/) by Lilian Weng: Introduce the need for attention in a more formal way.\n* [Decoding Strategies in LLMs](https://mlabonne.github.io/blog/posts/2023-06-07-Decoding_strategies.html): Provide code and a visual introduction to the different decoding strategies to generate text.\n\n---\n", "sentence": [ [ "1", ".", "llm", "architecture", "in-depth", "knowledge", "transformer", "architecture", "required", ",", "important", "good", "understanding", "input", "(", "token", ")", "output", "(", "logits", ")", ".", "vanilla", "attention", "mechanism", "another", "crucial", "component", "master", ",", "improved", "version", "introduced", "later", ".", "*", "*", "*", "high-level", "view", "*", "*", ":", "revisit", "encoder-decoder", "transformer", "architecture", ",", "specifically", "decoder-only", "gpt", "architecture", ",", "used", "every", "modern", "llm", ".", "*", "*", "*", "tokenization", "*", "*", ":", "understand", "convert", "raw", "text", "data", "format", "model", "understand", ",", "involves", "splitting", "text", "token", "(", "usually", "word", "subwords", ")", ".", "*", "*", "*", "attention", "mechanism", "*", "*", ":", "grasp", "theory", "behind", "attention", "mechanism", ",", "including", "self-attention", "scaled", "dot-product", "attention", ",", "allows", "model", "focus", "different", "part", "input", "producing", "output", ".", "*", "*", "*", "text", "generation", "*", "*", ":", "learn", "different", "way", "model", "generate", "output", "sequence", ".", "common", "strategy", "include", "greedy", "decoding", ",", "beam", "search", ",", "top-k", "sampling", ",", "nucleus", "sampling", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "-", "[", "illustrated", "transformer", "]", "(", "http", ":", "//jalammar.github.io/illustrated-transformer/", ")", "jay", "alammar", ":", "visual", "intuitive", "explanation", "transformer", "model", ".", "-", "[", "illustrated", "gpt-2", "]", "(", "http", ":", "//jalammar.github.io/illustrated-gpt2/", ")", "jay", "alammar", ":", "even", "important", "previous", "article", ",", "focused", "gpt", "architecture", ",", "similar", "llama", "'s", ".", "-", "[", "llm", "visualization", "]", "(", "http", ":", "//bbycroft.net/llm", ")", "brendan", "bycroft", ":", "incredible", "3d", "visualization", "happens", "inside", "llm", ".", "*", "[", "nanogpt", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=kcc8fmeb1ny", ")", "andrej", "karpathy", ":", "2h-long", "youtube", "video", "reimplement", "gpt", "scratch", "(", "programmer", ")", ".", "*", "[", "attention", "?", "attention", "!", "]", "(", "http", ":", "//lilianweng.github.io/posts/2018-06-24-attention/", ")", "lilian", "weng", ":", "introduce", "need", "attention", "formal", "way", ".", "*", "[", "decoding", "strategy", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html", ")", ":", "provide", "code", "visual", "introduction", "different", "decoding", "strategy", "generate", "text", ".", "--", "-" ], [ "1 .", "llm architecture in-depth knowledge transformer architecture required , important good understanding input ( token ) output ( logits ) .", "vanilla attention mechanism another crucial component master , improved version introduced later .", "* * * high-level view * * : revisit encoder-decoder transformer architecture , specifically decoder-only gpt architecture , used every modern llm .", "* * * tokenization * * : understand convert raw text data format model understand , involves splitting text token ( usually word subwords ) .", "* * * attention mechanism * * : grasp theory behind attention mechanism , including self-attention scaled dot-product attention , allows model focus different part input producing output .", "* * * text generation * * : learn different way model generate output sequence .", "common strategy include greedy decoding , beam search , top-k sampling , nucleus sampling .", "\ud83d\udcda * * reference * * : - [ illustrated transformer ] ( http : //jalammar.github.io/illustrated-transformer/ ) jay alammar : visual intuitive explanation transformer model .", "- [ illustrated gpt-2 ] ( http : //jalammar.github.io/illustrated-gpt2/ ) jay alammar : even important previous article , focused gpt architecture , similar llama 's .", "- [ llm visualization ] ( http : //bbycroft.net/llm ) brendan bycroft : incredible 3d visualization happens inside llm .", "* [ nanogpt ] ( http : //www.youtube.com/watch ? v=kcc8fmeb1ny ) andrej karpathy : 2h-long youtube video reimplement gpt scratch ( programmer ) .", "* [ attention ?", "attention !", "] ( http : //lilianweng.github.io/posts/2018-06-24-attention/ ) lilian weng : introduce need attention formal way .", "* [ decoding strategy llm ] ( http : //mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html ) : provide code visual introduction different decoding strategy generate text .", "-- -" ] ], "token": [ [ "1", ".", "llm", "architecture", "in-depth", "knowledge", "transformer", "architecture", "required", ",", "important", "good", "understanding", "input", "(", "token", ")", "output", "(", "logits", ")", ".", "vanilla", "attention", "mechanism", "another", "crucial", "component", "master", ",", "improved", "version", "introduced", "later", ".", "*", "*", "*", "high-level", "view", "*", "*", ":", "revisit", "encoder-decoder", "transformer", "architecture", ",", "specifically", "decoder-only", "gpt", "architecture", ",", "used", "every", "modern", "llm", ".", "*", "*", "*", "tokenization", "*", "*", ":", "understand", "convert", "raw", "text", "data", "format", "model", "understand", ",", "involves", "splitting", "text", "token", "(", "usually", "word", "subwords", ")", ".", "*", "*", "*", "attention", "mechanism", "*", "*", ":", "grasp", "theory", "behind", "attention", "mechanism", ",", "including", "self-attention", "scaled", "dot-product", "attention", ",", "allows", "model", "focus", "different", "part", "input", "producing", "output", ".", "*", "*", "*", "text", "generation", "*", "*", ":", "learn", "different", "way", "model", "generate", "output", "sequence", ".", "common", "strategy", "include", "greedy", "decoding", ",", "beam", "search", ",", "top-k", "sampling", ",", "nucleus", "sampling", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "-", "[", "illustrated", "transformer", "]", "(", "http", ":", "//jalammar.github.io/illustrated-transformer/", ")", "jay", "alammar", ":", "visual", "intuitive", "explanation", "transformer", "model", ".", "-", "[", "illustrated", "gpt-2", "]", "(", "http", ":", "//jalammar.github.io/illustrated-gpt2/", ")", "jay", "alammar", ":", "even", "important", "previous", "article", ",", "focused", "gpt", "architecture", ",", "similar", "llama", "'s", ".", "-", "[", "llm", "visualization", "]", "(", "http", ":", "//bbycroft.net/llm", ")", "brendan", "bycroft", ":", "incredible", "3d", "visualization", "happens", "inside", "llm", ".", "*", "[", "nanogpt", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=kcc8fmeb1ny", ")", "andrej", "karpathy", ":", "2h-long", "youtube", "video", "reimplement", "gpt", "scratch", "(", "programmer", ")", ".", "*", "[", "attention", "?", "attention", "!", "]", "(", "http", ":", "//lilianweng.github.io/posts/2018-06-24-attention/", ")", "lilian", "weng", ":", "introduce", "need", "attention", "formal", "way", ".", "*", "[", "decoding", "strategy", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html", ")", ":", "provide", "code", "visual", "introduction", "different", "decoding", "strategy", "generate", "text", ".", "--", "-" ], [ "1 .", "llm architecture in-depth knowledge transformer architecture required , important good understanding input ( token ) output ( logits ) .", "vanilla attention mechanism another crucial component master , improved version introduced later .", "* * * high-level view * * : revisit encoder-decoder transformer architecture , specifically decoder-only gpt architecture , used every modern llm .", "* * * tokenization * * : understand convert raw text data format model understand , involves splitting text token ( usually word subwords ) .", "* * * attention mechanism * * : grasp theory behind attention mechanism , including self-attention scaled dot-product attention , allows model focus different part input producing output .", "* * * text generation * * : learn different way model generate output sequence .", "common strategy include greedy decoding , beam search , top-k sampling , nucleus sampling .", "\ud83d\udcda * * reference * * : - [ illustrated transformer ] ( http : //jalammar.github.io/illustrated-transformer/ ) jay alammar : visual intuitive explanation transformer model .", "- [ illustrated gpt-2 ] ( http : //jalammar.github.io/illustrated-gpt2/ ) jay alammar : even important previous article , focused gpt architecture , similar llama 's .", "- [ llm visualization ] ( http : //bbycroft.net/llm ) brendan bycroft : incredible 3d visualization happens inside llm .", "* [ nanogpt ] ( http : //www.youtube.com/watch ? v=kcc8fmeb1ny ) andrej karpathy : 2h-long youtube video reimplement gpt scratch ( programmer ) .", "* [ attention ?", "attention !", "] ( http : //lilianweng.github.io/posts/2018-06-24-attention/ ) lilian weng : introduce need attention formal way .", "* [ decoding strategy llm ] ( http : //mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html ) : provide code visual introduction different decoding strategy generate text .", "-- -" ] ], "level of complexity": 2 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "2. Building an instruction dataset\n\nWhile it's easy to find raw data from Wikipedia and other websites, it's difficult to collect pairs of instructions and answers in the wild. Like in traditional machine learning, the quality of the dataset will directly influence the quality of the model, which is why it might be the most important component in the fine-tuning process.\n\n* **[Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html)-like dataset**: Generate synthetic data from scratch with the OpenAI API (GPT). You can specify seeds and system prompts to create a diverse dataset.\n* **Advanced techniques**: Learn how to improve existing datasets with [Evol-Instruct](https://arxiv.org/abs/2304.12244), how to generate high-quality synthetic data like in the [Orca](https://arxiv.org/abs/2306.02707) and [phi-1](https://arxiv.org/abs/2306.11644) papers.\n* **Filtering data**: Traditional techniques involving regex, removing near-duplicates, focusing on answers with a high number of tokens, etc.\n* **Prompt templates**: There's no true standard way of formatting instructions and answers, which is why it's important to know about the different chat templates, such as [ChatML](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt?tabs=python&pivots=programming-language-chat-ml), [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html), etc.\n\n\ud83d\udcda **References**:\n* [Preparing a Dataset for Instruction tuning](https://wandb.ai/capecape/alpaca_ft/reports/How-to-Fine-Tune-an-LLM-Part-1-Preparing-a-Dataset-for-Instruction-Tuning--Vmlldzo1NTcxNzE2) by Thomas Capelle: Exploration of the Alpaca and Alpaca-GPT4 datasets and how to format them.\n* [Generating a Clinical Instruction Dataset](https://medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae) by Solano Todeschini: Tutorial on how to create a synthetic instruction dataset using GPT-4. \n* [GPT 3.5 for news classification](https://medium.com/@kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f) by Kshitiz Sahay: Use GPT 3.5 to create an instruction dataset to fine-tune Llama 2 for news classification.\n* [Dataset creation for fine-tuning LLM](https://colab.research.google.com/drive/1GH8PW9-zAe4cXEZyOIE-T9uHXblIldAg?usp=sharing): Notebook that contains a few techniques to filter a dataset and upload the result.\n* [Chat Template](https://huggingface.co/blog/chat-templates) by Matthew Carrigan: Hugging Face's page about prompt templates\n\n---\n", "sentence": [ [ "2", ".", "building", "instruction", "dataset", "'s", "easy", "find", "raw", "data", "wikipedia", "website", ",", "'s", "difficult", "collect", "pair", "instruction", "answer", "wild", ".", "like", "traditional", "machine", "learning", ",", "quality", "dataset", "directly", "influence", "quality", "model", ",", "might", "important", "component", "fine-tuning", "process", ".", "*", "*", "*", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", "-like", "dataset", "*", "*", ":", "generate", "synthetic", "data", "scratch", "openai", "api", "(", "gpt", ")", ".", "specify", "seed", "system", "prompt", "create", "diverse", "dataset", ".", "*", "*", "*", "advanced", "technique", "*", "*", ":", "learn", "improve", "existing", "datasets", "[", "evol-instruct", "]", "(", "http", ":", "//arxiv.org/abs/2304.12244", ")", ",", "generate", "high-quality", "synthetic", "data", "like", "[", "orca", "]", "(", "http", ":", "//arxiv.org/abs/2306.02707", ")", "[", "phi-1", "]", "(", "http", ":", "//arxiv.org/abs/2306.11644", ")", "paper", ".", "*", "*", "*", "filtering", "data", "*", "*", ":", "traditional", "technique", "involving", "regex", ",", "removing", "near-duplicates", ",", "focusing", "answer", "high", "number", "token", ",", "etc", ".", "*", "*", "*", "prompt", "template", "*", "*", ":", "'s", "true", "standard", "way", "formatting", "instruction", "answer", ",", "'s", "important", "know", "different", "chat", "template", ",", "[", "chatml", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt", "?", "tabs=python", "&", "pivots=programming-language-chat-ml", ")", ",", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", ",", "etc", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "preparing", "dataset", "instruction", "tuning", "]", "(", "http", ":", "//wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning", "--", "vmlldzo1ntcxnze2", ")", "thomas", "capelle", ":", "exploration", "alpaca", "alpaca-gpt4", "datasets", "format", ".", "*", "[", "generating", "clinical", "instruction", "dataset", "]", "(", "http", ":", "//medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae", ")", "solano", "todeschini", ":", "tutorial", "create", "synthetic", "instruction", "dataset", "using", "gpt-4", ".", "*", "[", "gpt", "3.5", "news", "classification", "]", "(", "http", ":", "//medium.com/", "@", "kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f", ")", "kshitiz", "sahay", ":", "use", "gpt", "3.5", "create", "instruction", "dataset", "fine-tune", "llama", "2", "news", "classification", ".", "*", "[", "dataset", "creation", "fine-tuning", "llm", "]", "(", "http", ":", "//colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag", "?", "usp=sharing", ")", ":", "notebook", "contains", "technique", "filter", "dataset", "upload", "result", ".", "*", "[", "chat", "template", "]", "(", "http", ":", "//huggingface.co/blog/chat-templates", ")", "matthew", "carrigan", ":", "hugging", "face", "'s", "page", "prompt", "template", "--", "-" ], [ "2 .", "building instruction dataset 's easy find raw data wikipedia website , 's difficult collect pair instruction answer wild .", "like traditional machine learning , quality dataset directly influence quality model , might important component fine-tuning process .", "* * * [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) -like dataset * * : generate synthetic data scratch openai api ( gpt ) .", "specify seed system prompt create diverse dataset .", "* * * advanced technique * * : learn improve existing datasets [ evol-instruct ] ( http : //arxiv.org/abs/2304.12244 ) , generate high-quality synthetic data like [ orca ] ( http : //arxiv.org/abs/2306.02707 ) [ phi-1 ] ( http : //arxiv.org/abs/2306.11644 ) paper .", "* * * filtering data * * : traditional technique involving regex , removing near-duplicates , focusing answer high number token , etc .", "* * * prompt template * * : 's true standard way formatting instruction answer , 's important know different chat template , [ chatml ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt ? tabs=python & pivots=programming-language-chat-ml ) , [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) , etc .", "\ud83d\udcda * * reference * * : * [ preparing dataset instruction tuning ] ( http : //wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning -- vmlldzo1ntcxnze2 ) thomas capelle : exploration alpaca alpaca-gpt4 datasets format .", "* [ generating clinical instruction dataset ] ( http : //medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae ) solano todeschini : tutorial create synthetic instruction dataset using gpt-4 .", "* [ gpt 3.5 news classification ] ( http : //medium.com/ @ kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f ) kshitiz sahay : use gpt 3.5 create instruction dataset fine-tune llama 2 news classification .", "* [ dataset creation fine-tuning llm ] ( http : //colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag ? usp=sharing ) : notebook contains technique filter dataset upload result .", "* [ chat template ] ( http : //huggingface.co/blog/chat-templates ) matthew carrigan : hugging face 's page prompt template -- -" ] ], "token": [ [ "2", ".", "building", "instruction", "dataset", "'s", "easy", "find", "raw", "data", "wikipedia", "website", ",", "'s", "difficult", "collect", "pair", "instruction", "answer", "wild", ".", "like", "traditional", "machine", "learning", ",", "quality", "dataset", "directly", "influence", "quality", "model", ",", "might", "important", "component", "fine-tuning", "process", ".", "*", "*", "*", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", "-like", "dataset", "*", "*", ":", "generate", "synthetic", "data", "scratch", "openai", "api", "(", "gpt", ")", ".", "specify", "seed", "system", "prompt", "create", "diverse", "dataset", ".", "*", "*", "*", "advanced", "technique", "*", "*", ":", "learn", "improve", "existing", "datasets", "[", "evol-instruct", "]", "(", "http", ":", "//arxiv.org/abs/2304.12244", ")", ",", "generate", "high-quality", "synthetic", "data", "like", "[", "orca", "]", "(", "http", ":", "//arxiv.org/abs/2306.02707", ")", "[", "phi-1", "]", "(", "http", ":", "//arxiv.org/abs/2306.11644", ")", "paper", ".", "*", "*", "*", "filtering", "data", "*", "*", ":", "traditional", "technique", "involving", "regex", ",", "removing", "near-duplicates", ",", "focusing", "answer", "high", "number", "token", ",", "etc", ".", "*", "*", "*", "prompt", "template", "*", "*", ":", "'s", "true", "standard", "way", "formatting", "instruction", "answer", ",", "'s", "important", "know", "different", "chat", "template", ",", "[", "chatml", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt", "?", "tabs=python", "&", "pivots=programming-language-chat-ml", ")", ",", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", ",", "etc", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "preparing", "dataset", "instruction", "tuning", "]", "(", "http", ":", "//wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning", "--", "vmlldzo1ntcxnze2", ")", "thomas", "capelle", ":", "exploration", "alpaca", "alpaca-gpt4", "datasets", "format", ".", "*", "[", "generating", "clinical", "instruction", "dataset", "]", "(", "http", ":", "//medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae", ")", "solano", "todeschini", ":", "tutorial", "create", "synthetic", "instruction", "dataset", "using", "gpt-4", ".", "*", "[", "gpt", "3.5", "news", "classification", "]", "(", "http", ":", "//medium.com/", "@", "kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f", ")", "kshitiz", "sahay", ":", "use", "gpt", "3.5", "create", "instruction", "dataset", "fine-tune", "llama", "2", "news", "classification", ".", "*", "[", "dataset", "creation", "fine-tuning", "llm", "]", "(", "http", ":", "//colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag", "?", "usp=sharing", ")", ":", "notebook", "contains", "technique", "filter", "dataset", "upload", "result", ".", "*", "[", "chat", "template", "]", "(", "http", ":", "//huggingface.co/blog/chat-templates", ")", "matthew", "carrigan", ":", "hugging", "face", "'s", "page", "prompt", "template", "--", "-" ], [ "2 .", "building instruction dataset 's easy find raw data wikipedia website , 's difficult collect pair instruction answer wild .", "like traditional machine learning , quality dataset directly influence quality model , might important component fine-tuning process .", "* * * [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) -like dataset * * : generate synthetic data scratch openai api ( gpt ) .", "specify seed system prompt create diverse dataset .", "* * * advanced technique * * : learn improve existing datasets [ evol-instruct ] ( http : //arxiv.org/abs/2304.12244 ) , generate high-quality synthetic data like [ orca ] ( http : //arxiv.org/abs/2306.02707 ) [ phi-1 ] ( http : //arxiv.org/abs/2306.11644 ) paper .", "* * * filtering data * * : traditional technique involving regex , removing near-duplicates , focusing answer high number token , etc .", "* * * prompt template * * : 's true standard way formatting instruction answer , 's important know different chat template , [ chatml ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt ? tabs=python & pivots=programming-language-chat-ml ) , [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) , etc .", "\ud83d\udcda * * reference * * : * [ preparing dataset instruction tuning ] ( http : //wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning -- vmlldzo1ntcxnze2 ) thomas capelle : exploration alpaca alpaca-gpt4 datasets format .", "* [ generating clinical instruction dataset ] ( http : //medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae ) solano todeschini : tutorial create synthetic instruction dataset using gpt-4 .", "* [ gpt 3.5 news classification ] ( http : //medium.com/ @ kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f ) kshitiz sahay : use gpt 3.5 create instruction dataset fine-tune llama 2 news classification .", "* [ dataset creation fine-tuning llm ] ( http : //colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag ? usp=sharing ) : notebook contains technique filter dataset upload result .", "* [ chat template ] ( http : //huggingface.co/blog/chat-templates ) matthew carrigan : hugging face 's page prompt template -- -" ] ], "level of complexity": -1 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "3. Pre-training models\n\nPre-training is a very long and costly process, which is why this is not the focus of this course. It's good to have some level of understanding of what happens during pre-training, but hands-on experience is not required.\n\n* **Data pipeline**: Pre-training requires huge datasets (e.g., [Llama 2](https://arxiv.org/abs/2307.09288) was trained on 2 trillion tokens) that need to be filtered, tokenized, and collated with a pre-defined vocabulary.\n* **Causal language modeling**: Learn the difference between causal and masked language modeling, as well as the loss function used in this case. For efficient pre-training, learn more about [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) or [gpt-neox](https://github.com/EleutherAI/gpt-neox).\n* **Scaling laws**: The [scaling laws](https://arxiv.org/pdf/2001.08361.pdf) describe the expected model performance based on the model size, dataset size, and the amount of compute used for training.\n* **High-Performance Computing**: Out of scope here, but more knowledge about HPC is fundamental if you're planning to create your own LLM from scratch (hardware, distributed workload, etc.).\n\n\ud83d\udcda **References**:\n* [LLMDataHub](https://github.com/Zjh-819/LLMDataHub) by Junhao Zhao: Curated list of datasets for pre-training, fine-tuning, and RLHF.\n* [Training a causal language model from scratch](https://huggingface.co/learn/nlp-course/chapter7/6?fw=pt) by Hugging Face: Pre-train a GPT-2 model from scratch using the transformers library.\n* [TinyLlama](https://github.com/jzhang38/TinyLlama) by Zhang et al.: Check this project to get a good understanding of how a Llama model is trained from scratch.\n* [Causal language modeling](https://huggingface.co/docs/transformers/tasks/language_modeling) by Hugging Face: Explain the difference between causal and masked language modeling and how to quickly fine-tune a DistilGPT-2 model.\n* [Chinchilla's wild implications](https://www.lesswrong.com/posts/6Fpvch8RR29qLEWNH/chinchilla-s-wild-implications) by nostalgebraist: Discuss the scaling laws and explain what they mean to LLMs in general.\n* [BLOOM](https://bigscience.notion.site/BLOOM-BigScience-176B-Model-ad073ca07cdf479398d5f95d88e218c4) by BigScience: Notion page that describes how the BLOOM model was built, with a lot of useful information about the engineering part and the problems that were encountered.\n* [OPT-175 Logbook](https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/chronicles/OPT175B_Logbook.pdf) by Meta: Research logs showing what went wrong and what went right. Useful if you're planning to pre-train a very large language model (in this case, 175B parameters).\n* [LLM 360](https://www.llm360.ai/): A framework for open-source LLMs with training and data preparation code, data, metrics, and models.\n\n---\n", "sentence": [ [ "3", ".", "pre-training", "model", "pre-training", "long", "costly", "process", ",", "focus", "course", ".", "'s", "good", "level", "understanding", "happens", "pre-training", ",", "hands-on", "experience", "required", ".", "*", "*", "*", "data", "pipeline", "*", "*", ":", "pre-training", "requires", "huge", "datasets", "(", "e.g.", ",", "[", "llama", "2", "]", "(", "http", ":", "//arxiv.org/abs/2307.09288", ")", "trained", "2", "trillion", "token", ")", "need", "filtered", ",", "tokenized", ",", "collated", "pre-defined", "vocabulary", ".", "*", "*", "*", "causal", "language", "modeling", "*", "*", ":", "learn", "difference", "causal", "masked", "language", "modeling", ",", "well", "loss", "function", "used", "case", ".", "efficient", "pre-training", ",", "learn", "[", "megatron-lm", "]", "(", "http", ":", "//github.com/nvidia/megatron-lm", ")", "[", "gpt-neox", "]", "(", "http", ":", "//github.com/eleutherai/gpt-neox", ")", ".", "*", "*", "*", "scaling", "law", "*", "*", ":", "[", "scaling", "law", "]", "(", "http", ":", "//arxiv.org/pdf/2001.08361.pdf", ")", "describe", "expected", "model", "performance", "based", "model", "size", ",", "dataset", "size", ",", "amount", "compute", "used", "training", ".", "*", "*", "*", "high-performance", "computing", "*", "*", ":", "scope", ",", "knowledge", "hpc", "fundamental", "'re", "planning", "create", "llm", "scratch", "(", "hardware", ",", "distributed", "workload", ",", "etc", ".", ")", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "llmdatahub", "]", "(", "http", ":", "//github.com/zjh-819/llmdatahub", ")", "junhao", "zhao", ":", "curated", "list", "datasets", "pre-training", ",", "fine-tuning", ",", "rlhf", ".", "*", "[", "training", "causal", "language", "model", "scratch", "]", "(", "http", ":", "//huggingface.co/learn/nlp-course/chapter7/6", "?", "fw=pt", ")", "hugging", "face", ":", "pre-train", "gpt-2", "model", "scratch", "using", "transformer", "library", ".", "*", "[", "tinyllama", "]", "(", "http", ":", "//github.com/jzhang38/tinyllama", ")", "zhang", "et", "al", ".", ":", "check", "project", "get", "good", "understanding", "llama", "model", "trained", "scratch", ".", "*", "[", "causal", "language", "modeling", "]", "(", "http", ":", "//huggingface.co/docs/transformers/tasks/language_modeling", ")", "hugging", "face", ":", "explain", "difference", "causal", "masked", "language", "modeling", "quickly", "fine-tune", "distilgpt-2", "model", ".", "*", "[", "chinchilla", "'s", "wild", "implication", "]", "(", "http", ":", "//www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications", ")", "nostalgebraist", ":", "discus", "scaling", "law", "explain", "mean", "llm", "general", ".", "*", "[", "bloom", "]", "(", "http", ":", "//bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4", ")", "bigscience", ":", "notion", "page", "describes", "bloom", "model", "built", ",", "lot", "useful", "information", "engineering", "part", "problem", "encountered", ".", "*", "[", "opt-175", "logbook", "]", "(", "http", ":", "//github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf", ")", "meta", ":", "research", "log", "showing", "went", "wrong", "went", "right", ".", "useful", "'re", "planning", "pre-train", "large", "language", "model", "(", "case", ",", "175b", "parameter", ")", ".", "*", "[", "llm", "360", "]", "(", "http", ":", "//www.llm360.ai/", ")", ":", "framework", "open-source", "llm", "training", "data", "preparation", "code", ",", "data", ",", "metric", ",", "model", ".", "--", "-" ], [ "3 .", "pre-training model pre-training long costly process , focus course .", "'s good level understanding happens pre-training , hands-on experience required .", "* * * data pipeline * * : pre-training requires huge datasets ( e.g. , [ llama 2 ] ( http : //arxiv.org/abs/2307.09288 ) trained 2 trillion token ) need filtered , tokenized , collated pre-defined vocabulary .", "* * * causal language modeling * * : learn difference causal masked language modeling , well loss function used case .", "efficient pre-training , learn [ megatron-lm ] ( http : //github.com/nvidia/megatron-lm ) [ gpt-neox ] ( http : //github.com/eleutherai/gpt-neox ) .", "* * * scaling law * * : [ scaling law ] ( http : //arxiv.org/pdf/2001.08361.pdf ) describe expected model performance based model size , dataset size , amount compute used training .", "* * * high-performance computing * * : scope , knowledge hpc fundamental 're planning create llm scratch ( hardware , distributed workload , etc . ) .", "\ud83d\udcda * * reference * * : * [ llmdatahub ] ( http : //github.com/zjh-819/llmdatahub ) junhao zhao : curated list datasets pre-training , fine-tuning , rlhf .", "* [ training causal language model scratch ] ( http : //huggingface.co/learn/nlp-course/chapter7/6 ? fw=pt ) hugging face : pre-train gpt-2 model scratch using transformer library .", "* [ tinyllama ] ( http : //github.com/jzhang38/tinyllama ) zhang et al .", ": check project get good understanding llama model trained scratch .", "* [ causal language modeling ] ( http : //huggingface.co/docs/transformers/tasks/language_modeling ) hugging face : explain difference causal masked language modeling quickly fine-tune distilgpt-2 model .", "* [ chinchilla 's wild implication ] ( http : //www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications ) nostalgebraist : discus scaling law explain mean llm general .", "* [ bloom ] ( http : //bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4 ) bigscience : notion page describes bloom model built , lot useful information engineering part problem encountered .", "* [ opt-175 logbook ] ( http : //github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf ) meta : research log showing went wrong went right .", "useful 're planning pre-train large language model ( case , 175b parameter ) .", "* [ llm 360 ] ( http : //www.llm360.ai/ ) : framework open-source llm training data preparation code , data , metric , model .", "-- -" ] ], "token": [ [ "3", ".", "pre-training", "model", "pre-training", "long", "costly", "process", ",", "focus", "course", ".", "'s", "good", "level", "understanding", "happens", "pre-training", ",", "hands-on", "experience", "required", ".", "*", "*", "*", "data", "pipeline", "*", "*", ":", "pre-training", "requires", "huge", "datasets", "(", "e.g.", ",", "[", "llama", "2", "]", "(", "http", ":", "//arxiv.org/abs/2307.09288", ")", "trained", "2", "trillion", "token", ")", "need", "filtered", ",", "tokenized", ",", "collated", "pre-defined", "vocabulary", ".", "*", "*", "*", "causal", "language", "modeling", "*", "*", ":", "learn", "difference", "causal", "masked", "language", "modeling", ",", "well", "loss", "function", "used", "case", ".", "efficient", "pre-training", ",", "learn", "[", "megatron-lm", "]", "(", "http", ":", "//github.com/nvidia/megatron-lm", ")", "[", "gpt-neox", "]", "(", "http", ":", "//github.com/eleutherai/gpt-neox", ")", ".", "*", "*", "*", "scaling", "law", "*", "*", ":", "[", "scaling", "law", "]", "(", "http", ":", "//arxiv.org/pdf/2001.08361.pdf", ")", "describe", "expected", "model", "performance", "based", "model", "size", ",", "dataset", "size", ",", "amount", "compute", "used", "training", ".", "*", "*", "*", "high-performance", "computing", "*", "*", ":", "scope", ",", "knowledge", "hpc", "fundamental", "'re", "planning", "create", "llm", "scratch", "(", "hardware", ",", "distributed", "workload", ",", "etc", ".", ")", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "llmdatahub", "]", "(", "http", ":", "//github.com/zjh-819/llmdatahub", ")", "junhao", "zhao", ":", "curated", "list", "datasets", "pre-training", ",", "fine-tuning", ",", "rlhf", ".", "*", "[", "training", "causal", "language", "model", "scratch", "]", "(", "http", ":", "//huggingface.co/learn/nlp-course/chapter7/6", "?", "fw=pt", ")", "hugging", "face", ":", "pre-train", "gpt-2", "model", "scratch", "using", "transformer", "library", ".", "*", "[", "tinyllama", "]", "(", "http", ":", "//github.com/jzhang38/tinyllama", ")", "zhang", "et", "al", ".", ":", "check", "project", "get", "good", "understanding", "llama", "model", "trained", "scratch", ".", "*", "[", "causal", "language", "modeling", "]", "(", "http", ":", "//huggingface.co/docs/transformers/tasks/language_modeling", ")", "hugging", "face", ":", "explain", "difference", "causal", "masked", "language", "modeling", "quickly", "fine-tune", "distilgpt-2", "model", ".", "*", "[", "chinchilla", "'s", "wild", "implication", "]", "(", "http", ":", "//www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications", ")", "nostalgebraist", ":", "discus", "scaling", "law", "explain", "mean", "llm", "general", ".", "*", "[", "bloom", "]", "(", "http", ":", "//bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4", ")", "bigscience", ":", "notion", "page", "describes", "bloom", "model", "built", ",", "lot", "useful", "information", "engineering", "part", "problem", "encountered", ".", "*", "[", "opt-175", "logbook", "]", "(", "http", ":", "//github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf", ")", "meta", ":", "research", "log", "showing", "went", "wrong", "went", "right", ".", "useful", "'re", "planning", "pre-train", "large", "language", "model", "(", "case", ",", "175b", "parameter", ")", ".", "*", "[", "llm", "360", "]", "(", "http", ":", "//www.llm360.ai/", ")", ":", "framework", "open-source", "llm", "training", "data", "preparation", "code", ",", "data", ",", "metric", ",", "model", ".", "--", "-" ], [ "3 .", "pre-training model pre-training long costly process , focus course .", "'s good level understanding happens pre-training , hands-on experience required .", "* * * data pipeline * * : pre-training requires huge datasets ( e.g. , [ llama 2 ] ( http : //arxiv.org/abs/2307.09288 ) trained 2 trillion token ) need filtered , tokenized , collated pre-defined vocabulary .", "* * * causal language modeling * * : learn difference causal masked language modeling , well loss function used case .", "efficient pre-training , learn [ megatron-lm ] ( http : //github.com/nvidia/megatron-lm ) [ gpt-neox ] ( http : //github.com/eleutherai/gpt-neox ) .", "* * * scaling law * * : [ scaling law ] ( http : //arxiv.org/pdf/2001.08361.pdf ) describe expected model performance based model size , dataset size , amount compute used training .", "* * * high-performance computing * * : scope , knowledge hpc fundamental 're planning create llm scratch ( hardware , distributed workload , etc . ) .", "\ud83d\udcda * * reference * * : * [ llmdatahub ] ( http : //github.com/zjh-819/llmdatahub ) junhao zhao : curated list datasets pre-training , fine-tuning , rlhf .", "* [ training causal language model scratch ] ( http : //huggingface.co/learn/nlp-course/chapter7/6 ? fw=pt ) hugging face : pre-train gpt-2 model scratch using transformer library .", "* [ tinyllama ] ( http : //github.com/jzhang38/tinyllama ) zhang et al .", ": check project get good understanding llama model trained scratch .", "* [ causal language modeling ] ( http : //huggingface.co/docs/transformers/tasks/language_modeling ) hugging face : explain difference causal masked language modeling quickly fine-tune distilgpt-2 model .", "* [ chinchilla 's wild implication ] ( http : //www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications ) nostalgebraist : discus scaling law explain mean llm general .", "* [ bloom ] ( http : //bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4 ) bigscience : notion page describes bloom model built , lot useful information engineering part problem encountered .", "* [ opt-175 logbook ] ( http : //github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf ) meta : research log showing went wrong went right .", "useful 're planning pre-train large language model ( case , 175b parameter ) .", "* [ llm 360 ] ( http : //www.llm360.ai/ ) : framework open-source llm training data preparation code , data , metric , model .", "-- -" ] ], "level of complexity": -1 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "4. Supervised Fine-Tuning\n\nPre-trained models are only trained on a next-token prediction task, which is why they're not helpful assistants. SFT allows you to tweak them to respond to instructions. Moreover, it allows you to fine-tune your model on any data (private, not seen by GPT-4, etc.) and use it without having to pay for an API like OpenAI's.\n\n* **Full fine-tuning**: Full fine-tuning refers to training all the parameters in the model. It is not an efficient technique, but it produces slightly better results.\n* [**LoRA**](https://arxiv.org/abs/2106.09685): A parameter-efficient technique (PEFT) based on low-rank adapters. Instead of training all the parameters, we only train these adapters.\n* [**QLoRA**](https://arxiv.org/abs/2305.14314): Another PEFT based on LoRA, which also quantizes the weights of the model in 4 bits and introduce paged optimizers to manage memory spikes. Combine it with [Unsloth](https://github.com/unslothai/unsloth) to run it efficiently on a free Colab notebook.\n* **[Axolotl](https://github.com/OpenAccess-AI-Collective/axolotl)**: A user-friendly and powerful fine-tuning tool that is used in a lot of state-of-the-art open-source models.\n* [**DeepSpeed**](https://www.deepspeed.ai/): Efficient pre-training and fine-tuning of LLMs for multi-GPU and multi-node settings (implemented in Axolotl).\n\n\ud83d\udcda **References**:\n* [The Novice's LLM Training Guide](https://rentry.org/llm-training) by Alpin: Overview of the main concepts and parameters to consider when fine-tuning LLMs.\n* [LoRA insights](https://lightning.ai/pages/community/lora-insights/) by Sebastian Raschka: Practical insights about LoRA and how to select the best parameters.\n* [Fine-Tune Your Own Llama 2 Model](https://mlabonne.github.io/blog/posts/Fine_Tune_Your_Own_Llama_2_Model_in_a_Colab_Notebook.html): Hands-on tutorial on how to fine-tune a Llama 2 model using Hugging Face libraries.\n* [Padding Large Language Models](https://towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff) by Benjamin Marie: Best practices to pad training examples for causal LLMs\n* [A Beginner's Guide to LLM Fine-Tuning](https://mlabonne.github.io/blog/posts/A_Beginners_Guide_to_LLM_Finetuning.html): Tutorial on how to fine-tune a CodeLlama model using Axolotl.\n\n---\n", "sentence": [ [ "4", ".", "supervised", "fine-tuning", "pre-trained", "model", "trained", "next-token", "prediction", "task", ",", "'re", "helpful", "assistant", ".", "sft", "allows", "tweak", "respond", "instruction", ".", "moreover", ",", "allows", "fine-tune", "model", "data", "(", "private", ",", "seen", "gpt-4", ",", "etc", ".", ")", "use", "without", "pay", "api", "like", "openai", "'s", ".", "*", "*", "*", "full", "fine-tuning", "*", "*", ":", "full", "fine-tuning", "refers", "training", "parameter", "model", ".", "efficient", "technique", ",", "produce", "slightly", "better", "result", ".", "*", "[", "*", "*", "lora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2106.09685", ")", ":", "parameter-efficient", "technique", "(", "peft", ")", "based", "low-rank", "adapter", ".", "instead", "training", "parameter", ",", "train", "adapter", ".", "*", "[", "*", "*", "qlora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2305.14314", ")", ":", "another", "peft", "based", "lora", ",", "also", "quantizes", "weight", "model", "4", "bit", "introduce", "paged", "optimizers", "manage", "memory", "spike", ".", "combine", "[", "unsloth", "]", "(", "http", ":", "//github.com/unslothai/unsloth", ")", "run", "efficiently", "free", "colab", "notebook", ".", "*", "*", "*", "[", "axolotl", "]", "(", "http", ":", "//github.com/openaccess-ai-collective/axolotl", ")", "*", "*", ":", "user-friendly", "powerful", "fine-tuning", "tool", "used", "lot", "state-of-the-art", "open-source", "model", ".", "*", "[", "*", "*", "deepspeed", "*", "*", "]", "(", "http", ":", "//www.deepspeed.ai/", ")", ":", "efficient", "pre-training", "fine-tuning", "llm", "multi-gpu", "multi-node", "setting", "(", "implemented", "axolotl", ")", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "novice", "'s", "llm", "training", "guide", "]", "(", "http", ":", "//rentry.org/llm-training", ")", "alpin", ":", "overview", "main", "concept", "parameter", "consider", "fine-tuning", "llm", ".", "*", "[", "lora", "insight", "]", "(", "http", ":", "//lightning.ai/pages/community/lora-insights/", ")", "sebastian", "raschka", ":", "practical", "insight", "lora", "select", "best", "parameter", ".", "*", "[", "fine-tune", "llama", "2", "model", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html", ")", ":", "hands-on", "tutorial", "fine-tune", "llama", "2", "model", "using", "hugging", "face", "library", ".", "*", "[", "padding", "large", "language", "model", "]", "(", "http", ":", "//towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff", ")", "benjamin", "marie", ":", "best", "practice", "pad", "training", "example", "causal", "llm", "*", "[", "beginner", "'s", "guide", "llm", "fine-tuning", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html", ")", ":", "tutorial", "fine-tune", "codellama", "model", "using", "axolotl", ".", "--", "-" ], [ "4 .", "supervised fine-tuning pre-trained model trained next-token prediction task , 're helpful assistant .", "sft allows tweak respond instruction .", "moreover , allows fine-tune model data ( private , seen gpt-4 , etc . )", "use without pay api like openai 's .", "* * * full fine-tuning * * : full fine-tuning refers training parameter model .", "efficient technique , produce slightly better result .", "* [ * * lora * * ] ( http : //arxiv.org/abs/2106.09685 ) : parameter-efficient technique ( peft ) based low-rank adapter .", "instead training parameter , train adapter .", "* [ * * qlora * * ] ( http : //arxiv.org/abs/2305.14314 ) : another peft based lora , also quantizes weight model 4 bit introduce paged optimizers manage memory spike .", "combine [ unsloth ] ( http : //github.com/unslothai/unsloth ) run efficiently free colab notebook .", "* * * [ axolotl ] ( http : //github.com/openaccess-ai-collective/axolotl ) * * : user-friendly powerful fine-tuning tool used lot state-of-the-art open-source model .", "* [ * * deepspeed * * ] ( http : //www.deepspeed.ai/ ) : efficient pre-training fine-tuning llm multi-gpu multi-node setting ( implemented axolotl ) .", "\ud83d\udcda * * reference * * : * [ novice 's llm training guide ] ( http : //rentry.org/llm-training ) alpin : overview main concept parameter consider fine-tuning llm .", "* [ lora insight ] ( http : //lightning.ai/pages/community/lora-insights/ ) sebastian raschka : practical insight lora select best parameter .", "* [ fine-tune llama 2 model ] ( http : //mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html ) : hands-on tutorial fine-tune llama 2 model using hugging face library .", "* [ padding large language model ] ( http : //towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff ) benjamin marie : best practice pad training example causal llm * [ beginner 's guide llm fine-tuning ] ( http : //mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html ) : tutorial fine-tune codellama model using axolotl .", "-- -" ] ], "token": [ [ "4", ".", "supervised", "fine-tuning", "pre-trained", "model", "trained", "next-token", "prediction", "task", ",", "'re", "helpful", "assistant", ".", "sft", "allows", "tweak", "respond", "instruction", ".", "moreover", ",", "allows", "fine-tune", "model", "data", "(", "private", ",", "seen", "gpt-4", ",", "etc", ".", ")", "use", "without", "pay", "api", "like", "openai", "'s", ".", "*", "*", "*", "full", "fine-tuning", "*", "*", ":", "full", "fine-tuning", "refers", "training", "parameter", "model", ".", "efficient", "technique", ",", "produce", "slightly", "better", "result", ".", "*", "[", "*", "*", "lora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2106.09685", ")", ":", "parameter-efficient", "technique", "(", "peft", ")", "based", "low-rank", "adapter", ".", "instead", "training", "parameter", ",", "train", "adapter", ".", "*", "[", "*", "*", "qlora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2305.14314", ")", ":", "another", "peft", "based", "lora", ",", "also", "quantizes", "weight", "model", "4", "bit", "introduce", "paged", "optimizers", "manage", "memory", "spike", ".", "combine", "[", "unsloth", "]", "(", "http", ":", "//github.com/unslothai/unsloth", ")", "run", "efficiently", "free", "colab", "notebook", ".", "*", "*", "*", "[", "axolotl", "]", "(", "http", ":", "//github.com/openaccess-ai-collective/axolotl", ")", "*", "*", ":", "user-friendly", "powerful", "fine-tuning", "tool", "used", "lot", "state-of-the-art", "open-source", "model", ".", "*", "[", "*", "*", "deepspeed", "*", "*", "]", "(", "http", ":", "//www.deepspeed.ai/", ")", ":", "efficient", "pre-training", "fine-tuning", "llm", "multi-gpu", "multi-node", "setting", "(", "implemented", "axolotl", ")", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "novice", "'s", "llm", "training", "guide", "]", "(", "http", ":", "//rentry.org/llm-training", ")", "alpin", ":", "overview", "main", "concept", "parameter", "consider", "fine-tuning", "llm", ".", "*", "[", "lora", "insight", "]", "(", "http", ":", "//lightning.ai/pages/community/lora-insights/", ")", "sebastian", "raschka", ":", "practical", "insight", "lora", "select", "best", "parameter", ".", "*", "[", "fine-tune", "llama", "2", "model", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html", ")", ":", "hands-on", "tutorial", "fine-tune", "llama", "2", "model", "using", "hugging", "face", "library", ".", "*", "[", "padding", "large", "language", "model", "]", "(", "http", ":", "//towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff", ")", "benjamin", "marie", ":", "best", "practice", "pad", "training", "example", "causal", "llm", "*", "[", "beginner", "'s", "guide", "llm", "fine-tuning", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html", ")", ":", "tutorial", "fine-tune", "codellama", "model", "using", "axolotl", ".", "--", "-" ], [ "4 .", "supervised fine-tuning pre-trained model trained next-token prediction task , 're helpful assistant .", "sft allows tweak respond instruction .", "moreover , allows fine-tune model data ( private , seen gpt-4 , etc . )", "use without pay api like openai 's .", "* * * full fine-tuning * * : full fine-tuning refers training parameter model .", "efficient technique , produce slightly better result .", "* [ * * lora * * ] ( http : //arxiv.org/abs/2106.09685 ) : parameter-efficient technique ( peft ) based low-rank adapter .", "instead training parameter , train adapter .", "* [ * * qlora * * ] ( http : //arxiv.org/abs/2305.14314 ) : another peft based lora , also quantizes weight model 4 bit introduce paged optimizers manage memory spike .", "combine [ unsloth ] ( http : //github.com/unslothai/unsloth ) run efficiently free colab notebook .", "* * * [ axolotl ] ( http : //github.com/openaccess-ai-collective/axolotl ) * * : user-friendly powerful fine-tuning tool used lot state-of-the-art open-source model .", "* [ * * deepspeed * * ] ( http : //www.deepspeed.ai/ ) : efficient pre-training fine-tuning llm multi-gpu multi-node setting ( implemented axolotl ) .", "\ud83d\udcda * * reference * * : * [ novice 's llm training guide ] ( http : //rentry.org/llm-training ) alpin : overview main concept parameter consider fine-tuning llm .", "* [ lora insight ] ( http : //lightning.ai/pages/community/lora-insights/ ) sebastian raschka : practical insight lora select best parameter .", "* [ fine-tune llama 2 model ] ( http : //mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html ) : hands-on tutorial fine-tune llama 2 model using hugging face library .", "* [ padding large language model ] ( http : //towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff ) benjamin marie : best practice pad training example causal llm * [ beginner 's guide llm fine-tuning ] ( http : //mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html ) : tutorial fine-tune codellama model using axolotl .", "-- -" ] ], "level of complexity": 2 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "6. Evaluation\n\nEvaluating LLMs is an undervalued part of the pipeline, which is time-consuming and moderately reliable. Your downstream task should dictate what you want to evaluate, but always remember Goodhart's law: \"When a measure becomes a target, it ceases to be a good measure.\"\n\n* **Traditional metrics**: Metrics like perplexity and BLEU score are not as popular as they were because they're flawed in most contexts. It is still important to understand them and when they can be applied.\n* **General benchmarks**: Based on the [Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness), the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) is the main benchmark for general-purpose LLMs (like ChatGPT). There are other popular benchmarks like [BigBench](https://github.com/google/BIG-bench), [MT-Bench](https://arxiv.org/abs/2306.05685), etc.\n* **Task-specific benchmarks**: Tasks like summarization, translation, and question answering have dedicated benchmarks, metrics, and even subdomains (medical, financial, etc.), such as [PubMedQA](https://pubmedqa.github.io/) for biomedical question answering.\n* **Human evaluation**: The most reliable evaluation is the acceptance rate by users or comparisons made by humans. If you want to know if a model performs well, the simplest but surest way is to use it yourself.\n\n\ud83d\udcda **References**:\n* [Perplexity of fixed-length models](https://huggingface.co/docs/transformers/perplexity) by Hugging Face: Overview of perplexity with code to implement it with the transformers library.\n* [BLEU at your own risk](https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213) by Rachael Tatman: Overview of the BLEU score and its many issues with examples.\n* [A Survey on Evaluation of LLMs](https://arxiv.org/abs/2307.03109) by Chang et al.: Comprehensive paper about what to evaluate, where to evaluate, and how to evaluate.\n* [Chatbot Arena Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) by lmsys: Elo rating of general-purpose LLMs, based on comparisons made by humans.\n\n---\n", "sentence": [ [ "6", ".", "evaluation", "evaluating", "llm", "undervalued", "part", "pipeline", ",", "time-consuming", "moderately", "reliable", ".", "downstream", "task", "dictate", "want", "evaluate", ",", "always", "remember", "goodhart", "'s", "law", ":", "``", "measure", "becomes", "target", ",", "cease", "good", "measure", ".", "''", "*", "*", "*", "traditional", "metric", "*", "*", ":", "metric", "like", "perplexity", "bleu", "score", "popular", "'re", "flawed", "context", ".", "still", "important", "understand", "applied", ".", "*", "*", "*", "general", "benchmark", "*", "*", ":", "based", "[", "language", "model", "evaluation", "harness", "]", "(", "http", ":", "//github.com/eleutherai/lm-evaluation-harness", ")", ",", "[", "open", "llm", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard", ")", "main", "benchmark", "general-purpose", "llm", "(", "like", "chatgpt", ")", ".", "popular", "benchmark", "like", "[", "bigbench", "]", "(", "http", ":", "//github.com/google/big-bench", ")", ",", "[", "mt-bench", "]", "(", "http", ":", "//arxiv.org/abs/2306.05685", ")", ",", "etc", ".", "*", "*", "*", "task-specific", "benchmark", "*", "*", ":", "task", "like", "summarization", ",", "translation", ",", "question", "answering", "dedicated", "benchmark", ",", "metric", ",", "even", "subdomains", "(", "medical", ",", "financial", ",", "etc", ".", ")", ",", "[", "pubmedqa", "]", "(", "http", ":", "//pubmedqa.github.io/", ")", "biomedical", "question", "answering", ".", "*", "*", "*", "human", "evaluation", "*", "*", ":", "reliable", "evaluation", "acceptance", "rate", "user", "comparison", "made", "human", ".", "want", "know", "model", "performs", "well", ",", "simplest", "surest", "way", "use", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "perplexity", "fixed-length", "model", "]", "(", "http", ":", "//huggingface.co/docs/transformers/perplexity", ")", "hugging", "face", ":", "overview", "perplexity", "code", "implement", "transformer", "library", ".", "*", "[", "bleu", "risk", "]", "(", "http", ":", "//towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ")", "rachael", "tatman", ":", "overview", "bleu", "score", "many", "issue", "example", ".", "*", "[", "survey", "evaluation", "llm", "]", "(", "http", ":", "//arxiv.org/abs/2307.03109", ")", "chang", "et", "al", ".", ":", "comprehensive", "paper", "evaluate", ",", "evaluate", ",", "evaluate", ".", "*", "[", "chatbot", "arena", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/lmsys/chatbot-arena-leaderboard", ")", "lmsys", ":", "elo", "rating", "general-purpose", "llm", ",", "based", "comparison", "made", "human", ".", "--", "-" ], [ "6 .", "evaluation evaluating llm undervalued part pipeline , time-consuming moderately reliable .", "downstream task dictate want evaluate , always remember goodhart 's law : `` measure becomes target , cease good measure . ''", "* * * traditional metric * * : metric like perplexity bleu score popular 're flawed context .", "still important understand applied .", "* * * general benchmark * * : based [ language model evaluation harness ] ( http : //github.com/eleutherai/lm-evaluation-harness ) , [ open llm leaderboard ] ( http : //huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard ) main benchmark general-purpose llm ( like chatgpt ) .", "popular benchmark like [ bigbench ] ( http : //github.com/google/big-bench ) , [ mt-bench ] ( http : //arxiv.org/abs/2306.05685 ) , etc .", "* * * task-specific benchmark * * : task like summarization , translation , question answering dedicated benchmark , metric , even subdomains ( medical , financial , etc .", ") , [ pubmedqa ] ( http : //pubmedqa.github.io/ ) biomedical question answering .", "* * * human evaluation * * : reliable evaluation acceptance rate user comparison made human .", "want know model performs well , simplest surest way use .", "\ud83d\udcda * * reference * * : * [ perplexity fixed-length model ] ( http : //huggingface.co/docs/transformers/perplexity ) hugging face : overview perplexity code implement transformer library .", "* [ bleu risk ] ( http : //towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213 ) rachael tatman : overview bleu score many issue example .", "* [ survey evaluation llm ] ( http : //arxiv.org/abs/2307.03109 ) chang et al .", ": comprehensive paper evaluate , evaluate , evaluate .", "* [ chatbot arena leaderboard ] ( http : //huggingface.co/spaces/lmsys/chatbot-arena-leaderboard ) lmsys : elo rating general-purpose llm , based comparison made human .", "-- -" ] ], "token": [ [ "6", ".", "evaluation", "evaluating", "llm", "undervalued", "part", "pipeline", ",", "time-consuming", "moderately", "reliable", ".", "downstream", "task", "dictate", "want", "evaluate", ",", "always", "remember", "goodhart", "'s", "law", ":", "``", "measure", "becomes", "target", ",", "cease", "good", "measure", ".", "''", "*", "*", "*", "traditional", "metric", "*", "*", ":", "metric", "like", "perplexity", "bleu", "score", "popular", "'re", "flawed", "context", ".", "still", "important", "understand", "applied", ".", "*", "*", "*", "general", "benchmark", "*", "*", ":", "based", "[", "language", "model", "evaluation", "harness", "]", "(", "http", ":", "//github.com/eleutherai/lm-evaluation-harness", ")", ",", "[", "open", "llm", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard", ")", "main", "benchmark", "general-purpose", "llm", "(", "like", "chatgpt", ")", ".", "popular", "benchmark", "like", "[", "bigbench", "]", "(", "http", ":", "//github.com/google/big-bench", ")", ",", "[", "mt-bench", "]", "(", "http", ":", "//arxiv.org/abs/2306.05685", ")", ",", "etc", ".", "*", "*", "*", "task-specific", "benchmark", "*", "*", ":", "task", "like", "summarization", ",", "translation", ",", "question", "answering", "dedicated", "benchmark", ",", "metric", ",", "even", "subdomains", "(", "medical", ",", "financial", ",", "etc", ".", ")", ",", "[", "pubmedqa", "]", "(", "http", ":", "//pubmedqa.github.io/", ")", "biomedical", "question", "answering", ".", "*", "*", "*", "human", "evaluation", "*", "*", ":", "reliable", "evaluation", "acceptance", "rate", "user", "comparison", "made", "human", ".", "want", "know", "model", "performs", "well", ",", "simplest", "surest", "way", "use", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "perplexity", "fixed-length", "model", "]", "(", "http", ":", "//huggingface.co/docs/transformers/perplexity", ")", "hugging", "face", ":", "overview", "perplexity", "code", "implement", "transformer", "library", ".", "*", "[", "bleu", "risk", "]", "(", "http", ":", "//towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ")", "rachael", "tatman", ":", "overview", "bleu", "score", "many", "issue", "example", ".", "*", "[", "survey", "evaluation", "llm", "]", "(", "http", ":", "//arxiv.org/abs/2307.03109", ")", "chang", "et", "al", ".", ":", "comprehensive", "paper", "evaluate", ",", "evaluate", ",", "evaluate", ".", "*", "[", "chatbot", "arena", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/lmsys/chatbot-arena-leaderboard", ")", "lmsys", ":", "elo", "rating", "general-purpose", "llm", ",", "based", "comparison", "made", "human", ".", "--", "-" ], [ "6 .", "evaluation evaluating llm undervalued part pipeline , time-consuming moderately reliable .", "downstream task dictate want evaluate , always remember goodhart 's law : `` measure becomes target , cease good measure . ''", "* * * traditional metric * * : metric like perplexity bleu score popular 're flawed context .", "still important understand applied .", "* * * general benchmark * * : based [ language model evaluation harness ] ( http : //github.com/eleutherai/lm-evaluation-harness ) , [ open llm leaderboard ] ( http : //huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard ) main benchmark general-purpose llm ( like chatgpt ) .", "popular benchmark like [ bigbench ] ( http : //github.com/google/big-bench ) , [ mt-bench ] ( http : //arxiv.org/abs/2306.05685 ) , etc .", "* * * task-specific benchmark * * : task like summarization , translation , question answering dedicated benchmark , metric , even subdomains ( medical , financial , etc .", ") , [ pubmedqa ] ( http : //pubmedqa.github.io/ ) biomedical question answering .", "* * * human evaluation * * : reliable evaluation acceptance rate user comparison made human .", "want know model performs well , simplest surest way use .", "\ud83d\udcda * * reference * * : * [ perplexity fixed-length model ] ( http : //huggingface.co/docs/transformers/perplexity ) hugging face : overview perplexity code implement transformer library .", "* [ bleu risk ] ( http : //towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213 ) rachael tatman : overview bleu score many issue example .", "* [ survey evaluation llm ] ( http : //arxiv.org/abs/2307.03109 ) chang et al .", ": comprehensive paper evaluate , evaluate , evaluate .", "* [ chatbot arena leaderboard ] ( http : //huggingface.co/spaces/lmsys/chatbot-arena-leaderboard ) lmsys : elo rating general-purpose llm , based comparison made human .", "-- -" ] ], "level of complexity": 2 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "7. Quantization\n\nQuantization is the process of converting the weights (and activations) of a model using a lower precision. For example, weights stored using 16 bits can be converted into a 4-bit representation. This technique has become increasingly important to reduce the computational and memory costs associated with LLMs.\n\n* **Base techniques**: Learn the different levels of precision (FP32, FP16, INT8, etc.) and how to perform na\u00efve quantization with absmax and zero-point techniques.\n* **GGUF and llama.cpp**: Originally designed to run on CPUs, [llama.cpp](https://github.com/ggerganov/llama.cpp) and the GGUF format have become the most popular tools to run LLMs on consumer-grade hardware.\n* **GPTQ and EXL2**: [GPTQ](https://arxiv.org/abs/2210.17323) and, more specifically, the [EXL2](https://github.com/turboderp/exllamav2) format offer an incredible speed but can only run on GPUs. Models also take a long time to be quantized.\n* **AWQ**: This new format is more accurate than GPTQ (lower perplexity) but uses a lot more VRAM and is not necessarily faster.\n\n\ud83d\udcda **References**:\n* [Introduction to quantization](https://mlabonne.github.io/blog/posts/Introduction_to_Weight_Quantization.html): Overview of quantization, absmax and zero-point quantization, and LLM.int8() with code.\n* [Quantize Llama models with llama.cpp](https://mlabonne.github.io/blog/posts/Quantize_Llama_2_models_using_ggml.html): Tutorial on how to quantize a Llama 2 model using llama.cpp and the GGUF format.\n* [4-bit LLM Quantization with GPTQ](https://mlabonne.github.io/blog/posts/Introduction_to_Weight_Quantization.html): Tutorial on how to quantize an LLM using the GPTQ algorithm with AutoGPTQ.\n* [ExLlamaV2: The Fastest Library to Run LLMs](https://mlabonne.github.io/blog/posts/ExLlamaV2_The_Fastest_Library_to_Run%C2%A0LLMs.html): Guide on how to quantize a Mistral model using the EXL2 format and run it with the ExLlamaV2 library.\n* [Understanding Activation-Aware Weight Quantization](https://medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8) by FriendliAI: Overview of the AWQ technique and its benefits.\n\n---\n", "sentence": [ [ "7", ".", "quantization", "quantization", "process", "converting", "weight", "(", "activation", ")", "model", "using", "lower", "precision", ".", "example", ",", "weight", "stored", "using", "16", "bit", "converted", "4-bit", "representation", ".", "technique", "become", "increasingly", "important", "reduce", "computational", "memory", "cost", "associated", "llm", ".", "*", "*", "*", "base", "technique", "*", "*", ":", "learn", "different", "level", "precision", "(", "fp32", ",", "fp16", ",", "int8", ",", "etc", ".", ")", "perform", "na\u00efve", "quantization", "absmax", "zero-point", "technique", ".", "*", "*", "*", "gguf", "llama.cpp", "*", "*", ":", "originally", "designed", "run", "cpu", ",", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "gguf", "format", "become", "popular", "tool", "run", "llm", "consumer-grade", "hardware", ".", "*", "*", "*", "gptq", "exl2", "*", "*", ":", "[", "gptq", "]", "(", "http", ":", "//arxiv.org/abs/2210.17323", ")", ",", "specifically", ",", "[", "exl2", "]", "(", "http", ":", "//github.com/turboderp/exllamav2", ")", "format", "offer", "incredible", "speed", "run", "gpus", ".", "model", "also", "take", "long", "time", "quantized", ".", "*", "*", "*", "awq", "*", "*", ":", "new", "format", "accurate", "gptq", "(", "lower", "perplexity", ")", "us", "lot", "vram", "necessarily", "faster", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "introduction", "quantization", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "overview", "quantization", ",", "absmax", "zero-point", "quantization", ",", "llm.int8", "(", ")", "code", ".", "*", "[", "quantize", "llama", "model", "llama.cpp", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html", ")", ":", "tutorial", "quantize", "llama", "2", "model", "using", "llama.cpp", "gguf", "format", ".", "*", "[", "4-bit", "llm", "quantization", "gptq", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "tutorial", "quantize", "llm", "using", "gptq", "algorithm", "autogptq", ".", "*", "[", "exllamav2", ":", "fastest", "library", "run", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run", "%", "c2", "%", "a0llms.html", ")", ":", "guide", "quantize", "mistral", "model", "using", "exl2", "format", "run", "exllamav2", "library", ".", "*", "[", "understanding", "activation-aware", "weight", "quantization", "]", "(", "http", ":", "//medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8", ")", "friendliai", ":", "overview", "awq", "technique", "benefit", ".", "--", "-" ], [ "7 .", "quantization quantization process converting weight ( activation ) model using lower precision .", "example , weight stored using 16 bit converted 4-bit representation .", "technique become increasingly important reduce computational memory cost associated llm .", "* * * base technique * * : learn different level precision ( fp32 , fp16 , int8 , etc . )", "perform na\u00efve quantization absmax zero-point technique .", "* * * gguf llama.cpp * * : originally designed run cpu , [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) gguf format become popular tool run llm consumer-grade hardware .", "* * * gptq exl2 * * : [ gptq ] ( http : //arxiv.org/abs/2210.17323 ) , specifically , [ exl2 ] ( http : //github.com/turboderp/exllamav2 ) format offer incredible speed run gpus .", "model also take long time quantized .", "* * * awq * * : new format accurate gptq ( lower perplexity ) us lot vram necessarily faster .", "\ud83d\udcda * * reference * * : * [ introduction quantization ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : overview quantization , absmax zero-point quantization , llm.int8 ( ) code .", "* [ quantize llama model llama.cpp ] ( http : //mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html ) : tutorial quantize llama 2 model using llama.cpp gguf format .", "* [ 4-bit llm quantization gptq ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : tutorial quantize llm using gptq algorithm autogptq .", "* [ exllamav2 : fastest library run llm ] ( http : //mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run % c2 % a0llms.html ) : guide quantize mistral model using exl2 format run exllamav2 library .", "* [ understanding activation-aware weight quantization ] ( http : //medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8 ) friendliai : overview awq technique benefit .", "-- -" ] ], "token": [ [ "7", ".", "quantization", "quantization", "process", "converting", "weight", "(", "activation", ")", "model", "using", "lower", "precision", ".", "example", ",", "weight", "stored", "using", "16", "bit", "converted", "4-bit", "representation", ".", "technique", "become", "increasingly", "important", "reduce", "computational", "memory", "cost", "associated", "llm", ".", "*", "*", "*", "base", "technique", "*", "*", ":", "learn", "different", "level", "precision", "(", "fp32", ",", "fp16", ",", "int8", ",", "etc", ".", ")", "perform", "na\u00efve", "quantization", "absmax", "zero-point", "technique", ".", "*", "*", "*", "gguf", "llama.cpp", "*", "*", ":", "originally", "designed", "run", "cpu", ",", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "gguf", "format", "become", "popular", "tool", "run", "llm", "consumer-grade", "hardware", ".", "*", "*", "*", "gptq", "exl2", "*", "*", ":", "[", "gptq", "]", "(", "http", ":", "//arxiv.org/abs/2210.17323", ")", ",", "specifically", ",", "[", "exl2", "]", "(", "http", ":", "//github.com/turboderp/exllamav2", ")", "format", "offer", "incredible", "speed", "run", "gpus", ".", "model", "also", "take", "long", "time", "quantized", ".", "*", "*", "*", "awq", "*", "*", ":", "new", "format", "accurate", "gptq", "(", "lower", "perplexity", ")", "us", "lot", "vram", "necessarily", "faster", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "introduction", "quantization", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "overview", "quantization", ",", "absmax", "zero-point", "quantization", ",", "llm.int8", "(", ")", "code", ".", "*", "[", "quantize", "llama", "model", "llama.cpp", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html", ")", ":", "tutorial", "quantize", "llama", "2", "model", "using", "llama.cpp", "gguf", "format", ".", "*", "[", "4-bit", "llm", "quantization", "gptq", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "tutorial", "quantize", "llm", "using", "gptq", "algorithm", "autogptq", ".", "*", "[", "exllamav2", ":", "fastest", "library", "run", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run", "%", "c2", "%", "a0llms.html", ")", ":", "guide", "quantize", "mistral", "model", "using", "exl2", "format", "run", "exllamav2", "library", ".", "*", "[", "understanding", "activation-aware", "weight", "quantization", "]", "(", "http", ":", "//medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8", ")", "friendliai", ":", "overview", "awq", "technique", "benefit", ".", "--", "-" ], [ "7 .", "quantization quantization process converting weight ( activation ) model using lower precision .", "example , weight stored using 16 bit converted 4-bit representation .", "technique become increasingly important reduce computational memory cost associated llm .", "* * * base technique * * : learn different level precision ( fp32 , fp16 , int8 , etc . )", "perform na\u00efve quantization absmax zero-point technique .", "* * * gguf llama.cpp * * : originally designed run cpu , [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) gguf format become popular tool run llm consumer-grade hardware .", "* * * gptq exl2 * * : [ gptq ] ( http : //arxiv.org/abs/2210.17323 ) , specifically , [ exl2 ] ( http : //github.com/turboderp/exllamav2 ) format offer incredible speed run gpus .", "model also take long time quantized .", "* * * awq * * : new format accurate gptq ( lower perplexity ) us lot vram necessarily faster .", "\ud83d\udcda * * reference * * : * [ introduction quantization ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : overview quantization , absmax zero-point quantization , llm.int8 ( ) code .", "* [ quantize llama model llama.cpp ] ( http : //mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html ) : tutorial quantize llama 2 model using llama.cpp gguf format .", "* [ 4-bit llm quantization gptq ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : tutorial quantize llm using gptq algorithm autogptq .", "* [ exllamav2 : fastest library run llm ] ( http : //mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run % c2 % a0llms.html ) : guide quantize mistral model using exl2 format run exllamav2 library .", "* [ understanding activation-aware weight quantization ] ( http : //medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8 ) friendliai : overview awq technique benefit .", "-- -" ] ], "level of complexity": 2 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "\ud83d\udc77 The LLM Engineer\n\nThis section of the course focuses on learning how to build LLM-powered applications that can be used in production, with a focus on augmenting models and deploying them.\n\n![](img/roadmap_engineer.png)\n\n\n", "sentence": [ [ "\ud83d\udc77", "llm", "engineer", "section", "course", "focus", "learning", "build", "llm-powered", "application", "used", "production", ",", "focus", "augmenting", "model", "deploying", ".", "!", "[", "]", "(", "img/roadmap_engineer.png", ")" ], [ "\ud83d\udc77 llm engineer section course focus learning build llm-powered application used production , focus augmenting model deploying .", "!", "[ ] ( img/roadmap_engineer.png )" ] ], "token": [ [ "\ud83d\udc77", "llm", "engineer", "section", "course", "focus", "learning", "build", "llm-powered", "application", "used", "production", ",", "focus", "augmenting", "model", "deploying", ".", "!", "[", "]", "(", "img/roadmap_engineer.png", ")" ], [ "\ud83d\udc77 llm engineer section course focus learning build llm-powered application used production , focus augmenting model deploying .", "!", "[ ] ( img/roadmap_engineer.png )" ] ], "level of complexity": -1 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "1. Running LLMs\n\nRunning LLMs can be difficult due to high hardware requirements. Depending on your use case, you might want to simply consume a model through an API (like GPT-4) or run it locally. In any case, additional prompting and guidance techniques can improve and constrain the output for your applications.\n\n* **LLM APIs**: APIs are a convenient way to deploy LLMs. This space is divided between private LLMs ([OpenAI](https://platform.openai.com/), [Google](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview), [Anthropic](https://docs.anthropic.com/claude/reference/getting-started-with-the-api), [Cohere](https://docs.cohere.com/docs), etc.) and open-source LLMs ([OpenRouter](https://openrouter.ai/), [Hugging Face](https://huggingface.co/inference-api), [Together AI](https://www.together.ai/), etc.).\n* **Open-source LLMs**: The [Hugging Face Hub](https://huggingface.co/models) is a great place to find LLMs. You can directly run some of them in [Hugging Face Spaces](https://huggingface.co/spaces), or download and run them locally in apps like [LM Studio](https://lmstudio.ai/) or through the CLI with [llama.cpp](https://github.com/ggerganov/llama.cpp) or [Ollama](https://ollama.ai/).\n* **Prompt engineering**: Common techniques include zero-shot prompting, few-shot prompting, chain of thought, and ReAct. They work better with bigger models, but can be adapted to smaller ones.\n* **Structuring outputs**: Many tasks require a structured output, like a strict template or a JSON format. Libraries like [LMQL](https://lmql.ai/), [Outlines](https://github.com/outlines-dev/outlines), [Guidance](https://github.com/guidance-ai/guidance), etc. can be used to guide the generation and respect a given structure.\n\n\ud83d\udcda **References**:\n* [Run an LLM locally with LM Studio](https://www.kdnuggets.com/run-an-llm-locally-with-lm-studio) by Nisha Arya: Short guide on how to use LM Studio.\n* [Prompt engineering guide](https://www.promptingguide.ai/) by DAIR.AI: Exhaustive list of prompt techniques with examples\n* [Outlines - Quickstart](https://outlines-dev.github.io/outlines/quickstart/): List of guided generation techniques enabled by Outlines. \n* [LMQL - Overview](https://lmql.ai/docs/language/overview.html): Introduction to the LMQL language.\n\n---\n", "sentence": [ [ "1", ".", "running", "llm", "running", "llm", "difficult", "due", "high", "hardware", "requirement", ".", "depending", "use", "case", ",", "might", "want", "simply", "consume", "model", "api", "(", "like", "gpt-4", ")", "run", "locally", ".", "case", ",", "additional", "prompting", "guidance", "technique", "improve", "constrain", "output", "application", ".", "*", "*", "*", "llm", "apis", "*", "*", ":", "apis", "convenient", "way", "deploy", "llm", ".", "space", "divided", "private", "llm", "(", "[", "openai", "]", "(", "http", ":", "//platform.openai.com/", ")", ",", "[", "google", "]", "(", "http", ":", "//cloud.google.com/vertex-ai/docs/generative-ai/learn/overview", ")", ",", "[", "anthropic", "]", "(", "http", ":", "//docs.anthropic.com/claude/reference/getting-started-with-the-api", ")", ",", "[", "cohere", "]", "(", "http", ":", "//docs.cohere.com/docs", ")", ",", "etc", ".", ")", "open-source", "llm", "(", "[", "openrouter", "]", "(", "http", ":", "//openrouter.ai/", ")", ",", "[", "hugging", "face", "]", "(", "http", ":", "//huggingface.co/inference-api", ")", ",", "[", "together", "ai", "]", "(", "http", ":", "//www.together.ai/", ")", ",", "etc", ".", ")", ".", "*", "*", "*", "open-source", "llm", "*", "*", ":", "[", "hugging", "face", "hub", "]", "(", "http", ":", "//huggingface.co/models", ")", "great", "place", "find", "llm", ".", "directly", "run", "[", "hugging", "face", "space", "]", "(", "http", ":", "//huggingface.co/spaces", ")", ",", "download", "run", "locally", "apps", "like", "[", "lm", "studio", "]", "(", "http", ":", "//lmstudio.ai/", ")", "cli", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "[", "ollama", "]", "(", "http", ":", "//ollama.ai/", ")", ".", "*", "*", "*", "prompt", "engineering", "*", "*", ":", "common", "technique", "include", "zero-shot", "prompting", ",", "few-shot", "prompting", ",", "chain", "thought", ",", "react", ".", "work", "better", "bigger", "model", ",", "adapted", "smaller", "one", ".", "*", "*", "*", "structuring", "output", "*", "*", ":", "many", "task", "require", "structured", "output", ",", "like", "strict", "template", "json", "format", ".", "library", "like", "[", "lmql", "]", "(", "http", ":", "//lmql.ai/", ")", ",", "[", "outline", "]", "(", "http", ":", "//github.com/outlines-dev/outlines", ")", ",", "[", "guidance", "]", "(", "http", ":", "//github.com/guidance-ai/guidance", ")", ",", "etc", ".", "used", "guide", "generation", "respect", "given", "structure", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "run", "llm", "locally", "lm", "studio", "]", "(", "http", ":", "//www.kdnuggets.com/run-an-llm-locally-with-lm-studio", ")", "nisha", "arya", ":", "short", "guide", "use", "lm", "studio", ".", "*", "[", "prompt", "engineering", "guide", "]", "(", "http", ":", "//www.promptingguide.ai/", ")", "dair.ai", ":", "exhaustive", "list", "prompt", "technique", "example", "*", "[", "outline", "-", "quickstart", "]", "(", "http", ":", "//outlines-dev.github.io/outlines/quickstart/", ")", ":", "list", "guided", "generation", "technique", "enabled", "outline", ".", "*", "[", "lmql", "-", "overview", "]", "(", "http", ":", "//lmql.ai/docs/language/overview.html", ")", ":", "introduction", "lmql", "language", ".", "--", "-" ], [ "1 .", "running llm running llm difficult due high hardware requirement .", "depending use case , might want simply consume model api ( like gpt-4 ) run locally .", "case , additional prompting guidance technique improve constrain output application .", "* * * llm apis * * : apis convenient way deploy llm .", "space divided private llm ( [ openai ] ( http : //platform.openai.com/ ) , [ google ] ( http : //cloud.google.com/vertex-ai/docs/generative-ai/learn/overview ) , [ anthropic ] ( http : //docs.anthropic.com/claude/reference/getting-started-with-the-api ) , [ cohere ] ( http : //docs.cohere.com/docs ) , etc . )", "open-source llm ( [ openrouter ] ( http : //openrouter.ai/ ) , [ hugging face ] ( http : //huggingface.co/inference-api ) , [ together ai ] ( http : //www.together.ai/ ) , etc . ) .", "* * * open-source llm * * : [ hugging face hub ] ( http : //huggingface.co/models ) great place find llm .", "directly run [ hugging face space ] ( http : //huggingface.co/spaces ) , download run locally apps like [ lm studio ] ( http : //lmstudio.ai/ ) cli [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) [ ollama ] ( http : //ollama.ai/ ) .", "* * * prompt engineering * * : common technique include zero-shot prompting , few-shot prompting , chain thought , react .", "work better bigger model , adapted smaller one .", "* * * structuring output * * : many task require structured output , like strict template json format .", "library like [ lmql ] ( http : //lmql.ai/ ) , [ outline ] ( http : //github.com/outlines-dev/outlines ) , [ guidance ] ( http : //github.com/guidance-ai/guidance ) , etc .", "used guide generation respect given structure .", "\ud83d\udcda * * reference * * : * [ run llm locally lm studio ] ( http : //www.kdnuggets.com/run-an-llm-locally-with-lm-studio ) nisha arya : short guide use lm studio .", "* [ prompt engineering guide ] ( http : //www.promptingguide.ai/ ) dair.ai : exhaustive list prompt technique example * [ outline - quickstart ] ( http : //outlines-dev.github.io/outlines/quickstart/ ) : list guided generation technique enabled outline .", "* [ lmql - overview ] ( http : //lmql.ai/docs/language/overview.html ) : introduction lmql language .", "-- -" ] ], "token": [ [ "1", ".", "running", "llm", "running", "llm", "difficult", "due", "high", "hardware", "requirement", ".", "depending", "use", "case", ",", "might", "want", "simply", "consume", "model", "api", "(", "like", "gpt-4", ")", "run", "locally", ".", "case", ",", "additional", "prompting", "guidance", "technique", "improve", "constrain", "output", "application", ".", "*", "*", "*", "llm", "apis", "*", "*", ":", "apis", "convenient", "way", "deploy", "llm", ".", "space", "divided", "private", "llm", "(", "[", "openai", "]", "(", "http", ":", "//platform.openai.com/", ")", ",", "[", "google", "]", "(", "http", ":", "//cloud.google.com/vertex-ai/docs/generative-ai/learn/overview", ")", ",", "[", "anthropic", "]", "(", "http", ":", "//docs.anthropic.com/claude/reference/getting-started-with-the-api", ")", ",", "[", "cohere", "]", "(", "http", ":", "//docs.cohere.com/docs", ")", ",", "etc", ".", ")", "open-source", "llm", "(", "[", "openrouter", "]", "(", "http", ":", "//openrouter.ai/", ")", ",", "[", "hugging", "face", "]", "(", "http", ":", "//huggingface.co/inference-api", ")", ",", "[", "together", "ai", "]", "(", "http", ":", "//www.together.ai/", ")", ",", "etc", ".", ")", ".", "*", "*", "*", "open-source", "llm", "*", "*", ":", "[", "hugging", "face", "hub", "]", "(", "http", ":", "//huggingface.co/models", ")", "great", "place", "find", "llm", ".", "directly", "run", "[", "hugging", "face", "space", "]", "(", "http", ":", "//huggingface.co/spaces", ")", ",", "download", "run", "locally", "apps", "like", "[", "lm", "studio", "]", "(", "http", ":", "//lmstudio.ai/", ")", "cli", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "[", "ollama", "]", "(", "http", ":", "//ollama.ai/", ")", ".", "*", "*", "*", "prompt", "engineering", "*", "*", ":", "common", "technique", "include", "zero-shot", "prompting", ",", "few-shot", "prompting", ",", "chain", "thought", ",", "react", ".", "work", "better", "bigger", "model", ",", "adapted", "smaller", "one", ".", "*", "*", "*", "structuring", "output", "*", "*", ":", "many", "task", "require", "structured", "output", ",", "like", "strict", "template", "json", "format", ".", "library", "like", "[", "lmql", "]", "(", "http", ":", "//lmql.ai/", ")", ",", "[", "outline", "]", "(", "http", ":", "//github.com/outlines-dev/outlines", ")", ",", "[", "guidance", "]", "(", "http", ":", "//github.com/guidance-ai/guidance", ")", ",", "etc", ".", "used", "guide", "generation", "respect", "given", "structure", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "run", "llm", "locally", "lm", "studio", "]", "(", "http", ":", "//www.kdnuggets.com/run-an-llm-locally-with-lm-studio", ")", "nisha", "arya", ":", "short", "guide", "use", "lm", "studio", ".", "*", "[", "prompt", "engineering", "guide", "]", "(", "http", ":", "//www.promptingguide.ai/", ")", "dair.ai", ":", "exhaustive", "list", "prompt", "technique", "example", "*", "[", "outline", "-", "quickstart", "]", "(", "http", ":", "//outlines-dev.github.io/outlines/quickstart/", ")", ":", "list", "guided", "generation", "technique", "enabled", "outline", ".", "*", "[", "lmql", "-", "overview", "]", "(", "http", ":", "//lmql.ai/docs/language/overview.html", ")", ":", "introduction", "lmql", "language", ".", "--", "-" ], [ "1 .", "running llm running llm difficult due high hardware requirement .", "depending use case , might want simply consume model api ( like gpt-4 ) run locally .", "case , additional prompting guidance technique improve constrain output application .", "* * * llm apis * * : apis convenient way deploy llm .", "space divided private llm ( [ openai ] ( http : //platform.openai.com/ ) , [ google ] ( http : //cloud.google.com/vertex-ai/docs/generative-ai/learn/overview ) , [ anthropic ] ( http : //docs.anthropic.com/claude/reference/getting-started-with-the-api ) , [ cohere ] ( http : //docs.cohere.com/docs ) , etc . )", "open-source llm ( [ openrouter ] ( http : //openrouter.ai/ ) , [ hugging face ] ( http : //huggingface.co/inference-api ) , [ together ai ] ( http : //www.together.ai/ ) , etc . ) .", "* * * open-source llm * * : [ hugging face hub ] ( http : //huggingface.co/models ) great place find llm .", "directly run [ hugging face space ] ( http : //huggingface.co/spaces ) , download run locally apps like [ lm studio ] ( http : //lmstudio.ai/ ) cli [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) [ ollama ] ( http : //ollama.ai/ ) .", "* * * prompt engineering * * : common technique include zero-shot prompting , few-shot prompting , chain thought , react .", "work better bigger model , adapted smaller one .", "* * * structuring output * * : many task require structured output , like strict template json format .", "library like [ lmql ] ( http : //lmql.ai/ ) , [ outline ] ( http : //github.com/outlines-dev/outlines ) , [ guidance ] ( http : //github.com/guidance-ai/guidance ) , etc .", "used guide generation respect given structure .", "\ud83d\udcda * * reference * * : * [ run llm locally lm studio ] ( http : //www.kdnuggets.com/run-an-llm-locally-with-lm-studio ) nisha arya : short guide use lm studio .", "* [ prompt engineering guide ] ( http : //www.promptingguide.ai/ ) dair.ai : exhaustive list prompt technique example * [ outline - quickstart ] ( http : //outlines-dev.github.io/outlines/quickstart/ ) : list guided generation technique enabled outline .", "* [ lmql - overview ] ( http : //lmql.ai/docs/language/overview.html ) : introduction lmql language .", "-- -" ] ], "level of complexity": 2 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "4. Advanced RAG\n\nReal-life applications can require complex pipelines, including SQL or graph databases, as well as automatically selecting relevant tools and APIs. These advanced techniques can improve a baseline solution and provide additional features.\n\n* **Query construction**: Structured data stored in traditional databases requires a specific query language like SQL, Cypher, metadata, etc. We can directly translate the user instruction into a query to access the data with query construction.\n* **Agents and tools**: Agents augment LLMs by automatically selecting the most relevant tools to provide an answer. These tools can be as simple as using Google or Wikipedia, or more complex like a Python interpreter or Jira. \n* **Post-processing**: Final step that processes the inputs that are fed to the LLM. It enhances the relevance and diversity of documents retrieved with re-ranking, [RAG-fusion](https://github.com/Raudaschl/rag-fusion), and classification.\n\n\ud83d\udcda **References**:\n* [LangChain - Query Construction](https://blog.langchain.dev/query-construction/): Blog post about different types of query construction.\n* [LangChain - SQL](https://python.langchain.com/docs/use_cases/qa_structured/sql): Tutorial on how to interact with SQL databases with LLMs, involving Text-to-SQL and an optional SQL agent.\n* [Pinecone - LLM agents](https://www.pinecone.io/learn/series/langchain/langchain-agents/): Introduction to agents and tools with different types.\n* [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) by Lilian Weng: More theoretical article about LLM agents.\n* [LangChain - OpenAI's RAG](https://blog.langchain.dev/applying-openai-rag/): Overview of the RAG strategies employed by OpenAI, including post-processing.\n\n---\n", "sentence": [ [ "4", ".", "advanced", "rag", "real-life", "application", "require", "complex", "pipeline", ",", "including", "sql", "graph", "database", ",", "well", "automatically", "selecting", "relevant", "tool", "apis", ".", "advanced", "technique", "improve", "baseline", "solution", "provide", "additional", "feature", ".", "*", "*", "*", "query", "construction", "*", "*", ":", "structured", "data", "stored", "traditional", "database", "requires", "specific", "query", "language", "like", "sql", ",", "cypher", ",", "metadata", ",", "etc", ".", "directly", "translate", "user", "instruction", "query", "access", "data", "query", "construction", ".", "*", "*", "*", "agent", "tool", "*", "*", ":", "agent", "augment", "llm", "automatically", "selecting", "relevant", "tool", "provide", "answer", ".", "tool", "simple", "using", "google", "wikipedia", ",", "complex", "like", "python", "interpreter", "jira", ".", "*", "*", "*", "post-processing", "*", "*", ":", "final", "step", "process", "input", "fed", "llm", ".", "enhances", "relevance", "diversity", "document", "retrieved", "re-ranking", ",", "[", "rag-fusion", "]", "(", "http", ":", "//github.com/raudaschl/rag-fusion", ")", ",", "classification", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "langchain", "-", "query", "construction", "]", "(", "http", ":", "//blog.langchain.dev/query-construction/", ")", ":", "blog", "post", "different", "type", "query", "construction", ".", "*", "[", "langchain", "-", "sql", "]", "(", "http", ":", "//python.langchain.com/docs/use_cases/qa_structured/sql", ")", ":", "tutorial", "interact", "sql", "database", "llm", ",", "involving", "text-to-sql", "optional", "sql", "agent", ".", "*", "[", "pinecone", "-", "llm", "agent", "]", "(", "http", ":", "//www.pinecone.io/learn/series/langchain/langchain-agents/", ")", ":", "introduction", "agent", "tool", "different", "type", ".", "*", "[", "llm", "powered", "autonomous", "agent", "]", "(", "http", ":", "//lilianweng.github.io/posts/2023-06-23-agent/", ")", "lilian", "weng", ":", "theoretical", "article", "llm", "agent", ".", "*", "[", "langchain", "-", "openai", "'s", "rag", "]", "(", "http", ":", "//blog.langchain.dev/applying-openai-rag/", ")", ":", "overview", "rag", "strategy", "employed", "openai", ",", "including", "post-processing", ".", "--", "-" ], [ "4 .", "advanced rag real-life application require complex pipeline , including sql graph database , well automatically selecting relevant tool apis .", "advanced technique improve baseline solution provide additional feature .", "* * * query construction * * : structured data stored traditional database requires specific query language like sql , cypher , metadata , etc .", "directly translate user instruction query access data query construction .", "* * * agent tool * * : agent augment llm automatically selecting relevant tool provide answer .", "tool simple using google wikipedia , complex like python interpreter jira .", "* * * post-processing * * : final step process input fed llm .", "enhances relevance diversity document retrieved re-ranking , [ rag-fusion ] ( http : //github.com/raudaschl/rag-fusion ) , classification .", "\ud83d\udcda * * reference * * : * [ langchain - query construction ] ( http : //blog.langchain.dev/query-construction/ ) : blog post different type query construction .", "* [ langchain - sql ] ( http : //python.langchain.com/docs/use_cases/qa_structured/sql ) : tutorial interact sql database llm , involving text-to-sql optional sql agent .", "* [ pinecone - llm agent ] ( http : //www.pinecone.io/learn/series/langchain/langchain-agents/ ) : introduction agent tool different type .", "* [ llm powered autonomous agent ] ( http : //lilianweng.github.io/posts/2023-06-23-agent/ ) lilian weng : theoretical article llm agent .", "* [ langchain - openai 's rag ] ( http : //blog.langchain.dev/applying-openai-rag/ ) : overview rag strategy employed openai , including post-processing .", "-- -" ] ], "token": [ [ "4", ".", "advanced", "rag", "real-life", "application", "require", "complex", "pipeline", ",", "including", "sql", "graph", "database", ",", "well", "automatically", "selecting", "relevant", "tool", "apis", ".", "advanced", "technique", "improve", "baseline", "solution", "provide", "additional", "feature", ".", "*", "*", "*", "query", "construction", "*", "*", ":", "structured", "data", "stored", "traditional", "database", "requires", "specific", "query", "language", "like", "sql", ",", "cypher", ",", "metadata", ",", "etc", ".", "directly", "translate", "user", "instruction", "query", "access", "data", "query", "construction", ".", "*", "*", "*", "agent", "tool", "*", "*", ":", "agent", "augment", "llm", "automatically", "selecting", "relevant", "tool", "provide", "answer", ".", "tool", "simple", "using", "google", "wikipedia", ",", "complex", "like", "python", "interpreter", "jira", ".", "*", "*", "*", "post-processing", "*", "*", ":", "final", "step", "process", "input", "fed", "llm", ".", "enhances", "relevance", "diversity", "document", "retrieved", "re-ranking", ",", "[", "rag-fusion", "]", "(", "http", ":", "//github.com/raudaschl/rag-fusion", ")", ",", "classification", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "langchain", "-", "query", "construction", "]", "(", "http", ":", "//blog.langchain.dev/query-construction/", ")", ":", "blog", "post", "different", "type", "query", "construction", ".", "*", "[", "langchain", "-", "sql", "]", "(", "http", ":", "//python.langchain.com/docs/use_cases/qa_structured/sql", ")", ":", "tutorial", "interact", "sql", "database", "llm", ",", "involving", "text-to-sql", "optional", "sql", "agent", ".", "*", "[", "pinecone", "-", "llm", "agent", "]", "(", "http", ":", "//www.pinecone.io/learn/series/langchain/langchain-agents/", ")", ":", "introduction", "agent", "tool", "different", "type", ".", "*", "[", "llm", "powered", "autonomous", "agent", "]", "(", "http", ":", "//lilianweng.github.io/posts/2023-06-23-agent/", ")", "lilian", "weng", ":", "theoretical", "article", "llm", "agent", ".", "*", "[", "langchain", "-", "openai", "'s", "rag", "]", "(", "http", ":", "//blog.langchain.dev/applying-openai-rag/", ")", ":", "overview", "rag", "strategy", "employed", "openai", ",", "including", "post-processing", ".", "--", "-" ], [ "4 .", "advanced rag real-life application require complex pipeline , including sql graph database , well automatically selecting relevant tool apis .", "advanced technique improve baseline solution provide additional feature .", "* * * query construction * * : structured data stored traditional database requires specific query language like sql , cypher , metadata , etc .", "directly translate user instruction query access data query construction .", "* * * agent tool * * : agent augment llm automatically selecting relevant tool provide answer .", "tool simple using google wikipedia , complex like python interpreter jira .", "* * * post-processing * * : final step process input fed llm .", "enhances relevance diversity document retrieved re-ranking , [ rag-fusion ] ( http : //github.com/raudaschl/rag-fusion ) , classification .", "\ud83d\udcda * * reference * * : * [ langchain - query construction ] ( http : //blog.langchain.dev/query-construction/ ) : blog post different type query construction .", "* [ langchain - sql ] ( http : //python.langchain.com/docs/use_cases/qa_structured/sql ) : tutorial interact sql database llm , involving text-to-sql optional sql agent .", "* [ pinecone - llm agent ] ( http : //www.pinecone.io/learn/series/langchain/langchain-agents/ ) : introduction agent tool different type .", "* [ llm powered autonomous agent ] ( http : //lilianweng.github.io/posts/2023-06-23-agent/ ) lilian weng : theoretical article llm agent .", "* [ langchain - openai 's rag ] ( http : //blog.langchain.dev/applying-openai-rag/ ) : overview rag strategy employed openai , including post-processing .", "-- -" ] ], "level of complexity": 2 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "5. Inference optimization\n\nText generation is a costly process that requires expensive hardware. In addition to quantization, various techniques have been proposed to maximize throughput and reduce inference costs.\n\n* **Flash Attention**: Optimization of the attention mechanism to transform its complexity from quadratic to linear, speeding up both training and inference.\n* **Key-value cache**: Understand the key-value cache and the improvements introduced in [Multi-Query Attention](https://arxiv.org/abs/1911.02150) (MQA) and [Grouped-Query Attention](https://arxiv.org/abs/2305.13245) (GQA).\n* **Speculative decoding**: Use a small model to produce drafts that are then reviewed by a larger model to speed up text generation.\n\n\ud83d\udcda **References**:\n* [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one) by Hugging Face: Explain how to optimize inference on GPUs.\n* [LLM Inference](https://www.databricks.com/blog/llm-inference-performance-engineering-best-practices) by Databricks: Best practices for how to optimize LLM inference in production.\n* [Optimizing LLMs for Speed and Memory](https://huggingface.co/docs/transformers/main/en/llm_tutorial_optimization) by Hugging Face: Explain three main techniques to optimize speed and memory, namely quantization, Flash Attention, and architectural innovations.\n* [Assisted Generation](https://huggingface.co/blog/assisted-generation) by Hugging Face: HF's version of speculative decoding, it's an interesting blog post about how it works with code to implement it.\n\n---\n", "sentence": [ [ "5", ".", "inference", "optimization", "text", "generation", "costly", "process", "requires", "expensive", "hardware", ".", "addition", "quantization", ",", "various", "technique", "proposed", "maximize", "throughput", "reduce", "inference", "cost", ".", "*", "*", "*", "flash", "attention", "*", "*", ":", "optimization", "attention", "mechanism", "transform", "complexity", "quadratic", "linear", ",", "speeding", "training", "inference", ".", "*", "*", "*", "key-value", "cache", "*", "*", ":", "understand", "key-value", "cache", "improvement", "introduced", "[", "multi-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/1911.02150", ")", "(", "mqa", ")", "[", "grouped-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/2305.13245", ")", "(", "gqa", ")", ".", "*", "*", "*", "speculative", "decoding", "*", "*", ":", "use", "small", "model", "produce", "draft", "reviewed", "larger", "model", "speed", "text", "generation", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "gpu", "inference", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/perf_infer_gpu_one", ")", "hugging", "face", ":", "explain", "optimize", "inference", "gpus", ".", "*", "[", "llm", "inference", "]", "(", "http", ":", "//www.databricks.com/blog/llm-inference-performance-engineering-best-practices", ")", "databricks", ":", "best", "practice", "optimize", "llm", "inference", "production", ".", "*", "[", "optimizing", "llm", "speed", "memory", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/llm_tutorial_optimization", ")", "hugging", "face", ":", "explain", "three", "main", "technique", "optimize", "speed", "memory", ",", "namely", "quantization", ",", "flash", "attention", ",", "architectural", "innovation", ".", "*", "[", "assisted", "generation", "]", "(", "http", ":", "//huggingface.co/blog/assisted-generation", ")", "hugging", "face", ":", "hf", "'s", "version", "speculative", "decoding", ",", "'s", "interesting", "blog", "post", "work", "code", "implement", ".", "--", "-" ], [ "5 .", "inference optimization text generation costly process requires expensive hardware .", "addition quantization , various technique proposed maximize throughput reduce inference cost .", "* * * flash attention * * : optimization attention mechanism transform complexity quadratic linear , speeding training inference .", "* * * key-value cache * * : understand key-value cache improvement introduced [ multi-query attention ] ( http : //arxiv.org/abs/1911.02150 ) ( mqa ) [ grouped-query attention ] ( http : //arxiv.org/abs/2305.13245 ) ( gqa ) .", "* * * speculative decoding * * : use small model produce draft reviewed larger model speed text generation .", "\ud83d\udcda * * reference * * : * [ gpu inference ] ( http : //huggingface.co/docs/transformers/main/en/perf_infer_gpu_one ) hugging face : explain optimize inference gpus .", "* [ llm inference ] ( http : //www.databricks.com/blog/llm-inference-performance-engineering-best-practices ) databricks : best practice optimize llm inference production .", "* [ optimizing llm speed memory ] ( http : //huggingface.co/docs/transformers/main/en/llm_tutorial_optimization ) hugging face : explain three main technique optimize speed memory , namely quantization , flash attention , architectural innovation .", "* [ assisted generation ] ( http : //huggingface.co/blog/assisted-generation ) hugging face : hf 's version speculative decoding , 's interesting blog post work code implement .", "-- -" ] ], "token": [ [ "5", ".", "inference", "optimization", "text", "generation", "costly", "process", "requires", "expensive", "hardware", ".", "addition", "quantization", ",", "various", "technique", "proposed", "maximize", "throughput", "reduce", "inference", "cost", ".", "*", "*", "*", "flash", "attention", "*", "*", ":", "optimization", "attention", "mechanism", "transform", "complexity", "quadratic", "linear", ",", "speeding", "training", "inference", ".", "*", "*", "*", "key-value", "cache", "*", "*", ":", "understand", "key-value", "cache", "improvement", "introduced", "[", "multi-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/1911.02150", ")", "(", "mqa", ")", "[", "grouped-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/2305.13245", ")", "(", "gqa", ")", ".", "*", "*", "*", "speculative", "decoding", "*", "*", ":", "use", "small", "model", "produce", "draft", "reviewed", "larger", "model", "speed", "text", "generation", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "gpu", "inference", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/perf_infer_gpu_one", ")", "hugging", "face", ":", "explain", "optimize", "inference", "gpus", ".", "*", "[", "llm", "inference", "]", "(", "http", ":", "//www.databricks.com/blog/llm-inference-performance-engineering-best-practices", ")", "databricks", ":", "best", "practice", "optimize", "llm", "inference", "production", ".", "*", "[", "optimizing", "llm", "speed", "memory", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/llm_tutorial_optimization", ")", "hugging", "face", ":", "explain", "three", "main", "technique", "optimize", "speed", "memory", ",", "namely", "quantization", ",", "flash", "attention", ",", "architectural", "innovation", ".", "*", "[", "assisted", "generation", "]", "(", "http", ":", "//huggingface.co/blog/assisted-generation", ")", "hugging", "face", ":", "hf", "'s", "version", "speculative", "decoding", ",", "'s", "interesting", "blog", "post", "work", "code", "implement", ".", "--", "-" ], [ "5 .", "inference optimization text generation costly process requires expensive hardware .", "addition quantization , various technique proposed maximize throughput reduce inference cost .", "* * * flash attention * * : optimization attention mechanism transform complexity quadratic linear , speeding training inference .", "* * * key-value cache * * : understand key-value cache improvement introduced [ multi-query attention ] ( http : //arxiv.org/abs/1911.02150 ) ( mqa ) [ grouped-query attention ] ( http : //arxiv.org/abs/2305.13245 ) ( gqa ) .", "* * * speculative decoding * * : use small model produce draft reviewed larger model speed text generation .", "\ud83d\udcda * * reference * * : * [ gpu inference ] ( http : //huggingface.co/docs/transformers/main/en/perf_infer_gpu_one ) hugging face : explain optimize inference gpus .", "* [ llm inference ] ( http : //www.databricks.com/blog/llm-inference-performance-engineering-best-practices ) databricks : best practice optimize llm inference production .", "* [ optimizing llm speed memory ] ( http : //huggingface.co/docs/transformers/main/en/llm_tutorial_optimization ) hugging face : explain three main technique optimize speed memory , namely quantization , flash attention , architectural innovation .", "* [ assisted generation ] ( http : //huggingface.co/blog/assisted-generation ) hugging face : hf 's version speculative decoding , 's interesting blog post work code implement .", "-- -" ] ], "level of complexity": -1 }, { "url": "https://github.com/mlabonne/llm-course", "readme_url": "https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md", "topic": [ "course", "large-language-models", "llm", "machine-learning", "roadmap" ], "text": "7. Securing LLMs\n\nIn addition to traditional security problems associated with software, LLMs have unique weaknesses due to the way they are trained and prompted.\n\n* **Prompt hacking**: Different techniques related to prompt engineering, including prompt injection (additional instruction to hijack the model's answer), data/prompt leaking (retrieve its original data/prompt), and jailbreaking (craft prompts to bypass safety features).\n* **Backdoors**: Attack vectors can target the training data itself, by poisoning the training data (e.g., with false information) or creating backdoors (secret triggers to change the model's behavior during inference).\n* **Defensive measures**: The best way to protect your LLM applications is to test them against these vulnerabilities (e.g., using red teaming and checks like [garak](https://github.com/leondz/garak/)) and observe them in production (with a framework like [langfuse](https://github.com/langfuse/langfuse)).\n\n\ud83d\udcda **References**:\n* [OWASP LLM Top 10](https://owasp.org/www-project-top-10-for-large-language-model-applications/) by HEGO Wiki: List of the 10 most critic vulnerabilities seen in LLM applications.\n* [Prompt Injection Primer](https://github.com/jthack/PIPE) by Joseph Thacker: Short guide dedicated to prompt injection for engineers.\n* [LLM Security](https://llmsecurity.net/) by [@llm_sec](https://twitter.com/llm_sec): Extensive list of resources related to LLM security.\n* [Red teaming LLMs](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming) by Microsoft: Guide on how to perform red teaming with LLMs.\n---\n", "sentence": [ [ "7", ".", "securing", "llm", "addition", "traditional", "security", "problem", "associated", "software", ",", "llm", "unique", "weakness", "due", "way", "trained", "prompted", ".", "*", "*", "*", "prompt", "hacking", "*", "*", ":", "different", "technique", "related", "prompt", "engineering", ",", "including", "prompt", "injection", "(", "additional", "instruction", "hijack", "model", "'s", "answer", ")", ",", "data/prompt", "leaking", "(", "retrieve", "original", "data/prompt", ")", ",", "jailbreaking", "(", "craft", "prompt", "bypass", "safety", "feature", ")", ".", "*", "*", "*", "backdoor", "*", "*", ":", "attack", "vector", "target", "training", "data", ",", "poisoning", "training", "data", "(", "e.g.", ",", "false", "information", ")", "creating", "backdoor", "(", "secret", "trigger", "change", "model", "'s", "behavior", "inference", ")", ".", "*", "*", "*", "defensive", "measure", "*", "*", ":", "best", "way", "protect", "llm", "application", "test", "vulnerability", "(", "e.g.", ",", "using", "red", "teaming", "check", "like", "[", "garak", "]", "(", "http", ":", "//github.com/leondz/garak/", ")", ")", "observe", "production", "(", "framework", "like", "[", "langfuse", "]", "(", "http", ":", "//github.com/langfuse/langfuse", ")", ")", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "owasp", "llm", "top", "10", "]", "(", "http", ":", "//owasp.org/www-project-top-10-for-large-language-model-applications/", ")", "hego", "wiki", ":", "list", "10", "critic", "vulnerability", "seen", "llm", "application", ".", "*", "[", "prompt", "injection", "primer", "]", "(", "http", ":", "//github.com/jthack/pipe", ")", "joseph", "thacker", ":", "short", "guide", "dedicated", "prompt", "injection", "engineer", ".", "*", "[", "llm", "security", "]", "(", "http", ":", "//llmsecurity.net/", ")", "[", "@", "llm_sec", "]", "(", "http", ":", "//twitter.com/llm_sec", ")", ":", "extensive", "list", "resource", "related", "llm", "security", ".", "*", "[", "red", "teaming", "llm", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming", ")", "microsoft", ":", "guide", "perform", "red", "teaming", "llm", ".", "--", "-" ], [ "7 .", "securing llm addition traditional security problem associated software , llm unique weakness due way trained prompted .", "* * * prompt hacking * * : different technique related prompt engineering , including prompt injection ( additional instruction hijack model 's answer ) , data/prompt leaking ( retrieve original data/prompt ) , jailbreaking ( craft prompt bypass safety feature ) .", "* * * backdoor * * : attack vector target training data , poisoning training data ( e.g. , false information ) creating backdoor ( secret trigger change model 's behavior inference ) .", "* * * defensive measure * * : best way protect llm application test vulnerability ( e.g. , using red teaming check like [ garak ] ( http : //github.com/leondz/garak/ ) ) observe production ( framework like [ langfuse ] ( http : //github.com/langfuse/langfuse ) ) .", "\ud83d\udcda * * reference * * : * [ owasp llm top 10 ] ( http : //owasp.org/www-project-top-10-for-large-language-model-applications/ ) hego wiki : list 10 critic vulnerability seen llm application .", "* [ prompt injection primer ] ( http : //github.com/jthack/pipe ) joseph thacker : short guide dedicated prompt injection engineer .", "* [ llm security ] ( http : //llmsecurity.net/ ) [ @ llm_sec ] ( http : //twitter.com/llm_sec ) : extensive list resource related llm security .", "* [ red teaming llm ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming ) microsoft : guide perform red teaming llm .", "-- -" ] ], "token": [ [ "7", ".", "securing", "llm", "addition", "traditional", "security", "problem", "associated", "software", ",", "llm", "unique", "weakness", "due", "way", "trained", "prompted", ".", "*", "*", "*", "prompt", "hacking", "*", "*", ":", "different", "technique", "related", "prompt", "engineering", ",", "including", "prompt", "injection", "(", "additional", "instruction", "hijack", "model", "'s", "answer", ")", ",", "data/prompt", "leaking", "(", "retrieve", "original", "data/prompt", ")", ",", "jailbreaking", "(", "craft", "prompt", "bypass", "safety", "feature", ")", ".", "*", "*", "*", "backdoor", "*", "*", ":", "attack", "vector", "target", "training", "data", ",", "poisoning", "training", "data", "(", "e.g.", ",", "false", "information", ")", "creating", "backdoor", "(", "secret", "trigger", "change", "model", "'s", "behavior", "inference", ")", ".", "*", "*", "*", "defensive", "measure", "*", "*", ":", "best", "way", "protect", "llm", "application", "test", "vulnerability", "(", "e.g.", ",", "using", "red", "teaming", "check", "like", "[", "garak", "]", "(", "http", ":", "//github.com/leondz/garak/", ")", ")", "observe", "production", "(", "framework", "like", "[", "langfuse", "]", "(", "http", ":", "//github.com/langfuse/langfuse", ")", ")", ".", "\ud83d\udcda", "*", "*", "reference", "*", "*", ":", "*", "[", "owasp", "llm", "top", "10", "]", "(", "http", ":", "//owasp.org/www-project-top-10-for-large-language-model-applications/", ")", "hego", "wiki", ":", "list", "10", "critic", "vulnerability", "seen", "llm", "application", ".", "*", "[", "prompt", "injection", "primer", "]", "(", "http", ":", "//github.com/jthack/pipe", ")", "joseph", "thacker", ":", "short", "guide", "dedicated", "prompt", "injection", "engineer", ".", "*", "[", "llm", "security", "]", "(", "http", ":", "//llmsecurity.net/", ")", "[", "@", "llm_sec", "]", "(", "http", ":", "//twitter.com/llm_sec", ")", ":", "extensive", "list", "resource", "related", "llm", "security", ".", "*", "[", "red", "teaming", "llm", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming", ")", "microsoft", ":", "guide", "perform", "red", "teaming", "llm", ".", "--", "-" ], [ "7 .", "securing llm addition traditional security problem associated software , llm unique weakness due way trained prompted .", "* * * prompt hacking * * : different technique related prompt engineering , including prompt injection ( additional instruction hijack model 's answer ) , data/prompt leaking ( retrieve original data/prompt ) , jailbreaking ( craft prompt bypass safety feature ) .", "* * * backdoor * * : attack vector target training data , poisoning training data ( e.g. , false information ) creating backdoor ( secret trigger change model 's behavior inference ) .", "* * * defensive measure * * : best way protect llm application test vulnerability ( e.g. , using red teaming check like [ garak ] ( http : //github.com/leondz/garak/ ) ) observe production ( framework like [ langfuse ] ( http : //github.com/langfuse/langfuse ) ) .", "\ud83d\udcda * * reference * * : * [ owasp llm top 10 ] ( http : //owasp.org/www-project-top-10-for-large-language-model-applications/ ) hego wiki : list 10 critic vulnerability seen llm application .", "* [ prompt injection primer ] ( http : //github.com/jthack/pipe ) joseph thacker : short guide dedicated prompt injection engineer .", "* [ llm security ] ( http : //llmsecurity.net/ ) [ @ llm_sec ] ( http : //twitter.com/llm_sec ) : extensive list resource related llm security .", "* [ red teaming llm ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming ) microsoft : guide perform red teaming llm .", "-- -" ] ], "level of complexity": -1 }, { "url": "https://github.com/FlowiseAI/Flowise", "readme_url": "https://raw.githubusercontent.com/FlowiseAI/Flowise/main/README.md", "topic": [ "artificial-intelligence", "chatgpt", "javascript", "large-language-models", "llm", "low-code", "no-code", "react", "typescript" ], "text": "\u26a1Quick Start\n\nDownload and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0\n\n1. Install Flowise\n ```bash\n npm install -g flowise\n ```\n2. Start Flowise\n\n ```bash\n npx flowise start\n ```\n\n With username & password\n\n ```bash\n npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234\n ```\n\n3. Open [http://localhost:3000](http://localhost:3000)\n\n", "sentence": [ [ "\u26a1quick", "start", "download", "install", "[", "nodejs", "]", "(", "http", ":", "//nodejs.org/en/download", ")", ">", "=", "18.15.0", "1", ".", "install", "flowise", "``", "`", "bash", "npm", "install", "-g", "flowise", "``", "`", "2", ".", "start", "flowise", "``", "`", "bash", "npx", "flowise", "start", "``", "`", "username", "&", "password", "``", "`", "bash", "npx", "flowise", "start", "--", "flowise_username=user", "--", "flowise_password=1234", "``", "`", "3", ".", "open", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")" ], [ "\u26a1quick start download install [ nodejs ] ( http : //nodejs.org/en/download ) > = 18.15.0 1 .", "install flowise `` ` bash npm install -g flowise `` ` 2 .", "start flowise `` ` bash npx flowise start `` ` username & password `` ` bash npx flowise start -- flowise_username=user -- flowise_password=1234 `` ` 3 .", "open [ http : //localhost:3000 ] ( http : //localhost:3000 )" ] ], "token": [ [ "\u26a1quick", "start", "download", "install", "[", "nodejs", "]", "(", "http", ":", "//nodejs.org/en/download", ")", ">", "=", "18.15.0", "1", ".", "install", "flowise", "``", "`", "bash", "npm", "install", "-g", "flowise", "``", "`", "2", ".", "start", "flowise", "``", "`", "bash", "npx", "flowise", "start", "``", "`", "username", "&", "password", "``", "`", "bash", "npx", "flowise", "start", "--", "flowise_username=user", "--", "flowise_password=1234", "``", "`", "3", ".", "open", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")" ], [ "\u26a1quick start download install [ nodejs ] ( http : //nodejs.org/en/download ) > = 18.15.0 1 .", "install flowise `` ` bash npm install -g flowise `` ` 2 .", "start flowise `` ` bash npx flowise start `` ` username & password `` ` bash npx flowise start -- flowise_username=user -- flowise_password=1234 `` ` 3 .", "open [ http : //localhost:3000 ] ( http : //localhost:3000 )" ] ], "level of complexity": -1 }, { "url": "https://github.com/FlowiseAI/Flowise", "readme_url": "https://raw.githubusercontent.com/FlowiseAI/Flowise/main/README.md", "topic": [ "artificial-intelligence", "chatgpt", "javascript", "large-language-models", "llm", "low-code", "no-code", "react", "typescript" ], "text": "Prerequisite\n\n- Install [Yarn v1](https://classic.yarnpkg.com/en/docs/install)\n ```bash\n npm i -g yarn\n ```\n\n", "sentence": [ [ "prerequisite", "-", "install", "[", "yarn", "v1", "]", "(", "http", ":", "//classic.yarnpkg.com/en/docs/install", ")", "``", "`", "bash", "npm", "-g", "yarn", "``", "`" ], [ "prerequisite - install [ yarn v1 ] ( http : //classic.yarnpkg.com/en/docs/install ) `` ` bash npm -g yarn `` `" ] ], "token": [ [ "prerequisite", "-", "install", "[", "yarn", "v1", "]", "(", "http", ":", "//classic.yarnpkg.com/en/docs/install", ")", "``", "`", "bash", "npm", "-g", "yarn", "``", "`" ], [ "prerequisite - install [ yarn v1 ] ( http : //classic.yarnpkg.com/en/docs/install ) `` ` bash npm -g yarn `` `" ] ], "level of complexity": -1 }, { "url": "https://github.com/FlowiseAI/Flowise", "readme_url": "https://raw.githubusercontent.com/FlowiseAI/Flowise/main/README.md", "topic": [ "artificial-intelligence", "chatgpt", "javascript", "large-language-models", "llm", "low-code", "no-code", "react", "typescript" ], "text": "Setup\n\n1. Clone the repository\n\n ```bash\n git clone https://github.com/FlowiseAI/Flowise.git\n ```\n\n2. Go into repository folder\n\n ```bash\n cd Flowise\n ```\n\n3. Install all dependencies of all modules:\n\n ```bash\n yarn install\n ```\n\n4. Build all the code:\n\n ```bash\n yarn build\n ```\n\n5. Start the app:\n\n ```bash\n yarn start\n ```\n\n You can now access the app on [http://localhost:3000](http://localhost:3000)\n\n6. For development build:\n\n - Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/ui`\n - Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/server`\n - Run\n\n ```bash\n yarn dev\n ```\n\n Any code changes will reload the app automatically on [http://localhost:8080](http://localhost:8080)\n\n", "sentence": [ [ "setup", "1", ".", "clone", "repository", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/flowiseai/flowise.git", "``", "`", "2", ".", "go", "repository", "folder", "``", "`", "bash", "cd", "flowise", "``", "`", "3", ".", "install", "dependency", "module", ":", "``", "`", "bash", "yarn", "install", "``", "`", "4", ".", "build", "code", ":", "``", "`", "bash", "yarn", "build", "``", "`", "5", ".", "start", "app", ":", "``", "`", "bash", "yarn", "start", "``", "`", "access", "app", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")", "6", ".", "development", "build", ":", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/ui", "`", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/server", "`", "-", "run", "``", "`", "bash", "yarn", "dev", "``", "`", "code", "change", "reload", "app", "automatically", "[", "http", ":", "//localhost:8080", "]", "(", "http", ":", "//localhost:8080", ")" ], [ "setup 1 .", "clone repository `` ` bash git clone http : //github.com/flowiseai/flowise.git `` ` 2 .", "go repository folder `` ` bash cd flowise `` ` 3 .", "install dependency module : `` ` bash yarn install `` ` 4 .", "build code : `` ` bash yarn build `` ` 5 .", "start app : `` ` bash yarn start `` ` access app [ http : //localhost:3000 ] ( http : //localhost:3000 ) 6 .", "development build : - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/ui ` - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/server ` - run `` ` bash yarn dev `` ` code change reload app automatically [ http : //localhost:8080 ] ( http : //localhost:8080 )" ] ], "token": [ [ "setup", "1", ".", "clone", "repository", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/flowiseai/flowise.git", "``", "`", "2", ".", "go", "repository", "folder", "``", "`", "bash", "cd", "flowise", "``", "`", "3", ".", "install", "dependency", "module", ":", "``", "`", "bash", "yarn", "install", "``", "`", "4", ".", "build", "code", ":", "``", "`", "bash", "yarn", "build", "``", "`", "5", ".", "start", "app", ":", "``", "`", "bash", "yarn", "start", "``", "`", "access", "app", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")", "6", ".", "development", "build", ":", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/ui", "`", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/server", "`", "-", "run", "``", "`", "bash", "yarn", "dev", "``", "`", "code", "change", "reload", "app", "automatically", "[", "http", ":", "//localhost:8080", "]", "(", "http", ":", "//localhost:8080", ")" ], [ "setup 1 .", "clone repository `` ` bash git clone http : //github.com/flowiseai/flowise.git `` ` 2 .", "go repository folder `` ` bash cd flowise `` ` 3 .", "install dependency module : `` ` bash yarn install `` ` 4 .", "build code : `` ` bash yarn build `` ` 5 .", "start app : `` ` bash yarn start `` ` access app [ http : //localhost:3000 ] ( http : //localhost:3000 ) 6 .", "development build : - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/ui ` - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/server ` - run `` ` bash yarn dev `` ` code change reload app automatically [ http : //localhost:8080 ] ( http : //localhost:8080 )" ] ], "level of complexity": 2 }, { "url": "https://github.com/microsoft/semantic-kernel", "readme_url": "https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md", "topic": [ "ai", "artificial-intelligence", "llm", "openai", "sdk" ], "text": "Getting started with Semantic Kernel\n\nThe Semantic Kernel SDK is available in C#, Python, and Java. To get started, choose your preferred language below. See the [Feature Matrix](https://learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages) to see a breakdown of\nfeature parity between our currently supported languages.\n\n\n \n \n \n \n \n \n \n
\n \n
\n Using Semantic Kernel in C#  
\n
\n
\n \n
\n Using Semantic Kernel in Python\n
\n
\n \"Java\n
\n Using Semantic Kernel in Java\n
\n
\n\nThe quickest way to get started with the basics is to get an API key\nfrom either OpenAI or Azure OpenAI and to run one of the C#, Python, and Java console applications/scripts below.\n\n", "sentence": [ [ "getting", "started", "semantic", "kernel", "semantic", "kernel", "sdk", "available", "c", "#", ",", "python", ",", "java", ".", "get", "started", ",", "choose", "preferred", "language", ".", "see", "[", "feature", "matrix", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages", ")", "see", "breakdown", "feature", "parity", "currently", "supported", "language", ".", "<", "table", "width=100", "%", ">", "<", "tbody", ">", "<", "tr", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "src=", "''", "http", ":", "//user-images.githubusercontent.com/371009/230673036-fad1e8e6-5d48-49b1-a9c1-6f9834e0d165.png", "''", ">", "<", "div", ">", "<", "href=", "''", "dotnet/readme.md", "''", ">", "using", "semantic", "kernel", "c", "#", "<", "/a", ">", "&", "nbsp", "<", "br/", ">", "<", "/div", ">", "<", "/td", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "src=", "''", "http", ":", "//raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg", "''", ">", "<", "div", ">", "<", "href=", "''", "python/readme.md", "''", ">", "using", "semantic", "kernel", "python", "<", "/a", ">", "<", "/div", ">", "<", "/td", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "height=52px", "src=", "''", "http", ":", "//upload.wikimedia.org/wikipedia/en/3/30/java_programming_language_logo.svg", "''", "alt=", "''", "java", "logo", "''", ">", "<", "div", ">", "<", "href=", "''", "http", ":", "//github.com/microsoft/semantic-kernel/blob/main/java/readme.md", "''", ">", "using", "semantic", "kernel", "java", "<", "/a", ">", "<", "/div", ">", "<", "/td", ">", "<", "/tr", ">", "<", "/tbody", ">", "<", "/table", ">", "quickest", "way", "get", "started", "basic", "get", "api", "key", "either", "openai", "azure", "openai", "run", "one", "c", "#", ",", "python", ",", "java", "console", "applications/scripts", "." ], [ "getting started semantic kernel semantic kernel sdk available c # , python , java .", "get started , choose preferred language .", "see [ feature matrix ] ( http : //learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages ) see breakdown feature parity currently supported language .", "< table width=100 % > < tbody > < tr > < td > < img align= '' left '' width=52px src= '' http : //user-images.githubusercontent.com/371009/230673036-fad1e8e6-5d48-49b1-a9c1-6f9834e0d165.png '' > < div > < href= '' dotnet/readme.md '' > using semantic kernel c # < /a > & nbsp < br/ > < /div > < /td > < td > < img align= '' left '' width=52px src= '' http : //raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg '' > < div > < href= '' python/readme.md '' > using semantic kernel python < /a > < /div > < /td > < td > < img align= '' left '' width=52px height=52px src= '' http : //upload.wikimedia.org/wikipedia/en/3/30/java_programming_language_logo.svg '' alt= '' java logo '' > < div > < href= '' http : //github.com/microsoft/semantic-kernel/blob/main/java/readme.md '' > using semantic kernel java < /a > < /div > < /td > < /tr > < /tbody > < /table > quickest way get started basic get api key either openai azure openai run one c # , python , java console applications/scripts ." ] ], "token": [ [ "getting", "started", "semantic", "kernel", "semantic", "kernel", "sdk", "available", "c", "#", ",", "python", ",", "java", ".", "get", "started", ",", "choose", "preferred", "language", ".", "see", "[", "feature", "matrix", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages", ")", "see", "breakdown", "feature", "parity", "currently", "supported", "language", ".", "<", "table", "width=100", "%", ">", "<", "tbody", ">", "<", "tr", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "src=", "''", "http", ":", "//user-images.githubusercontent.com/371009/230673036-fad1e8e6-5d48-49b1-a9c1-6f9834e0d165.png", "''", ">", "<", "div", ">", "<", "href=", "''", "dotnet/readme.md", "''", ">", "using", "semantic", "kernel", "c", "#", "<", "/a", ">", "&", "nbsp", "<", "br/", ">", "<", "/div", ">", "<", "/td", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "src=", "''", "http", ":", "//raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg", "''", ">", "<", "div", ">", "<", "href=", "''", "python/readme.md", "''", ">", "using", "semantic", "kernel", "python", "<", "/a", ">", "<", "/div", ">", "<", "/td", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "height=52px", "src=", "''", "http", ":", "//upload.wikimedia.org/wikipedia/en/3/30/java_programming_language_logo.svg", "''", "alt=", "''", "java", "logo", "''", ">", "<", "div", ">", "<", "href=", "''", "http", ":", "//github.com/microsoft/semantic-kernel/blob/main/java/readme.md", "''", ">", "using", "semantic", "kernel", "java", "<", "/a", ">", "<", "/div", ">", "<", "/td", ">", "<", "/tr", ">", "<", "/tbody", ">", "<", "/table", ">", "quickest", "way", "get", "started", "basic", "get", "api", "key", "either", "openai", "azure", "openai", "run", "one", "c", "#", ",", "python", ",", "java", "console", "applications/scripts", "." ], [ "getting started semantic kernel semantic kernel sdk available c # , python , java .", "get started , choose preferred language .", "see [ feature matrix ] ( http : //learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages ) see breakdown feature parity currently supported language .", "< table width=100 % > < tbody > < tr > < td > < img align= '' left '' width=52px src= '' http : //user-images.githubusercontent.com/371009/230673036-fad1e8e6-5d48-49b1-a9c1-6f9834e0d165.png '' > < div > < href= '' dotnet/readme.md '' > using semantic kernel c # < /a > & nbsp < br/ > < /div > < /td > < td > < img align= '' left '' width=52px src= '' http : //raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg '' > < div > < href= '' python/readme.md '' > using semantic kernel python < /a > < /div > < /td > < td > < img align= '' left '' width=52px height=52px src= '' http : //upload.wikimedia.org/wikipedia/en/3/30/java_programming_language_logo.svg '' alt= '' java logo '' > < div > < href= '' http : //github.com/microsoft/semantic-kernel/blob/main/java/readme.md '' > using semantic kernel java < /a > < /div > < /td > < /tr > < /tbody > < /table > quickest way get started basic get api key either openai azure openai run one c # , python , java console applications/scripts ." ] ], "level of complexity": 2 }, { "url": "https://github.com/microsoft/semantic-kernel", "readme_url": "https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md", "topic": [ "ai", "artificial-intelligence", "llm", "openai", "sdk" ], "text": "For Python:\n\n1. Install the pip package: `python -m pip install semantic-kernel`.\n2. Create a new script e.g. `hello-world.py`.\n3. Store your API key and settings in an `.env` file as described [here](python/README.md).\n4. Copy the code from [here](python/README.md) into the `hello-world.py` script.\n5. Run the python script.\n\n", "sentence": [ [ "python", ":", "1", ".", "install", "pip", "package", ":", "`", "python", "-m", "pip", "install", "semantic-kernel", "`", ".", "2", ".", "create", "new", "script", "e.g", ".", "`", "hello-world.py", "`", ".", "3", ".", "store", "api", "key", "setting", "`", ".env", "`", "file", "described", "[", "]", "(", "python/readme.md", ")", ".", "4", ".", "copy", "code", "[", "]", "(", "python/readme.md", ")", "`", "hello-world.py", "`", "script", ".", "5", ".", "run", "python", "script", "." ], [ "python : 1 .", "install pip package : ` python -m pip install semantic-kernel ` .", "2 .", "create new script e.g .", "` hello-world.py ` .", "3 .", "store api key setting ` .env ` file described [ ] ( python/readme.md ) .", "4 .", "copy code [ ] ( python/readme.md ) ` hello-world.py ` script .", "5 .", "run python script ." ] ], "token": [ [ "python", ":", "1", ".", "install", "pip", "package", ":", "`", "python", "-m", "pip", "install", "semantic-kernel", "`", ".", "2", ".", "create", "new", "script", "e.g", ".", "`", "hello-world.py", "`", ".", "3", ".", "store", "api", "key", "setting", "`", ".env", "`", "file", "described", "[", "]", "(", "python/readme.md", ")", ".", "4", ".", "copy", "code", "[", "]", "(", "python/readme.md", ")", "`", "hello-world.py", "`", "script", ".", "5", ".", "run", "python", "script", "." ], [ "python : 1 .", "install pip package : ` python -m pip install semantic-kernel ` .", "2 .", "create new script e.g .", "` hello-world.py ` .", "3 .", "store api key setting ` .env ` file described [ ] ( python/readme.md ) .", "4 .", "copy code [ ] ( python/readme.md ) ` hello-world.py ` script .", "5 .", "run python script ." ] ], "level of complexity": 0 }, { "url": "https://github.com/microsoft/semantic-kernel", "readme_url": "https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md", "topic": [ "ai", "artificial-intelligence", "llm", "openai", "sdk" ], "text": "Learning how to use Semantic Kernel\n\nThe fastest way to learn how to use Semantic Kernel is with our C", "sentence": [ [ "learning", "use", "semantic", "kernel", "fastest", "way", "learn", "use", "semantic", "kernel", "c" ], [ "learning use semantic kernel fastest way learn use semantic kernel c" ] ], "token": [ [ "learning", "use", "semantic", "kernel", "fastest", "way", "learn", "use", "semantic", "kernel", "c" ], [ "learning use semantic kernel fastest way learn use semantic kernel c" ] ], "level of complexity": -1 }, { "url": "https://github.com/microsoft/semantic-kernel", "readme_url": "https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md", "topic": [ "ai", "artificial-intelligence", "llm", "openai", "sdk" ], "text": "and Python Jupyter notebooks. These notebooks\ndemonstrate how to use Semantic Kernel with code snippets that you can run with a push of a button.\n\n- [Getting Started with C", "sentence": [ [ "python", "jupyter", "notebook", ".", "notebook", "demonstrate", "use", "semantic", "kernel", "code", "snippet", "run", "push", "button", ".", "-", "[", "getting", "started", "c" ], [ "python jupyter notebook .", "notebook demonstrate use semantic kernel code snippet run push button .", "- [ getting started c" ] ], "token": [ [ "python", "jupyter", "notebook", ".", "notebook", "demonstrate", "use", "semantic", "kernel", "code", "snippet", "run", "push", "button", ".", "-", "[", "getting", "started", "c" ], [ "python jupyter notebook .", "notebook demonstrate use semantic kernel code snippet run push button .", "- [ getting started c" ] ], "level of complexity": -1 }, { "url": "https://github.com/microsoft/semantic-kernel", "readme_url": "https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md", "topic": [ "ai", "artificial-intelligence", "llm", "openai", "sdk" ], "text": "notebook](dotnet/notebooks/00-getting-started.ipynb)\n- [Getting Started with Python notebook](python/notebooks/00-getting-started.ipynb)\n\nOnce you've finished the getting started notebooks, you can then check out the main walkthroughs\non our Learn site. Each sample comes with a completed C", "sentence": [ [ "notebook", "]", "(", "dotnet/notebooks/00-getting-started.ipynb", ")", "-", "[", "getting", "started", "python", "notebook", "]", "(", "python/notebooks/00-getting-started.ipynb", ")", "'ve", "finished", "getting", "started", "notebook", ",", "check", "main", "walkthroughs", "learn", "site", ".", "sample", "come", "completed", "c" ], [ "notebook ] ( dotnet/notebooks/00-getting-started.ipynb ) - [ getting started python notebook ] ( python/notebooks/00-getting-started.ipynb ) 've finished getting started notebook , check main walkthroughs learn site .", "sample come completed c" ] ], "token": [ [ "notebook", "]", "(", "dotnet/notebooks/00-getting-started.ipynb", ")", "-", "[", "getting", "started", "python", "notebook", "]", "(", "python/notebooks/00-getting-started.ipynb", ")", "'ve", "finished", "getting", "started", "notebook", ",", "check", "main", "walkthroughs", "learn", "site", ".", "sample", "come", "completed", "c" ], [ "notebook ] ( dotnet/notebooks/00-getting-started.ipynb ) - [ getting started python notebook ] ( python/notebooks/00-getting-started.ipynb ) 've finished getting started notebook , check main walkthroughs learn site .", "sample come completed c" ] ], "level of complexity": -1 }, { "url": "https://github.com/microsoft/semantic-kernel", "readme_url": "https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md", "topic": [ "ai", "artificial-intelligence", "llm", "openai", "sdk" ], "text": "Chat Copilot: see what's possible with Semantic Kernel\n\nIf you're interested in seeing a full end-to-end example of how to use Semantic Kernel, check out\nour [Chat Copilot](https://github.com/microsoft/chat-copilot) reference application. Chat Copilot\nis a chatbot that demonstrates the power of Semantic Kernel. By combining plugins, planners, and personas,\nwe demonstrate how you can build a chatbot that can maintain long-running conversations with users while\nalso leveraging plugins to integrate with other services.\n\n![Chat Copilot answering a question](https://learn.microsoft.com/en-us/semantic-kernel/media/chat-copilot-in-action.gif)\n\nYou can run the app yourself by downloading it from its [GitHub repo](https://github.com/microsoft/chat-copilot).\n\n", "sentence": [ [ "chat", "copilot", ":", "see", "'s", "possible", "semantic", "kernel", "'re", "interested", "seeing", "full", "end-to-end", "example", "use", "semantic", "kernel", ",", "check", "[", "chat", "copilot", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "reference", "application", ".", "chat", "copilot", "chatbot", "demonstrates", "power", "semantic", "kernel", ".", "combining", "plugins", ",", "planner", ",", "persona", ",", "demonstrate", "build", "chatbot", "maintain", "long-running", "conversation", "user", "also", "leveraging", "plugins", "integrate", "service", ".", "!", "[", "chat", "copilot", "answering", "question", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/media/chat-copilot-in-action.gif", ")", "run", "app", "downloading", "[", "github", "repo", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "." ], [ "chat copilot : see 's possible semantic kernel 're interested seeing full end-to-end example use semantic kernel , check [ chat copilot ] ( http : //github.com/microsoft/chat-copilot ) reference application .", "chat copilot chatbot demonstrates power semantic kernel .", "combining plugins , planner , persona , demonstrate build chatbot maintain long-running conversation user also leveraging plugins integrate service .", "!", "[ chat copilot answering question ] ( http : //learn.microsoft.com/en-us/semantic-kernel/media/chat-copilot-in-action.gif ) run app downloading [ github repo ] ( http : //github.com/microsoft/chat-copilot ) ." ] ], "token": [ [ "chat", "copilot", ":", "see", "'s", "possible", "semantic", "kernel", "'re", "interested", "seeing", "full", "end-to-end", "example", "use", "semantic", "kernel", ",", "check", "[", "chat", "copilot", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "reference", "application", ".", "chat", "copilot", "chatbot", "demonstrates", "power", "semantic", "kernel", ".", "combining", "plugins", ",", "planner", ",", "persona", ",", "demonstrate", "build", "chatbot", "maintain", "long-running", "conversation", "user", "also", "leveraging", "plugins", "integrate", "service", ".", "!", "[", "chat", "copilot", "answering", "question", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/media/chat-copilot-in-action.gif", ")", "run", "app", "downloading", "[", "github", "repo", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "." ], [ "chat copilot : see 's possible semantic kernel 're interested seeing full end-to-end example use semantic kernel , check [ chat copilot ] ( http : //github.com/microsoft/chat-copilot ) reference application .", "chat copilot chatbot demonstrates power semantic kernel .", "combining plugins , planner , persona , demonstrate build chatbot maintain long-running conversation user also leveraging plugins integrate service .", "!", "[ chat copilot answering question ] ( http : //learn.microsoft.com/en-us/semantic-kernel/media/chat-copilot-in-action.gif ) run app downloading [ github repo ] ( http : //github.com/microsoft/chat-copilot ) ." ] ], "level of complexity": -1 }, { "url": "https://github.com/microsoft/semantic-kernel", "readme_url": "https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md", "topic": [ "ai", "artificial-intelligence", "llm", "openai", "sdk" ], "text": "Check out our other repos!\n\nIf you like Semantic Kernel, you may also be interested in other repos the Semantic Kernel team supports:\n\n| Repo | Description |\n| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- |\n| [Chat Copilot](https://github.com/microsoft/chat-copilot) | A reference application that demonstrates how to build a chatbot with Semantic Kernel. |\n| [Semantic Kernel Docs](https://github.com/MicrosoftDocs/semantic-kernel-docs) | The home for Semantic Kernel documentation that appears on the Microsoft learn site. |\n| [Semantic Kernel Starters](https://github.com/microsoft/semantic-kernel-starters) | Starter projects for Semantic Kernel to make it easier to get started. |\n| [Kernel Memory](https://github.com/microsoft/kernel-memory) | A scalable Memory service to store information and ask questions using the RAG pattern. |\n\n", "sentence": [ [ "check", "repos", "!", "like", "semantic", "kernel", ",", "may", "also", "interested", "repos", "semantic", "kernel", "team", "support", ":", "|", "repo", "|", "description", "|", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-", "|", "|", "[", "chat", "copilot", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "|", "reference", "application", "demonstrates", "build", "chatbot", "semantic", "kernel", ".", "|", "|", "[", "semantic", "kernel", "doc", "]", "(", "http", ":", "//github.com/microsoftdocs/semantic-kernel-docs", ")", "|", "home", "semantic", "kernel", "documentation", "appears", "microsoft", "learn", "site", ".", "|", "|", "[", "semantic", "kernel", "starter", "]", "(", "http", ":", "//github.com/microsoft/semantic-kernel-starters", ")", "|", "starter", "project", "semantic", "kernel", "make", "easier", "get", "started", ".", "|", "|", "[", "kernel", "memory", "]", "(", "http", ":", "//github.com/microsoft/kernel-memory", ")", "|", "scalable", "memory", "service", "store", "information", "ask", "question", "using", "rag", "pattern", ".", "|" ], [ "check repos !", "like semantic kernel , may also interested repos semantic kernel team support : | repo | description | | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - | | [ chat copilot ] ( http : //github.com/microsoft/chat-copilot ) | reference application demonstrates build chatbot semantic kernel .", "| | [ semantic kernel doc ] ( http : //github.com/microsoftdocs/semantic-kernel-docs ) | home semantic kernel documentation appears microsoft learn site .", "| | [ semantic kernel starter ] ( http : //github.com/microsoft/semantic-kernel-starters ) | starter project semantic kernel make easier get started .", "| | [ kernel memory ] ( http : //github.com/microsoft/kernel-memory ) | scalable memory service store information ask question using rag pattern .", "|" ] ], "token": [ [ "check", "repos", "!", "like", "semantic", "kernel", ",", "may", "also", "interested", "repos", "semantic", "kernel", "team", "support", ":", "|", "repo", "|", "description", "|", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-", "|", "|", "[", "chat", "copilot", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "|", "reference", "application", "demonstrates", "build", "chatbot", "semantic", "kernel", ".", "|", "|", "[", "semantic", "kernel", "doc", "]", "(", "http", ":", "//github.com/microsoftdocs/semantic-kernel-docs", ")", "|", "home", "semantic", "kernel", "documentation", "appears", "microsoft", "learn", "site", ".", "|", "|", "[", "semantic", "kernel", "starter", "]", "(", "http", ":", "//github.com/microsoft/semantic-kernel-starters", ")", "|", "starter", "project", "semantic", "kernel", "make", "easier", "get", "started", ".", "|", "|", "[", "kernel", "memory", "]", "(", "http", ":", "//github.com/microsoft/kernel-memory", ")", "|", "scalable", "memory", "service", "store", "information", "ask", "question", "using", "rag", "pattern", ".", "|" ], [ "check repos !", "like semantic kernel , may also interested repos semantic kernel team support : | repo | description | | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - | | [ chat copilot ] ( http : //github.com/microsoft/chat-copilot ) | reference application demonstrates build chatbot semantic kernel .", "| | [ semantic kernel doc ] ( http : //github.com/microsoftdocs/semantic-kernel-docs ) | home semantic kernel documentation appears microsoft learn site .", "| | [ semantic kernel starter ] ( http : //github.com/microsoft/semantic-kernel-starters ) | starter project semantic kernel make easier get started .", "| | [ kernel memory ] ( http : //github.com/microsoft/kernel-memory ) | scalable memory service store information ask question using rag pattern .", "|" ] ], "level of complexity": -1 }, { "url": "https://github.com/microsoft/semantic-kernel", "readme_url": "https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md", "topic": [ "ai", "artificial-intelligence", "llm", "openai", "sdk" ], "text": "Join the community\n\nWe welcome your contributions and suggestions to SK community! One of the easiest\nways to participate is to engage in discussions in the GitHub repository.\nBug reports and fixes are welcome!\n\nFor new features, components, or extensions, please open an issue and discuss with\nus before sending a PR. This is to avoid rejection as we might be taking the core\nin a different direction, but also to consider the impact on the larger ecosystem.\n\nTo learn more and get started:\n\n- Read the [documentation](https://aka.ms/sk/learn)\n- Learn how to [contribute](https://learn.microsoft.com/en-us/semantic-kernel/get-started/contributing) to the project\n- Join the [Discord community](https://aka.ms/SKDiscord)\n- Attend [regular office hours and SK community events](COMMUNITY.md)\n- Follow the team on our [blog](https://aka.ms/sk/blog)\n\n", "sentence": [ [ "join", "community", "welcome", "contribution", "suggestion", "sk", "community", "!", "one", "easiest", "way", "participate", "engage", "discussion", "github", "repository", ".", "bug", "report", "fix", "welcome", "!", "new", "feature", ",", "component", ",", "extension", ",", "please", "open", "issue", "discus", "u", "sending", "pr", ".", "avoid", "rejection", "might", "taking", "core", "different", "direction", ",", "also", "consider", "impact", "larger", "ecosystem", ".", "learn", "get", "started", ":", "-", "read", "[", "documentation", "]", "(", "http", ":", "//aka.ms/sk/learn", ")", "-", "learn", "[", "contribute", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/get-started/contributing", ")", "project", "-", "join", "[", "discord", "community", "]", "(", "http", ":", "//aka.ms/skdiscord", ")", "-", "attend", "[", "regular", "office", "hour", "sk", "community", "event", "]", "(", "community.md", ")", "-", "follow", "team", "[", "blog", "]", "(", "http", ":", "//aka.ms/sk/blog", ")" ], [ "join community welcome contribution suggestion sk community !", "one easiest way participate engage discussion github repository .", "bug report fix welcome !", "new feature , component , extension , please open issue discus u sending pr .", "avoid rejection might taking core different direction , also consider impact larger ecosystem .", "learn get started : - read [ documentation ] ( http : //aka.ms/sk/learn ) - learn [ contribute ] ( http : //learn.microsoft.com/en-us/semantic-kernel/get-started/contributing ) project - join [ discord community ] ( http : //aka.ms/skdiscord ) - attend [ regular office hour sk community event ] ( community.md ) - follow team [ blog ] ( http : //aka.ms/sk/blog )" ] ], "token": [ [ "join", "community", "welcome", "contribution", "suggestion", "sk", "community", "!", "one", "easiest", "way", "participate", "engage", "discussion", "github", "repository", ".", "bug", "report", "fix", "welcome", "!", "new", "feature", ",", "component", ",", "extension", ",", "please", "open", "issue", "discus", "u", "sending", "pr", ".", "avoid", "rejection", "might", "taking", "core", "different", "direction", ",", "also", "consider", "impact", "larger", "ecosystem", ".", "learn", "get", "started", ":", "-", "read", "[", "documentation", "]", "(", "http", ":", "//aka.ms/sk/learn", ")", "-", "learn", "[", "contribute", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/get-started/contributing", ")", "project", "-", "join", "[", "discord", "community", "]", "(", "http", ":", "//aka.ms/skdiscord", ")", "-", "attend", "[", "regular", "office", "hour", "sk", "community", "event", "]", "(", "community.md", ")", "-", "follow", "team", "[", "blog", "]", "(", "http", ":", "//aka.ms/sk/blog", ")" ], [ "join community welcome contribution suggestion sk community !", "one easiest way participate engage discussion github repository .", "bug report fix welcome !", "new feature , component , extension , please open issue discus u sending pr .", "avoid rejection might taking core different direction , also consider impact larger ecosystem .", "learn get started : - read [ documentation ] ( http : //aka.ms/sk/learn ) - learn [ contribute ] ( http : //learn.microsoft.com/en-us/semantic-kernel/get-started/contributing ) project - join [ discord community ] ( http : //aka.ms/skdiscord ) - attend [ regular office hour sk community event ] ( community.md ) - follow team [ blog ] ( http : //aka.ms/sk/blog )" ] ], "level of complexity": -1 }, { "url": "https://github.com/mlc-ai/mlc-llm", "readme_url": "https://raw.githubusercontent.com/mlc-ai/mlc-llm/main/README.md", "topic": [ "language-model", "llm", "machine-learning-compilation", "tvm" ], "text": "News\n\n* [10/18/2023] [[Post]](https://blog.mlc.ai/2023/10/19/Scalable-Language-Model-Inference-on-Multiple-NVDIA-AMD-GPUs) Scalable multi-GPU support for CUDA and ROCm are official.\n* [09/02/2023] Prebuilt ROCm 5.7 and CUDA 12.2 package is [available](https://llm.mlc.ai/docs/install/tvm.html#option-1-prebuilt-package).\n* [08/25/2023] CodeLlama support is up.\n* [08/14/2023] [[Post]](https://blog.mlc.ai/2023/08/09/GPU-Accelerated-LLM-on-Orange-Pi) Mali GPU support is up on Orange Pi.\n* [08/09/2023] [[Post]](https://blog.mlc.ai/2023/08/09/Making-AMD-GPUs-competitive-for-LLM-inference) ROCm backend is mature to use.\n* [08/02/2023] [Dockerfile](https://github.com/mlc-ai/llm-perf-bench/) is released for CUDA performance benchmarking.\n* [07/19/2023] Support for Llama2-7B/13B/70B is up.\n* [05/22/2023] [[Post]](https://blog.mlc.ai/2023/05/22/bringing-open-large-language-models-to-consumer-devices) RedPajama support is up.\n* [05/08/2023] [[Post]](https://blog.mlc.ai/2023/05/08/bringing-hardware-accelerated-language-models-to-android-devices) MLC LLM is now available on Android.\n* [05/01/2023] [[Post]](https://blog.mlc.ai/2023/05/01/bringing-accelerated-llm-to-consumer-hardware) MLC LLM is released with Metal, Vulkan and CUDA backends.\n* [04/14/2023] [WebLLM](https://github.com/mlc-ai/web-llm) is released prior to MLC LLM with WebGPU and WebAssembly backend.\n\n", "sentence": [ [ "news", "*", "[", "10/18/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/10/19/scalable-language-model-inference-on-multiple-nvdia-amd-gpus", ")", "scalable", "multi-gpu", "support", "cuda", "rocm", "official", ".", "*", "[", "09/02/2023", "]", "prebuilt", "rocm", "5.7", "cuda", "12.2", "package", "[", "available", "]", "(", "http", ":", "//llm.mlc.ai/docs/install/tvm.html", "#", "option-1-prebuilt-package", ")", ".", "*", "[", "08/25/2023", "]", "codellama", "support", ".", "*", "[", "08/14/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/08/09/gpu-accelerated-llm-on-orange-pi", ")", "mali", "gpu", "support", "orange", "pi", ".", "*", "[", "08/09/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/08/09/making-amd-gpus-competitive-for-llm-inference", ")", "rocm", "backend", "mature", "use", ".", "*", "[", "08/02/2023", "]", "[", "dockerfile", "]", "(", "http", ":", "//github.com/mlc-ai/llm-perf-bench/", ")", "released", "cuda", "performance", "benchmarking", ".", "*", "[", "07/19/2023", "]", "support", "llama2-7b/13b/70b", ".", "*", "[", "05/22/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/22/bringing-open-large-language-models-to-consumer-devices", ")", "redpajama", "support", ".", "*", "[", "05/08/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/08/bringing-hardware-accelerated-language-models-to-android-devices", ")", "mlc", "llm", "available", "android", ".", "*", "[", "05/01/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/01/bringing-accelerated-llm-to-consumer-hardware", ")", "mlc", "llm", "released", "metal", ",", "vulkan", "cuda", "backends", ".", "*", "[", "04/14/2023", "]", "[", "webllm", "]", "(", "http", ":", "//github.com/mlc-ai/web-llm", ")", "released", "prior", "mlc", "llm", "webgpu", "webassembly", "backend", "." ], [ "news * [ 10/18/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/10/19/scalable-language-model-inference-on-multiple-nvdia-amd-gpus ) scalable multi-gpu support cuda rocm official .", "* [ 09/02/2023 ] prebuilt rocm 5.7 cuda 12.2 package [ available ] ( http : //llm.mlc.ai/docs/install/tvm.html # option-1-prebuilt-package ) .", "* [ 08/25/2023 ] codellama support .", "* [ 08/14/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/08/09/gpu-accelerated-llm-on-orange-pi ) mali gpu support orange pi .", "* [ 08/09/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/08/09/making-amd-gpus-competitive-for-llm-inference ) rocm backend mature use .", "* [ 08/02/2023 ] [ dockerfile ] ( http : //github.com/mlc-ai/llm-perf-bench/ ) released cuda performance benchmarking .", "* [ 07/19/2023 ] support llama2-7b/13b/70b .", "* [ 05/22/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/22/bringing-open-large-language-models-to-consumer-devices ) redpajama support .", "* [ 05/08/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/08/bringing-hardware-accelerated-language-models-to-android-devices ) mlc llm available android .", "* [ 05/01/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/01/bringing-accelerated-llm-to-consumer-hardware ) mlc llm released metal , vulkan cuda backends .", "* [ 04/14/2023 ] [ webllm ] ( http : //github.com/mlc-ai/web-llm ) released prior mlc llm webgpu webassembly backend ." ] ], "token": [ [ "news", "*", "[", "10/18/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/10/19/scalable-language-model-inference-on-multiple-nvdia-amd-gpus", ")", "scalable", "multi-gpu", "support", "cuda", "rocm", "official", ".", "*", "[", "09/02/2023", "]", "prebuilt", "rocm", "5.7", "cuda", "12.2", "package", "[", "available", "]", "(", "http", ":", "//llm.mlc.ai/docs/install/tvm.html", "#", "option-1-prebuilt-package", ")", ".", "*", "[", "08/25/2023", "]", "codellama", "support", ".", "*", "[", "08/14/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/08/09/gpu-accelerated-llm-on-orange-pi", ")", "mali", "gpu", "support", "orange", "pi", ".", "*", "[", "08/09/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/08/09/making-amd-gpus-competitive-for-llm-inference", ")", "rocm", "backend", "mature", "use", ".", "*", "[", "08/02/2023", "]", "[", "dockerfile", "]", "(", "http", ":", "//github.com/mlc-ai/llm-perf-bench/", ")", "released", "cuda", "performance", "benchmarking", ".", "*", "[", "07/19/2023", "]", "support", "llama2-7b/13b/70b", ".", "*", "[", "05/22/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/22/bringing-open-large-language-models-to-consumer-devices", ")", "redpajama", "support", ".", "*", "[", "05/08/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/08/bringing-hardware-accelerated-language-models-to-android-devices", ")", "mlc", "llm", "available", "android", ".", "*", "[", "05/01/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/01/bringing-accelerated-llm-to-consumer-hardware", ")", "mlc", "llm", "released", "metal", ",", "vulkan", "cuda", "backends", ".", "*", "[", "04/14/2023", "]", "[", "webllm", "]", "(", "http", ":", "//github.com/mlc-ai/web-llm", ")", "released", "prior", "mlc", "llm", "webgpu", "webassembly", "backend", "." ], [ "news * [ 10/18/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/10/19/scalable-language-model-inference-on-multiple-nvdia-amd-gpus ) scalable multi-gpu support cuda rocm official .", "* [ 09/02/2023 ] prebuilt rocm 5.7 cuda 12.2 package [ available ] ( http : //llm.mlc.ai/docs/install/tvm.html # option-1-prebuilt-package ) .", "* [ 08/25/2023 ] codellama support .", "* [ 08/14/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/08/09/gpu-accelerated-llm-on-orange-pi ) mali gpu support orange pi .", "* [ 08/09/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/08/09/making-amd-gpus-competitive-for-llm-inference ) rocm backend mature use .", "* [ 08/02/2023 ] [ dockerfile ] ( http : //github.com/mlc-ai/llm-perf-bench/ ) released cuda performance benchmarking .", "* [ 07/19/2023 ] support llama2-7b/13b/70b .", "* [ 05/22/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/22/bringing-open-large-language-models-to-consumer-devices ) redpajama support .", "* [ 05/08/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/08/bringing-hardware-accelerated-language-models-to-android-devices ) mlc llm available android .", "* [ 05/01/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/01/bringing-accelerated-llm-to-consumer-hardware ) mlc llm released metal , vulkan cuda backends .", "* [ 04/14/2023 ] [ webllm ] ( http : //github.com/mlc-ai/web-llm ) released prior mlc llm webgpu webassembly backend ." ] ], "level of complexity": -1 }, { "url": "https://github.com/mlc-ai/mlc-llm", "readme_url": "https://raw.githubusercontent.com/mlc-ai/mlc-llm/main/README.md", "topic": [ "language-model", "llm", "machine-learning-compilation", "tvm" ], "text": "Getting Started\n\nPlease visit our [documentation](https://llm.mlc.ai/docs/index.html#getting-started) for detailed instructions.\n\n", "sentence": [ [ "getting", "started", "please", "visit", "[", "documentation", "]", "(", "http", ":", "//llm.mlc.ai/docs/index.html", "#", "getting-started", ")", "detailed", "instruction", "." ], [ "getting started please visit [ documentation ] ( http : //llm.mlc.ai/docs/index.html # getting-started ) detailed instruction ." ] ], "token": [ [ "getting", "started", "please", "visit", "[", "documentation", "]", "(", "http", ":", "//llm.mlc.ai/docs/index.html", "#", "getting-started", ")", "detailed", "instruction", "." ], [ "getting started please visit [ documentation ] ( http : //llm.mlc.ai/docs/index.html # getting-started ) detailed instruction ." ] ], "level of complexity": -1 }, { "url": "https://github.com/mlc-ai/mlc-llm", "readme_url": "https://raw.githubusercontent.com/mlc-ai/mlc-llm/main/README.md", "topic": [ "language-model", "llm", "machine-learning-compilation", "tvm" ], "text": "Model Support\n\nMLC LLM supports a wide range of model architectures and variants. We have the following prebuilts which you can\nuse off-the-shelf. Visit [Prebuilt Models](https://llm.mlc.ai/docs/prebuilt_models.html) to see the full list, and [Compile Models via MLC](https://llm.mlc.ai/docs/compilation/compile_models.html) to see how to use models not on this list.\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
ArchitecturePrebuilt Model Variants
LlamaLlama-2, Code Llama, Vicuna, WizardLM, WizardMath, OpenOrca Platypus2, FlagAlpha Llama-2 Chinese, georgesung Llama-2 Uncensored
GPT-NeoXRedPajama
GPT-J
RWKVRWKV-raven
MiniGPT
GPTBigCodeWizardCoder
ChatGLM
StableLM
Mistral
Phi
\n\n", "sentence": [ [ "model", "support", "mlc", "llm", "support", "wide", "range", "model", "architecture", "variant", ".", "following", "prebuilts", "use", "off-the-shelf", ".", "visit", "[", "prebuilt", "model", "]", "(", "http", ":", "//llm.mlc.ai/docs/prebuilt_models.html", ")", "see", "full", "list", ",", "[", "compile", "model", "via", "mlc", "]", "(", "http", ":", "//llm.mlc.ai/docs/compilation/compile_models.html", ")", "see", "use", "model", "list", ".", "<", "table", "style=", "''", "width:100", "%", "''", ">", "<", "thead", ">", "<", "tr", ">", "<", "th", "style=", "''", "width:40", "%", "''", ">", "architecture", "<", "/th", ">", "<", "th", "style=", "''", "width:60", "%", "''", ">", "prebuilt", "model", "variant", "<", "/th", ">", "<", "/tr", ">", "<", "/thead", ">", "<", "tbody", ">", "<", "tr", ">", "<", "td", ">", "llama", "<", "/td", ">", "<", "td", ">", "llama-2", ",", "code", "llama", ",", "vicuna", ",", "wizardlm", ",", "wizardmath", ",", "openorca", "platypus2", ",", "flagalpha", "llama-2", "chinese", ",", "georgesung", "llama-2", "uncensored", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-neox", "<", "/td", ">", "<", "td", ">", "redpajama", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-j", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "rwkv", "<", "/td", ">", "<", "td", ">", "rwkv-raven", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "minigpt", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gptbigcode", "<", "/td", ">", "<", "td", ">", "wizardcoder", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "chatglm", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "stablelm", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "mistral", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "phi", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "/tbody", ">", "<", "/table", ">" ], [ "model support mlc llm support wide range model architecture variant .", "following prebuilts use off-the-shelf .", "visit [ prebuilt model ] ( http : //llm.mlc.ai/docs/prebuilt_models.html ) see full list , [ compile model via mlc ] ( http : //llm.mlc.ai/docs/compilation/compile_models.html ) see use model list .", "< table style= '' width:100 % '' > < thead > < tr > < th style= '' width:40 % '' > architecture < /th > < th style= '' width:60 % '' > prebuilt model variant < /th > < /tr > < /thead > < tbody > < tr > < td > llama < /td > < td > llama-2 , code llama , vicuna , wizardlm , wizardmath , openorca platypus2 , flagalpha llama-2 chinese , georgesung llama-2 uncensored < /td > < /tr > < tr > < td > gpt-neox < /td > < td > redpajama < /td > < /tr > < tr > < td > gpt-j < /td > < td > < /td > < /tr > < tr > < td > rwkv < /td > < td > rwkv-raven < /td > < /tr > < tr > < td > minigpt < /td > < td > < /td > < /tr > < tr > < td > gptbigcode < /td > < td > wizardcoder < /td > < /tr > < tr > < td > chatglm < /td > < td > < /td > < /tr > < tr > < td > stablelm < /td > < td > < /td > < /tr > < tr > < td > mistral < /td > < td > < /td > < /tr > < tr > < td > phi < /td > < td > < /td > < /tr > < /tbody > < /table >" ] ], "token": [ [ "model", "support", "mlc", "llm", "support", "wide", "range", "model", "architecture", "variant", ".", "following", "prebuilts", "use", "off-the-shelf", ".", "visit", "[", "prebuilt", "model", "]", "(", "http", ":", "//llm.mlc.ai/docs/prebuilt_models.html", ")", "see", "full", "list", ",", "[", "compile", "model", "via", "mlc", "]", "(", "http", ":", "//llm.mlc.ai/docs/compilation/compile_models.html", ")", "see", "use", "model", "list", ".", "<", "table", "style=", "''", "width:100", "%", "''", ">", "<", "thead", ">", "<", "tr", ">", "<", "th", "style=", "''", "width:40", "%", "''", ">", "architecture", "<", "/th", ">", "<", "th", "style=", "''", "width:60", "%", "''", ">", "prebuilt", "model", "variant", "<", "/th", ">", "<", "/tr", ">", "<", "/thead", ">", "<", "tbody", ">", "<", "tr", ">", "<", "td", ">", "llama", "<", "/td", ">", "<", "td", ">", "llama-2", ",", "code", "llama", ",", "vicuna", ",", "wizardlm", ",", "wizardmath", ",", "openorca", "platypus2", ",", "flagalpha", "llama-2", "chinese", ",", "georgesung", "llama-2", "uncensored", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-neox", "<", "/td", ">", "<", "td", ">", "redpajama", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-j", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "rwkv", "<", "/td", ">", "<", "td", ">", "rwkv-raven", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "minigpt", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gptbigcode", "<", "/td", ">", "<", "td", ">", "wizardcoder", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "chatglm", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "stablelm", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "mistral", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "phi", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "/tbody", ">", "<", "/table", ">" ], [ "model support mlc llm support wide range model architecture variant .", "following prebuilts use off-the-shelf .", "visit [ prebuilt model ] ( http : //llm.mlc.ai/docs/prebuilt_models.html ) see full list , [ compile model via mlc ] ( http : //llm.mlc.ai/docs/compilation/compile_models.html ) see use model list .", "< table style= '' width:100 % '' > < thead > < tr > < th style= '' width:40 % '' > architecture < /th > < th style= '' width:60 % '' > prebuilt model variant < /th > < /tr > < /thead > < tbody > < tr > < td > llama < /td > < td > llama-2 , code llama , vicuna , wizardlm , wizardmath , openorca platypus2 , flagalpha llama-2 chinese , georgesung llama-2 uncensored < /td > < /tr > < tr > < td > gpt-neox < /td > < td > redpajama < /td > < /tr > < tr > < td > gpt-j < /td > < td > < /td > < /tr > < tr > < td > rwkv < /td > < td > rwkv-raven < /td > < /tr > < tr > < td > minigpt < /td > < td > < /td > < /tr > < tr > < td > gptbigcode < /td > < td > wizardcoder < /td > < /tr > < tr > < td > chatglm < /td > < td > < /td > < /tr > < tr > < td > stablelm < /td > < td > < /td > < /tr > < tr > < td > mistral < /td > < td > < /td > < /tr > < tr > < td > phi < /td > < td > < /td > < /tr > < /tbody > < /table >" ] ], "level of complexity": -1 }, { "url": "https://github.com/langgenius/dify", "readme_url": "https://raw.githubusercontent.com/langgenius/dify/main/README.md", "topic": [ "ai", "backend-as-a-service", "gpt", "gpt-4", "langchain", "llama2", "llm", "openai", "orchestration", "python", "rag" ], "text": "Before You Start\n\n**Star us on GitHub, and be instantly notified for new releases!**\n\n![star-us](https://github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f)\n\n- [Website](https://dify.ai)\n- [Docs](https://docs.dify.ai)\n- [Deployment Docs](https://docs.dify.ai/getting-started/install-self-hosted)\n- [FAQ](https://docs.dify.ai/getting-started/faq) \n\n\n", "sentence": [ [ "start", "*", "*", "star", "u", "github", ",", "instantly", "notified", "new", "release", "!", "*", "*", "!", "[", "star-us", "]", "(", "http", ":", "//github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f", ")", "-", "[", "website", "]", "(", "http", ":", "//dify.ai", ")", "-", "[", "doc", "]", "(", "http", ":", "//docs.dify.ai", ")", "-", "[", "deployment", "doc", "]", "(", "http", ":", "//docs.dify.ai/getting-started/install-self-hosted", ")", "-", "[", "faq", "]", "(", "http", ":", "//docs.dify.ai/getting-started/faq", ")" ], [ "start * * star u github , instantly notified new release !", "* * !", "[ star-us ] ( http : //github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f ) - [ website ] ( http : //dify.ai ) - [ doc ] ( http : //docs.dify.ai ) - [ deployment doc ] ( http : //docs.dify.ai/getting-started/install-self-hosted ) - [ faq ] ( http : //docs.dify.ai/getting-started/faq )" ] ], "token": [ [ "start", "*", "*", "star", "u", "github", ",", "instantly", "notified", "new", "release", "!", "*", "*", "!", "[", "star-us", "]", "(", "http", ":", "//github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f", ")", "-", "[", "website", "]", "(", "http", ":", "//dify.ai", ")", "-", "[", "doc", "]", "(", "http", ":", "//docs.dify.ai", ")", "-", "[", "deployment", "doc", "]", "(", "http", ":", "//docs.dify.ai/getting-started/install-self-hosted", ")", "-", "[", "faq", "]", "(", "http", ":", "//docs.dify.ai/getting-started/faq", ")" ], [ "start * * star u github , instantly notified new release !", "* * !", "[ star-us ] ( http : //github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f ) - [ website ] ( http : //dify.ai ) - [ doc ] ( http : //docs.dify.ai ) - [ deployment doc ] ( http : //docs.dify.ai/getting-started/install-self-hosted ) - [ faq ] ( http : //docs.dify.ai/getting-started/faq )" ] ], "level of complexity": -1 }, { "url": "https://github.com/langgenius/dify", "readme_url": "https://raw.githubusercontent.com/langgenius/dify/main/README.md", "topic": [ "ai", "backend-as-a-service", "gpt", "gpt-4", "langchain", "llama2", "llm", "openai", "orchestration", "python", "rag" ], "text": "Install the Community Edition\n\n", "sentence": [ [ "install", "community", "edition" ], [ "install community edition" ] ], "token": [ [ "install", "community", "edition" ], [ "install community edition" ] ], "level of complexity": -1 }, { "url": "https://github.com/langgenius/dify", "readme_url": "https://raw.githubusercontent.com/langgenius/dify/main/README.md", "topic": [ "ai", "backend-as-a-service", "gpt", "gpt-4", "langchain", "llama2", "llm", "openai", "orchestration", "python", "rag" ], "text": "System Requirements\n\nBefore installing Dify, make sure your machine meets the following minimum system requirements:\n\n- CPU >= 2 Core\n- RAM >= 4GB\n\n", "sentence": [ [ "system", "requirement", "installing", "dify", ",", "make", "sure", "machine", "meet", "following", "minimum", "system", "requirement", ":", "-", "cpu", ">", "=", "2", "core", "-", "ram", ">", "=", "4gb" ], [ "system requirement installing dify , make sure machine meet following minimum system requirement : - cpu > = 2 core - ram > = 4gb" ] ], "token": [ [ "system", "requirement", "installing", "dify", ",", "make", "sure", "machine", "meet", "following", "minimum", "system", "requirement", ":", "-", "cpu", ">", "=", "2", "core", "-", "ram", ">", "=", "4gb" ], [ "system requirement installing dify , make sure machine meet following minimum system requirement : - cpu > = 2 core - ram > = 4gb" ] ], "level of complexity": -1 }, { "url": "https://github.com/langgenius/dify", "readme_url": "https://raw.githubusercontent.com/langgenius/dify/main/README.md", "topic": [ "ai", "backend-as-a-service", "gpt", "gpt-4", "langchain", "llama2", "llm", "openai", "orchestration", "python", "rag" ], "text": "Quick Start\n\nThe easiest way to start the Dify server is to run our [docker-compose.yml](docker/docker-compose.yaml) file. Before running the installation command, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:\n\n```bash\ncd docker\ndocker compose up -d\n```\n\nAfter running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization installation process.\n\n", "sentence": [ [ "quick", "start", "easiest", "way", "start", "dify", "server", "run", "[", "docker-compose.yml", "]", "(", "docker/docker-compose.yaml", ")", "file", ".", "running", "installation", "command", ",", "make", "sure", "[", "docker", "]", "(", "http", ":", "//docs.docker.com/get-docker/", ")", "[", "docker", "compose", "]", "(", "http", ":", "//docs.docker.com/compose/install/", ")", "installed", "machine", ":", "``", "`", "bash", "cd", "docker", "docker", "compose", "-d", "``", "`", "running", ",", "access", "dify", "dashboard", "browser", "[", "http", ":", "//localhost/install", "]", "(", "http", ":", "//localhost/install", ")", "start", "initialization", "installation", "process", "." ], [ "quick start easiest way start dify server run [ docker-compose.yml ] ( docker/docker-compose.yaml ) file .", "running installation command , make sure [ docker ] ( http : //docs.docker.com/get-docker/ ) [ docker compose ] ( http : //docs.docker.com/compose/install/ ) installed machine : `` ` bash cd docker docker compose -d `` ` running , access dify dashboard browser [ http : //localhost/install ] ( http : //localhost/install ) start initialization installation process ." ] ], "token": [ [ "quick", "start", "easiest", "way", "start", "dify", "server", "run", "[", "docker-compose.yml", "]", "(", "docker/docker-compose.yaml", ")", "file", ".", "running", "installation", "command", ",", "make", "sure", "[", "docker", "]", "(", "http", ":", "//docs.docker.com/get-docker/", ")", "[", "docker", "compose", "]", "(", "http", ":", "//docs.docker.com/compose/install/", ")", "installed", "machine", ":", "``", "`", "bash", "cd", "docker", "docker", "compose", "-d", "``", "`", "running", ",", "access", "dify", "dashboard", "browser", "[", "http", ":", "//localhost/install", "]", "(", "http", ":", "//localhost/install", ")", "start", "initialization", "installation", "process", "." ], [ "quick start easiest way start dify server run [ docker-compose.yml ] ( docker/docker-compose.yaml ) file .", "running installation command , make sure [ docker ] ( http : //docs.docker.com/get-docker/ ) [ docker compose ] ( http : //docs.docker.com/compose/install/ ) installed machine : `` ` bash cd docker docker compose -d `` ` running , access dify dashboard browser [ http : //localhost/install ] ( http : //localhost/install ) start initialization installation process ." ] ], "level of complexity": 1 }, { "url": "https://github.com/langgenius/dify", "readme_url": "https://raw.githubusercontent.com/langgenius/dify/main/README.md", "topic": [ "ai", "backend-as-a-service", "gpt", "gpt-4", "langchain", "llama2", "llm", "openai", "orchestration", "python", "rag" ], "text": "Configuration\n\nIf you need to customize the configuration, please refer to the comments in our [docker-compose.yml](docker/docker-compose.yaml) file and manually set the environment configuration. After making the changes, please run `docker-compose up -d` again. You can see the full list of environment variables in our [docs](https://docs.dify.ai/getting-started/install-self-hosted/environments).\n\n\n", "sentence": [ [ "configuration", "need", "customize", "configuration", ",", "please", "refer", "comment", "[", "docker-compose.yml", "]", "(", "docker/docker-compose.yaml", ")", "file", "manually", "set", "environment", "configuration", ".", "making", "change", ",", "please", "run", "`", "docker-compose", "-d", "`", ".", "see", "full", "list", "environment", "variable", "[", "doc", "]", "(", "http", ":", "//docs.dify.ai/getting-started/install-self-hosted/environments", ")", "." ], [ "configuration need customize configuration , please refer comment [ docker-compose.yml ] ( docker/docker-compose.yaml ) file manually set environment configuration .", "making change , please run ` docker-compose -d ` .", "see full list environment variable [ doc ] ( http : //docs.dify.ai/getting-started/install-self-hosted/environments ) ." ] ], "token": [ [ "configuration", "need", "customize", "configuration", ",", "please", "refer", "comment", "[", "docker-compose.yml", "]", "(", "docker/docker-compose.yaml", ")", "file", "manually", "set", "environment", "configuration", ".", "making", "change", ",", "please", "run", "`", "docker-compose", "-d", "`", ".", "see", "full", "list", "environment", "variable", "[", "doc", "]", "(", "http", ":", "//docs.dify.ai/getting-started/install-self-hosted/environments", ")", "." ], [ "configuration need customize configuration , please refer comment [ docker-compose.yml ] ( docker/docker-compose.yaml ) file manually set environment configuration .", "making change , please run ` docker-compose -d ` .", "see full list environment variable [ doc ] ( http : //docs.dify.ai/getting-started/install-self-hosted/environments ) ." ] ], "level of complexity": -1 }, { "url": "https://github.com/THUDM/ChatGLM2-6B", "readme_url": "https://raw.githubusercontent.com/THUDM/ChatGLM2-6B/main/README.md", "topic": [ "chatglm", "chatglm-6b", "large-language-models", "llm" ], "text": "\u73af\u5883\u5b89\u88c5\n\u9996\u5148\u9700\u8981\u4e0b\u8f7d\u672c\u4ed3\u5e93\uff1a\n```shell\ngit clone https://github.com/THUDM/ChatGLM2-6B\ncd ChatGLM2-6B\n```\n\n\u7136\u540e\u4f7f\u7528 pip \u5b89\u88c5\u4f9d\u8d56\uff1a\n```\npip install -r requirements.txt\n```\n\u5176\u4e2d `transformers` \u5e93\u7248\u672c\u63a8\u8350\u4e3a `4.30.2`\uff0c`torch` \u63a8\u8350\u4f7f\u7528 2.0 \u53ca\u4ee5\u4e0a\u7684\u7248\u672c\uff0c\u4ee5\u83b7\u5f97\u6700\u4f73\u7684\u63a8\u7406\u6027\u80fd\u3002\n\n", "sentence": [ [ "\u73af\u5883\u5b89\u88c5", "\u9996\u5148\u9700\u8981\u4e0b\u8f7d\u672c\u4ed3\u5e93\uff1a", "``", "`", "shell", "git", "clone", "http", ":", "//github.com/thudm/chatglm2-6b", "cd", "chatglm2-6b", "``", "`", "\u7136\u540e\u4f7f\u7528", "pip", "\u5b89\u88c5\u4f9d\u8d56\uff1a", "``", "`", "pip", "install", "-r", "requirements.txt", "``", "`", "\u5176\u4e2d", "`", "transformer", "`", "\u5e93\u7248\u672c\u63a8\u8350\u4e3a", "`", "4.30.2", "`", "\uff0c", "`", "torch", "`", "\u63a8\u8350\u4f7f\u7528", "2.0", "\u53ca\u4ee5\u4e0a\u7684\u7248\u672c\uff0c\u4ee5\u83b7\u5f97\u6700\u4f73\u7684\u63a8\u7406\u6027\u80fd\u3002" ], [ "\u73af\u5883\u5b89\u88c5 \u9996\u5148\u9700\u8981\u4e0b\u8f7d\u672c\u4ed3\u5e93\uff1a `` ` shell git clone http : //github.com/thudm/chatglm2-6b cd chatglm2-6b `` ` \u7136\u540e\u4f7f\u7528 pip \u5b89\u88c5\u4f9d\u8d56\uff1a `` ` pip install -r requirements.txt `` ` \u5176\u4e2d ` transformer ` \u5e93\u7248\u672c\u63a8\u8350\u4e3a ` 4.30.2 ` \uff0c ` torch ` \u63a8\u8350\u4f7f\u7528 2.0 \u53ca\u4ee5\u4e0a\u7684\u7248\u672c\uff0c\u4ee5\u83b7\u5f97\u6700\u4f73\u7684\u63a8\u7406\u6027\u80fd\u3002" ] ], "token": [ [ "\u73af\u5883\u5b89\u88c5", "\u9996\u5148\u9700\u8981\u4e0b\u8f7d\u672c\u4ed3\u5e93\uff1a", "``", "`", "shell", "git", "clone", "http", ":", "//github.com/thudm/chatglm2-6b", "cd", "chatglm2-6b", "``", "`", "\u7136\u540e\u4f7f\u7528", "pip", "\u5b89\u88c5\u4f9d\u8d56\uff1a", "``", "`", "pip", "install", "-r", "requirements.txt", "``", "`", "\u5176\u4e2d", "`", "transformer", "`", "\u5e93\u7248\u672c\u63a8\u8350\u4e3a", "`", "4.30.2", "`", "\uff0c", "`", "torch", "`", "\u63a8\u8350\u4f7f\u7528", "2.0", "\u53ca\u4ee5\u4e0a\u7684\u7248\u672c\uff0c\u4ee5\u83b7\u5f97\u6700\u4f73\u7684\u63a8\u7406\u6027\u80fd\u3002" ], [ "\u73af\u5883\u5b89\u88c5 \u9996\u5148\u9700\u8981\u4e0b\u8f7d\u672c\u4ed3\u5e93\uff1a `` ` shell git clone http : //github.com/thudm/chatglm2-6b cd chatglm2-6b `` ` \u7136\u540e\u4f7f\u7528 pip \u5b89\u88c5\u4f9d\u8d56\uff1a `` ` pip install -r requirements.txt `` ` \u5176\u4e2d ` transformer ` \u5e93\u7248\u672c\u63a8\u8350\u4e3a ` 4.30.2 ` \uff0c ` torch ` \u63a8\u8350\u4f7f\u7528 2.0 \u53ca\u4ee5\u4e0a\u7684\u7248\u672c\uff0c\u4ee5\u83b7\u5f97\u6700\u4f73\u7684\u63a8\u7406\u6027\u80fd\u3002" ] ], "level of complexity": 0 }, { "url": "https://github.com/THUDM/ChatGLM2-6B", "readme_url": "https://raw.githubusercontent.com/THUDM/ChatGLM2-6B/main/README.md", "topic": [ "chatglm", "chatglm-6b", "large-language-models", "llm" ], "text": "\u4ece\u672c\u5730\u52a0\u8f7d\u6a21\u578b\n\u4ee5\u4e0a\u4ee3\u7801\u4f1a\u7531 `transformers` \u81ea\u52a8\u4e0b\u8f7d\u6a21\u578b\u5b9e\u73b0\u548c\u53c2\u6570\u3002\u5b8c\u6574\u7684\u6a21\u578b\u5b9e\u73b0\u5728 [Hugging Face Hub](https://huggingface.co/THUDM/chatglm2-6b)\u3002\u5982\u679c\u4f60\u7684\u7f51\u7edc\u73af\u5883\u8f83\u5dee\uff0c\u4e0b\u8f7d\u6a21\u578b\u53c2\u6570\u53ef\u80fd\u4f1a\u82b1\u8d39\u8f83\u957f\u65f6\u95f4\u751a\u81f3\u5931\u8d25\u3002\u6b64\u65f6\u53ef\u4ee5\u5148\u5c06\u6a21\u578b\u4e0b\u8f7d\u5230\u672c\u5730\uff0c\u7136\u540e\u4ece\u672c\u5730\u52a0\u8f7d\u3002\n\n\u4ece Hugging Face Hub \u4e0b\u8f7d\u6a21\u578b\u9700\u8981\u5148[\u5b89\u88c5Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage)\uff0c\u7136\u540e\u8fd0\u884c\n```Shell\ngit clone https://huggingface.co/THUDM/chatglm2-6b\n```\n\n\u5982\u679c\u4f60\u4ece Hugging Face Hub \u4e0a\u4e0b\u8f7d checkpoint \u7684\u901f\u5ea6\u8f83\u6162\uff0c\u53ef\u4ee5\u53ea\u4e0b\u8f7d\u6a21\u578b\u5b9e\u73b0\n```Shell\nGIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/THUDM/chatglm2-6b\n```\n\u7136\u540e\u4ece[\u8fd9\u91cc](https://cloud.tsinghua.edu.cn/d/674208019e314311ab5c/)\u624b\u52a8\u4e0b\u8f7d\u6a21\u578b\u53c2\u6570\u6587\u4ef6\uff0c\u5e76\u5c06\u4e0b\u8f7d\u7684\u6587\u4ef6\u66ff\u6362\u5230\u672c\u5730\u7684 `chatglm2-6b` \u76ee\u5f55\u4e0b\u3002\n\n\n\u5c06\u6a21\u578b\u4e0b\u8f7d\u5230\u672c\u5730\u4e4b\u540e\uff0c\u5c06\u4ee5\u4e0a\u4ee3\u7801\u4e2d\u7684 `THUDM/chatglm2-6b` \u66ff\u6362\u4e3a\u4f60\u672c\u5730\u7684 `chatglm2-6b` \u6587\u4ef6\u5939\u7684\u8def\u5f84\uff0c\u5373\u53ef\u4ece\u672c\u5730\u52a0\u8f7d\u6a21\u578b\u3002\n\n\u6a21\u578b\u7684\u5b9e\u73b0\u4ecd\u7136\u5904\u5728\u53d8\u52a8\u4e2d\u3002\u5982\u679c\u5e0c\u671b\u56fa\u5b9a\u4f7f\u7528\u7684\u6a21\u578b\u5b9e\u73b0\u4ee5\u4fdd\u8bc1\u517c\u5bb9\u6027\uff0c\u53ef\u4ee5\u5728 `from_pretrained` \u7684\u8c03\u7528\u4e2d\u589e\u52a0 `revision=\"v1.0\"` \u53c2\u6570\u3002`v1.0` \u662f\u5f53\u524d\u6700\u65b0\u7684\u7248\u672c\u53f7\uff0c\u5b8c\u6574\u7684\u7248\u672c\u5217\u8868\u53c2\u89c1 [Change Log](https://huggingface.co/THUDM/chatglm2-6b#change-log)\u3002\n\n", "sentence": [ [ "\u4ece\u672c\u5730\u52a0\u8f7d\u6a21\u578b", "\u4ee5\u4e0a\u4ee3\u7801\u4f1a\u7531", "`", "transformer", "`", "\u81ea\u52a8\u4e0b\u8f7d\u6a21\u578b\u5b9e\u73b0\u548c\u53c2\u6570\u3002\u5b8c\u6574\u7684\u6a21\u578b\u5b9e\u73b0\u5728", "[", "hugging", "face", "hub", "]", "(", "http", ":", "//huggingface.co/thudm/chatglm2-6b", ")", "\u3002\u5982\u679c\u4f60\u7684\u7f51\u7edc\u73af\u5883\u8f83\u5dee\uff0c\u4e0b\u8f7d\u6a21\u578b\u53c2\u6570\u53ef\u80fd\u4f1a\u82b1\u8d39\u8f83\u957f\u65f6\u95f4\u751a\u81f3\u5931\u8d25\u3002\u6b64\u65f6\u53ef\u4ee5\u5148\u5c06\u6a21\u578b\u4e0b\u8f7d\u5230\u672c\u5730\uff0c\u7136\u540e\u4ece\u672c\u5730\u52a0\u8f7d\u3002", "\u4ece", "hugging", "face", "hub", "\u4e0b\u8f7d\u6a21\u578b\u9700\u8981\u5148", "[", "\u5b89\u88c5git", "lf", "]", "(", "http", ":", "//docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage", ")", "\uff0c\u7136\u540e\u8fd0\u884c", "``", "`", "shell", "git", "clone", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "``", "`", "\u5982\u679c\u4f60\u4ece", "hugging", "face", "hub", "\u4e0a\u4e0b\u8f7d", "checkpoint", "\u7684\u901f\u5ea6\u8f83\u6162\uff0c\u53ef\u4ee5\u53ea\u4e0b\u8f7d\u6a21\u578b\u5b9e\u73b0", "``", "`", "shell", "git_lfs_skip_smudge=1", "git", "clone", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "``", "`", "\u7136\u540e\u4ece", "[", "\u8fd9\u91cc", "]", "(", "http", ":", "//cloud.tsinghua.edu.cn/d/674208019e314311ab5c/", ")", "\u624b\u52a8\u4e0b\u8f7d\u6a21\u578b\u53c2\u6570\u6587\u4ef6\uff0c\u5e76\u5c06\u4e0b\u8f7d\u7684\u6587\u4ef6\u66ff\u6362\u5230\u672c\u5730\u7684", "`", "chatglm2-6b", "`", "\u76ee\u5f55\u4e0b\u3002", "\u5c06\u6a21\u578b\u4e0b\u8f7d\u5230\u672c\u5730\u4e4b\u540e\uff0c\u5c06\u4ee5\u4e0a\u4ee3\u7801\u4e2d\u7684", "`", "thudm/chatglm2-6b", "`", "\u66ff\u6362\u4e3a\u4f60\u672c\u5730\u7684", "`", "chatglm2-6b", "`", "\u6587\u4ef6\u5939\u7684\u8def\u5f84\uff0c\u5373\u53ef\u4ece\u672c\u5730\u52a0\u8f7d\u6a21\u578b\u3002", "\u6a21\u578b\u7684\u5b9e\u73b0\u4ecd\u7136\u5904\u5728\u53d8\u52a8\u4e2d\u3002\u5982\u679c\u5e0c\u671b\u56fa\u5b9a\u4f7f\u7528\u7684\u6a21\u578b\u5b9e\u73b0\u4ee5\u4fdd\u8bc1\u517c\u5bb9\u6027\uff0c\u53ef\u4ee5\u5728", "`", "from_pretrained", "`", "\u7684\u8c03\u7528\u4e2d\u589e\u52a0", "`", "revision=", "''", "v1.0", "''", "`", "\u53c2\u6570\u3002", "`", "v1.0", "`", "\u662f\u5f53\u524d\u6700\u65b0\u7684\u7248\u672c\u53f7\uff0c\u5b8c\u6574\u7684\u7248\u672c\u5217\u8868\u53c2\u89c1", "[", "change", "log", "]", "(", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "#", "change-log", ")", "\u3002" ], [ "\u4ece\u672c\u5730\u52a0\u8f7d\u6a21\u578b \u4ee5\u4e0a\u4ee3\u7801\u4f1a\u7531 ` transformer ` \u81ea\u52a8\u4e0b\u8f7d\u6a21\u578b\u5b9e\u73b0\u548c\u53c2\u6570\u3002\u5b8c\u6574\u7684\u6a21\u578b\u5b9e\u73b0\u5728 [ hugging face hub ] ( http : //huggingface.co/thudm/chatglm2-6b ) \u3002\u5982\u679c\u4f60\u7684\u7f51\u7edc\u73af\u5883\u8f83\u5dee\uff0c\u4e0b\u8f7d\u6a21\u578b\u53c2\u6570\u53ef\u80fd\u4f1a\u82b1\u8d39\u8f83\u957f\u65f6\u95f4\u751a\u81f3\u5931\u8d25\u3002\u6b64\u65f6\u53ef\u4ee5\u5148\u5c06\u6a21\u578b\u4e0b\u8f7d\u5230\u672c\u5730\uff0c\u7136\u540e\u4ece\u672c\u5730\u52a0\u8f7d\u3002 \u4ece hugging face hub \u4e0b\u8f7d\u6a21\u578b\u9700\u8981\u5148 [ \u5b89\u88c5git lf ] ( http : //docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage ) \uff0c\u7136\u540e\u8fd0\u884c `` ` shell git clone http : //huggingface.co/thudm/chatglm2-6b `` ` \u5982\u679c\u4f60\u4ece hugging face hub \u4e0a\u4e0b\u8f7d checkpoint \u7684\u901f\u5ea6\u8f83\u6162\uff0c\u53ef\u4ee5\u53ea\u4e0b\u8f7d\u6a21\u578b\u5b9e\u73b0 `` ` shell git_lfs_skip_smudge=1 git clone http : //huggingface.co/thudm/chatglm2-6b `` ` \u7136\u540e\u4ece [ \u8fd9\u91cc ] ( http : //cloud.tsinghua.edu.cn/d/674208019e314311ab5c/ ) \u624b\u52a8\u4e0b\u8f7d\u6a21\u578b\u53c2\u6570\u6587\u4ef6\uff0c\u5e76\u5c06\u4e0b\u8f7d\u7684\u6587\u4ef6\u66ff\u6362\u5230\u672c\u5730\u7684 ` chatglm2-6b ` \u76ee\u5f55\u4e0b\u3002 \u5c06\u6a21\u578b\u4e0b\u8f7d\u5230\u672c\u5730\u4e4b\u540e\uff0c\u5c06\u4ee5\u4e0a\u4ee3\u7801\u4e2d\u7684 ` thudm/chatglm2-6b ` \u66ff\u6362\u4e3a\u4f60\u672c\u5730\u7684 ` chatglm2-6b ` \u6587\u4ef6\u5939\u7684\u8def\u5f84\uff0c\u5373\u53ef\u4ece\u672c\u5730\u52a0\u8f7d\u6a21\u578b\u3002 \u6a21\u578b\u7684\u5b9e\u73b0\u4ecd\u7136\u5904\u5728\u53d8\u52a8\u4e2d\u3002\u5982\u679c\u5e0c\u671b\u56fa\u5b9a\u4f7f\u7528\u7684\u6a21\u578b\u5b9e\u73b0\u4ee5\u4fdd\u8bc1\u517c\u5bb9\u6027\uff0c\u53ef\u4ee5\u5728 ` from_pretrained ` \u7684\u8c03\u7528\u4e2d\u589e\u52a0 ` revision= '' v1.0 '' ` \u53c2\u6570\u3002 ` v1.0 ` \u662f\u5f53\u524d\u6700\u65b0\u7684\u7248\u672c\u53f7\uff0c\u5b8c\u6574\u7684\u7248\u672c\u5217\u8868\u53c2\u89c1 [ change log ] ( http : //huggingface.co/thudm/chatglm2-6b # change-log ) \u3002" ] ], "token": [ [ "\u4ece\u672c\u5730\u52a0\u8f7d\u6a21\u578b", "\u4ee5\u4e0a\u4ee3\u7801\u4f1a\u7531", "`", "transformer", "`", "\u81ea\u52a8\u4e0b\u8f7d\u6a21\u578b\u5b9e\u73b0\u548c\u53c2\u6570\u3002\u5b8c\u6574\u7684\u6a21\u578b\u5b9e\u73b0\u5728", "[", "hugging", "face", "hub", "]", "(", "http", ":", "//huggingface.co/thudm/chatglm2-6b", ")", "\u3002\u5982\u679c\u4f60\u7684\u7f51\u7edc\u73af\u5883\u8f83\u5dee\uff0c\u4e0b\u8f7d\u6a21\u578b\u53c2\u6570\u53ef\u80fd\u4f1a\u82b1\u8d39\u8f83\u957f\u65f6\u95f4\u751a\u81f3\u5931\u8d25\u3002\u6b64\u65f6\u53ef\u4ee5\u5148\u5c06\u6a21\u578b\u4e0b\u8f7d\u5230\u672c\u5730\uff0c\u7136\u540e\u4ece\u672c\u5730\u52a0\u8f7d\u3002", "\u4ece", "hugging", "face", "hub", "\u4e0b\u8f7d\u6a21\u578b\u9700\u8981\u5148", "[", "\u5b89\u88c5git", "lf", "]", "(", "http", ":", "//docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage", ")", "\uff0c\u7136\u540e\u8fd0\u884c", "``", "`", "shell", "git", "clone", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "``", "`", "\u5982\u679c\u4f60\u4ece", "hugging", "face", "hub", "\u4e0a\u4e0b\u8f7d", "checkpoint", "\u7684\u901f\u5ea6\u8f83\u6162\uff0c\u53ef\u4ee5\u53ea\u4e0b\u8f7d\u6a21\u578b\u5b9e\u73b0", "``", "`", "shell", "git_lfs_skip_smudge=1", "git", "clone", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "``", "`", "\u7136\u540e\u4ece", "[", "\u8fd9\u91cc", "]", "(", "http", ":", "//cloud.tsinghua.edu.cn/d/674208019e314311ab5c/", ")", "\u624b\u52a8\u4e0b\u8f7d\u6a21\u578b\u53c2\u6570\u6587\u4ef6\uff0c\u5e76\u5c06\u4e0b\u8f7d\u7684\u6587\u4ef6\u66ff\u6362\u5230\u672c\u5730\u7684", "`", "chatglm2-6b", "`", "\u76ee\u5f55\u4e0b\u3002", "\u5c06\u6a21\u578b\u4e0b\u8f7d\u5230\u672c\u5730\u4e4b\u540e\uff0c\u5c06\u4ee5\u4e0a\u4ee3\u7801\u4e2d\u7684", "`", "thudm/chatglm2-6b", "`", "\u66ff\u6362\u4e3a\u4f60\u672c\u5730\u7684", "`", "chatglm2-6b", "`", "\u6587\u4ef6\u5939\u7684\u8def\u5f84\uff0c\u5373\u53ef\u4ece\u672c\u5730\u52a0\u8f7d\u6a21\u578b\u3002", "\u6a21\u578b\u7684\u5b9e\u73b0\u4ecd\u7136\u5904\u5728\u53d8\u52a8\u4e2d\u3002\u5982\u679c\u5e0c\u671b\u56fa\u5b9a\u4f7f\u7528\u7684\u6a21\u578b\u5b9e\u73b0\u4ee5\u4fdd\u8bc1\u517c\u5bb9\u6027\uff0c\u53ef\u4ee5\u5728", "`", "from_pretrained", "`", "\u7684\u8c03\u7528\u4e2d\u589e\u52a0", "`", "revision=", "''", "v1.0", "''", "`", "\u53c2\u6570\u3002", "`", "v1.0", "`", "\u662f\u5f53\u524d\u6700\u65b0\u7684\u7248\u672c\u53f7\uff0c\u5b8c\u6574\u7684\u7248\u672c\u5217\u8868\u53c2\u89c1", "[", "change", "log", "]", "(", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "#", "change-log", ")", "\u3002" ], [ "\u4ece\u672c\u5730\u52a0\u8f7d\u6a21\u578b \u4ee5\u4e0a\u4ee3\u7801\u4f1a\u7531 ` transformer ` \u81ea\u52a8\u4e0b\u8f7d\u6a21\u578b\u5b9e\u73b0\u548c\u53c2\u6570\u3002\u5b8c\u6574\u7684\u6a21\u578b\u5b9e\u73b0\u5728 [ hugging face hub ] ( http : //huggingface.co/thudm/chatglm2-6b ) \u3002\u5982\u679c\u4f60\u7684\u7f51\u7edc\u73af\u5883\u8f83\u5dee\uff0c\u4e0b\u8f7d\u6a21\u578b\u53c2\u6570\u53ef\u80fd\u4f1a\u82b1\u8d39\u8f83\u957f\u65f6\u95f4\u751a\u81f3\u5931\u8d25\u3002\u6b64\u65f6\u53ef\u4ee5\u5148\u5c06\u6a21\u578b\u4e0b\u8f7d\u5230\u672c\u5730\uff0c\u7136\u540e\u4ece\u672c\u5730\u52a0\u8f7d\u3002 \u4ece hugging face hub \u4e0b\u8f7d\u6a21\u578b\u9700\u8981\u5148 [ \u5b89\u88c5git lf ] ( http : //docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage ) \uff0c\u7136\u540e\u8fd0\u884c `` ` shell git clone http : //huggingface.co/thudm/chatglm2-6b `` ` \u5982\u679c\u4f60\u4ece hugging face hub \u4e0a\u4e0b\u8f7d checkpoint \u7684\u901f\u5ea6\u8f83\u6162\uff0c\u53ef\u4ee5\u53ea\u4e0b\u8f7d\u6a21\u578b\u5b9e\u73b0 `` ` shell git_lfs_skip_smudge=1 git clone http : //huggingface.co/thudm/chatglm2-6b `` ` \u7136\u540e\u4ece [ \u8fd9\u91cc ] ( http : //cloud.tsinghua.edu.cn/d/674208019e314311ab5c/ ) \u624b\u52a8\u4e0b\u8f7d\u6a21\u578b\u53c2\u6570\u6587\u4ef6\uff0c\u5e76\u5c06\u4e0b\u8f7d\u7684\u6587\u4ef6\u66ff\u6362\u5230\u672c\u5730\u7684 ` chatglm2-6b ` \u76ee\u5f55\u4e0b\u3002 \u5c06\u6a21\u578b\u4e0b\u8f7d\u5230\u672c\u5730\u4e4b\u540e\uff0c\u5c06\u4ee5\u4e0a\u4ee3\u7801\u4e2d\u7684 ` thudm/chatglm2-6b ` \u66ff\u6362\u4e3a\u4f60\u672c\u5730\u7684 ` chatglm2-6b ` \u6587\u4ef6\u5939\u7684\u8def\u5f84\uff0c\u5373\u53ef\u4ece\u672c\u5730\u52a0\u8f7d\u6a21\u578b\u3002 \u6a21\u578b\u7684\u5b9e\u73b0\u4ecd\u7136\u5904\u5728\u53d8\u52a8\u4e2d\u3002\u5982\u679c\u5e0c\u671b\u56fa\u5b9a\u4f7f\u7528\u7684\u6a21\u578b\u5b9e\u73b0\u4ee5\u4fdd\u8bc1\u517c\u5bb9\u6027\uff0c\u53ef\u4ee5\u5728 ` from_pretrained ` \u7684\u8c03\u7528\u4e2d\u589e\u52a0 ` revision= '' v1.0 '' ` \u53c2\u6570\u3002 ` v1.0 ` \u662f\u5f53\u524d\u6700\u65b0\u7684\u7248\u672c\u53f7\uff0c\u5b8c\u6574\u7684\u7248\u672c\u5217\u8868\u53c2\u89c1 [ change log ] ( http : //huggingface.co/thudm/chatglm2-6b # change-log ) \u3002" ] ], "level of complexity": 2 }, { "url": "https://github.com/THUDM/ChatGLM2-6B", "readme_url": "https://raw.githubusercontent.com/THUDM/ChatGLM2-6B/main/README.md", "topic": [ "chatglm", "chatglm-6b", "large-language-models", "llm" ], "text": "API \u90e8\u7f72\n\u9996\u5148\u9700\u8981\u5b89\u88c5\u989d\u5916\u7684\u4f9d\u8d56 `pip install fastapi uvicorn`\uff0c\u7136\u540e\u8fd0\u884c\u4ed3\u5e93\u4e2d\u7684 [api.py](api.py)\uff1a\n```shell\npython api.py\n```\n\u9ed8\u8ba4\u90e8\u7f72\u5728\u672c\u5730\u7684 8000 \u7aef\u53e3\uff0c\u901a\u8fc7 POST \u65b9\u6cd5\u8fdb\u884c\u8c03\u7528\n```shell\ncurl -X POST \"http://127.0.0.1:8000\" \\\n -H 'Content-Type: application/json' \\\n -d '{\"prompt\": \"\u4f60\u597d\", \"history\": []}'\n```\n\u5f97\u5230\u7684\u8fd4\u56de\u503c\u4e3a\n```shell\n{\n \"response\":\"\u4f60\u597d\ud83d\udc4b\uff01\u6211\u662f\u4eba\u5de5\u667a\u80fd\u52a9\u624b ChatGLM2-6B\uff0c\u5f88\u9ad8\u5174\u89c1\u5230\u4f60\uff0c\u6b22\u8fce\u95ee\u6211\u4efb\u4f55\u95ee\u9898\u3002\",\n \"history\":[[\"\u4f60\u597d\",\"\u4f60\u597d\ud83d\udc4b\uff01\u6211\u662f\u4eba\u5de5\u667a\u80fd\u52a9\u624b ChatGLM2-6B\uff0c\u5f88\u9ad8\u5174\u89c1\u5230\u4f60\uff0c\u6b22\u8fce\u95ee\u6211\u4efb\u4f55\u95ee\u9898\u3002\"]],\n \"status\":200,\n \"time\":\"2023-03-23 21:38:40\"\n}\n```\n\u611f\u8c22 [@hiyouga]() \u5b9e\u73b0\u4e86 OpenAI \u683c\u5f0f\u7684\u6d41\u5f0f API \u90e8\u7f72\uff0c\u53ef\u4ee5\u4f5c\u4e3a\u4efb\u610f\u57fa\u4e8e ChatGPT \u7684\u5e94\u7528\u7684\u540e\u7aef\uff0c\u6bd4\u5982 [ChatGPT-Next-Web](https://github.com/Yidadaa/ChatGPT-Next-Web)\u3002\u53ef\u4ee5\u901a\u8fc7\u8fd0\u884c\u4ed3\u5e93\u4e2d\u7684[openai_api.py](openai_api.py) \u8fdb\u884c\u90e8\u7f72\uff1a\n```shell\npython openai_api.py\n```\n\u8fdb\u884c API \u8c03\u7528\u7684\u793a\u4f8b\u4ee3\u7801\u4e3a\n```python\nimport openai\nif __name__ == \"__main__\":\n openai.api_base = \"http://localhost:8000/v1\"\n openai.api_key = \"none\"\n for chunk in openai.ChatCompletion.create(\n model=\"chatglm2-6b\",\n messages=[\n {\"role\": \"user\", \"content\": \"\u4f60\u597d\"}\n ],\n stream=True\n ):\n if hasattr(chunk.choices[0].delta, \"content\"):\n print(chunk.choices[0].delta.content, end=\"\", flush=True)\n```\n\n\n", "sentence": [ [ "api", "\u90e8\u7f72", "\u9996\u5148\u9700\u8981\u5b89\u88c5\u989d\u5916\u7684\u4f9d\u8d56", "`", "pip", "install", "fastapi", "uvicorn", "`", "\uff0c\u7136\u540e\u8fd0\u884c\u4ed3\u5e93\u4e2d\u7684", "[", "api.py", "]", "(", "api.py", ")", "\uff1a", "``", "`", "shell", "python", "api.py", "``", "`", "\u9ed8\u8ba4\u90e8\u7f72\u5728\u672c\u5730\u7684", "8000", "\u7aef\u53e3\uff0c\u901a\u8fc7", "post", "\u65b9\u6cd5\u8fdb\u884c\u8c03\u7528", "``", "`", "shell", "curl", "-x", "post", "``", "http", ":", "//127.0.0.1:8000", "''", "\\", "-h", "'content-type", ":", "application/json", "'", "\\", "-d", "'", "{", "``", "prompt", "''", ":", "``", "\u4f60\u597d", "''", ",", "``", "history", "''", ":", "[", "]", "}", "'", "``", "`", "\u5f97\u5230\u7684\u8fd4\u56de\u503c\u4e3a", "``", "`", "shell", "{", "``", "response", "''", ":", "''", "\u4f60\u597d\ud83d\udc4b\uff01\u6211\u662f\u4eba\u5de5\u667a\u80fd\u52a9\u624b", "chatglm2-6b\uff0c\u5f88\u9ad8\u5174\u89c1\u5230\u4f60\uff0c\u6b22\u8fce\u95ee\u6211\u4efb\u4f55\u95ee\u9898\u3002", "''", ",", "``", "history", "''", ":", "[", "[", "``", "\u4f60\u597d", "''", ",", "''", "\u4f60\u597d\ud83d\udc4b\uff01\u6211\u662f\u4eba\u5de5\u667a\u80fd\u52a9\u624b", "chatglm2-6b\uff0c\u5f88\u9ad8\u5174\u89c1\u5230\u4f60\uff0c\u6b22\u8fce\u95ee\u6211\u4efb\u4f55\u95ee\u9898\u3002", "''", "]", "]", ",", "``", "status", "''", ":200", ",", "``", "time", "''", ":", "''", "2023-03-23", "21:38:40", "''", "}", "``", "`", "\u611f\u8c22", "[", "@", "hiyouga", "]", "(", ")", "\u5b9e\u73b0\u4e86", "openai", "\u683c\u5f0f\u7684\u6d41\u5f0f", "api", "\u90e8\u7f72\uff0c\u53ef\u4ee5\u4f5c\u4e3a\u4efb\u610f\u57fa\u4e8e", "chatgpt", "\u7684\u5e94\u7528\u7684\u540e\u7aef\uff0c\u6bd4\u5982", "[", "chatgpt-next-web", "]", "(", "http", ":", "//github.com/yidadaa/chatgpt-next-web", ")", "\u3002\u53ef\u4ee5\u901a\u8fc7\u8fd0\u884c\u4ed3\u5e93\u4e2d\u7684", "[", "openai_api.py", "]", "(", "openai_api.py", ")", "\u8fdb\u884c\u90e8\u7f72\uff1a", "``", "`", "shell", "python", "openai_api.py", "``", "`", "\u8fdb\u884c", "api", "\u8c03\u7528\u7684\u793a\u4f8b\u4ee3\u7801\u4e3a", "``", "`", "python", "import", "openai", "__name__", "==", "``", "__main__", "''", ":", "openai.api_base", "=", "``", "http", ":", "//localhost:8000/v1", "''", "openai.api_key", "=", "``", "none", "''", "chunk", "openai.chatcompletion.create", "(", "model=", "''", "chatglm2-6b", "''", ",", "messages=", "[", "{", "``", "role", "''", ":", "``", "user", "''", ",", "``", "content", "''", ":", "``", "\u4f60\u597d", "''", "}", "]", ",", "stream=true", ")", ":", "hasattr", "(", "chunk.choices", "[", "0", "]", ".delta", ",", "``", "content", "''", ")", ":", "print", "(", "chunk.choices", "[", "0", "]", ".delta.content", ",", "end=", "''", "''", ",", "flush=true", ")", "``", "`" ], [ "api \u90e8\u7f72 \u9996\u5148\u9700\u8981\u5b89\u88c5\u989d\u5916\u7684\u4f9d\u8d56 ` pip install fastapi uvicorn ` \uff0c\u7136\u540e\u8fd0\u884c\u4ed3\u5e93\u4e2d\u7684 [ api.py ] ( api.py ) \uff1a `` ` shell python api.py `` ` \u9ed8\u8ba4\u90e8\u7f72\u5728\u672c\u5730\u7684 8000 \u7aef\u53e3\uff0c\u901a\u8fc7 post \u65b9\u6cd5\u8fdb\u884c\u8c03\u7528 `` ` shell curl -x post `` http : //127.0.0.1:8000 '' \\ -h 'content-type : application/json ' \\ -d ' { `` prompt '' : `` \u4f60\u597d '' , `` history '' : [ ] } ' `` ` \u5f97\u5230\u7684\u8fd4\u56de\u503c\u4e3a `` ` shell { `` response '' : '' \u4f60\u597d\ud83d\udc4b\uff01\u6211\u662f\u4eba\u5de5\u667a\u80fd\u52a9\u624b chatglm2-6b\uff0c\u5f88\u9ad8\u5174\u89c1\u5230\u4f60\uff0c\u6b22\u8fce\u95ee\u6211\u4efb\u4f55\u95ee\u9898\u3002 '' , `` history '' : [ [ `` \u4f60\u597d '' , '' \u4f60\u597d\ud83d\udc4b\uff01\u6211\u662f\u4eba\u5de5\u667a\u80fd\u52a9\u624b chatglm2-6b\uff0c\u5f88\u9ad8\u5174\u89c1\u5230\u4f60\uff0c\u6b22\u8fce\u95ee\u6211\u4efb\u4f55\u95ee\u9898\u3002 '' ] ] , `` status '' :200 , `` time '' : '' 2023-03-23 21:38:40 '' } `` ` \u611f\u8c22 [ @ hiyouga ] ( ) \u5b9e\u73b0\u4e86 openai \u683c\u5f0f\u7684\u6d41\u5f0f api \u90e8\u7f72\uff0c\u53ef\u4ee5\u4f5c\u4e3a\u4efb\u610f\u57fa\u4e8e chatgpt \u7684\u5e94\u7528\u7684\u540e\u7aef\uff0c\u6bd4\u5982 [ chatgpt-next-web ] ( http : //github.com/yidadaa/chatgpt-next-web ) \u3002\u53ef\u4ee5\u901a\u8fc7\u8fd0\u884c\u4ed3\u5e93\u4e2d\u7684 [ openai_api.py ] ( openai_api.py ) \u8fdb\u884c\u90e8\u7f72\uff1a `` ` shell python openai_api.py `` ` \u8fdb\u884c api \u8c03\u7528\u7684\u793a\u4f8b\u4ee3\u7801\u4e3a `` ` python import openai __name__ == `` __main__ '' : openai.api_base = `` http : //localhost:8000/v1 '' openai.api_key = `` none '' chunk openai.chatcompletion.create ( model= '' chatglm2-6b '' , messages= [ { `` role '' : `` user '' , `` content '' : `` \u4f60\u597d '' } ] , stream=true ) : hasattr ( chunk.choices [ 0 ] .delta , `` content '' ) : print ( chunk.choices [ 0 ] .delta.content , end= '' '' , flush=true ) `` `" ] ], "token": [ [ "api", "\u90e8\u7f72", "\u9996\u5148\u9700\u8981\u5b89\u88c5\u989d\u5916\u7684\u4f9d\u8d56", "`", "pip", "install", "fastapi", "uvicorn", "`", "\uff0c\u7136\u540e\u8fd0\u884c\u4ed3\u5e93\u4e2d\u7684", "[", "api.py", "]", "(", "api.py", ")", "\uff1a", "``", "`", "shell", "python", "api.py", "``", "`", "\u9ed8\u8ba4\u90e8\u7f72\u5728\u672c\u5730\u7684", "8000", "\u7aef\u53e3\uff0c\u901a\u8fc7", "post", "\u65b9\u6cd5\u8fdb\u884c\u8c03\u7528", "``", "`", "shell", "curl", "-x", "post", "``", "http", ":", "//127.0.0.1:8000", "''", "\\", "-h", "'content-type", ":", "application/json", "'", "\\", "-d", "'", "{", "``", "prompt", "''", ":", "``", "\u4f60\u597d", "''", ",", "``", "history", "''", ":", "[", "]", "}", "'", "``", "`", "\u5f97\u5230\u7684\u8fd4\u56de\u503c\u4e3a", "``", "`", "shell", "{", "``", "response", "''", ":", "''", "\u4f60\u597d\ud83d\udc4b\uff01\u6211\u662f\u4eba\u5de5\u667a\u80fd\u52a9\u624b", "chatglm2-6b\uff0c\u5f88\u9ad8\u5174\u89c1\u5230\u4f60\uff0c\u6b22\u8fce\u95ee\u6211\u4efb\u4f55\u95ee\u9898\u3002", "''", ",", "``", "history", "''", ":", "[", "[", "``", "\u4f60\u597d", "''", ",", "''", "\u4f60\u597d\ud83d\udc4b\uff01\u6211\u662f\u4eba\u5de5\u667a\u80fd\u52a9\u624b", "chatglm2-6b\uff0c\u5f88\u9ad8\u5174\u89c1\u5230\u4f60\uff0c\u6b22\u8fce\u95ee\u6211\u4efb\u4f55\u95ee\u9898\u3002", "''", "]", "]", ",", "``", "status", "''", ":200", ",", "``", "time", "''", ":", "''", "2023-03-23", "21:38:40", "''", "}", "``", "`", "\u611f\u8c22", "[", "@", "hiyouga", "]", "(", ")", "\u5b9e\u73b0\u4e86", "openai", "\u683c\u5f0f\u7684\u6d41\u5f0f", "api", "\u90e8\u7f72\uff0c\u53ef\u4ee5\u4f5c\u4e3a\u4efb\u610f\u57fa\u4e8e", "chatgpt", "\u7684\u5e94\u7528\u7684\u540e\u7aef\uff0c\u6bd4\u5982", "[", "chatgpt-next-web", "]", "(", "http", ":", "//github.com/yidadaa/chatgpt-next-web", ")", "\u3002\u53ef\u4ee5\u901a\u8fc7\u8fd0\u884c\u4ed3\u5e93\u4e2d\u7684", "[", "openai_api.py", "]", "(", "openai_api.py", ")", "\u8fdb\u884c\u90e8\u7f72\uff1a", "``", "`", "shell", "python", "openai_api.py", "``", "`", "\u8fdb\u884c", "api", "\u8c03\u7528\u7684\u793a\u4f8b\u4ee3\u7801\u4e3a", "``", "`", "python", "import", "openai", "__name__", "==", "``", "__main__", "''", ":", "openai.api_base", "=", "``", "http", ":", "//localhost:8000/v1", "''", "openai.api_key", "=", "``", "none", "''", "chunk", "openai.chatcompletion.create", "(", "model=", "''", "chatglm2-6b", "''", ",", "messages=", "[", "{", "``", "role", "''", ":", "``", "user", "''", ",", "``", "content", "''", ":", "``", "\u4f60\u597d", "''", "}", "]", ",", "stream=true", ")", ":", "hasattr", "(", "chunk.choices", "[", "0", "]", ".delta", ",", "``", "content", "''", ")", ":", "print", "(", "chunk.choices", "[", "0", "]", ".delta.content", ",", "end=", "''", "''", ",", "flush=true", ")", "``", "`" ], [ "api \u90e8\u7f72 \u9996\u5148\u9700\u8981\u5b89\u88c5\u989d\u5916\u7684\u4f9d\u8d56 ` pip install fastapi uvicorn ` \uff0c\u7136\u540e\u8fd0\u884c\u4ed3\u5e93\u4e2d\u7684 [ api.py ] ( api.py ) \uff1a `` ` shell python api.py `` ` \u9ed8\u8ba4\u90e8\u7f72\u5728\u672c\u5730\u7684 8000 \u7aef\u53e3\uff0c\u901a\u8fc7 post \u65b9\u6cd5\u8fdb\u884c\u8c03\u7528 `` ` shell curl -x post `` http : //127.0.0.1:8000 '' \\ -h 'content-type : application/json ' \\ -d ' { `` prompt '' : `` \u4f60\u597d '' , `` history '' : [ ] } ' `` ` \u5f97\u5230\u7684\u8fd4\u56de\u503c\u4e3a `` ` shell { `` response '' : '' \u4f60\u597d\ud83d\udc4b\uff01\u6211\u662f\u4eba\u5de5\u667a\u80fd\u52a9\u624b chatglm2-6b\uff0c\u5f88\u9ad8\u5174\u89c1\u5230\u4f60\uff0c\u6b22\u8fce\u95ee\u6211\u4efb\u4f55\u95ee\u9898\u3002 '' , `` history '' : [ [ `` \u4f60\u597d '' , '' \u4f60\u597d\ud83d\udc4b\uff01\u6211\u662f\u4eba\u5de5\u667a\u80fd\u52a9\u624b chatglm2-6b\uff0c\u5f88\u9ad8\u5174\u89c1\u5230\u4f60\uff0c\u6b22\u8fce\u95ee\u6211\u4efb\u4f55\u95ee\u9898\u3002 '' ] ] , `` status '' :200 , `` time '' : '' 2023-03-23 21:38:40 '' } `` ` \u611f\u8c22 [ @ hiyouga ] ( ) \u5b9e\u73b0\u4e86 openai \u683c\u5f0f\u7684\u6d41\u5f0f api \u90e8\u7f72\uff0c\u53ef\u4ee5\u4f5c\u4e3a\u4efb\u610f\u57fa\u4e8e chatgpt \u7684\u5e94\u7528\u7684\u540e\u7aef\uff0c\u6bd4\u5982 [ chatgpt-next-web ] ( http : //github.com/yidadaa/chatgpt-next-web ) \u3002\u53ef\u4ee5\u901a\u8fc7\u8fd0\u884c\u4ed3\u5e93\u4e2d\u7684 [ openai_api.py ] ( openai_api.py ) \u8fdb\u884c\u90e8\u7f72\uff1a `` ` shell python openai_api.py `` ` \u8fdb\u884c api \u8c03\u7528\u7684\u793a\u4f8b\u4ee3\u7801\u4e3a `` ` python import openai __name__ == `` __main__ '' : openai.api_base = `` http : //localhost:8000/v1 '' openai.api_key = `` none '' chunk openai.chatcompletion.create ( model= '' chatglm2-6b '' , messages= [ { `` role '' : `` user '' , `` content '' : `` \u4f60\u597d '' } ] , stream=true ) : hasattr ( chunk.choices [ 0 ] .delta , `` content '' ) : print ( chunk.choices [ 0 ] .delta.content , end= '' '' , flush=true ) `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/THUDM/ChatGLM2-6B", "readme_url": "https://raw.githubusercontent.com/THUDM/ChatGLM2-6B/main/README.md", "topic": [ "chatglm", "chatglm-6b", "large-language-models", "llm" ], "text": "\u591a\u5361\u90e8\u7f72\n\u5982\u679c\u4f60\u6709\u591a\u5f20 GPU\uff0c\u4f46\u662f\u6bcf\u5f20 GPU \u7684\u663e\u5b58\u5927\u5c0f\u90fd\u4e0d\u8db3\u4ee5\u5bb9\u7eb3\u5b8c\u6574\u7684\u6a21\u578b\uff0c\u90a3\u4e48\u53ef\u4ee5\u5c06\u6a21\u578b\u5207\u5206\u5728\u591a\u5f20GPU\u4e0a\u3002\u9996\u5148\u5b89\u88c5 accelerate: `pip install accelerate`\uff0c\u7136\u540e\u901a\u8fc7\u5982\u4e0b\u65b9\u6cd5\u52a0\u8f7d\u6a21\u578b\uff1a\n```python\nfrom utils import load_model_on_gpus\nmodel = load_model_on_gpus(\"THUDM/chatglm2-6b\", num_gpus=2)\n```\n\u5373\u53ef\u5c06\u6a21\u578b\u90e8\u7f72\u5230\u4e24\u5f20 GPU \u4e0a\u8fdb\u884c\u63a8\u7406\u3002\u4f60\u53ef\u4ee5\u5c06 `num_gpus` \u6539\u4e3a\u4f60\u5e0c\u671b\u4f7f\u7528\u7684 GPU \u6570\u3002\u9ed8\u8ba4\u662f\u5747\u5300\u5207\u5206\u7684\uff0c\u4f60\u4e5f\u53ef\u4ee5\u4f20\u5165 `device_map` \u53c2\u6570\u6765\u81ea\u5df1\u6307\u5b9a\u3002 \n\n", "sentence": [ [ "\u591a\u5361\u90e8\u7f72", "\u5982\u679c\u4f60\u6709\u591a\u5f20", "gpu\uff0c\u4f46\u662f\u6bcf\u5f20", "gpu", "\u7684\u663e\u5b58\u5927\u5c0f\u90fd\u4e0d\u8db3\u4ee5\u5bb9\u7eb3\u5b8c\u6574\u7684\u6a21\u578b\uff0c\u90a3\u4e48\u53ef\u4ee5\u5c06\u6a21\u578b\u5207\u5206\u5728\u591a\u5f20gpu\u4e0a\u3002\u9996\u5148\u5b89\u88c5", "accelerate", ":", "`", "pip", "install", "accelerate", "`", "\uff0c\u7136\u540e\u901a\u8fc7\u5982\u4e0b\u65b9\u6cd5\u52a0\u8f7d\u6a21\u578b\uff1a", "``", "`", "python", "utils", "import", "load_model_on_gpus", "model", "=", "load_model_on_gpus", "(", "``", "thudm/chatglm2-6b", "''", ",", "num_gpus=2", ")", "``", "`", "\u5373\u53ef\u5c06\u6a21\u578b\u90e8\u7f72\u5230\u4e24\u5f20", "gpu", "\u4e0a\u8fdb\u884c\u63a8\u7406\u3002\u4f60\u53ef\u4ee5\u5c06", "`", "num_gpus", "`", "\u6539\u4e3a\u4f60\u5e0c\u671b\u4f7f\u7528\u7684", "gpu", "\u6570\u3002\u9ed8\u8ba4\u662f\u5747\u5300\u5207\u5206\u7684\uff0c\u4f60\u4e5f\u53ef\u4ee5\u4f20\u5165", "`", "device_map", "`", "\u53c2\u6570\u6765\u81ea\u5df1\u6307\u5b9a\u3002" ], [ "\u591a\u5361\u90e8\u7f72 \u5982\u679c\u4f60\u6709\u591a\u5f20 gpu\uff0c\u4f46\u662f\u6bcf\u5f20 gpu \u7684\u663e\u5b58\u5927\u5c0f\u90fd\u4e0d\u8db3\u4ee5\u5bb9\u7eb3\u5b8c\u6574\u7684\u6a21\u578b\uff0c\u90a3\u4e48\u53ef\u4ee5\u5c06\u6a21\u578b\u5207\u5206\u5728\u591a\u5f20gpu\u4e0a\u3002\u9996\u5148\u5b89\u88c5 accelerate : ` pip install accelerate ` \uff0c\u7136\u540e\u901a\u8fc7\u5982\u4e0b\u65b9\u6cd5\u52a0\u8f7d\u6a21\u578b\uff1a `` ` python utils import load_model_on_gpus model = load_model_on_gpus ( `` thudm/chatglm2-6b '' , num_gpus=2 ) `` ` \u5373\u53ef\u5c06\u6a21\u578b\u90e8\u7f72\u5230\u4e24\u5f20 gpu \u4e0a\u8fdb\u884c\u63a8\u7406\u3002\u4f60\u53ef\u4ee5\u5c06 ` num_gpus ` \u6539\u4e3a\u4f60\u5e0c\u671b\u4f7f\u7528\u7684 gpu \u6570\u3002\u9ed8\u8ba4\u662f\u5747\u5300\u5207\u5206\u7684\uff0c\u4f60\u4e5f\u53ef\u4ee5\u4f20\u5165 ` device_map ` \u53c2\u6570\u6765\u81ea\u5df1\u6307\u5b9a\u3002" ] ], "token": [ [ "\u591a\u5361\u90e8\u7f72", "\u5982\u679c\u4f60\u6709\u591a\u5f20", "gpu\uff0c\u4f46\u662f\u6bcf\u5f20", "gpu", "\u7684\u663e\u5b58\u5927\u5c0f\u90fd\u4e0d\u8db3\u4ee5\u5bb9\u7eb3\u5b8c\u6574\u7684\u6a21\u578b\uff0c\u90a3\u4e48\u53ef\u4ee5\u5c06\u6a21\u578b\u5207\u5206\u5728\u591a\u5f20gpu\u4e0a\u3002\u9996\u5148\u5b89\u88c5", "accelerate", ":", "`", "pip", "install", "accelerate", "`", "\uff0c\u7136\u540e\u901a\u8fc7\u5982\u4e0b\u65b9\u6cd5\u52a0\u8f7d\u6a21\u578b\uff1a", "``", "`", "python", "utils", "import", "load_model_on_gpus", "model", "=", "load_model_on_gpus", "(", "``", "thudm/chatglm2-6b", "''", ",", "num_gpus=2", ")", "``", "`", "\u5373\u53ef\u5c06\u6a21\u578b\u90e8\u7f72\u5230\u4e24\u5f20", "gpu", "\u4e0a\u8fdb\u884c\u63a8\u7406\u3002\u4f60\u53ef\u4ee5\u5c06", "`", "num_gpus", "`", "\u6539\u4e3a\u4f60\u5e0c\u671b\u4f7f\u7528\u7684", "gpu", "\u6570\u3002\u9ed8\u8ba4\u662f\u5747\u5300\u5207\u5206\u7684\uff0c\u4f60\u4e5f\u53ef\u4ee5\u4f20\u5165", "`", "device_map", "`", "\u53c2\u6570\u6765\u81ea\u5df1\u6307\u5b9a\u3002" ], [ "\u591a\u5361\u90e8\u7f72 \u5982\u679c\u4f60\u6709\u591a\u5f20 gpu\uff0c\u4f46\u662f\u6bcf\u5f20 gpu \u7684\u663e\u5b58\u5927\u5c0f\u90fd\u4e0d\u8db3\u4ee5\u5bb9\u7eb3\u5b8c\u6574\u7684\u6a21\u578b\uff0c\u90a3\u4e48\u53ef\u4ee5\u5c06\u6a21\u578b\u5207\u5206\u5728\u591a\u5f20gpu\u4e0a\u3002\u9996\u5148\u5b89\u88c5 accelerate : ` pip install accelerate ` \uff0c\u7136\u540e\u901a\u8fc7\u5982\u4e0b\u65b9\u6cd5\u52a0\u8f7d\u6a21\u578b\uff1a `` ` python utils import load_model_on_gpus model = load_model_on_gpus ( `` thudm/chatglm2-6b '' , num_gpus=2 ) `` ` \u5373\u53ef\u5c06\u6a21\u578b\u90e8\u7f72\u5230\u4e24\u5f20 gpu \u4e0a\u8fdb\u884c\u63a8\u7406\u3002\u4f60\u53ef\u4ee5\u5c06 ` num_gpus ` \u6539\u4e3a\u4f60\u5e0c\u671b\u4f7f\u7528\u7684 gpu \u6570\u3002\u9ed8\u8ba4\u662f\u5747\u5300\u5207\u5206\u7684\uff0c\u4f60\u4e5f\u53ef\u4ee5\u4f20\u5165 ` device_map ` \u53c2\u6570\u6765\u81ea\u5df1\u6307\u5b9a\u3002" ] ], "level of complexity": 0 }, { "url": "https://github.com/vllm-project/vllm", "readme_url": "https://raw.githubusercontent.com/vllm-project/vllm/main/README.md", "topic": [ "gpt", "inference", "llama", "llm", "llm-serving", "llmops", "mlops", "model-serving", "pytorch", "transformer" ], "text": "About\nvLLM is a fast and easy-to-use library for LLM inference and serving.\n\nvLLM is fast with:\n\n- State-of-the-art serving throughput\n- Efficient management of attention key and value memory with **PagedAttention**\n- Continuous batching of incoming requests\n- Fast model execution with CUDA/HIP graph\n- Quantization: [GPTQ](https://arxiv.org/abs/2210.17323), [AWQ](https://arxiv.org/abs/2306.00978), [SqueezeLLM](https://arxiv.org/abs/2306.07629), FP8 KV Cache\n- Optimized CUDA kernels\n\nvLLM is flexible and easy to use with:\n\n- Seamless integration with popular Hugging Face models\n- High-throughput serving with various decoding algorithms, including *parallel sampling*, *beam search*, and more\n- Tensor parallelism support for distributed inference\n- Streaming outputs\n- OpenAI-compatible API server\n- Support NVIDIA GPUs and AMD GPUs\n- (Experimental) Prefix caching support\n- (Experimental) Multi-lora support\n\nvLLM seamlessly supports many Hugging Face models, including the following architectures:\n\n- Aquila & Aquila2 (`BAAI/AquilaChat2-7B`, `BAAI/AquilaChat2-34B`, `BAAI/Aquila-7B`, `BAAI/AquilaChat-7B`, etc.)\n- Baichuan & Baichuan2 (`baichuan-inc/Baichuan2-13B-Chat`, `baichuan-inc/Baichuan-7B`, etc.)\n- BLOOM (`bigscience/bloom`, `bigscience/bloomz`, etc.)\n- ChatGLM (`THUDM/chatglm2-6b`, `THUDM/chatglm3-6b`, etc.)\n- DeciLM (`Deci/DeciLM-7B`, `Deci/DeciLM-7B-instruct`, etc.)\n- Falcon (`tiiuae/falcon-7b`, `tiiuae/falcon-40b`, `tiiuae/falcon-rw-7b`, etc.)\n- GPT-2 (`gpt2`, `gpt2-xl`, etc.)\n- GPT BigCode (`bigcode/starcoder`, `bigcode/gpt_bigcode-santacoder`, etc.)\n- GPT-J (`EleutherAI/gpt-j-6b`, `nomic-ai/gpt4all-j`, etc.)\n- GPT-NeoX (`EleutherAI/gpt-neox-20b`, `databricks/dolly-v2-12b`, `stabilityai/stablelm-tuned-alpha-7b`, etc.)\n- InternLM (`internlm/internlm-7b`, `internlm/internlm-chat-7b`, etc.)\n- InternLM2 (`internlm/internlm2-7b`, `internlm/internlm2-chat-7b`, etc.)\n- LLaMA & LLaMA-2 (`meta-llama/Llama-2-70b-hf`, `lmsys/vicuna-13b-v1.3`, `young-geng/koala`, `openlm-research/open_llama_13b`, etc.)\n- Mistral (`mistralai/Mistral-7B-v0.1`, `mistralai/Mistral-7B-Instruct-v0.1`, etc.)\n- Mixtral (`mistralai/Mixtral-8x7B-v0.1`, `mistralai/Mixtral-8x7B-Instruct-v0.1`, etc.)\n- MPT (`mosaicml/mpt-7b`, `mosaicml/mpt-30b`, etc.)\n- OPT (`facebook/opt-66b`, `facebook/opt-iml-max-30b`, etc.)\n- Phi (`microsoft/phi-1_5`, `microsoft/phi-2`, etc.)\n- Qwen (`Qwen/Qwen-7B`, `Qwen/Qwen-7B-Chat`, etc.)\n- Qwen2 (`Qwen/Qwen2-7B-beta`, `Qwen/Qwen-7B-Chat-beta`, etc.)\n- StableLM(`stabilityai/stablelm-3b-4e1t`, `stabilityai/stablelm-base-alpha-7b-v2`, etc.)\n- Yi (`01-ai/Yi-6B`, `01-ai/Yi-34B`, etc.)\n\nInstall vLLM with pip or [from source](https://vllm.readthedocs.io/en/latest/getting_started/installation.html#build-from-source):\n\n```bash\npip install vllm\n```\n\n", "sentence": [ [ "vllm", "fast", "easy-to-use", "library", "llm", "inference", "serving", ".", "vllm", "fast", ":", "-", "state-of-the-art", "serving", "throughput", "-", "efficient", "management", "attention", "key", "value", "memory", "*", "*", "pagedattention", "*", "*", "-", "continuous", "batching", "incoming", "request", "-", "fast", "model", "execution", "cuda/hip", "graph", "-", "quantization", ":", "[", "gptq", "]", "(", "http", ":", "//arxiv.org/abs/2210.17323", ")", ",", "[", "awq", "]", "(", "http", ":", "//arxiv.org/abs/2306.00978", ")", ",", "[", "squeezellm", "]", "(", "http", ":", "//arxiv.org/abs/2306.07629", ")", ",", "fp8", "kv", "cache", "-", "optimized", "cuda", "kernel", "vllm", "flexible", "easy", "use", ":", "-", "seamless", "integration", "popular", "hugging", "face", "model", "-", "high-throughput", "serving", "various", "decoding", "algorithm", ",", "including", "*", "parallel", "sampling", "*", ",", "*", "beam", "search", "*", ",", "-", "tensor", "parallelism", "support", "distributed", "inference", "-", "streaming", "output", "-", "openai-compatible", "api", "server", "-", "support", "nvidia", "gpus", "amd", "gpus", "-", "(", "experimental", ")", "prefix", "caching", "support", "-", "(", "experimental", ")", "multi-lora", "support", "vllm", "seamlessly", "support", "many", "hugging", "face", "model", ",", "including", "following", "architecture", ":", "-", "aquila", "&", "aquila2", "(", "`", "baai/aquilachat2-7b", "`", ",", "`", "baai/aquilachat2-34b", "`", ",", "`", "baai/aquila-7b", "`", ",", "`", "baai/aquilachat-7b", "`", ",", "etc", ".", ")", "-", "baichuan", "&", "baichuan2", "(", "`", "baichuan-inc/baichuan2-13b-chat", "`", ",", "`", "baichuan-inc/baichuan-7b", "`", ",", "etc", ".", ")", "-", "bloom", "(", "`", "bigscience/bloom", "`", ",", "`", "bigscience/bloomz", "`", ",", "etc", ".", ")", "-", "chatglm", "(", "`", "thudm/chatglm2-6b", "`", ",", "`", "thudm/chatglm3-6b", "`", ",", "etc", ".", ")", "-", "decilm", "(", "`", "deci/decilm-7b", "`", ",", "`", "deci/decilm-7b-instruct", "`", ",", "etc", ".", ")", "-", "falcon", "(", "`", "tiiuae/falcon-7b", "`", ",", "`", "tiiuae/falcon-40b", "`", ",", "`", "tiiuae/falcon-rw-7b", "`", ",", "etc", ".", ")", "-", "gpt-2", "(", "`", "gpt2", "`", ",", "`", "gpt2-xl", "`", ",", "etc", ".", ")", "-", "gpt", "bigcode", "(", "`", "bigcode/starcoder", "`", ",", "`", "bigcode/gpt_bigcode-santacoder", "`", ",", "etc", ".", ")", "-", "gpt-j", "(", "`", "eleutherai/gpt-j-6b", "`", ",", "`", "nomic-ai/gpt4all-j", "`", ",", "etc", ".", ")", "-", "gpt-neox", "(", "`", "eleutherai/gpt-neox-20b", "`", ",", "`", "databricks/dolly-v2-12b", "`", ",", "`", "stabilityai/stablelm-tuned-alpha-7b", "`", ",", "etc", ".", ")", "-", "internlm", "(", "`", "internlm/internlm-7b", "`", ",", "`", "internlm/internlm-chat-7b", "`", ",", "etc", ".", ")", "-", "internlm2", "(", "`", "internlm/internlm2-7b", "`", ",", "`", "internlm/internlm2-chat-7b", "`", ",", "etc", ".", ")", "-", "llama", "&", "llama-2", "(", "`", "meta-llama/llama-2-70b-hf", "`", ",", "`", "lmsys/vicuna-13b-v1.3", "`", ",", "`", "young-geng/koala", "`", ",", "`", "openlm-research/open_llama_13b", "`", ",", "etc", ".", ")", "-", "mistral", "(", "`", "mistralai/mistral-7b-v0.1", "`", ",", "`", "mistralai/mistral-7b-instruct-v0.1", "`", ",", "etc", ".", ")", "-", "mixtral", "(", "`", "mistralai/mixtral-8x7b-v0.1", "`", ",", "`", "mistralai/mixtral-8x7b-instruct-v0.1", "`", ",", "etc", ".", ")", "-", "mpt", "(", "`", "mosaicml/mpt-7b", "`", ",", "`", "mosaicml/mpt-30b", "`", ",", "etc", ".", ")", "-", "opt", "(", "`", "facebook/opt-66b", "`", ",", "`", "facebook/opt-iml-max-30b", "`", ",", "etc", ".", ")", "-", "phi", "(", "`", "microsoft/phi-1_5", "`", ",", "`", "microsoft/phi-2", "`", ",", "etc", ".", ")", "-", "qwen", "(", "`", "qwen/qwen-7b", "`", ",", "`", "qwen/qwen-7b-chat", "`", ",", "etc", ".", ")", "-", "qwen2", "(", "`", "qwen/qwen2-7b-beta", "`", ",", "`", "qwen/qwen-7b-chat-beta", "`", ",", "etc", ".", ")", "-", "stablelm", "(", "`", "stabilityai/stablelm-3b-4e1t", "`", ",", "`", "stabilityai/stablelm-base-alpha-7b-v2", "`", ",", "etc", ".", ")", "-", "yi", "(", "`", "01-ai/yi-6b", "`", ",", "`", "01-ai/yi-34b", "`", ",", "etc", ".", ")", "install", "vllm", "pip", "[", "source", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/installation.html", "#", "build-from-source", ")", ":", "``", "`", "bash", "pip", "install", "vllm", "``", "`" ], [ "vllm fast easy-to-use library llm inference serving .", "vllm fast : - state-of-the-art serving throughput - efficient management attention key value memory * * pagedattention * * - continuous batching incoming request - fast model execution cuda/hip graph - quantization : [ gptq ] ( http : //arxiv.org/abs/2210.17323 ) , [ awq ] ( http : //arxiv.org/abs/2306.00978 ) , [ squeezellm ] ( http : //arxiv.org/abs/2306.07629 ) , fp8 kv cache - optimized cuda kernel vllm flexible easy use : - seamless integration popular hugging face model - high-throughput serving various decoding algorithm , including * parallel sampling * , * beam search * , - tensor parallelism support distributed inference - streaming output - openai-compatible api server - support nvidia gpus amd gpus - ( experimental ) prefix caching support - ( experimental ) multi-lora support vllm seamlessly support many hugging face model , including following architecture : - aquila & aquila2 ( ` baai/aquilachat2-7b ` , ` baai/aquilachat2-34b ` , ` baai/aquila-7b ` , ` baai/aquilachat-7b ` , etc . )", "- baichuan & baichuan2 ( ` baichuan-inc/baichuan2-13b-chat ` , ` baichuan-inc/baichuan-7b ` , etc . )", "- bloom ( ` bigscience/bloom ` , ` bigscience/bloomz ` , etc . )", "- chatglm ( ` thudm/chatglm2-6b ` , ` thudm/chatglm3-6b ` , etc . )", "- decilm ( ` deci/decilm-7b ` , ` deci/decilm-7b-instruct ` , etc . )", "- falcon ( ` tiiuae/falcon-7b ` , ` tiiuae/falcon-40b ` , ` tiiuae/falcon-rw-7b ` , etc . )", "- gpt-2 ( ` gpt2 ` , ` gpt2-xl ` , etc . )", "- gpt bigcode ( ` bigcode/starcoder ` , ` bigcode/gpt_bigcode-santacoder ` , etc . )", "- gpt-j ( ` eleutherai/gpt-j-6b ` , ` nomic-ai/gpt4all-j ` , etc . )", "- gpt-neox ( ` eleutherai/gpt-neox-20b ` , ` databricks/dolly-v2-12b ` , ` stabilityai/stablelm-tuned-alpha-7b ` , etc . )", "- internlm ( ` internlm/internlm-7b ` , ` internlm/internlm-chat-7b ` , etc . )", "- internlm2 ( ` internlm/internlm2-7b ` , ` internlm/internlm2-chat-7b ` , etc . )", "- llama & llama-2 ( ` meta-llama/llama-2-70b-hf ` , ` lmsys/vicuna-13b-v1.3 ` , ` young-geng/koala ` , ` openlm-research/open_llama_13b ` , etc . )", "- mistral ( ` mistralai/mistral-7b-v0.1 ` , ` mistralai/mistral-7b-instruct-v0.1 ` , etc . )", "- mixtral ( ` mistralai/mixtral-8x7b-v0.1 ` , ` mistralai/mixtral-8x7b-instruct-v0.1 ` , etc . )", "- mpt ( ` mosaicml/mpt-7b ` , ` mosaicml/mpt-30b ` , etc . )", "- opt ( ` facebook/opt-66b ` , ` facebook/opt-iml-max-30b ` , etc . )", "- phi ( ` microsoft/phi-1_5 ` , ` microsoft/phi-2 ` , etc . )", "- qwen ( ` qwen/qwen-7b ` , ` qwen/qwen-7b-chat ` , etc . )", "- qwen2 ( ` qwen/qwen2-7b-beta ` , ` qwen/qwen-7b-chat-beta ` , etc . )", "- stablelm ( ` stabilityai/stablelm-3b-4e1t ` , ` stabilityai/stablelm-base-alpha-7b-v2 ` , etc . )", "- yi ( ` 01-ai/yi-6b ` , ` 01-ai/yi-34b ` , etc . )", "install vllm pip [ source ] ( http : //vllm.readthedocs.io/en/latest/getting_started/installation.html # build-from-source ) : `` ` bash pip install vllm `` `" ] ], "token": [ [ "vllm", "fast", "easy-to-use", "library", "llm", "inference", "serving", ".", "vllm", "fast", ":", "-", "state-of-the-art", "serving", "throughput", "-", "efficient", "management", "attention", "key", "value", "memory", "*", "*", "pagedattention", "*", "*", "-", "continuous", "batching", "incoming", "request", "-", "fast", "model", "execution", "cuda/hip", "graph", "-", "quantization", ":", "[", "gptq", "]", "(", "http", ":", "//arxiv.org/abs/2210.17323", ")", ",", "[", "awq", "]", "(", "http", ":", "//arxiv.org/abs/2306.00978", ")", ",", "[", "squeezellm", "]", "(", "http", ":", "//arxiv.org/abs/2306.07629", ")", ",", "fp8", "kv", "cache", "-", "optimized", "cuda", "kernel", "vllm", "flexible", "easy", "use", ":", "-", "seamless", "integration", "popular", "hugging", "face", "model", "-", "high-throughput", "serving", "various", "decoding", "algorithm", ",", "including", "*", "parallel", "sampling", "*", ",", "*", "beam", "search", "*", ",", "-", "tensor", "parallelism", "support", "distributed", "inference", "-", "streaming", "output", "-", "openai-compatible", "api", "server", "-", "support", "nvidia", "gpus", "amd", "gpus", "-", "(", "experimental", ")", "prefix", "caching", "support", "-", "(", "experimental", ")", "multi-lora", "support", "vllm", "seamlessly", "support", "many", "hugging", "face", "model", ",", "including", "following", "architecture", ":", "-", "aquila", "&", "aquila2", "(", "`", "baai/aquilachat2-7b", "`", ",", "`", "baai/aquilachat2-34b", "`", ",", "`", "baai/aquila-7b", "`", ",", "`", "baai/aquilachat-7b", "`", ",", "etc", ".", ")", "-", "baichuan", "&", "baichuan2", "(", "`", "baichuan-inc/baichuan2-13b-chat", "`", ",", "`", "baichuan-inc/baichuan-7b", "`", ",", "etc", ".", ")", "-", "bloom", "(", "`", "bigscience/bloom", "`", ",", "`", "bigscience/bloomz", "`", ",", "etc", ".", ")", "-", "chatglm", "(", "`", "thudm/chatglm2-6b", "`", ",", "`", "thudm/chatglm3-6b", "`", ",", "etc", ".", ")", "-", "decilm", "(", "`", "deci/decilm-7b", "`", ",", "`", "deci/decilm-7b-instruct", "`", ",", "etc", ".", ")", "-", "falcon", "(", "`", "tiiuae/falcon-7b", "`", ",", "`", "tiiuae/falcon-40b", "`", ",", "`", "tiiuae/falcon-rw-7b", "`", ",", "etc", ".", ")", "-", "gpt-2", "(", "`", "gpt2", "`", ",", "`", "gpt2-xl", "`", ",", "etc", ".", ")", "-", "gpt", "bigcode", "(", "`", "bigcode/starcoder", "`", ",", "`", "bigcode/gpt_bigcode-santacoder", "`", ",", "etc", ".", ")", "-", "gpt-j", "(", "`", "eleutherai/gpt-j-6b", "`", ",", "`", "nomic-ai/gpt4all-j", "`", ",", "etc", ".", ")", "-", "gpt-neox", "(", "`", "eleutherai/gpt-neox-20b", "`", ",", "`", "databricks/dolly-v2-12b", "`", ",", "`", "stabilityai/stablelm-tuned-alpha-7b", "`", ",", "etc", ".", ")", "-", "internlm", "(", "`", "internlm/internlm-7b", "`", ",", "`", "internlm/internlm-chat-7b", "`", ",", "etc", ".", ")", "-", "internlm2", "(", "`", "internlm/internlm2-7b", "`", ",", "`", "internlm/internlm2-chat-7b", "`", ",", "etc", ".", ")", "-", "llama", "&", "llama-2", "(", "`", "meta-llama/llama-2-70b-hf", "`", ",", "`", "lmsys/vicuna-13b-v1.3", "`", ",", "`", "young-geng/koala", "`", ",", "`", "openlm-research/open_llama_13b", "`", ",", "etc", ".", ")", "-", "mistral", "(", "`", "mistralai/mistral-7b-v0.1", "`", ",", "`", "mistralai/mistral-7b-instruct-v0.1", "`", ",", "etc", ".", ")", "-", "mixtral", "(", "`", "mistralai/mixtral-8x7b-v0.1", "`", ",", "`", "mistralai/mixtral-8x7b-instruct-v0.1", "`", ",", "etc", ".", ")", "-", "mpt", "(", "`", "mosaicml/mpt-7b", "`", ",", "`", "mosaicml/mpt-30b", "`", ",", "etc", ".", ")", "-", "opt", "(", "`", "facebook/opt-66b", "`", ",", "`", "facebook/opt-iml-max-30b", "`", ",", "etc", ".", ")", "-", "phi", "(", "`", "microsoft/phi-1_5", "`", ",", "`", "microsoft/phi-2", "`", ",", "etc", ".", ")", "-", "qwen", "(", "`", "qwen/qwen-7b", "`", ",", "`", "qwen/qwen-7b-chat", "`", ",", "etc", ".", ")", "-", "qwen2", "(", "`", "qwen/qwen2-7b-beta", "`", ",", "`", "qwen/qwen-7b-chat-beta", "`", ",", "etc", ".", ")", "-", "stablelm", "(", "`", "stabilityai/stablelm-3b-4e1t", "`", ",", "`", "stabilityai/stablelm-base-alpha-7b-v2", "`", ",", "etc", ".", ")", "-", "yi", "(", "`", "01-ai/yi-6b", "`", ",", "`", "01-ai/yi-34b", "`", ",", "etc", ".", ")", "install", "vllm", "pip", "[", "source", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/installation.html", "#", "build-from-source", ")", ":", "``", "`", "bash", "pip", "install", "vllm", "``", "`" ], [ "vllm fast easy-to-use library llm inference serving .", "vllm fast : - state-of-the-art serving throughput - efficient management attention key value memory * * pagedattention * * - continuous batching incoming request - fast model execution cuda/hip graph - quantization : [ gptq ] ( http : //arxiv.org/abs/2210.17323 ) , [ awq ] ( http : //arxiv.org/abs/2306.00978 ) , [ squeezellm ] ( http : //arxiv.org/abs/2306.07629 ) , fp8 kv cache - optimized cuda kernel vllm flexible easy use : - seamless integration popular hugging face model - high-throughput serving various decoding algorithm , including * parallel sampling * , * beam search * , - tensor parallelism support distributed inference - streaming output - openai-compatible api server - support nvidia gpus amd gpus - ( experimental ) prefix caching support - ( experimental ) multi-lora support vllm seamlessly support many hugging face model , including following architecture : - aquila & aquila2 ( ` baai/aquilachat2-7b ` , ` baai/aquilachat2-34b ` , ` baai/aquila-7b ` , ` baai/aquilachat-7b ` , etc . )", "- baichuan & baichuan2 ( ` baichuan-inc/baichuan2-13b-chat ` , ` baichuan-inc/baichuan-7b ` , etc . )", "- bloom ( ` bigscience/bloom ` , ` bigscience/bloomz ` , etc . )", "- chatglm ( ` thudm/chatglm2-6b ` , ` thudm/chatglm3-6b ` , etc . )", "- decilm ( ` deci/decilm-7b ` , ` deci/decilm-7b-instruct ` , etc . )", "- falcon ( ` tiiuae/falcon-7b ` , ` tiiuae/falcon-40b ` , ` tiiuae/falcon-rw-7b ` , etc . )", "- gpt-2 ( ` gpt2 ` , ` gpt2-xl ` , etc . )", "- gpt bigcode ( ` bigcode/starcoder ` , ` bigcode/gpt_bigcode-santacoder ` , etc . )", "- gpt-j ( ` eleutherai/gpt-j-6b ` , ` nomic-ai/gpt4all-j ` , etc . )", "- gpt-neox ( ` eleutherai/gpt-neox-20b ` , ` databricks/dolly-v2-12b ` , ` stabilityai/stablelm-tuned-alpha-7b ` , etc . )", "- internlm ( ` internlm/internlm-7b ` , ` internlm/internlm-chat-7b ` , etc . )", "- internlm2 ( ` internlm/internlm2-7b ` , ` internlm/internlm2-chat-7b ` , etc . )", "- llama & llama-2 ( ` meta-llama/llama-2-70b-hf ` , ` lmsys/vicuna-13b-v1.3 ` , ` young-geng/koala ` , ` openlm-research/open_llama_13b ` , etc . )", "- mistral ( ` mistralai/mistral-7b-v0.1 ` , ` mistralai/mistral-7b-instruct-v0.1 ` , etc . )", "- mixtral ( ` mistralai/mixtral-8x7b-v0.1 ` , ` mistralai/mixtral-8x7b-instruct-v0.1 ` , etc . )", "- mpt ( ` mosaicml/mpt-7b ` , ` mosaicml/mpt-30b ` , etc . )", "- opt ( ` facebook/opt-66b ` , ` facebook/opt-iml-max-30b ` , etc . )", "- phi ( ` microsoft/phi-1_5 ` , ` microsoft/phi-2 ` , etc . )", "- qwen ( ` qwen/qwen-7b ` , ` qwen/qwen-7b-chat ` , etc . )", "- qwen2 ( ` qwen/qwen2-7b-beta ` , ` qwen/qwen-7b-chat-beta ` , etc . )", "- stablelm ( ` stabilityai/stablelm-3b-4e1t ` , ` stabilityai/stablelm-base-alpha-7b-v2 ` , etc . )", "- yi ( ` 01-ai/yi-6b ` , ` 01-ai/yi-34b ` , etc . )", "install vllm pip [ source ] ( http : //vllm.readthedocs.io/en/latest/getting_started/installation.html # build-from-source ) : `` ` bash pip install vllm `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/vllm-project/vllm", "readme_url": "https://raw.githubusercontent.com/vllm-project/vllm/main/README.md", "topic": [ "gpt", "inference", "llama", "llm", "llm-serving", "llmops", "mlops", "model-serving", "pytorch", "transformer" ], "text": "Getting Started\n\nVisit our [documentation](https://vllm.readthedocs.io/en/latest/) to get started.\n- [Installation](https://vllm.readthedocs.io/en/latest/getting_started/installation.html)\n- [Quickstart](https://vllm.readthedocs.io/en/latest/getting_started/quickstart.html)\n- [Supported Models](https://vllm.readthedocs.io/en/latest/models/supported_models.html)\n\n", "sentence": [ [ "getting", "started", "visit", "[", "documentation", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/", ")", "get", "started", ".", "-", "[", "installation", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/installation.html", ")", "-", "[", "quickstart", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/quickstart.html", ")", "-", "[", "supported", "model", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/models/supported_models.html", ")" ], [ "getting started visit [ documentation ] ( http : //vllm.readthedocs.io/en/latest/ ) get started .", "- [ installation ] ( http : //vllm.readthedocs.io/en/latest/getting_started/installation.html ) - [ quickstart ] ( http : //vllm.readthedocs.io/en/latest/getting_started/quickstart.html ) - [ supported model ] ( http : //vllm.readthedocs.io/en/latest/models/supported_models.html )" ] ], "token": [ [ "getting", "started", "visit", "[", "documentation", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/", ")", "get", "started", ".", "-", "[", "installation", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/installation.html", ")", "-", "[", "quickstart", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/quickstart.html", ")", "-", "[", "supported", "model", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/models/supported_models.html", ")" ], [ "getting started visit [ documentation ] ( http : //vllm.readthedocs.io/en/latest/ ) get started .", "- [ installation ] ( http : //vllm.readthedocs.io/en/latest/getting_started/installation.html ) - [ quickstart ] ( http : //vllm.readthedocs.io/en/latest/getting_started/quickstart.html ) - [ supported model ] ( http : //vllm.readthedocs.io/en/latest/models/supported_models.html )" ] ], "level of complexity": -1 }, { "url": "https://github.com/vllm-project/vllm", "readme_url": "https://raw.githubusercontent.com/vllm-project/vllm/main/README.md", "topic": [ "gpt", "inference", "llama", "llm", "llm-serving", "llmops", "mlops", "model-serving", "pytorch", "transformer" ], "text": "Contributing\n\nWe welcome and value any contributions and collaborations.\nPlease check out [CONTRIBUTING.md](./CONTRIBUTING.md) for how to get involved.\n\n", "sentence": [ [ "contributing", "welcome", "value", "contribution", "collaboration", ".", "please", "check", "[", "contributing.md", "]", "(", "./contributing.md", ")", "get", "involved", "." ], [ "contributing welcome value contribution collaboration .", "please check [ contributing.md ] ( ./contributing.md ) get involved ." ] ], "token": [ [ "contributing", "welcome", "value", "contribution", "collaboration", ".", "please", "check", "[", "contributing.md", "]", "(", "./contributing.md", ")", "get", "involved", "." ], [ "contributing welcome value contribution collaboration .", "please check [ contributing.md ] ( ./contributing.md ) get involved ." ] ], "level of complexity": -1 }, { "url": "https://github.com/huggingface/peft", "readme_url": "https://raw.githubusercontent.com/huggingface/peft/main/README.md", "topic": [ "adapter", "diffusion", "llm", "lora", "parameter-efficient-learning", "python", "pytorch", "transformers" ], "text": "\n\n

\ud83e\udd17 PEFT

\n

\n

State-of-the-art Parameter-Efficient Fine-Tuning (PEFT) methods

\n

\n\nFine-tuning large pretrained models is often prohibitively costly due to their scale. Parameter-Efficient Fine-Tuning (PEFT) methods enable efficient adaptation of large pretrained models to various downstream applications by only fine-tuning a small number of (extra) model parameters instead of all the model's parameters. This significantly decreases the computational and storage costs. Recent state-of-the-art PEFT techniques achieve performance comparable to fully fine-tuned models.\n\nPEFT is integrated with Transformers for easy model training and inference, Diffusers for conveniently managing different adapters, and Accelerate for distributed training and inference for really big models.\n\n> [!TIP]\n> Visit the [PEFT](https://huggingface.co/PEFT) organization to read about the PEFT methods implemented in the library and to see notebooks demonstrating how to apply these methods to a variety of downstream tasks. Click the \"Watch repos\" button on the organization page to be notified of newly implemented methods and notebooks!\n\nCheck the PEFT Adapters API Reference section for a list of supported PEFT methods, and read the [Adapters](https://huggingface.co/docs/peft/en/conceptual_guides/adapter), [Soft prompts](https://huggingface.co/docs/peft/en/conceptual_guides/prompting), and [IA3](https://huggingface.co/docs/peft/en/conceptual_guides/ia3) conceptual guides to learn more about how these methods work.\n\n", "sentence": [ [ "<", "!", "--", "-", "copyright", "2023", "huggingface", "team", ".", "right", "reserved", ".", "licensed", "apache", "license", ",", "version", "2.0", "(", "``", "license", "''", ")", ";", "may", "use", "file", "except", "compliance", "license", ".", "may", "obtain", "copy", "license", "http", ":", "//www.apache.org/licenses/license-2.0", "unless", "required", "applicable", "law", "agreed", "writing", ",", "software", "distributed", "license", "distributed", "``", "''", "basis", ",", "without", "warranty", "condition", "kind", ",", "either", "express", "implied", ".", "see", "license", "specific", "language", "governing", "permission", "limitation", "license", ".", "--", ">", "<", "h1", "align=", "''", "center", "''", ">", "<", "p", ">", "\ud83e\udd17", "peft", "<", "/p", ">", "<", "/h1", ">", "<", "h3", "align=", "''", "center", "''", ">", "<", "p", ">", "state-of-the-art", "parameter-efficient", "fine-tuning", "(", "peft", ")", "method", "<", "/p", ">", "<", "/h3", ">", "fine-tuning", "large", "pretrained", "model", "often", "prohibitively", "costly", "due", "scale", ".", "parameter-efficient", "fine-tuning", "(", "peft", ")", "method", "enable", "efficient", "adaptation", "large", "pretrained", "model", "various", "downstream", "application", "fine-tuning", "small", "number", "(", "extra", ")", "model", "parameter", "instead", "model", "'s", "parameter", ".", "significantly", "decrease", "computational", "storage", "cost", ".", "recent", "state-of-the-art", "peft", "technique", "achieve", "performance", "comparable", "fully", "fine-tuned", "model", ".", "peft", "integrated", "transformer", "easy", "model", "training", "inference", ",", "diffuser", "conveniently", "managing", "different", "adapter", ",", "accelerate", "distributed", "training", "inference", "really", "big", "model", ".", ">", "[", "!", "tip", "]", ">", "visit", "[", "peft", "]", "(", "http", ":", "//huggingface.co/peft", ")", "organization", "read", "peft", "method", "implemented", "library", "see", "notebook", "demonstrating", "apply", "method", "variety", "downstream", "task", ".", "click", "``", "watch", "repos", "''", "button", "organization", "page", "notified", "newly", "implemented", "method", "notebook", "!", "check", "peft", "adapter", "api", "reference", "section", "list", "supported", "peft", "method", ",", "read", "[", "adapter", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/adapter", ")", ",", "[", "soft", "prompt", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/prompting", ")", ",", "[", "ia3", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/ia3", ")", "conceptual", "guide", "learn", "method", "work", "." ], [ "< ! -- - copyright 2023 huggingface team .", "right reserved .", "licensed apache license , version 2.0 ( `` license '' ) ; may use file except compliance license .", "may obtain copy license http : //www.apache.org/licenses/license-2.0 unless required applicable law agreed writing , software distributed license distributed `` '' basis , without warranty condition kind , either express implied .", "see license specific language governing permission limitation license .", "-- > < h1 align= '' center '' > < p > \ud83e\udd17 peft < /p > < /h1 > < h3 align= '' center '' > < p > state-of-the-art parameter-efficient fine-tuning ( peft ) method < /p > < /h3 > fine-tuning large pretrained model often prohibitively costly due scale .", "parameter-efficient fine-tuning ( peft ) method enable efficient adaptation large pretrained model various downstream application fine-tuning small number ( extra ) model parameter instead model 's parameter .", "significantly decrease computational storage cost .", "recent state-of-the-art peft technique achieve performance comparable fully fine-tuned model .", "peft integrated transformer easy model training inference , diffuser conveniently managing different adapter , accelerate distributed training inference really big model .", "> [ ! tip ] > visit [ peft ] ( http : //huggingface.co/peft ) organization read peft method implemented library see notebook demonstrating apply method variety downstream task .", "click `` watch repos '' button organization page notified newly implemented method notebook !", "check peft adapter api reference section list supported peft method , read [ adapter ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/adapter ) , [ soft prompt ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/prompting ) , [ ia3 ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/ia3 ) conceptual guide learn method work ." ] ], "token": [ [ "<", "!", "--", "-", "copyright", "2023", "huggingface", "team", ".", "right", "reserved", ".", "licensed", "apache", "license", ",", "version", "2.0", "(", "``", "license", "''", ")", ";", "may", "use", "file", "except", "compliance", "license", ".", "may", "obtain", "copy", "license", "http", ":", "//www.apache.org/licenses/license-2.0", "unless", "required", "applicable", "law", "agreed", "writing", ",", "software", "distributed", "license", "distributed", "``", "''", "basis", ",", "without", "warranty", "condition", "kind", ",", "either", "express", "implied", ".", "see", "license", "specific", "language", "governing", "permission", "limitation", "license", ".", "--", ">", "<", "h1", "align=", "''", "center", "''", ">", "<", "p", ">", "\ud83e\udd17", "peft", "<", "/p", ">", "<", "/h1", ">", "<", "h3", "align=", "''", "center", "''", ">", "<", "p", ">", "state-of-the-art", "parameter-efficient", "fine-tuning", "(", "peft", ")", "method", "<", "/p", ">", "<", "/h3", ">", "fine-tuning", "large", "pretrained", "model", "often", "prohibitively", "costly", "due", "scale", ".", "parameter-efficient", "fine-tuning", "(", "peft", ")", "method", "enable", "efficient", "adaptation", "large", "pretrained", "model", "various", "downstream", "application", "fine-tuning", "small", "number", "(", "extra", ")", "model", "parameter", "instead", "model", "'s", "parameter", ".", "significantly", "decrease", "computational", "storage", "cost", ".", "recent", "state-of-the-art", "peft", "technique", "achieve", "performance", "comparable", "fully", "fine-tuned", "model", ".", "peft", "integrated", "transformer", "easy", "model", "training", "inference", ",", "diffuser", "conveniently", "managing", "different", "adapter", ",", "accelerate", "distributed", "training", "inference", "really", "big", "model", ".", ">", "[", "!", "tip", "]", ">", "visit", "[", "peft", "]", "(", "http", ":", "//huggingface.co/peft", ")", "organization", "read", "peft", "method", "implemented", "library", "see", "notebook", "demonstrating", "apply", "method", "variety", "downstream", "task", ".", "click", "``", "watch", "repos", "''", "button", "organization", "page", "notified", "newly", "implemented", "method", "notebook", "!", "check", "peft", "adapter", "api", "reference", "section", "list", "supported", "peft", "method", ",", "read", "[", "adapter", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/adapter", ")", ",", "[", "soft", "prompt", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/prompting", ")", ",", "[", "ia3", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/ia3", ")", "conceptual", "guide", "learn", "method", "work", "." ], [ "< ! -- - copyright 2023 huggingface team .", "right reserved .", "licensed apache license , version 2.0 ( `` license '' ) ; may use file except compliance license .", "may obtain copy license http : //www.apache.org/licenses/license-2.0 unless required applicable law agreed writing , software distributed license distributed `` '' basis , without warranty condition kind , either express implied .", "see license specific language governing permission limitation license .", "-- > < h1 align= '' center '' > < p > \ud83e\udd17 peft < /p > < /h1 > < h3 align= '' center '' > < p > state-of-the-art parameter-efficient fine-tuning ( peft ) method < /p > < /h3 > fine-tuning large pretrained model often prohibitively costly due scale .", "parameter-efficient fine-tuning ( peft ) method enable efficient adaptation large pretrained model various downstream application fine-tuning small number ( extra ) model parameter instead model 's parameter .", "significantly decrease computational storage cost .", "recent state-of-the-art peft technique achieve performance comparable fully fine-tuned model .", "peft integrated transformer easy model training inference , diffuser conveniently managing different adapter , accelerate distributed training inference really big model .", "> [ ! tip ] > visit [ peft ] ( http : //huggingface.co/peft ) organization read peft method implemented library see notebook demonstrating apply method variety downstream task .", "click `` watch repos '' button organization page notified newly implemented method notebook !", "check peft adapter api reference section list supported peft method , read [ adapter ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/adapter ) , [ soft prompt ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/prompting ) , [ ia3 ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/ia3 ) conceptual guide learn method work ." ] ], "level of complexity": -1 }, { "url": "https://github.com/huggingface/peft", "readme_url": "https://raw.githubusercontent.com/huggingface/peft/main/README.md", "topic": [ "adapter", "diffusion", "llm", "lora", "parameter-efficient-learning", "python", "pytorch", "transformers" ], "text": "Quickstart\n\nInstall PEFT from pip:\n\n```bash\npip install peft\n```\n\nPrepare a model for training with a PEFT method such as LoRA by wrapping the base model and PEFT configuration with `get_peft_model`. For the bigscience/mt0-large model, you're only training 0.19% of the parameters!\n\n```python\nfrom transformers import AutoModelForSeq2SeqLM\nfrom peft import get_peft_config, get_peft_model, LoraConfig, TaskType\nmodel_name_or_path = \"bigscience/mt0-large\"\ntokenizer_name_or_path = \"bigscience/mt0-large\"\n\npeft_config = LoraConfig(\n task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1\n)\n\nmodel = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)\nmodel = get_peft_model(model, peft_config)\nmodel.print_trainable_parameters()\n\"trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282\"\n```\n\nTo load a PEFT model for inference:\n\n```py\nfrom peft import AutoPeftModelForCausalLM\nfrom transformers import AutoTokenizer\nimport torch\n\nmodel = AutoPeftModelForCausalLM.from_pretrained(\"ybelkada/opt-350m-lora\").to(\"cuda\")\ntokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n\nmodel.eval()\ninputs = tokenizer(\"Preheat the oven to 350 degrees and place the cookie dough\", return_tensors=\"pt\")\n\noutputs = model.generate(input_ids=inputs[\"input_ids\"].to(\"cuda\"), max_new_tokens=50)\nprint(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])\n\n\"Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla.\"\n```\n\n", "sentence": [ [ "quickstart", "install", "peft", "pip", ":", "``", "`", "bash", "pip", "install", "peft", "``", "`", "prepare", "model", "training", "peft", "method", "lora", "wrapping", "base", "model", "peft", "configuration", "`", "get_peft_model", "`", ".", "bigscience/mt0-large", "model", ",", "'re", "training", "0.19", "%", "parameter", "!", "``", "`", "python", "transformer", "import", "automodelforseq2seqlm", "peft", "import", "get_peft_config", ",", "get_peft_model", ",", "loraconfig", ",", "tasktype", "model_name_or_path", "=", "``", "bigscience/mt0-large", "''", "tokenizer_name_or_path", "=", "``", "bigscience/mt0-large", "''", "peft_config", "=", "loraconfig", "(", "task_type=tasktype.seq_2_seq_lm", ",", "inference_mode=false", ",", "r=8", ",", "lora_alpha=32", ",", "lora_dropout=0.1", ")", "model", "=", "automodelforseq2seqlm.from_pretrained", "(", "model_name_or_path", ")", "model", "=", "get_peft_model", "(", "model", ",", "peft_config", ")", "model.print_trainable_parameters", "(", ")", "''", "trainable", "params", ":", "2359296", "||", "params", ":", "1231940608", "||", "trainable", "%", ":", "0.19151053100118282", "''", "``", "`", "load", "peft", "model", "inference", ":", "``", "`", "py", "peft", "import", "autopeftmodelforcausallm", "transformer", "import", "autotokenizer", "import", "torch", "model", "=", "autopeftmodelforcausallm.from_pretrained", "(", "``", "ybelkada/opt-350m-lora", "''", ")", ".to", "(", "``", "cuda", "''", ")", "tokenizer", "=", "autotokenizer.from_pretrained", "(", "``", "facebook/opt-350m", "''", ")", "model.eval", "(", ")", "input", "=", "tokenizer", "(", "``", "preheat", "oven", "350", "degree", "place", "cookie", "dough", "''", ",", "return_tensors=", "''", "pt", "''", ")", "output", "=", "model.generate", "(", "input_ids=inputs", "[", "``", "input_ids", "''", "]", ".to", "(", "``", "cuda", "''", ")", ",", "max_new_tokens=50", ")", "print", "(", "tokenizer.batch_decode", "(", "output", ",", "skip_special_tokens=true", ")", "[", "0", "]", ")", "''", "preheat", "oven", "350", "degree", "place", "cookie", "dough", "center", "oven", ".", "large", "bowl", ",", "combine", "flour", ",", "baking", "powder", ",", "baking", "soda", ",", "salt", ",", "cinnamon", ".", "separate", "bowl", ",", "combine", "egg", "yolk", ",", "sugar", ",", "vanilla", ".", "''", "``", "`" ], [ "quickstart install peft pip : `` ` bash pip install peft `` ` prepare model training peft method lora wrapping base model peft configuration ` get_peft_model ` .", "bigscience/mt0-large model , 're training 0.19 % parameter !", "`` ` python transformer import automodelforseq2seqlm peft import get_peft_config , get_peft_model , loraconfig , tasktype model_name_or_path = `` bigscience/mt0-large '' tokenizer_name_or_path = `` bigscience/mt0-large '' peft_config = loraconfig ( task_type=tasktype.seq_2_seq_lm , inference_mode=false , r=8 , lora_alpha=32 , lora_dropout=0.1 ) model = automodelforseq2seqlm.from_pretrained ( model_name_or_path ) model = get_peft_model ( model , peft_config ) model.print_trainable_parameters ( ) '' trainable params : 2359296 || params : 1231940608 || trainable % : 0.19151053100118282 '' `` ` load peft model inference : `` ` py peft import autopeftmodelforcausallm transformer import autotokenizer import torch model = autopeftmodelforcausallm.from_pretrained ( `` ybelkada/opt-350m-lora '' ) .to ( `` cuda '' ) tokenizer = autotokenizer.from_pretrained ( `` facebook/opt-350m '' ) model.eval ( ) input = tokenizer ( `` preheat oven 350 degree place cookie dough '' , return_tensors= '' pt '' ) output = model.generate ( input_ids=inputs [ `` input_ids '' ] .to ( `` cuda '' ) , max_new_tokens=50 ) print ( tokenizer.batch_decode ( output , skip_special_tokens=true ) [ 0 ] ) '' preheat oven 350 degree place cookie dough center oven .", "large bowl , combine flour , baking powder , baking soda , salt , cinnamon .", "separate bowl , combine egg yolk , sugar , vanilla . ''", "`` `" ] ], "token": [ [ "quickstart", "install", "peft", "pip", ":", "``", "`", "bash", "pip", "install", "peft", "``", "`", "prepare", "model", "training", "peft", "method", "lora", "wrapping", "base", "model", "peft", "configuration", "`", "get_peft_model", "`", ".", "bigscience/mt0-large", "model", ",", "'re", "training", "0.19", "%", "parameter", "!", "``", "`", "python", "transformer", "import", "automodelforseq2seqlm", "peft", "import", "get_peft_config", ",", "get_peft_model", ",", "loraconfig", ",", "tasktype", "model_name_or_path", "=", "``", "bigscience/mt0-large", "''", "tokenizer_name_or_path", "=", "``", "bigscience/mt0-large", "''", "peft_config", "=", "loraconfig", "(", "task_type=tasktype.seq_2_seq_lm", ",", "inference_mode=false", ",", "r=8", ",", "lora_alpha=32", ",", "lora_dropout=0.1", ")", "model", "=", "automodelforseq2seqlm.from_pretrained", "(", "model_name_or_path", ")", "model", "=", "get_peft_model", "(", "model", ",", "peft_config", ")", "model.print_trainable_parameters", "(", ")", "''", "trainable", "params", ":", "2359296", "||", "params", ":", "1231940608", "||", "trainable", "%", ":", "0.19151053100118282", "''", "``", "`", "load", "peft", "model", "inference", ":", "``", "`", "py", "peft", "import", "autopeftmodelforcausallm", "transformer", "import", "autotokenizer", "import", "torch", "model", "=", "autopeftmodelforcausallm.from_pretrained", "(", "``", "ybelkada/opt-350m-lora", "''", ")", ".to", "(", "``", "cuda", "''", ")", "tokenizer", "=", "autotokenizer.from_pretrained", "(", "``", "facebook/opt-350m", "''", ")", "model.eval", "(", ")", "input", "=", "tokenizer", "(", "``", "preheat", "oven", "350", "degree", "place", "cookie", "dough", "''", ",", "return_tensors=", "''", "pt", "''", ")", "output", "=", "model.generate", "(", "input_ids=inputs", "[", "``", "input_ids", "''", "]", ".to", "(", "``", "cuda", "''", ")", ",", "max_new_tokens=50", ")", "print", "(", "tokenizer.batch_decode", "(", "output", ",", "skip_special_tokens=true", ")", "[", "0", "]", ")", "''", "preheat", "oven", "350", "degree", "place", "cookie", "dough", "center", "oven", ".", "large", "bowl", ",", "combine", "flour", ",", "baking", "powder", ",", "baking", "soda", ",", "salt", ",", "cinnamon", ".", "separate", "bowl", ",", "combine", "egg", "yolk", ",", "sugar", ",", "vanilla", ".", "''", "``", "`" ], [ "quickstart install peft pip : `` ` bash pip install peft `` ` prepare model training peft method lora wrapping base model peft configuration ` get_peft_model ` .", "bigscience/mt0-large model , 're training 0.19 % parameter !", "`` ` python transformer import automodelforseq2seqlm peft import get_peft_config , get_peft_model , loraconfig , tasktype model_name_or_path = `` bigscience/mt0-large '' tokenizer_name_or_path = `` bigscience/mt0-large '' peft_config = loraconfig ( task_type=tasktype.seq_2_seq_lm , inference_mode=false , r=8 , lora_alpha=32 , lora_dropout=0.1 ) model = automodelforseq2seqlm.from_pretrained ( model_name_or_path ) model = get_peft_model ( model , peft_config ) model.print_trainable_parameters ( ) '' trainable params : 2359296 || params : 1231940608 || trainable % : 0.19151053100118282 '' `` ` load peft model inference : `` ` py peft import autopeftmodelforcausallm transformer import autotokenizer import torch model = autopeftmodelforcausallm.from_pretrained ( `` ybelkada/opt-350m-lora '' ) .to ( `` cuda '' ) tokenizer = autotokenizer.from_pretrained ( `` facebook/opt-350m '' ) model.eval ( ) input = tokenizer ( `` preheat oven 350 degree place cookie dough '' , return_tensors= '' pt '' ) output = model.generate ( input_ids=inputs [ `` input_ids '' ] .to ( `` cuda '' ) , max_new_tokens=50 ) print ( tokenizer.batch_decode ( output , skip_special_tokens=true ) [ 0 ] ) '' preheat oven 350 degree place cookie dough center oven .", "large bowl , combine flour , baking powder , baking soda , salt , cinnamon .", "separate bowl , combine egg yolk , sugar , vanilla . ''", "`` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/huggingface/peft", "readme_url": "https://raw.githubusercontent.com/huggingface/peft/main/README.md", "topic": [ "adapter", "diffusion", "llm", "lora", "parameter-efficient-learning", "python", "pytorch", "transformers" ], "text": "Quantization\n\nQuantization is another method for reducing the memory requirements of a model by representing the data in a lower precision. It can be combined with PEFT methods to make it even easier to train and load LLMs for inference.\n\n* Learn how to finetune [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) with QLoRA and the [TRL](https://huggingface.co/docs/trl/index) library on a 16GB GPU in the [Finetune LLMs on your own consumer hardware using tools from PyTorch and Hugging Face ecosystem](https://pytorch.org/blog/finetune-llms/) blog post.\n* Learn how to finetune a [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) model for multilingual automatic speech recognition with LoRA and 8-bit quantization in this [notebook](https://colab.research.google.com/drive/1DOkD_5OUjFa0r5Ik3SgywJLJtEo2qLxO?usp=sharing) (see this [notebook](https://colab.research.google.com/drive/1vhF8yueFqha3Y3CpTHN6q9EVcII9EYzs?usp=sharing) instead for an example of streaming a dataset).\n\n", "sentence": [ [ "quantization", "quantization", "another", "method", "reducing", "memory", "requirement", "model", "representing", "data", "lower", "precision", ".", "combined", "peft", "method", "make", "even", "easier", "train", "load", "llm", "inference", ".", "*", "learn", "finetune", "[", "meta-llama/llama-2-7b-hf", "]", "(", "http", ":", "//huggingface.co/meta-llama/llama-2-7b-hf", ")", "qlora", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", "16gb", "gpu", "[", "finetune", "llm", "consumer", "hardware", "using", "tool", "pytorch", "hugging", "face", "ecosystem", "]", "(", "http", ":", "//pytorch.org/blog/finetune-llms/", ")", "blog", "post", ".", "*", "learn", "finetune", "[", "openai/whisper-large-v2", "]", "(", "http", ":", "//huggingface.co/openai/whisper-large-v2", ")", "model", "multilingual", "automatic", "speech", "recognition", "lora", "8-bit", "quantization", "[", "notebook", "]", "(", "http", ":", "//colab.research.google.com/drive/1dokd_5oujfa0r5ik3sgywjljteo2qlxo", "?", "usp=sharing", ")", "(", "see", "[", "notebook", "]", "(", "http", ":", "//colab.research.google.com/drive/1vhf8yuefqha3y3cpthn6q9evcii9eyzs", "?", "usp=sharing", ")", "instead", "example", "streaming", "dataset", ")", "." ], [ "quantization quantization another method reducing memory requirement model representing data lower precision .", "combined peft method make even easier train load llm inference .", "* learn finetune [ meta-llama/llama-2-7b-hf ] ( http : //huggingface.co/meta-llama/llama-2-7b-hf ) qlora [ trl ] ( http : //huggingface.co/docs/trl/index ) library 16gb gpu [ finetune llm consumer hardware using tool pytorch hugging face ecosystem ] ( http : //pytorch.org/blog/finetune-llms/ ) blog post .", "* learn finetune [ openai/whisper-large-v2 ] ( http : //huggingface.co/openai/whisper-large-v2 ) model multilingual automatic speech recognition lora 8-bit quantization [ notebook ] ( http : //colab.research.google.com/drive/1dokd_5oujfa0r5ik3sgywjljteo2qlxo ? usp=sharing ) ( see [ notebook ] ( http : //colab.research.google.com/drive/1vhf8yuefqha3y3cpthn6q9evcii9eyzs ? usp=sharing ) instead example streaming dataset ) ." ] ], "token": [ [ "quantization", "quantization", "another", "method", "reducing", "memory", "requirement", "model", "representing", "data", "lower", "precision", ".", "combined", "peft", "method", "make", "even", "easier", "train", "load", "llm", "inference", ".", "*", "learn", "finetune", "[", "meta-llama/llama-2-7b-hf", "]", "(", "http", ":", "//huggingface.co/meta-llama/llama-2-7b-hf", ")", "qlora", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", "16gb", "gpu", "[", "finetune", "llm", "consumer", "hardware", "using", "tool", "pytorch", "hugging", "face", "ecosystem", "]", "(", "http", ":", "//pytorch.org/blog/finetune-llms/", ")", "blog", "post", ".", "*", "learn", "finetune", "[", "openai/whisper-large-v2", "]", "(", "http", ":", "//huggingface.co/openai/whisper-large-v2", ")", "model", "multilingual", "automatic", "speech", "recognition", "lora", "8-bit", "quantization", "[", "notebook", "]", "(", "http", ":", "//colab.research.google.com/drive/1dokd_5oujfa0r5ik3sgywjljteo2qlxo", "?", "usp=sharing", ")", "(", "see", "[", "notebook", "]", "(", "http", ":", "//colab.research.google.com/drive/1vhf8yuefqha3y3cpthn6q9evcii9eyzs", "?", "usp=sharing", ")", "instead", "example", "streaming", "dataset", ")", "." ], [ "quantization quantization another method reducing memory requirement model representing data lower precision .", "combined peft method make even easier train load llm inference .", "* learn finetune [ meta-llama/llama-2-7b-hf ] ( http : //huggingface.co/meta-llama/llama-2-7b-hf ) qlora [ trl ] ( http : //huggingface.co/docs/trl/index ) library 16gb gpu [ finetune llm consumer hardware using tool pytorch hugging face ecosystem ] ( http : //pytorch.org/blog/finetune-llms/ ) blog post .", "* learn finetune [ openai/whisper-large-v2 ] ( http : //huggingface.co/openai/whisper-large-v2 ) model multilingual automatic speech recognition lora 8-bit quantization [ notebook ] ( http : //colab.research.google.com/drive/1dokd_5oujfa0r5ik3sgywjljteo2qlxo ? usp=sharing ) ( see [ notebook ] ( http : //colab.research.google.com/drive/1vhf8yuefqha3y3cpthn6q9evcii9eyzs ? usp=sharing ) instead example streaming dataset ) ." ] ], "level of complexity": -1 }, { "url": "https://github.com/huggingface/peft", "readme_url": "https://raw.githubusercontent.com/huggingface/peft/main/README.md", "topic": [ "adapter", "diffusion", "llm", "lora", "parameter-efficient-learning", "python", "pytorch", "transformers" ], "text": "Accelerate\n\n[Accelerate](https://huggingface.co/docs/accelerate/index) is a library for distributed training and inference on various training setups and hardware (GPUs, TPUs, Apple Silicon, etc.). PEFT models work with Accelerate out of the box, making it really convenient to train really large models or use them for inference on consumer hardware with limited resources.\n\n", "sentence": [ [ "accelerate", "[", "accelerate", "]", "(", "http", ":", "//huggingface.co/docs/accelerate/index", ")", "library", "distributed", "training", "inference", "various", "training", "setup", "hardware", "(", "gpus", ",", "tpus", ",", "apple", "silicon", ",", "etc", ".", ")", ".", "peft", "model", "work", "accelerate", "box", ",", "making", "really", "convenient", "train", "really", "large", "model", "use", "inference", "consumer", "hardware", "limited", "resource", "." ], [ "accelerate [ accelerate ] ( http : //huggingface.co/docs/accelerate/index ) library distributed training inference various training setup hardware ( gpus , tpus , apple silicon , etc . ) .", "peft model work accelerate box , making really convenient train really large model use inference consumer hardware limited resource ." ] ], "token": [ [ "accelerate", "[", "accelerate", "]", "(", "http", ":", "//huggingface.co/docs/accelerate/index", ")", "library", "distributed", "training", "inference", "various", "training", "setup", "hardware", "(", "gpus", ",", "tpus", ",", "apple", "silicon", ",", "etc", ".", ")", ".", "peft", "model", "work", "accelerate", "box", ",", "making", "really", "convenient", "train", "really", "large", "model", "use", "inference", "consumer", "hardware", "limited", "resource", "." ], [ "accelerate [ accelerate ] ( http : //huggingface.co/docs/accelerate/index ) library distributed training inference various training setup hardware ( gpus , tpus , apple silicon , etc . ) .", "peft model work accelerate box , making really convenient train really large model use inference consumer hardware limited resource ." ] ], "level of complexity": -1 }, { "url": "https://github.com/huggingface/peft", "readme_url": "https://raw.githubusercontent.com/huggingface/peft/main/README.md", "topic": [ "adapter", "diffusion", "llm", "lora", "parameter-efficient-learning", "python", "pytorch", "transformers" ], "text": "TRL\n\nPEFT can also be applied to training LLMs with RLHF components such as the ranker and policy. Get started by reading:\n\n* [Fine-tune a Mistral-7b model with Direct Preference Optimization](https://towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library to learn more about the Direct Preference Optimization (DPO) method and how to apply it to a LLM.\n* [Fine-tuning 20B LLMs with RLHF on a 24GB consumer GPU](https://huggingface.co/blog/trl-peft) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library, and then try out the [gpt2-sentiment_peft.ipynb](https://github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb) notebook to optimize GPT2 to generate positive movie reviews.\n* [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama) with PEFT, and then try out the [stack_llama/scripts](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts) for supervised finetuning, reward modeling, and RL finetuning.\n\n", "sentence": [ [ "trl", "peft", "also", "applied", "training", "llm", "rlhf", "component", "ranker", "policy", ".", "get", "started", "reading", ":", "*", "[", "fine-tune", "mistral-7b", "model", "direct", "preference", "optimization", "]", "(", "http", ":", "//towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac", ")", "peft", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", "learn", "direct", "preference", "optimization", "(", "dpo", ")", "method", "apply", "llm", ".", "*", "[", "fine-tuning", "20b", "llm", "rlhf", "24gb", "consumer", "gpu", "]", "(", "http", ":", "//huggingface.co/blog/trl-peft", ")", "peft", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", ",", "try", "[", "gpt2-sentiment_peft.ipynb", "]", "(", "http", ":", "//github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb", ")", "notebook", "optimize", "gpt2", "generate", "positive", "movie", "review", ".", "*", "[", "stackllama", ":", "hands-on", "guide", "train", "llama", "rlhf", "]", "(", "http", ":", "//huggingface.co/blog/stackllama", ")", "peft", ",", "try", "[", "stack_llama/scripts", "]", "(", "http", ":", "//github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts", ")", "supervised", "finetuning", ",", "reward", "modeling", ",", "rl", "finetuning", "." ], [ "trl peft also applied training llm rlhf component ranker policy .", "get started reading : * [ fine-tune mistral-7b model direct preference optimization ] ( http : //towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac ) peft [ trl ] ( http : //huggingface.co/docs/trl/index ) library learn direct preference optimization ( dpo ) method apply llm .", "* [ fine-tuning 20b llm rlhf 24gb consumer gpu ] ( http : //huggingface.co/blog/trl-peft ) peft [ trl ] ( http : //huggingface.co/docs/trl/index ) library , try [ gpt2-sentiment_peft.ipynb ] ( http : //github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb ) notebook optimize gpt2 generate positive movie review .", "* [ stackllama : hands-on guide train llama rlhf ] ( http : //huggingface.co/blog/stackllama ) peft , try [ stack_llama/scripts ] ( http : //github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts ) supervised finetuning , reward modeling , rl finetuning ." ] ], "token": [ [ "trl", "peft", "also", "applied", "training", "llm", "rlhf", "component", "ranker", "policy", ".", "get", "started", "reading", ":", "*", "[", "fine-tune", "mistral-7b", "model", "direct", "preference", "optimization", "]", "(", "http", ":", "//towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac", ")", "peft", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", "learn", "direct", "preference", "optimization", "(", "dpo", ")", "method", "apply", "llm", ".", "*", "[", "fine-tuning", "20b", "llm", "rlhf", "24gb", "consumer", "gpu", "]", "(", "http", ":", "//huggingface.co/blog/trl-peft", ")", "peft", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", ",", "try", "[", "gpt2-sentiment_peft.ipynb", "]", "(", "http", ":", "//github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb", ")", "notebook", "optimize", "gpt2", "generate", "positive", "movie", "review", ".", "*", "[", "stackllama", ":", "hands-on", "guide", "train", "llama", "rlhf", "]", "(", "http", ":", "//huggingface.co/blog/stackllama", ")", "peft", ",", "try", "[", "stack_llama/scripts", "]", "(", "http", ":", "//github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts", ")", "supervised", "finetuning", ",", "reward", "modeling", ",", "rl", "finetuning", "." ], [ "trl peft also applied training llm rlhf component ranker policy .", "get started reading : * [ fine-tune mistral-7b model direct preference optimization ] ( http : //towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac ) peft [ trl ] ( http : //huggingface.co/docs/trl/index ) library learn direct preference optimization ( dpo ) method apply llm .", "* [ fine-tuning 20b llm rlhf 24gb consumer gpu ] ( http : //huggingface.co/blog/trl-peft ) peft [ trl ] ( http : //huggingface.co/docs/trl/index ) library , try [ gpt2-sentiment_peft.ipynb ] ( http : //github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb ) notebook optimize gpt2 generate positive movie review .", "* [ stackllama : hands-on guide train llama rlhf ] ( http : //huggingface.co/blog/stackllama ) peft , try [ stack_llama/scripts ] ( http : //github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts ) supervised finetuning , reward modeling , rl finetuning ." ] ], "level of complexity": -1 }, { "url": "https://github.com/hiyouga/LLaMA-Factory", "readme_url": "https://raw.githubusercontent.com/hiyouga/LLaMA-Factory/main/README.md", "topic": [ "agent", "baichuan", "chatglm", "fine-tuning", "generative-ai", "gpt", "instruction-tuning", "language-model", "large-language-models", "llama", "llm", "lora", "mistral", "mixture-of-experts", "peft", "qlora", "quantization", "qwen", "rlhf", "transformers" ], "text": "LLaMA Board: A One-stop Web UI for Getting Started with LLaMA Factory\n\nPreview LLaMA Board at **[\ud83e\udd17 Spaces](https://huggingface.co/spaces/hiyouga/LLaMA-Board)** or **[ModelScope](https://modelscope.cn/studios/hiyouga/LLaMA-Board)**.\n\nLaunch LLaMA Board via `CUDA_VISIBLE_DEVICES=0 python src/train_web.py`. (multiple GPUs are not supported yet in this mode)\n\nHere is an example of altering the self-cognition of an instruction-tuned language model within 10 minutes on a single GPU.\n\nhttps://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1\n\n", "sentence": [ [ "llama", "board", ":", "one-stop", "web", "ui", "getting", "started", "llama", "factory", "preview", "llama", "board", "*", "*", "[", "\ud83e\udd17", "space", "]", "(", "http", ":", "//huggingface.co/spaces/hiyouga/llama-board", ")", "*", "*", "*", "*", "[", "modelscope", "]", "(", "http", ":", "//modelscope.cn/studios/hiyouga/llama-board", ")", "*", "*", ".", "launch", "llama", "board", "via", "`", "cuda_visible_devices=0", "python", "src/train_web.py", "`", ".", "(", "multiple", "gpus", "supported", "yet", "mode", ")", "example", "altering", "self-cognition", "instruction-tuned", "language", "model", "within", "10", "minute", "single", "gpu", ".", "http", ":", "//github.com/hiyouga/llama-factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1" ], [ "llama board : one-stop web ui getting started llama factory preview llama board * * [ \ud83e\udd17 space ] ( http : //huggingface.co/spaces/hiyouga/llama-board ) * * * * [ modelscope ] ( http : //modelscope.cn/studios/hiyouga/llama-board ) * * .", "launch llama board via ` cuda_visible_devices=0 python src/train_web.py ` .", "( multiple gpus supported yet mode ) example altering self-cognition instruction-tuned language model within 10 minute single gpu .", "http : //github.com/hiyouga/llama-factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1" ] ], "token": [ [ "llama", "board", ":", "one-stop", "web", "ui", "getting", "started", "llama", "factory", "preview", "llama", "board", "*", "*", "[", "\ud83e\udd17", "space", "]", "(", "http", ":", "//huggingface.co/spaces/hiyouga/llama-board", ")", "*", "*", "*", "*", "[", "modelscope", "]", "(", "http", ":", "//modelscope.cn/studios/hiyouga/llama-board", ")", "*", "*", ".", "launch", "llama", "board", "via", "`", "cuda_visible_devices=0", "python", "src/train_web.py", "`", ".", "(", "multiple", "gpus", "supported", "yet", "mode", ")", "example", "altering", "self-cognition", "instruction-tuned", "language", "model", "within", "10", "minute", "single", "gpu", ".", "http", ":", "//github.com/hiyouga/llama-factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1" ], [ "llama board : one-stop web ui getting started llama factory preview llama board * * [ \ud83e\udd17 space ] ( http : //huggingface.co/spaces/hiyouga/llama-board ) * * * * [ modelscope ] ( http : //modelscope.cn/studios/hiyouga/llama-board ) * * .", "launch llama board via ` cuda_visible_devices=0 python src/train_web.py ` .", "( multiple gpus supported yet mode ) example altering self-cognition instruction-tuned language model within 10 minute single gpu .", "http : //github.com/hiyouga/llama-factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1" ] ], "level of complexity": -1 }, { "url": "https://github.com/hiyouga/LLaMA-Factory", "readme_url": "https://raw.githubusercontent.com/hiyouga/LLaMA-Factory/main/README.md", "topic": [ "agent", "baichuan", "chatglm", "fine-tuning", "generative-ai", "gpt", "instruction-tuning", "language-model", "large-language-models", "llama", "llm", "lora", "mistral", "mixture-of-experts", "peft", "qlora", "quantization", "qwen", "rlhf", "transformers" ], "text": "Table of Contents\n\n- [Benchmark](#benchmark)\n- [Changelog](#changelog)\n- [Supported Models](#supported-models)\n- [Supported Training Approaches](#supported-training-approaches)\n- [Provided Datasets](#provided-datasets)\n- [Requirement](#requirement)\n- [Getting Started](#getting-started)\n- [Projects using LLaMA Factory](#projects-using-llama-factory)\n- [License](#license)\n- [Citation](#citation)\n- [Acknowledgement](#acknowledgement)\n\n", "sentence": [ [ "table", "content", "-", "[", "benchmark", "]", "(", "#", "benchmark", ")", "-", "[", "changelog", "]", "(", "#", "changelog", ")", "-", "[", "supported", "model", "]", "(", "#", "supported-models", ")", "-", "[", "supported", "training", "approach", "]", "(", "#", "supported-training-approaches", ")", "-", "[", "provided", "datasets", "]", "(", "#", "provided-datasets", ")", "-", "[", "requirement", "]", "(", "#", "requirement", ")", "-", "[", "getting", "started", "]", "(", "#", "getting-started", ")", "-", "[", "project", "using", "llama", "factory", "]", "(", "#", "projects-using-llama-factory", ")", "-", "[", "license", "]", "(", "#", "license", ")", "-", "[", "citation", "]", "(", "#", "citation", ")", "-", "[", "acknowledgement", "]", "(", "#", "acknowledgement", ")" ], [ "table content - [ benchmark ] ( # benchmark ) - [ changelog ] ( # changelog ) - [ supported model ] ( # supported-models ) - [ supported training approach ] ( # supported-training-approaches ) - [ provided datasets ] ( # provided-datasets ) - [ requirement ] ( # requirement ) - [ getting started ] ( # getting-started ) - [ project using llama factory ] ( # projects-using-llama-factory ) - [ license ] ( # license ) - [ citation ] ( # citation ) - [ acknowledgement ] ( # acknowledgement )" ] ], "token": [ [ "table", "content", "-", "[", "benchmark", "]", "(", "#", "benchmark", ")", "-", "[", "changelog", "]", "(", "#", "changelog", ")", "-", "[", "supported", "model", "]", "(", "#", "supported-models", ")", "-", "[", "supported", "training", "approach", "]", "(", "#", "supported-training-approaches", ")", "-", "[", "provided", "datasets", "]", "(", "#", "provided-datasets", ")", "-", "[", "requirement", "]", "(", "#", "requirement", ")", "-", "[", "getting", "started", "]", "(", "#", "getting-started", ")", "-", "[", "project", "using", "llama", "factory", "]", "(", "#", "projects-using-llama-factory", ")", "-", "[", "license", "]", "(", "#", "license", ")", "-", "[", "citation", "]", "(", "#", "citation", ")", "-", "[", "acknowledgement", "]", "(", "#", "acknowledgement", ")" ], [ "table content - [ benchmark ] ( # benchmark ) - [ changelog ] ( # changelog ) - [ supported model ] ( # supported-models ) - [ supported training approach ] ( # supported-training-approaches ) - [ provided datasets ] ( # provided-datasets ) - [ requirement ] ( # requirement ) - [ getting started ] ( # getting-started ) - [ project using llama factory ] ( # projects-using-llama-factory ) - [ license ] ( # license ) - [ citation ] ( # citation ) - [ acknowledgement ] ( # acknowledgement )" ] ], "level of complexity": -1 }, { "url": "https://github.com/hiyouga/LLaMA-Factory", "readme_url": "https://raw.githubusercontent.com/hiyouga/LLaMA-Factory/main/README.md", "topic": [ "agent", "baichuan", "chatglm", "fine-tuning", "generative-ai", "gpt", "instruction-tuning", "language-model", "large-language-models", "llama", "llm", "lora", "mistral", "mixture-of-experts", "peft", "qlora", "quantization", "qwen", "rlhf", "transformers" ], "text": "Provided Datasets\n\n
Pre-training datasets\n\n- [Wiki Demo (en)](data/wiki_demo.txt)\n- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)\n- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)\n- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)\n- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)\n- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)\n- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)\n- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)\n- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)\n\n
\n\n
Supervised fine-tuning datasets\n\n- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)\n- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)\n- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)\n- [Self Cognition (zh)](data/self_cognition.json)\n- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)\n- [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)\n- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)\n- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)\n- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)\n- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)\n- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)\n- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)\n- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)\n- [UltraChat (en)](https://github.com/thunlp/UltraChat)\n- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)\n- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)\n- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)\n- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)\n- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)\n- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)\n- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)\n- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa)\n- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)\n- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)\n- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)\n- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)\n- [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)\n- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)\n- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)\n- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)\n- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)\n- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)\n- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)\n- [Glaive Function Calling V2 (en)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)\n- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)\n- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)\n- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)\n- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de)\n- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de)\n- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de)\n- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de)\n- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de)\n- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de)\n\n
\n\n
Preference datasets\n\n- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)\n- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)\n- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)\n- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)\n- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)\n\n
\n\nPlease refer to [data/README.md](data/README.md) for details.\n\nSome datasets require confirmation before using them, so we recommend logging in with your Hugging Face account using these commands.\n\n```bash\npip install --upgrade huggingface_hub\nhuggingface-cli login\n```\n\n", "sentence": [ [ "provided", "datasets", "<", "detail", ">", "<", "summary", ">", "pre-training", "datasets", "<", "/summary", ">", "-", "[", "wiki", "demo", "(", "en", ")", "]", "(", "data/wiki_demo.txt", ")", "-", "[", "refinedweb", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/tiiuae/falcon-refinedweb", ")", "-", "[", "redpajama", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/togethercomputer/redpajama-data-v2", ")", "-", "[", "wikipedia", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/olm/olm-wikipedia-20221220", ")", "-", "[", "wikipedia", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered", ")", "-", "[", "pile", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/eleutherai/pile", ")", "-", "[", "skypile", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/skywork/skypile-150b", ")", "-", "[", "stack", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bigcode/the-stack", ")", "-", "[", "starcoder", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bigcode/starcoderdata", ")", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "supervised", "fine-tuning", "datasets", "<", "/summary", ">", "-", "[", "stanford", "alpaca", "(", "en", ")", "]", "(", "http", ":", "//github.com/tatsu-lab/stanford_alpaca", ")", "-", "[", "stanford", "alpaca", "(", "zh", ")", "]", "(", "http", ":", "//github.com/ymcui/chinese-llama-alpaca", ")", "-", "[", "alpaca", "gpt4", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//github.com/instruction-tuning-with-gpt-4/gpt-4-llm", ")", "-", "[", "self", "cognition", "(", "zh", ")", "]", "(", "data/self_cognition.json", ")", "-", "[", "open", "assistant", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/openassistant/oasst1", ")", "-", "[", "sharegpt", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/qingyisi/alpaca-cot/tree/main/chinese-instruction-collection", ")", "-", "[", "guanaco", "dataset", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/josephuscheung/guanacodataset", ")", "-", "[", "belle", "2m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_2m_cn", ")", "-", "[", "belle", "1m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_1m_cn", ")", "-", "[", "belle", "0.5m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_0.5m_cn", ")", "-", "[", "belle", "dialogue", "0.4m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/generated_chat_0.4m", ")", "-", "[", "belle", "school", "math", "0.25m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/school_math_0.25m", ")", "-", "[", "belle", "multiturn", "chat", "0.8m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/multiturn_chat_0.8m", ")", "-", "[", "ultrachat", "(", "en", ")", "]", "(", "http", ":", "//github.com/thunlp/ultrachat", ")", "-", "[", "lima", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/gair/lima", ")", "-", "[", "openplatypus", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/garage-baind/open-platypus", ")", "-", "[", "codealpaca", "20k", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/sahil2801/codealpaca-20k", ")", "-", "[", "alpaca", "cot", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/qingyisi/alpaca-cot", ")", "-", "[", "openorca", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/open-orca/openorca", ")", "-", "[", "mathinstruct", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/tiger-lab/mathinstruct", ")", "-", "[", "firefly", "1.1m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/yeungnlp/firefly-train-1.1m", ")", "-", "[", "wiki", "qa", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/wiki_qa", ")", "-", "[", "web", "qa", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/suolyer/webqa", ")", "-", "[", "webnovel", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/zxbsmk/webnovel_cn", ")", "-", "[", "nectar", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/berkeley-nest/nectar", ")", "-", "[", "deepctrl", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data", ")", "-", "[", "ad", "gen", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/hasturofficial/adgen", ")", "-", "[", "sharegpt", "hyperfiltered", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k", ")", "-", "[", "sharegpt4", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/shibing624/sharegpt_gpt4", ")", "-", "[", "ultrachat", "200k", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/huggingfaceh4/ultrachat_200k", ")", "-", "[", "agentinstruct", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/thudm/agentinstruct", ")", "-", "[", "lmsys", "chat", "1m", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/lmsys/lmsys-chat-1m", ")", "-", "[", "evol", "instruct", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/wizardlm/wizardlm_evol_instruct_v2_196k", ")", "-", "[", "glaive", "function", "calling", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/glaiveai/glaive-function-calling-v2", ")", "-", "[", "open", "assistant", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/oasst_de", ")", "-", "[", "dolly", "15k", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/dolly-15k_de", ")", "-", "[", "alpaca", "gpt4", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de", ")", "-", "[", "openschnabeltier", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/openschnabeltier_de", ")", "-", "[", "evol", "instruct", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/evol-instruct_de", ")", "-", "[", "dolphin", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/dolphin_de", ")", "-", "[", "booksum", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/booksum_de", ")", "-", "[", "airoboros", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de", ")", "-", "[", "ultrachat", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/ultra-chat_de", ")", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "preference", "datasets", "<", "/summary", ">", "-", "[", "hh-rlhf", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/anthropic/hh-rlhf", ")", "-", "[", "open", "assistant", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/openassistant/oasst1", ")", "-", "[", "gpt-4", "generated", "data", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//github.com/instruction-tuning-with-gpt-4/gpt-4-llm", ")", "-", "[", "nectar", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/berkeley-nest/nectar", ")", "-", "[", "orca", "dpo", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de", ")", "<", "/details", ">", "please", "refer", "[", "data/readme.md", "]", "(", "data/readme.md", ")", "detail", ".", "datasets", "require", "confirmation", "using", ",", "recommend", "logging", "hugging", "face", "account", "using", "command", ".", "``", "`", "bash", "pip", "install", "--", "upgrade", "huggingface_hub", "huggingface-cli", "login", "``", "`" ], [ "provided datasets < detail > < summary > pre-training datasets < /summary > - [ wiki demo ( en ) ] ( data/wiki_demo.txt ) - [ refinedweb ( en ) ] ( http : //huggingface.co/datasets/tiiuae/falcon-refinedweb ) - [ redpajama v2 ( en ) ] ( http : //huggingface.co/datasets/togethercomputer/redpajama-data-v2 ) - [ wikipedia ( en ) ] ( http : //huggingface.co/datasets/olm/olm-wikipedia-20221220 ) - [ wikipedia ( zh ) ] ( http : //huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered ) - [ pile ( en ) ] ( http : //huggingface.co/datasets/eleutherai/pile ) - [ skypile ( zh ) ] ( http : //huggingface.co/datasets/skywork/skypile-150b ) - [ stack ( en ) ] ( http : //huggingface.co/datasets/bigcode/the-stack ) - [ starcoder ( en ) ] ( http : //huggingface.co/datasets/bigcode/starcoderdata ) < /details > < detail > < summary > supervised fine-tuning datasets < /summary > - [ stanford alpaca ( en ) ] ( http : //github.com/tatsu-lab/stanford_alpaca ) - [ stanford alpaca ( zh ) ] ( http : //github.com/ymcui/chinese-llama-alpaca ) - [ alpaca gpt4 ( en & zh ) ] ( http : //github.com/instruction-tuning-with-gpt-4/gpt-4-llm ) - [ self cognition ( zh ) ] ( data/self_cognition.json ) - [ open assistant ( multilingual ) ] ( http : //huggingface.co/datasets/openassistant/oasst1 ) - [ sharegpt ( zh ) ] ( http : //huggingface.co/datasets/qingyisi/alpaca-cot/tree/main/chinese-instruction-collection ) - [ guanaco dataset ( multilingual ) ] ( http : //huggingface.co/datasets/josephuscheung/guanacodataset ) - [ belle 2m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_2m_cn ) - [ belle 1m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_1m_cn ) - [ belle 0.5m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_0.5m_cn ) - [ belle dialogue 0.4m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/generated_chat_0.4m ) - [ belle school math 0.25m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/school_math_0.25m ) - [ belle multiturn chat 0.8m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/multiturn_chat_0.8m ) - [ ultrachat ( en ) ] ( http : //github.com/thunlp/ultrachat ) - [ lima ( en ) ] ( http : //huggingface.co/datasets/gair/lima ) - [ openplatypus ( en ) ] ( http : //huggingface.co/datasets/garage-baind/open-platypus ) - [ codealpaca 20k ( en ) ] ( http : //huggingface.co/datasets/sahil2801/codealpaca-20k ) - [ alpaca cot ( multilingual ) ] ( http : //huggingface.co/datasets/qingyisi/alpaca-cot ) - [ openorca ( en ) ] ( http : //huggingface.co/datasets/open-orca/openorca ) - [ mathinstruct ( en ) ] ( http : //huggingface.co/datasets/tiger-lab/mathinstruct ) - [ firefly 1.1m ( zh ) ] ( http : //huggingface.co/datasets/yeungnlp/firefly-train-1.1m ) - [ wiki qa ( en ) ] ( http : //huggingface.co/datasets/wiki_qa ) - [ web qa ( zh ) ] ( http : //huggingface.co/datasets/suolyer/webqa ) - [ webnovel ( zh ) ] ( http : //huggingface.co/datasets/zxbsmk/webnovel_cn ) - [ nectar ( en ) ] ( http : //huggingface.co/datasets/berkeley-nest/nectar ) - [ deepctrl ( en & zh ) ] ( http : //www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data ) - [ ad gen ( zh ) ] ( http : //huggingface.co/datasets/hasturofficial/adgen ) - [ sharegpt hyperfiltered ( en ) ] ( http : //huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k ) - [ sharegpt4 ( en & zh ) ] ( http : //huggingface.co/datasets/shibing624/sharegpt_gpt4 ) - [ ultrachat 200k ( en ) ] ( http : //huggingface.co/datasets/huggingfaceh4/ultrachat_200k ) - [ agentinstruct ( en ) ] ( http : //huggingface.co/datasets/thudm/agentinstruct ) - [ lmsys chat 1m ( en ) ] ( http : //huggingface.co/datasets/lmsys/lmsys-chat-1m ) - [ evol instruct v2 ( en ) ] ( http : //huggingface.co/datasets/wizardlm/wizardlm_evol_instruct_v2_196k ) - [ glaive function calling v2 ( en ) ] ( http : //huggingface.co/datasets/glaiveai/glaive-function-calling-v2 ) - [ open assistant ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/oasst_de ) - [ dolly 15k ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/dolly-15k_de ) - [ alpaca gpt4 ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de ) - [ openschnabeltier ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/openschnabeltier_de ) - [ evol instruct ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/evol-instruct_de ) - [ dolphin ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/dolphin_de ) - [ booksum ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/booksum_de ) - [ airoboros ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de ) - [ ultrachat ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/ultra-chat_de ) < /details > < detail > < summary > preference datasets < /summary > - [ hh-rlhf ( en ) ] ( http : //huggingface.co/datasets/anthropic/hh-rlhf ) - [ open assistant ( multilingual ) ] ( http : //huggingface.co/datasets/openassistant/oasst1 ) - [ gpt-4 generated data ( en & zh ) ] ( http : //github.com/instruction-tuning-with-gpt-4/gpt-4-llm ) - [ nectar ( en ) ] ( http : //huggingface.co/datasets/berkeley-nest/nectar ) - [ orca dpo ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de ) < /details > please refer [ data/readme.md ] ( data/readme.md ) detail .", "datasets require confirmation using , recommend logging hugging face account using command .", "`` ` bash pip install -- upgrade huggingface_hub huggingface-cli login `` `" ] ], "token": [ [ "provided", "datasets", "<", "detail", ">", "<", "summary", ">", "pre-training", "datasets", "<", "/summary", ">", "-", "[", "wiki", "demo", "(", "en", ")", "]", "(", "data/wiki_demo.txt", ")", "-", "[", "refinedweb", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/tiiuae/falcon-refinedweb", ")", "-", "[", "redpajama", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/togethercomputer/redpajama-data-v2", ")", "-", "[", "wikipedia", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/olm/olm-wikipedia-20221220", ")", "-", "[", "wikipedia", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered", ")", "-", "[", "pile", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/eleutherai/pile", ")", "-", "[", "skypile", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/skywork/skypile-150b", ")", "-", "[", "stack", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bigcode/the-stack", ")", "-", "[", "starcoder", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bigcode/starcoderdata", ")", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "supervised", "fine-tuning", "datasets", "<", "/summary", ">", "-", "[", "stanford", "alpaca", "(", "en", ")", "]", "(", "http", ":", "//github.com/tatsu-lab/stanford_alpaca", ")", "-", "[", "stanford", "alpaca", "(", "zh", ")", "]", "(", "http", ":", "//github.com/ymcui/chinese-llama-alpaca", ")", "-", "[", "alpaca", "gpt4", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//github.com/instruction-tuning-with-gpt-4/gpt-4-llm", ")", "-", "[", "self", "cognition", "(", "zh", ")", "]", "(", "data/self_cognition.json", ")", "-", "[", "open", "assistant", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/openassistant/oasst1", ")", "-", "[", "sharegpt", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/qingyisi/alpaca-cot/tree/main/chinese-instruction-collection", ")", "-", "[", "guanaco", "dataset", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/josephuscheung/guanacodataset", ")", "-", "[", "belle", "2m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_2m_cn", ")", "-", "[", "belle", "1m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_1m_cn", ")", "-", "[", "belle", "0.5m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_0.5m_cn", ")", "-", "[", "belle", "dialogue", "0.4m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/generated_chat_0.4m", ")", "-", "[", "belle", "school", "math", "0.25m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/school_math_0.25m", ")", "-", "[", "belle", "multiturn", "chat", "0.8m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/multiturn_chat_0.8m", ")", "-", "[", "ultrachat", "(", "en", ")", "]", "(", "http", ":", "//github.com/thunlp/ultrachat", ")", "-", "[", "lima", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/gair/lima", ")", "-", "[", "openplatypus", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/garage-baind/open-platypus", ")", "-", "[", "codealpaca", "20k", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/sahil2801/codealpaca-20k", ")", "-", "[", "alpaca", "cot", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/qingyisi/alpaca-cot", ")", "-", "[", "openorca", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/open-orca/openorca", ")", "-", "[", "mathinstruct", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/tiger-lab/mathinstruct", ")", "-", "[", "firefly", "1.1m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/yeungnlp/firefly-train-1.1m", ")", "-", "[", "wiki", "qa", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/wiki_qa", ")", "-", "[", "web", "qa", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/suolyer/webqa", ")", "-", "[", "webnovel", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/zxbsmk/webnovel_cn", ")", "-", "[", "nectar", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/berkeley-nest/nectar", ")", "-", "[", "deepctrl", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data", ")", "-", "[", "ad", "gen", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/hasturofficial/adgen", ")", "-", "[", "sharegpt", "hyperfiltered", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k", ")", "-", "[", "sharegpt4", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/shibing624/sharegpt_gpt4", ")", "-", "[", "ultrachat", "200k", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/huggingfaceh4/ultrachat_200k", ")", "-", "[", "agentinstruct", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/thudm/agentinstruct", ")", "-", "[", "lmsys", "chat", "1m", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/lmsys/lmsys-chat-1m", ")", "-", "[", "evol", "instruct", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/wizardlm/wizardlm_evol_instruct_v2_196k", ")", "-", "[", "glaive", "function", "calling", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/glaiveai/glaive-function-calling-v2", ")", "-", "[", "open", "assistant", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/oasst_de", ")", "-", "[", "dolly", "15k", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/dolly-15k_de", ")", "-", "[", "alpaca", "gpt4", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de", ")", "-", "[", "openschnabeltier", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/openschnabeltier_de", ")", "-", "[", "evol", "instruct", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/evol-instruct_de", ")", "-", "[", "dolphin", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/dolphin_de", ")", "-", "[", "booksum", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/booksum_de", ")", "-", "[", "airoboros", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de", ")", "-", "[", "ultrachat", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/ultra-chat_de", ")", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "preference", "datasets", "<", "/summary", ">", "-", "[", "hh-rlhf", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/anthropic/hh-rlhf", ")", "-", "[", "open", "assistant", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/openassistant/oasst1", ")", "-", "[", "gpt-4", "generated", "data", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//github.com/instruction-tuning-with-gpt-4/gpt-4-llm", ")", "-", "[", "nectar", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/berkeley-nest/nectar", ")", "-", "[", "orca", "dpo", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de", ")", "<", "/details", ">", "please", "refer", "[", "data/readme.md", "]", "(", "data/readme.md", ")", "detail", ".", "datasets", "require", "confirmation", "using", ",", "recommend", "logging", "hugging", "face", "account", "using", "command", ".", "``", "`", "bash", "pip", "install", "--", "upgrade", "huggingface_hub", "huggingface-cli", "login", "``", "`" ], [ "provided datasets < detail > < summary > pre-training datasets < /summary > - [ wiki demo ( en ) ] ( data/wiki_demo.txt ) - [ refinedweb ( en ) ] ( http : //huggingface.co/datasets/tiiuae/falcon-refinedweb ) - [ redpajama v2 ( en ) ] ( http : //huggingface.co/datasets/togethercomputer/redpajama-data-v2 ) - [ wikipedia ( en ) ] ( http : //huggingface.co/datasets/olm/olm-wikipedia-20221220 ) - [ wikipedia ( zh ) ] ( http : //huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered ) - [ pile ( en ) ] ( http : //huggingface.co/datasets/eleutherai/pile ) - [ skypile ( zh ) ] ( http : //huggingface.co/datasets/skywork/skypile-150b ) - [ stack ( en ) ] ( http : //huggingface.co/datasets/bigcode/the-stack ) - [ starcoder ( en ) ] ( http : //huggingface.co/datasets/bigcode/starcoderdata ) < /details > < detail > < summary > supervised fine-tuning datasets < /summary > - [ stanford alpaca ( en ) ] ( http : //github.com/tatsu-lab/stanford_alpaca ) - [ stanford alpaca ( zh ) ] ( http : //github.com/ymcui/chinese-llama-alpaca ) - [ alpaca gpt4 ( en & zh ) ] ( http : //github.com/instruction-tuning-with-gpt-4/gpt-4-llm ) - [ self cognition ( zh ) ] ( data/self_cognition.json ) - [ open assistant ( multilingual ) ] ( http : //huggingface.co/datasets/openassistant/oasst1 ) - [ sharegpt ( zh ) ] ( http : //huggingface.co/datasets/qingyisi/alpaca-cot/tree/main/chinese-instruction-collection ) - [ guanaco dataset ( multilingual ) ] ( http : //huggingface.co/datasets/josephuscheung/guanacodataset ) - [ belle 2m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_2m_cn ) - [ belle 1m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_1m_cn ) - [ belle 0.5m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_0.5m_cn ) - [ belle dialogue 0.4m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/generated_chat_0.4m ) - [ belle school math 0.25m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/school_math_0.25m ) - [ belle multiturn chat 0.8m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/multiturn_chat_0.8m ) - [ ultrachat ( en ) ] ( http : //github.com/thunlp/ultrachat ) - [ lima ( en ) ] ( http : //huggingface.co/datasets/gair/lima ) - [ openplatypus ( en ) ] ( http : //huggingface.co/datasets/garage-baind/open-platypus ) - [ codealpaca 20k ( en ) ] ( http : //huggingface.co/datasets/sahil2801/codealpaca-20k ) - [ alpaca cot ( multilingual ) ] ( http : //huggingface.co/datasets/qingyisi/alpaca-cot ) - [ openorca ( en ) ] ( http : //huggingface.co/datasets/open-orca/openorca ) - [ mathinstruct ( en ) ] ( http : //huggingface.co/datasets/tiger-lab/mathinstruct ) - [ firefly 1.1m ( zh ) ] ( http : //huggingface.co/datasets/yeungnlp/firefly-train-1.1m ) - [ wiki qa ( en ) ] ( http : //huggingface.co/datasets/wiki_qa ) - [ web qa ( zh ) ] ( http : //huggingface.co/datasets/suolyer/webqa ) - [ webnovel ( zh ) ] ( http : //huggingface.co/datasets/zxbsmk/webnovel_cn ) - [ nectar ( en ) ] ( http : //huggingface.co/datasets/berkeley-nest/nectar ) - [ deepctrl ( en & zh ) ] ( http : //www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data ) - [ ad gen ( zh ) ] ( http : //huggingface.co/datasets/hasturofficial/adgen ) - [ sharegpt hyperfiltered ( en ) ] ( http : //huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k ) - [ sharegpt4 ( en & zh ) ] ( http : //huggingface.co/datasets/shibing624/sharegpt_gpt4 ) - [ ultrachat 200k ( en ) ] ( http : //huggingface.co/datasets/huggingfaceh4/ultrachat_200k ) - [ agentinstruct ( en ) ] ( http : //huggingface.co/datasets/thudm/agentinstruct ) - [ lmsys chat 1m ( en ) ] ( http : //huggingface.co/datasets/lmsys/lmsys-chat-1m ) - [ evol instruct v2 ( en ) ] ( http : //huggingface.co/datasets/wizardlm/wizardlm_evol_instruct_v2_196k ) - [ glaive function calling v2 ( en ) ] ( http : //huggingface.co/datasets/glaiveai/glaive-function-calling-v2 ) - [ open assistant ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/oasst_de ) - [ dolly 15k ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/dolly-15k_de ) - [ alpaca gpt4 ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de ) - [ openschnabeltier ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/openschnabeltier_de ) - [ evol instruct ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/evol-instruct_de ) - [ dolphin ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/dolphin_de ) - [ booksum ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/booksum_de ) - [ airoboros ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de ) - [ ultrachat ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/ultra-chat_de ) < /details > < detail > < summary > preference datasets < /summary > - [ hh-rlhf ( en ) ] ( http : //huggingface.co/datasets/anthropic/hh-rlhf ) - [ open assistant ( multilingual ) ] ( http : //huggingface.co/datasets/openassistant/oasst1 ) - [ gpt-4 generated data ( en & zh ) ] ( http : //github.com/instruction-tuning-with-gpt-4/gpt-4-llm ) - [ nectar ( en ) ] ( http : //huggingface.co/datasets/berkeley-nest/nectar ) - [ orca dpo ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de ) < /details > please refer [ data/readme.md ] ( data/readme.md ) detail .", "datasets require confirmation using , recommend logging hugging face account using command .", "`` ` bash pip install -- upgrade huggingface_hub huggingface-cli login `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/hiyouga/LLaMA-Factory", "readme_url": "https://raw.githubusercontent.com/hiyouga/LLaMA-Factory/main/README.md", "topic": [ "agent", "baichuan", "chatglm", "fine-tuning", "generative-ai", "gpt", "instruction-tuning", "language-model", "large-language-models", "llama", "llm", "lora", "mistral", "mixture-of-experts", "peft", "qlora", "quantization", "qwen", "rlhf", "transformers" ], "text": "Getting Started\n\n", "sentence": [ [ "getting", "started" ], [ "getting started" ] ], "token": [ [ "getting", "started" ], [ "getting started" ] ], "level of complexity": -1 }, { "url": "https://github.com/hiyouga/LLaMA-Factory", "readme_url": "https://raw.githubusercontent.com/hiyouga/LLaMA-Factory/main/README.md", "topic": [ "agent", "baichuan", "chatglm", "fine-tuning", "generative-ai", "gpt", "instruction-tuning", "language-model", "large-language-models", "llama", "llm", "lora", "mistral", "mixture-of-experts", "peft", "qlora", "quantization", "qwen", "rlhf", "transformers" ], "text": "Dependence Installation (optional)\n\n```bash\ngit clone https://github.com/hiyouga/LLaMA-Factory.git\nconda create -n llama_factory python=3.10\nconda activate llama_factory\ncd LLaMA-Factory\npip install -r requirements.txt\n```\n\nIf you want to enable the quantized LoRA (QLoRA) on the Windows platform, you will be required to install a pre-built version of `bitsandbytes` library, which supports CUDA 11.1 to 12.1.\n\n```bash\npip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl\n```\n\n", "sentence": [ [ "dependence", "installation", "(", "optional", ")", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/hiyouga/llama-factory.git", "conda", "create", "-n", "llama_factory", "python=3.10", "conda", "activate", "llama_factory", "cd", "llama-factory", "pip", "install", "-r", "requirements.txt", "``", "`", "want", "enable", "quantized", "lora", "(", "qlora", ")", "window", "platform", ",", "required", "install", "pre-built", "version", "`", "bitsandbytes", "`", "library", ",", "support", "cuda", "11.1", "12.1", ".", "``", "`", "bash", "pip", "install", "http", ":", "//github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl", "``", "`" ], [ "dependence installation ( optional ) `` ` bash git clone http : //github.com/hiyouga/llama-factory.git conda create -n llama_factory python=3.10 conda activate llama_factory cd llama-factory pip install -r requirements.txt `` ` want enable quantized lora ( qlora ) window platform , required install pre-built version ` bitsandbytes ` library , support cuda 11.1 12.1 .", "`` ` bash pip install http : //github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl `` `" ] ], "token": [ [ "dependence", "installation", "(", "optional", ")", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/hiyouga/llama-factory.git", "conda", "create", "-n", "llama_factory", "python=3.10", "conda", "activate", "llama_factory", "cd", "llama-factory", "pip", "install", "-r", "requirements.txt", "``", "`", "want", "enable", "quantized", "lora", "(", "qlora", ")", "window", "platform", ",", "required", "install", "pre-built", "version", "`", "bitsandbytes", "`", "library", ",", "support", "cuda", "11.1", "12.1", ".", "``", "`", "bash", "pip", "install", "http", ":", "//github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl", "``", "`" ], [ "dependence installation ( optional ) `` ` bash git clone http : //github.com/hiyouga/llama-factory.git conda create -n llama_factory python=3.10 conda activate llama_factory cd llama-factory pip install -r requirements.txt `` ` want enable quantized lora ( qlora ) window platform , required install pre-built version ` bitsandbytes ` library , support cuda 11.1 12.1 .", "`` ` bash pip install http : //github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/PaddlePaddle/PaddleNLP", "readme_url": "https://raw.githubusercontent.com/PaddlePaddle/PaddleNLP/main/README.md", "topic": [ "bert", "compression", "distributed-training", "document-intelligence", "embedding", "ernie", "information-extraction", "llama", "llm", "neural-search", "nlp", "paddlenlp", "pretrained-models", "question-answering", "search-engine", "semantic-analysis", "sentiment-analysis", "transformers", "uie" ], "text": "pip\u5b89\u88c5\n\n```shell\npip install --upgrade paddlenlp\n```\n\n\u6216\u8005\u53ef\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u5b89\u88c5\u6700\u65b0 develop \u5206\u652f\u4ee3\u7801\uff1a\n\n```shell\npip install --pre --upgrade paddlenlp -f https://www.paddlepaddle.org.cn/whl/paddlenlp.html\n```\n\n\u66f4\u591a\u5173\u4e8ePaddlePaddle\u548cPaddleNLP\u5b89\u88c5\u7684\u8be6\u7ec6\u6559\u7a0b\u8bf7\u67e5\u770b[Installation](./docs/get_started/installation.rst)\u3002\n\n", "sentence": [ [ "pip\u5b89\u88c5", "``", "`", "shell", "pip", "install", "--", "upgrade", "paddlenlp", "``", "`", "\u6216\u8005\u53ef\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u5b89\u88c5\u6700\u65b0", "develop", "\u5206\u652f\u4ee3\u7801\uff1a", "``", "`", "shell", "pip", "install", "--", "pre", "--", "upgrade", "paddlenlp", "-f", "http", ":", "//www.paddlepaddle.org.cn/whl/paddlenlp.html", "``", "`", "\u66f4\u591a\u5173\u4e8epaddlepaddle\u548cpaddlenlp\u5b89\u88c5\u7684\u8be6\u7ec6\u6559\u7a0b\u8bf7\u67e5\u770b", "[", "installation", "]", "(", "./docs/get_started/installation.rst", ")", "\u3002" ], [ "pip\u5b89\u88c5 `` ` shell pip install -- upgrade paddlenlp `` ` \u6216\u8005\u53ef\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u5b89\u88c5\u6700\u65b0 develop \u5206\u652f\u4ee3\u7801\uff1a `` ` shell pip install -- pre -- upgrade paddlenlp -f http : //www.paddlepaddle.org.cn/whl/paddlenlp.html `` ` \u66f4\u591a\u5173\u4e8epaddlepaddle\u548cpaddlenlp\u5b89\u88c5\u7684\u8be6\u7ec6\u6559\u7a0b\u8bf7\u67e5\u770b [ installation ] ( ./docs/get_started/installation.rst ) \u3002" ] ], "token": [ [ "pip\u5b89\u88c5", "``", "`", "shell", "pip", "install", "--", "upgrade", "paddlenlp", "``", "`", "\u6216\u8005\u53ef\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u5b89\u88c5\u6700\u65b0", "develop", "\u5206\u652f\u4ee3\u7801\uff1a", "``", "`", "shell", "pip", "install", "--", "pre", "--", "upgrade", "paddlenlp", "-f", "http", ":", "//www.paddlepaddle.org.cn/whl/paddlenlp.html", "``", "`", "\u66f4\u591a\u5173\u4e8epaddlepaddle\u548cpaddlenlp\u5b89\u88c5\u7684\u8be6\u7ec6\u6559\u7a0b\u8bf7\u67e5\u770b", "[", "installation", "]", "(", "./docs/get_started/installation.rst", ")", "\u3002" ], [ "pip\u5b89\u88c5 `` ` shell pip install -- upgrade paddlenlp `` ` \u6216\u8005\u53ef\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u5b89\u88c5\u6700\u65b0 develop \u5206\u652f\u4ee3\u7801\uff1a `` ` shell pip install -- pre -- upgrade paddlenlp -f http : //www.paddlepaddle.org.cn/whl/paddlenlp.html `` ` \u66f4\u591a\u5173\u4e8epaddlepaddle\u548cpaddlenlp\u5b89\u88c5\u7684\u8be6\u7ec6\u6559\u7a0b\u8bf7\u67e5\u770b [ installation ] ( ./docs/get_started/installation.rst ) \u3002" ] ], "level of complexity": 0 }, { "url": "https://github.com/eosphoros-ai/DB-GPT", "readme_url": "https://raw.githubusercontent.com/eosphoros-ai/DB-GPT/main/README.md", "topic": [ "agents", "bgi", "database", "gpt", "gpt-4", "langchain", "llm", "private", "rag", "security", "vicuna" ], "text": "Contents\n- [Introduction](#introduction)\n- [Install](#install)\n- [Features](#features)\n- [Contribution](#contribution)\n- [Contact](#contact-information)\n\n", "sentence": [ [ "content", "-", "[", "introduction", "]", "(", "#", "introduction", ")", "-", "[", "install", "]", "(", "#", "install", ")", "-", "[", "feature", "]", "(", "#", "feature", ")", "-", "[", "contribution", "]", "(", "#", "contribution", ")", "-", "[", "contact", "]", "(", "#", "contact-information", ")" ], [ "content - [ introduction ] ( # introduction ) - [ install ] ( # install ) - [ feature ] ( # feature ) - [ contribution ] ( # contribution ) - [ contact ] ( # contact-information )" ] ], "token": [ [ "content", "-", "[", "introduction", "]", "(", "#", "introduction", ")", "-", "[", "install", "]", "(", "#", "install", ")", "-", "[", "feature", "]", "(", "#", "feature", ")", "-", "[", "contribution", "]", "(", "#", "contribution", ")", "-", "[", "contact", "]", "(", "#", "contact-information", ")" ], [ "content - [ introduction ] ( # introduction ) - [ install ] ( # install ) - [ feature ] ( # feature ) - [ contribution ] ( # contribution ) - [ contact ] ( # contact-information )" ] ], "level of complexity": -1 }, { "url": "https://github.com/eosphoros-ai/DB-GPT", "readme_url": "https://raw.githubusercontent.com/eosphoros-ai/DB-GPT/main/README.md", "topic": [ "agents", "bgi", "database", "gpt", "gpt-4", "langchain", "llm", "private", "rag", "security", "vicuna" ], "text": "Install \n![Docker](https://img.shields.io/badge/docker-%230db7ed.svg?style=for-the-badge&logo=docker&logoColor=white)\n![Linux](https://img.shields.io/badge/Linux-FCC624?style=for-the-badge&logo=linux&logoColor=black)\n![macOS](https://img.shields.io/badge/mac%20os-000000?style=for-the-badge&logo=macos&logoColor=F0F0F0)\n![Windows](https://img.shields.io/badge/Windows-0078D6?style=for-the-badge&logo=windows&logoColor=white)\n\n[**Usage Tutorial**](http://docs.dbgpt.site/docs/overview)\n- [**Install**](http://docs.dbgpt.site/docs/installation)\n- [**Quickstart**](http://docs.dbgpt.site/docs/quickstart)\n- [**Application**](http://docs.dbgpt.site/docs/operation_manual)\n- [**Debugging**](http://docs.dbgpt.site/docs/operation_manual/advanced_tutorial/debugging)\n\n\n", "sentence": [ [ "install", "!", "[", "docker", "]", "(", "http", ":", "//img.shields.io/badge/docker-", "%", "230db7ed.svg", "?", "style=for-the-badge", "&", "logo=docker", "&", "logocolor=white", ")", "!", "[", "linux", "]", "(", "http", ":", "//img.shields.io/badge/linux-fcc624", "?", "style=for-the-badge", "&", "logo=linux", "&", "logocolor=black", ")", "!", "[", "macos", "]", "(", "http", ":", "//img.shields.io/badge/mac", "%", "20os-000000", "?", "style=for-the-badge", "&", "logo=macos", "&", "logocolor=f0f0f0", ")", "!", "[", "window", "]", "(", "http", ":", "//img.shields.io/badge/windows-0078d6", "?", "style=for-the-badge", "&", "logo=windows", "&", "logocolor=white", ")", "[", "*", "*", "usage", "tutorial", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/overview", ")", "-", "[", "*", "*", "install", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/installation", ")", "-", "[", "*", "*", "quickstart", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/quickstart", ")", "-", "[", "*", "*", "application", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/operation_manual", ")", "-", "[", "*", "*", "debugging", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/operation_manual/advanced_tutorial/debugging", ")" ], [ "install !", "[ docker ] ( http : //img.shields.io/badge/docker- % 230db7ed.svg ? style=for-the-badge & logo=docker & logocolor=white ) !", "[ linux ] ( http : //img.shields.io/badge/linux-fcc624 ? style=for-the-badge & logo=linux & logocolor=black ) !", "[ macos ] ( http : //img.shields.io/badge/mac % 20os-000000 ? style=for-the-badge & logo=macos & logocolor=f0f0f0 ) !", "[ window ] ( http : //img.shields.io/badge/windows-0078d6 ? style=for-the-badge & logo=windows & logocolor=white ) [ * * usage tutorial * * ] ( http : //docs.dbgpt.site/docs/overview ) - [ * * install * * ] ( http : //docs.dbgpt.site/docs/installation ) - [ * * quickstart * * ] ( http : //docs.dbgpt.site/docs/quickstart ) - [ * * application * * ] ( http : //docs.dbgpt.site/docs/operation_manual ) - [ * * debugging * * ] ( http : //docs.dbgpt.site/docs/operation_manual/advanced_tutorial/debugging )" ] ], "token": [ [ "install", "!", "[", "docker", "]", "(", "http", ":", "//img.shields.io/badge/docker-", "%", "230db7ed.svg", "?", "style=for-the-badge", "&", "logo=docker", "&", "logocolor=white", ")", "!", "[", "linux", "]", "(", "http", ":", "//img.shields.io/badge/linux-fcc624", "?", "style=for-the-badge", "&", "logo=linux", "&", "logocolor=black", ")", "!", "[", "macos", "]", "(", "http", ":", "//img.shields.io/badge/mac", "%", "20os-000000", "?", "style=for-the-badge", "&", "logo=macos", "&", "logocolor=f0f0f0", ")", "!", "[", "window", "]", "(", "http", ":", "//img.shields.io/badge/windows-0078d6", "?", "style=for-the-badge", "&", "logo=windows", "&", "logocolor=white", ")", "[", "*", "*", "usage", "tutorial", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/overview", ")", "-", "[", "*", "*", "install", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/installation", ")", "-", "[", "*", "*", "quickstart", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/quickstart", ")", "-", "[", "*", "*", "application", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/operation_manual", ")", "-", "[", "*", "*", "debugging", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/operation_manual/advanced_tutorial/debugging", ")" ], [ "install !", "[ docker ] ( http : //img.shields.io/badge/docker- % 230db7ed.svg ? style=for-the-badge & logo=docker & logocolor=white ) !", "[ linux ] ( http : //img.shields.io/badge/linux-fcc624 ? style=for-the-badge & logo=linux & logocolor=black ) !", "[ macos ] ( http : //img.shields.io/badge/mac % 20os-000000 ? style=for-the-badge & logo=macos & logocolor=f0f0f0 ) !", "[ window ] ( http : //img.shields.io/badge/windows-0078d6 ? style=for-the-badge & logo=windows & logocolor=white ) [ * * usage tutorial * * ] ( http : //docs.dbgpt.site/docs/overview ) - [ * * install * * ] ( http : //docs.dbgpt.site/docs/installation ) - [ * * quickstart * * ] ( http : //docs.dbgpt.site/docs/quickstart ) - [ * * application * * ] ( http : //docs.dbgpt.site/docs/operation_manual ) - [ * * debugging * * ] ( http : //docs.dbgpt.site/docs/operation_manual/advanced_tutorial/debugging )" ] ], "level of complexity": -1 }, { "url": "https://github.com/eosphoros-ai/DB-GPT", "readme_url": "https://raw.githubusercontent.com/eosphoros-ai/DB-GPT/main/README.md", "topic": [ "agents", "bgi", "database", "gpt", "gpt-4", "langchain", "llm", "private", "rag", "security", "vicuna" ], "text": "Contribution\n\n- Please run `black .` before submitting the code.\n- To check detailed guidelines for new contributions, please refer [how to contribute](https://github.com/eosphoros-ai/DB-GPT/blob/main/CONTRIBUTING.md)\n\n", "sentence": [ [ "contribution", "-", "please", "run", "`", "black", ".", "`", "submitting", "code", ".", "-", "check", "detailed", "guideline", "new", "contribution", ",", "please", "refer", "[", "contribute", "]", "(", "http", ":", "//github.com/eosphoros-ai/db-gpt/blob/main/contributing.md", ")" ], [ "contribution - please run ` black . ` submitting code .", "- check detailed guideline new contribution , please refer [ contribute ] ( http : //github.com/eosphoros-ai/db-gpt/blob/main/contributing.md )" ] ], "token": [ [ "contribution", "-", "please", "run", "`", "black", ".", "`", "submitting", "code", ".", "-", "check", "detailed", "guideline", "new", "contribution", ",", "please", "refer", "[", "contribute", "]", "(", "http", ":", "//github.com/eosphoros-ai/db-gpt/blob/main/contributing.md", ")" ], [ "contribution - please run ` black . ` submitting code .", "- check detailed guideline new contribution , please refer [ contribute ] ( http : //github.com/eosphoros-ai/db-gpt/blob/main/contributing.md )" ] ], "level of complexity": -1 }, { "url": "https://github.com/gventuri/pandas-ai", "readme_url": "https://raw.githubusercontent.com/gventuri/pandas-ai/main/README.md", "topic": [ "ai", "csv", "data", "data-analysis", "data-science", "gpt-3", "gpt-4", "llm", "pandas", "sql" ], "text": "\ud83d\udd27 Quick install\n\n```bash\npip install pandasai\n```\n\n", "sentence": [ [ "\ud83d\udd27", "quick", "install", "``", "`", "bash", "pip", "install", "pandasai", "``", "`" ], [ "\ud83d\udd27 quick install `` ` bash pip install pandasai `` `" ] ], "token": [ [ "\ud83d\udd27", "quick", "install", "``", "`", "bash", "pip", "install", "pandasai", "``", "`" ], [ "\ud83d\udd27 quick install `` ` bash pip install pandasai `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/gventuri/pandas-ai", "readme_url": "https://raw.githubusercontent.com/gventuri/pandas-ai/main/README.md", "topic": [ "ai", "csv", "data", "data-analysis", "data-science", "gpt-3", "gpt-4", "llm", "pandas", "sql" ], "text": "\ud83e\udd1d Contributing\n\nContributions are welcome! Please check out the todos below, and feel free to open a pull request.\nFor more information, please see the [contributing guidelines](CONTRIBUTING.md).\n\nAfter installing the virtual environment, please remember to install `pre-commit` to be compliant with our standards:\n\n```bash\npre-commit install\n```\n\n", "sentence": [ [ "\ud83e\udd1d", "contributing", "contribution", "welcome", "!", "please", "check", "todos", ",", "feel", "free", "open", "pull", "request", ".", "information", ",", "please", "see", "[", "contributing", "guideline", "]", "(", "contributing.md", ")", ".", "installing", "virtual", "environment", ",", "please", "remember", "install", "`", "pre-commit", "`", "compliant", "standard", ":", "``", "`", "bash", "pre-commit", "install", "``", "`" ], [ "\ud83e\udd1d contributing contribution welcome !", "please check todos , feel free open pull request .", "information , please see [ contributing guideline ] ( contributing.md ) .", "installing virtual environment , please remember install ` pre-commit ` compliant standard : `` ` bash pre-commit install `` `" ] ], "token": [ [ "\ud83e\udd1d", "contributing", "contribution", "welcome", "!", "please", "check", "todos", ",", "feel", "free", "open", "pull", "request", ".", "information", ",", "please", "see", "[", "contributing", "guideline", "]", "(", "contributing.md", ")", ".", "installing", "virtual", "environment", ",", "please", "remember", "install", "`", "pre-commit", "`", "compliant", "standard", ":", "``", "`", "bash", "pre-commit", "install", "``", "`" ], [ "\ud83e\udd1d contributing contribution welcome !", "please check todos , feel free open pull request .", "information , please see [ contributing guideline ] ( contributing.md ) .", "installing virtual environment , please remember install ` pre-commit ` compliant standard : `` ` bash pre-commit install `` `" ] ], "level of complexity": -1 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "h2oGPT\n\nTurn \u2605 into \u2b50 (top-right corner) if you like the project!\n\nQuery and summarize your documents or just chat with local private GPT LLMs using h2oGPT, an Apache V2 open-source project.\n\n- **Private** offline database of any documents [(PDFs, Excel, Word, Images, Video Frames, Youtube, Audio, Code, Text, MarkDown, etc.)](docs/README_LangChain.md#supported-datatypes)\n - **Persistent** database (Chroma, Weaviate, or in-memory FAISS) using accurate embeddings (instructor-large, all-MiniLM-L6-v2, etc.)\n - **Efficient** use of context using instruct-tuned LLMs (no need for LangChain's few-shot approach)\n - **Parallel** summarization and extraction, reaching an output of 80 tokens per second with the 13B LLaMa2 model\n - **HYDE** (Hypothetical Document Embeddings) for enhanced retrieval based upon LLM responses\n- **Variety** of models supported (LLaMa2, Mistral, Falcon, Vicuna, WizardLM. With AutoGPTQ, 4-bit/8-bit, LORA, etc.)\n - **GPU** support from HF and LLaMa.cpp GGML models, and **CPU** support using HF, LLaMa.cpp, and GPT4ALL models\n - **Attention Sinks** for [arbitrarily long](https://github.com/tomaarsen/attention_sinks) generation (LLaMa-2, Mistral, MPT, Pythia, Falcon, etc.)\n- **UI** or CLI with streaming of all models\n - **Upload** and **View** documents through the UI (control multiple collaborative or personal collections)\n - **Vision LLaVa** Model and **Stable Diffusion** Image Generation\n - **Voice STT** using Whisper with streaming audio conversion\n - **Voice TTS** using MIT-Licensed Microsoft Speech T5 with multiple voices and Streaming audio conversion\n - **Voice TTS** using MPL2-Licensed TTS including Voice Cloning and Streaming audio conversion\n - **AI Assistant Voice Control Mode** for hands-free control of h2oGPT chat\n - **Bake-off** UI mode against many models at the same time\n - **Easy Download** of model artifacts and control over models like LLaMa.cpp through the UI\n - **Authentication** in the UI by user/password\n - **State Preservation** in the UI by user/password\n- **Linux, Docker, macOS, and Windows** support\n - [**Easy Windows Installer**](#windows-1011-64-bit-with-full-document-qa-capability) for Windows 10 64-bit (CPU/CUDA)\n - [**Easy macOS Installer**](#macos-cpum1m2-with-full-document-qa-capability) for macOS (CPU/M1/M2)\n- **Inference Servers** support (HF TGI server, vLLM, Gradio, ExLLaMa, Replicate, OpenAI, Azure OpenAI, Anthropic)\n- **OpenAI-compliant**\n - Server Proxy API (h2oGPT acts as drop-in-replacement to OpenAI server)\n - Python client API (to talk to Gradio server)\n- **Web-Search** integration with Chat and Document Q/A\n- **Agents** for Search, Document Q/A, Python Code, CSV frames (Experimental, best with OpenAI currently)\n- **Evaluate** performance using reward models\n- **Quality** maintained with over 1000 unit and integration tests taking over 4 GPU-hours\n\n", "sentence": [ [ "h2ogpt", "turn", "\u2605", "\u2b50", "(", "top-right", "corner", ")", "like", "project", "!", "query", "summarize", "document", "chat", "local", "private", "gpt", "llm", "using", "h2ogpt", ",", "apache", "v2", "open-source", "project", ".", "-", "*", "*", "private", "*", "*", "offline", "database", "document", "[", "(", "pdfs", ",", "excel", ",", "word", ",", "image", ",", "video", "frame", ",", "youtube", ",", "audio", ",", "code", ",", "text", ",", "markdown", ",", "etc", ".", ")", "]", "(", "docs/readme_langchain.md", "#", "supported-datatypes", ")", "-", "*", "*", "persistent", "*", "*", "database", "(", "chroma", ",", "weaviate", ",", "in-memory", "faiss", ")", "using", "accurate", "embeddings", "(", "instructor-large", ",", "all-minilm-l6-v2", ",", "etc", ".", ")", "-", "*", "*", "efficient", "*", "*", "use", "context", "using", "instruct-tuned", "llm", "(", "need", "langchain", "'s", "few-shot", "approach", ")", "-", "*", "*", "parallel", "*", "*", "summarization", "extraction", ",", "reaching", "output", "80", "token", "per", "second", "13b", "llama2", "model", "-", "*", "*", "hyde", "*", "*", "(", "hypothetical", "document", "embeddings", ")", "enhanced", "retrieval", "based", "upon", "llm", "response", "-", "*", "*", "variety", "*", "*", "model", "supported", "(", "llama2", ",", "mistral", ",", "falcon", ",", "vicuna", ",", "wizardlm", ".", "autogptq", ",", "4-bit/8-bit", ",", "lora", ",", "etc", ".", ")", "-", "*", "*", "gpu", "*", "*", "support", "hf", "llama.cpp", "ggml", "model", ",", "*", "*", "cpu", "*", "*", "support", "using", "hf", ",", "llama.cpp", ",", "gpt4all", "model", "-", "*", "*", "attention", "sink", "*", "*", "[", "arbitrarily", "long", "]", "(", "http", ":", "//github.com/tomaarsen/attention_sinks", ")", "generation", "(", "llama-2", ",", "mistral", ",", "mpt", ",", "pythia", ",", "falcon", ",", "etc", ".", ")", "-", "*", "*", "ui", "*", "*", "cli", "streaming", "model", "-", "*", "*", "upload", "*", "*", "*", "*", "view", "*", "*", "document", "ui", "(", "control", "multiple", "collaborative", "personal", "collection", ")", "-", "*", "*", "vision", "llava", "*", "*", "model", "*", "*", "stable", "diffusion", "*", "*", "image", "generation", "-", "*", "*", "voice", "stt", "*", "*", "using", "whisper", "streaming", "audio", "conversion", "-", "*", "*", "voice", "tt", "*", "*", "using", "mit-licensed", "microsoft", "speech", "t5", "multiple", "voice", "streaming", "audio", "conversion", "-", "*", "*", "voice", "tt", "*", "*", "using", "mpl2-licensed", "tt", "including", "voice", "cloning", "streaming", "audio", "conversion", "-", "*", "*", "ai", "assistant", "voice", "control", "mode", "*", "*", "hands-free", "control", "h2ogpt", "chat", "-", "*", "*", "bake-off", "*", "*", "ui", "mode", "many", "model", "time", "-", "*", "*", "easy", "download", "*", "*", "model", "artifact", "control", "model", "like", "llama.cpp", "ui", "-", "*", "*", "authentication", "*", "*", "ui", "user/password", "-", "*", "*", "state", "preservation", "*", "*", "ui", "user/password", "-", "*", "*", "linux", ",", "docker", ",", "macos", ",", "window", "*", "*", "support", "-", "[", "*", "*", "easy", "window", "installer", "*", "*", "]", "(", "#", "windows-1011-64-bit-with-full-document-qa-capability", ")", "window", "10", "64-bit", "(", "cpu/cuda", ")", "-", "[", "*", "*", "easy", "macos", "installer", "*", "*", "]", "(", "#", "macos-cpum1m2-with-full-document-qa-capability", ")", "macos", "(", "cpu/m1/m2", ")", "-", "*", "*", "inference", "server", "*", "*", "support", "(", "hf", "tgi", "server", ",", "vllm", ",", "gradio", ",", "exllama", ",", "replicate", ",", "openai", ",", "azure", "openai", ",", "anthropic", ")", "-", "*", "*", "openai-compliant", "*", "*", "-", "server", "proxy", "api", "(", "h2ogpt", "act", "drop-in-replacement", "openai", "server", ")", "-", "python", "client", "api", "(", "talk", "gradio", "server", ")", "-", "*", "*", "web-search", "*", "*", "integration", "chat", "document", "q/a", "-", "*", "*", "agent", "*", "*", "search", ",", "document", "q/a", ",", "python", "code", ",", "csv", "frame", "(", "experimental", ",", "best", "openai", "currently", ")", "-", "*", "*", "evaluate", "*", "*", "performance", "using", "reward", "model", "-", "*", "*", "quality", "*", "*", "maintained", "1000", "unit", "integration", "test", "taking", "4", "gpu-hours" ], [ "h2ogpt turn \u2605 \u2b50 ( top-right corner ) like project !", "query summarize document chat local private gpt llm using h2ogpt , apache v2 open-source project .", "- * * private * * offline database document [ ( pdfs , excel , word , image , video frame , youtube , audio , code , text , markdown , etc .", ") ] ( docs/readme_langchain.md # supported-datatypes ) - * * persistent * * database ( chroma , weaviate , in-memory faiss ) using accurate embeddings ( instructor-large , all-minilm-l6-v2 , etc . )", "- * * efficient * * use context using instruct-tuned llm ( need langchain 's few-shot approach ) - * * parallel * * summarization extraction , reaching output 80 token per second 13b llama2 model - * * hyde * * ( hypothetical document embeddings ) enhanced retrieval based upon llm response - * * variety * * model supported ( llama2 , mistral , falcon , vicuna , wizardlm .", "autogptq , 4-bit/8-bit , lora , etc . )", "- * * gpu * * support hf llama.cpp ggml model , * * cpu * * support using hf , llama.cpp , gpt4all model - * * attention sink * * [ arbitrarily long ] ( http : //github.com/tomaarsen/attention_sinks ) generation ( llama-2 , mistral , mpt , pythia , falcon , etc . )", "- * * ui * * cli streaming model - * * upload * * * * view * * document ui ( control multiple collaborative personal collection ) - * * vision llava * * model * * stable diffusion * * image generation - * * voice stt * * using whisper streaming audio conversion - * * voice tt * * using mit-licensed microsoft speech t5 multiple voice streaming audio conversion - * * voice tt * * using mpl2-licensed tt including voice cloning streaming audio conversion - * * ai assistant voice control mode * * hands-free control h2ogpt chat - * * bake-off * * ui mode many model time - * * easy download * * model artifact control model like llama.cpp ui - * * authentication * * ui user/password - * * state preservation * * ui user/password - * * linux , docker , macos , window * * support - [ * * easy window installer * * ] ( # windows-1011-64-bit-with-full-document-qa-capability ) window 10 64-bit ( cpu/cuda ) - [ * * easy macos installer * * ] ( # macos-cpum1m2-with-full-document-qa-capability ) macos ( cpu/m1/m2 ) - * * inference server * * support ( hf tgi server , vllm , gradio , exllama , replicate , openai , azure openai , anthropic ) - * * openai-compliant * * - server proxy api ( h2ogpt act drop-in-replacement openai server ) - python client api ( talk gradio server ) - * * web-search * * integration chat document q/a - * * agent * * search , document q/a , python code , csv frame ( experimental , best openai currently ) - * * evaluate * * performance using reward model - * * quality * * maintained 1000 unit integration test taking 4 gpu-hours" ] ], "token": [ [ "h2ogpt", "turn", "\u2605", "\u2b50", "(", "top-right", "corner", ")", "like", "project", "!", "query", "summarize", "document", "chat", "local", "private", "gpt", "llm", "using", "h2ogpt", ",", "apache", "v2", "open-source", "project", ".", "-", "*", "*", "private", "*", "*", "offline", "database", "document", "[", "(", "pdfs", ",", "excel", ",", "word", ",", "image", ",", "video", "frame", ",", "youtube", ",", "audio", ",", "code", ",", "text", ",", "markdown", ",", "etc", ".", ")", "]", "(", "docs/readme_langchain.md", "#", "supported-datatypes", ")", "-", "*", "*", "persistent", "*", "*", "database", "(", "chroma", ",", "weaviate", ",", "in-memory", "faiss", ")", "using", "accurate", "embeddings", "(", "instructor-large", ",", "all-minilm-l6-v2", ",", "etc", ".", ")", "-", "*", "*", "efficient", "*", "*", "use", "context", "using", "instruct-tuned", "llm", "(", "need", "langchain", "'s", "few-shot", "approach", ")", "-", "*", "*", "parallel", "*", "*", "summarization", "extraction", ",", "reaching", "output", "80", "token", "per", "second", "13b", "llama2", "model", "-", "*", "*", "hyde", "*", "*", "(", "hypothetical", "document", "embeddings", ")", "enhanced", "retrieval", "based", "upon", "llm", "response", "-", "*", "*", "variety", "*", "*", "model", "supported", "(", "llama2", ",", "mistral", ",", "falcon", ",", "vicuna", ",", "wizardlm", ".", "autogptq", ",", "4-bit/8-bit", ",", "lora", ",", "etc", ".", ")", "-", "*", "*", "gpu", "*", "*", "support", "hf", "llama.cpp", "ggml", "model", ",", "*", "*", "cpu", "*", "*", "support", "using", "hf", ",", "llama.cpp", ",", "gpt4all", "model", "-", "*", "*", "attention", "sink", "*", "*", "[", "arbitrarily", "long", "]", "(", "http", ":", "//github.com/tomaarsen/attention_sinks", ")", "generation", "(", "llama-2", ",", "mistral", ",", "mpt", ",", "pythia", ",", "falcon", ",", "etc", ".", ")", "-", "*", "*", "ui", "*", "*", "cli", "streaming", "model", "-", "*", "*", "upload", "*", "*", "*", "*", "view", "*", "*", "document", "ui", "(", "control", "multiple", "collaborative", "personal", "collection", ")", "-", "*", "*", "vision", "llava", "*", "*", "model", "*", "*", "stable", "diffusion", "*", "*", "image", "generation", "-", "*", "*", "voice", "stt", "*", "*", "using", "whisper", "streaming", "audio", "conversion", "-", "*", "*", "voice", "tt", "*", "*", "using", "mit-licensed", "microsoft", "speech", "t5", "multiple", "voice", "streaming", "audio", "conversion", "-", "*", "*", "voice", "tt", "*", "*", "using", "mpl2-licensed", "tt", "including", "voice", "cloning", "streaming", "audio", "conversion", "-", "*", "*", "ai", "assistant", "voice", "control", "mode", "*", "*", "hands-free", "control", "h2ogpt", "chat", "-", "*", "*", "bake-off", "*", "*", "ui", "mode", "many", "model", "time", "-", "*", "*", "easy", "download", "*", "*", "model", "artifact", "control", "model", "like", "llama.cpp", "ui", "-", "*", "*", "authentication", "*", "*", "ui", "user/password", "-", "*", "*", "state", "preservation", "*", "*", "ui", "user/password", "-", "*", "*", "linux", ",", "docker", ",", "macos", ",", "window", "*", "*", "support", "-", "[", "*", "*", "easy", "window", "installer", "*", "*", "]", "(", "#", "windows-1011-64-bit-with-full-document-qa-capability", ")", "window", "10", "64-bit", "(", "cpu/cuda", ")", "-", "[", "*", "*", "easy", "macos", "installer", "*", "*", "]", "(", "#", "macos-cpum1m2-with-full-document-qa-capability", ")", "macos", "(", "cpu/m1/m2", ")", "-", "*", "*", "inference", "server", "*", "*", "support", "(", "hf", "tgi", "server", ",", "vllm", ",", "gradio", ",", "exllama", ",", "replicate", ",", "openai", ",", "azure", "openai", ",", "anthropic", ")", "-", "*", "*", "openai-compliant", "*", "*", "-", "server", "proxy", "api", "(", "h2ogpt", "act", "drop-in-replacement", "openai", "server", ")", "-", "python", "client", "api", "(", "talk", "gradio", "server", ")", "-", "*", "*", "web-search", "*", "*", "integration", "chat", "document", "q/a", "-", "*", "*", "agent", "*", "*", "search", ",", "document", "q/a", ",", "python", "code", ",", "csv", "frame", "(", "experimental", ",", "best", "openai", "currently", ")", "-", "*", "*", "evaluate", "*", "*", "performance", "using", "reward", "model", "-", "*", "*", "quality", "*", "*", "maintained", "1000", "unit", "integration", "test", "taking", "4", "gpu-hours" ], [ "h2ogpt turn \u2605 \u2b50 ( top-right corner ) like project !", "query summarize document chat local private gpt llm using h2ogpt , apache v2 open-source project .", "- * * private * * offline database document [ ( pdfs , excel , word , image , video frame , youtube , audio , code , text , markdown , etc .", ") ] ( docs/readme_langchain.md # supported-datatypes ) - * * persistent * * database ( chroma , weaviate , in-memory faiss ) using accurate embeddings ( instructor-large , all-minilm-l6-v2 , etc . )", "- * * efficient * * use context using instruct-tuned llm ( need langchain 's few-shot approach ) - * * parallel * * summarization extraction , reaching output 80 token per second 13b llama2 model - * * hyde * * ( hypothetical document embeddings ) enhanced retrieval based upon llm response - * * variety * * model supported ( llama2 , mistral , falcon , vicuna , wizardlm .", "autogptq , 4-bit/8-bit , lora , etc . )", "- * * gpu * * support hf llama.cpp ggml model , * * cpu * * support using hf , llama.cpp , gpt4all model - * * attention sink * * [ arbitrarily long ] ( http : //github.com/tomaarsen/attention_sinks ) generation ( llama-2 , mistral , mpt , pythia , falcon , etc . )", "- * * ui * * cli streaming model - * * upload * * * * view * * document ui ( control multiple collaborative personal collection ) - * * vision llava * * model * * stable diffusion * * image generation - * * voice stt * * using whisper streaming audio conversion - * * voice tt * * using mit-licensed microsoft speech t5 multiple voice streaming audio conversion - * * voice tt * * using mpl2-licensed tt including voice cloning streaming audio conversion - * * ai assistant voice control mode * * hands-free control h2ogpt chat - * * bake-off * * ui mode many model time - * * easy download * * model artifact control model like llama.cpp ui - * * authentication * * ui user/password - * * state preservation * * ui user/password - * * linux , docker , macos , window * * support - [ * * easy window installer * * ] ( # windows-1011-64-bit-with-full-document-qa-capability ) window 10 64-bit ( cpu/cuda ) - [ * * easy macos installer * * ] ( # macos-cpum1m2-with-full-document-qa-capability ) macos ( cpu/m1/m2 ) - * * inference server * * support ( hf tgi server , vllm , gradio , exllama , replicate , openai , azure openai , anthropic ) - * * openai-compliant * * - server proxy api ( h2ogpt act drop-in-replacement openai server ) - python client api ( talk gradio server ) - * * web-search * * integration chat document q/a - * * agent * * search , document q/a , python code , csv frame ( experimental , best openai currently ) - * * evaluate * * performance using reward model - * * quality * * maintained 1000 unit integration test taking 4 gpu-hours" ] ], "level of complexity": -1 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "for cu118 use export PIP_EXTRA_INDEX_URL=\"https://download.pytorch.org/whl/cu118 https://huggingface.github.io/autogptq-index/whl/cu118\"\n ```\nThen run the following commands on any system:\n ```bash\n git clone https://github.com/h2oai/h2ogpt.git\n cd h2ogpt\n pip install -r requirements.txt\n pip install -r reqs_optional/requirements_optional_langchain.txt\n\n ", "sentence": [ [ "cu118", "use", "export", "pip_extra_index_url=", "''", "http", ":", "//download.pytorch.org/whl/cu118", "http", ":", "//huggingface.github.io/autogptq-index/whl/cu118", "''", "``", "`", "run", "following", "command", "system", ":", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/h2oai/h2ogpt.git", "cd", "h2ogpt", "pip", "install", "-r", "requirements.txt", "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.txt" ], [ "cu118 use export pip_extra_index_url= '' http : //download.pytorch.org/whl/cu118 http : //huggingface.github.io/autogptq-index/whl/cu118 '' `` ` run following command system : `` ` bash git clone http : //github.com/h2oai/h2ogpt.git cd h2ogpt pip install -r requirements.txt pip install -r reqs_optional/requirements_optional_langchain.txt" ] ], "token": [ [ "cu118", "use", "export", "pip_extra_index_url=", "''", "http", ":", "//download.pytorch.org/whl/cu118", "http", ":", "//huggingface.github.io/autogptq-index/whl/cu118", "''", "``", "`", "run", "following", "command", "system", ":", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/h2oai/h2ogpt.git", "cd", "h2ogpt", "pip", "install", "-r", "requirements.txt", "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.txt" ], [ "cu118 use export pip_extra_index_url= '' http : //download.pytorch.org/whl/cu118 http : //huggingface.github.io/autogptq-index/whl/cu118 '' `` ` run following command system : `` ` bash git clone http : //github.com/h2oai/h2ogpt.git cd h2ogpt pip install -r requirements.txt pip install -r reqs_optional/requirements_optional_langchain.txt" ] ], "level of complexity": 0 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "for AMD ROC, comment-out all except the correct ROC wheel\n pip install -r reqs_optional/requirements_optional_gpt4all.txt\n\n pip install -r reqs_optional/requirements_optional_langchain.urls.txt\n ", "sentence": [ [ "amd", "roc", ",", "comment-out", "except", "correct", "roc", "wheel", "pip", "install", "-r", "reqs_optional/requirements_optional_gpt4all.txt", "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.urls.txt" ], [ "amd roc , comment-out except correct roc wheel pip install -r reqs_optional/requirements_optional_gpt4all.txt pip install -r reqs_optional/requirements_optional_langchain.urls.txt" ] ], "token": [ [ "amd", "roc", ",", "comment-out", "except", "correct", "roc", "wheel", "pip", "install", "-r", "reqs_optional/requirements_optional_gpt4all.txt", "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.urls.txt" ], [ "amd roc , comment-out except correct roc wheel pip install -r reqs_optional/requirements_optional_gpt4all.txt pip install -r reqs_optional/requirements_optional_langchain.urls.txt" ] ], "level of complexity": 0 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "pip install -r reqs_optional/requirements_optional_langchain.gpllike.txt\n\n python generate.py --base_model=TheBloke/zephyr-7B-beta-GGUF --prompt_type=zephyr --max_seq_len=4096\n ```\nNext, go to your browser by visiting [http://127.0.0.1:7860](http://127.0.0.1:7860) or [http://localhost:7860](http://localhost:7860). Choose 13B for a better model than 7B.\nIf you encounter issues with `llama-cpp-python` or other packages that try to compile and fail, try binary wheels for your platform as linked in the detailed instructions below. For AVX1 or AMD ROC systems, edit `reqs_optional/requirements_optional_gpt4all.txt` to choose valid packages.\n\nWe recommend quantized models for most small-GPU systems, e.g. [LLaMa-2-7B-Chat-GGUF](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q6_K.gguf) for 9GB+ GPU memory or larger models like [LLaMa-2-13B-Chat-GGUF](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-13b-chat.Q6_K.gguf) if you have 16GB+ GPU memory.\n\nSee [Offline](docs/README_offline.md#tldr) for how to run h2oGPT offline.\n\n---\n\nNote that for all platforms, some packages such as DocTR, Unstructured, BLIP, Stable Diffusion, etc. download models at runtime that appear to delay operations in the UI. The progress appears in the console logs.\n\n", "sentence": [ [ "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.gpllike.txt", "python", "generate.py", "--", "base_model=thebloke/zephyr-7b-beta-gguf", "--", "prompt_type=zephyr", "--", "max_seq_len=4096", "``", "`", "next", ",", "go", "browser", "visiting", "[", "http", ":", "//127.0.0.1:7860", "]", "(", "http", ":", "//127.0.0.1:7860", ")", "[", "http", ":", "//localhost:7860", "]", "(", "http", ":", "//localhost:7860", ")", ".", "choose", "13b", "better", "model", "7b", ".", "encounter", "issue", "`", "llama-cpp-python", "`", "package", "try", "compile", "fail", ",", "try", "binary", "wheel", "platform", "linked", "detailed", "instruction", ".", "avx1", "amd", "roc", "system", ",", "edit", "`", "reqs_optional/requirements_optional_gpt4all.txt", "`", "choose", "valid", "package", ".", "recommend", "quantized", "model", "small-gpu", "system", ",", "e.g", ".", "[", "llama-2-7b-chat-gguf", "]", "(", "http", ":", "//huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-7b-chat.q6_k.gguf", ")", "9gb+", "gpu", "memory", "larger", "model", "like", "[", "llama-2-13b-chat-gguf", "]", "(", "http", ":", "//huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-13b-chat.q6_k.gguf", ")", "16gb+", "gpu", "memory", ".", "see", "[", "offline", "]", "(", "docs/readme_offline.md", "#", "tldr", ")", "run", "h2ogpt", "offline", ".", "--", "-", "note", "platform", ",", "package", "doctr", ",", "unstructured", ",", "blip", ",", "stable", "diffusion", ",", "etc", ".", "download", "model", "runtime", "appear", "delay", "operation", "ui", ".", "progress", "appears", "console", "log", "." ], [ "pip install -r reqs_optional/requirements_optional_langchain.gpllike.txt python generate.py -- base_model=thebloke/zephyr-7b-beta-gguf -- prompt_type=zephyr -- max_seq_len=4096 `` ` next , go browser visiting [ http : //127.0.0.1:7860 ] ( http : //127.0.0.1:7860 ) [ http : //localhost:7860 ] ( http : //localhost:7860 ) .", "choose 13b better model 7b .", "encounter issue ` llama-cpp-python ` package try compile fail , try binary wheel platform linked detailed instruction .", "avx1 amd roc system , edit ` reqs_optional/requirements_optional_gpt4all.txt ` choose valid package .", "recommend quantized model small-gpu system , e.g .", "[ llama-2-7b-chat-gguf ] ( http : //huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-7b-chat.q6_k.gguf ) 9gb+ gpu memory larger model like [ llama-2-13b-chat-gguf ] ( http : //huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-13b-chat.q6_k.gguf ) 16gb+ gpu memory .", "see [ offline ] ( docs/readme_offline.md # tldr ) run h2ogpt offline .", "-- - note platform , package doctr , unstructured , blip , stable diffusion , etc .", "download model runtime appear delay operation ui .", "progress appears console log ." ] ], "token": [ [ "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.gpllike.txt", "python", "generate.py", "--", "base_model=thebloke/zephyr-7b-beta-gguf", "--", "prompt_type=zephyr", "--", "max_seq_len=4096", "``", "`", "next", ",", "go", "browser", "visiting", "[", "http", ":", "//127.0.0.1:7860", "]", "(", "http", ":", "//127.0.0.1:7860", ")", "[", "http", ":", "//localhost:7860", "]", "(", "http", ":", "//localhost:7860", ")", ".", "choose", "13b", "better", "model", "7b", ".", "encounter", "issue", "`", "llama-cpp-python", "`", "package", "try", "compile", "fail", ",", "try", "binary", "wheel", "platform", "linked", "detailed", "instruction", ".", "avx1", "amd", "roc", "system", ",", "edit", "`", "reqs_optional/requirements_optional_gpt4all.txt", "`", "choose", "valid", "package", ".", "recommend", "quantized", "model", "small-gpu", "system", ",", "e.g", ".", "[", "llama-2-7b-chat-gguf", "]", "(", "http", ":", "//huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-7b-chat.q6_k.gguf", ")", "9gb+", "gpu", "memory", "larger", "model", "like", "[", "llama-2-13b-chat-gguf", "]", "(", "http", ":", "//huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-13b-chat.q6_k.gguf", ")", "16gb+", "gpu", "memory", ".", "see", "[", "offline", "]", "(", "docs/readme_offline.md", "#", "tldr", ")", "run", "h2ogpt", "offline", ".", "--", "-", "note", "platform", ",", "package", "doctr", ",", "unstructured", ",", "blip", ",", "stable", "diffusion", ",", "etc", ".", "download", "model", "runtime", "appear", "delay", "operation", "ui", ".", "progress", "appears", "console", "log", "." ], [ "pip install -r reqs_optional/requirements_optional_langchain.gpllike.txt python generate.py -- base_model=thebloke/zephyr-7b-beta-gguf -- prompt_type=zephyr -- max_seq_len=4096 `` ` next , go browser visiting [ http : //127.0.0.1:7860 ] ( http : //127.0.0.1:7860 ) [ http : //localhost:7860 ] ( http : //localhost:7860 ) .", "choose 13b better model 7b .", "encounter issue ` llama-cpp-python ` package try compile fail , try binary wheel platform linked detailed instruction .", "avx1 amd roc system , edit ` reqs_optional/requirements_optional_gpt4all.txt ` choose valid package .", "recommend quantized model small-gpu system , e.g .", "[ llama-2-7b-chat-gguf ] ( http : //huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-7b-chat.q6_k.gguf ) 9gb+ gpu memory larger model like [ llama-2-13b-chat-gguf ] ( http : //huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-13b-chat.q6_k.gguf ) 16gb+ gpu memory .", "see [ offline ] ( docs/readme_offline.md # tldr ) run h2ogpt offline .", "-- - note platform , package doctr , unstructured , blip , stable diffusion , etc .", "download model runtime appear delay operation ui .", "progress appears console log ." ] ], "level of complexity": 0 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "Windows 10/11 64-bit with full document Q/A capability\n * One-Click Installer\n * CPU or GPU: Download [h2oGPT Windows Installer](https://h2o-release.s3.amazonaws.com/h2ogpt/Jan2024/h2oGPT_0.0.1.exe) (1.3GB file)\n * Once installed, feel free to change start directory for icon from `%HOMEDRIVE%\\%HOMEPATH%` to (e.g.) `%HOMEDRIVE%\\%HOMEPATH%\\h2ogpt_data` so all created files (like database) go there. All paths saved are relative to this path.\n * CPU: Click the h2oGPT icon in the Start menu. Give it about 15 seconds to open in a browser if many optional packages are included. By default, the browser will launch with the actual local IP address, not localhost.\n * GPU: Before starting, run the following commands (replace `pseud` with your user):\n ```\n C:\\Users\\pseud\\AppData\\Local\\Programs\\h2oGPT\\Python\\python.exe -m pip uninstall -y torch\n C:\\Users\\pseud\\AppData\\Local\\Programs\\h2oGPT\\Python\\python.exe -m pip install https://h2o-release.s3.amazonaws.com/h2ogpt/torch-2.1.2%2Bcu118-cp310-cp310-win_amd64.whl\n ```\n Now click the h2oGPT icon in the Start menu. Give it about 20 seconds to open in a browser if many optional packages are included. By default, the browser will launch with the actual local IP address, not localhost.\n * Some other users may have python located here: `C:\\Program Files (x86)\\h2oGPT\\Python\\python.exe`.\n * To debug any issues, run the following (replace `pseud` with your user):\n ```\n C:\\Users\\pseud\\AppData\\Local\\Programs\\h2oGPT\\Python\\python.exe \"C:\\Users\\pseud\\AppData\\Local\\Programs\\h2oGPT\\h2oGPT.launch.pyw\"\n ```\n Any start-up exceptions are appended to log, e.g. `C:\\Users\\pseud\\h2ogpt_exception.log`.\n * To control startup, tweak the python startup file, e.g. for user `pseud`: `C:\\Users\\pseud\\AppData\\Local\\Programs\\h2oGPT\\pkgs\\win_run_app.py`\n * In this Python code, set ENVs anywhere before main_h2ogpt() is called\n * E.g. `os.environ['name'] = 'value'`, e.g. `os.environ['n_jobs'] = '10'` (must be always a string).\n * Environment variables can be changed, e.g.:\n * `n_jobs`: number of cores for various tasks\n * `OMP_NUM_THREADS` thread count for LLaMa\n * `CUDA_VISIBLE_DEVICES` which GPUs are used. Recommend set to single fast GPU, e.g. `CUDA_VISIBLE_DEVICES=0` if have multiple GPUs. Note that UI cannot control which GPUs (or CPU mode) for LLaMa models.\n * Any CLI argument from `python generate.py --help` with environment variable set as `h2ogpt_x`, e.g. `h2ogpt_h2ocolors` to `False`.\n * Set env `h2ogpt_server_name` to actual IP address for LAN to see app, e.g. `h2ogpt_server_name` to `192.168.1.172` and allow access through firewall if have Windows Defender activated.\n * One can tweak installed h2oGPT code at, e.g. `C:\\Users\\pseud\\AppData\\Local\\Programs\\h2oGPT`.\n * To terminate the app, go to System Tab and click Admin and click Shutdown h2oGPT.\n * If startup fails, run as console and check for errors, e.g. and kill any old Python processes.\n\n * [Full Windows 10/11 Manual Installation Script](docs/README_WINDOWS.md)\n * Single `.bat` file for installation (if you do not skip any optional packages, takes about 9GB filled on disk).\n * Recommend base Conda env, which allows for DocTR that requires pygobject that has otherwise no support (except `mysys2` that cannot be used by h2oGPT).\n * Also allows for the TTS package by Coqui, which is otherwise not currently enabled in the one-click installer.\n\n---\n\n", "sentence": [ [ "window", "10/11", "64-bit", "full", "document", "q/a", "capability", "*", "one-click", "installer", "*", "cpu", "gpu", ":", "download", "[", "h2ogpt", "window", "installer", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/jan2024/h2ogpt_0.0.1.exe", ")", "(", "1.3gb", "file", ")", "*", "installed", ",", "feel", "free", "change", "start", "directory", "icon", "`", "%", "homedrive", "%", "\\", "%", "homepath", "%", "`", "(", "e.g", ".", ")", "`", "%", "homedrive", "%", "\\", "%", "homepath", "%", "\\h2ogpt_data", "`", "created", "file", "(", "like", "database", ")", "go", ".", "path", "saved", "relative", "path", ".", "*", "cpu", ":", "click", "h2ogpt", "icon", "start", "menu", ".", "give", "15", "second", "open", "browser", "many", "optional", "package", "included", ".", "default", ",", "browser", "launch", "actual", "local", "ip", "address", ",", "localhost", ".", "*", "gpu", ":", "starting", ",", "run", "following", "command", "(", "replace", "`", "pseud", "`", "user", ")", ":", "``", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "-m", "pip", "uninstall", "-y", "torch", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "-m", "pip", "install", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/torch-2.1.2", "%", "2bcu118-cp310-cp310-win_amd64.whl", "``", "`", "click", "h2ogpt", "icon", "start", "menu", ".", "give", "20", "second", "open", "browser", "many", "optional", "package", "included", ".", "default", ",", "browser", "launch", "actual", "local", "ip", "address", ",", "localhost", ".", "*", "user", "may", "python", "located", ":", "`", "c", ":", "\\program", "file", "(", "x86", ")", "\\h2ogpt\\python\\python.exe", "`", ".", "*", "debug", "issue", ",", "run", "following", "(", "replace", "`", "pseud", "`", "user", ")", ":", "``", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "``", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\h2ogpt.launch.pyw", "''", "``", "`", "start-up", "exception", "appended", "log", ",", "e.g", ".", "`", "c", ":", "\\users\\pseud\\h2ogpt_exception.log", "`", ".", "*", "control", "startup", ",", "tweak", "python", "startup", "file", ",", "e.g", ".", "user", "`", "pseud", "`", ":", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\pkgs\\win_run_app.py", "`", "*", "python", "code", ",", "set", "envs", "anywhere", "main_h2ogpt", "(", ")", "called", "*", "e.g", ".", "`", "os.environ", "[", "'name", "'", "]", "=", "'value", "'", "`", ",", "e.g", ".", "`", "os.environ", "[", "'n_jobs", "'", "]", "=", "'10", "'", "`", "(", "must", "always", "string", ")", ".", "*", "environment", "variable", "changed", ",", "e.g", ".", ":", "*", "`", "n_jobs", "`", ":", "number", "core", "various", "task", "*", "`", "omp_num_threads", "`", "thread", "count", "llama", "*", "`", "cuda_visible_devices", "`", "gpus", "used", ".", "recommend", "set", "single", "fast", "gpu", ",", "e.g", ".", "`", "cuda_visible_devices=0", "`", "multiple", "gpus", ".", "note", "ui", "control", "gpus", "(", "cpu", "mode", ")", "llama", "model", ".", "*", "cli", "argument", "`", "python", "generate.py", "--", "help", "`", "environment", "variable", "set", "`", "h2ogpt_x", "`", ",", "e.g", ".", "`", "h2ogpt_h2ocolors", "`", "`", "false", "`", ".", "*", "set", "env", "`", "h2ogpt_server_name", "`", "actual", "ip", "address", "lan", "see", "app", ",", "e.g", ".", "`", "h2ogpt_server_name", "`", "`", "192.168.1.172", "`", "allow", "access", "firewall", "window", "defender", "activated", ".", "*", "one", "tweak", "installed", "h2ogpt", "code", ",", "e.g", ".", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt", "`", ".", "*", "terminate", "app", ",", "go", "system", "tab", "click", "admin", "click", "shutdown", "h2ogpt", ".", "*", "startup", "fails", ",", "run", "console", "check", "error", ",", "e.g", ".", "kill", "old", "python", "process", ".", "*", "[", "full", "window", "10/11", "manual", "installation", "script", "]", "(", "docs/readme_windows.md", ")", "*", "single", "`", ".bat", "`", "file", "installation", "(", "skip", "optional", "package", ",", "take", "9gb", "filled", "disk", ")", ".", "*", "recommend", "base", "conda", "env", ",", "allows", "doctr", "requires", "pygobject", "otherwise", "support", "(", "except", "`", "mysys2", "`", "used", "h2ogpt", ")", ".", "*", "also", "allows", "tt", "package", "coqui", ",", "otherwise", "currently", "enabled", "one-click", "installer", ".", "--", "-" ], [ "window 10/11 64-bit full document q/a capability * one-click installer * cpu gpu : download [ h2ogpt window installer ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/jan2024/h2ogpt_0.0.1.exe ) ( 1.3gb file ) * installed , feel free change start directory icon ` % homedrive % \\ % homepath % ` ( e.g . )", "` % homedrive % \\ % homepath % \\h2ogpt_data ` created file ( like database ) go .", "path saved relative path .", "* cpu : click h2ogpt icon start menu .", "give 15 second open browser many optional package included .", "default , browser launch actual local ip address , localhost .", "* gpu : starting , run following command ( replace ` pseud ` user ) : `` ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe -m pip uninstall -y torch c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe -m pip install http : //h2o-release.s3.amazonaws.com/h2ogpt/torch-2.1.2 % 2bcu118-cp310-cp310-win_amd64.whl `` ` click h2ogpt icon start menu .", "give 20 second open browser many optional package included .", "default , browser launch actual local ip address , localhost .", "* user may python located : ` c : \\program file ( x86 ) \\h2ogpt\\python\\python.exe ` .", "* debug issue , run following ( replace ` pseud ` user ) : `` ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe `` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\h2ogpt.launch.pyw '' `` ` start-up exception appended log , e.g .", "` c : \\users\\pseud\\h2ogpt_exception.log ` .", "* control startup , tweak python startup file , e.g .", "user ` pseud ` : ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\pkgs\\win_run_app.py ` * python code , set envs anywhere main_h2ogpt ( ) called * e.g .", "` os.environ [ 'name ' ] = 'value ' ` , e.g .", "` os.environ [ 'n_jobs ' ] = '10 ' ` ( must always string ) .", "* environment variable changed , e.g .", ": * ` n_jobs ` : number core various task * ` omp_num_threads ` thread count llama * ` cuda_visible_devices ` gpus used .", "recommend set single fast gpu , e.g .", "` cuda_visible_devices=0 ` multiple gpus .", "note ui control gpus ( cpu mode ) llama model .", "* cli argument ` python generate.py -- help ` environment variable set ` h2ogpt_x ` , e.g .", "` h2ogpt_h2ocolors ` ` false ` .", "* set env ` h2ogpt_server_name ` actual ip address lan see app , e.g .", "` h2ogpt_server_name ` ` 192.168.1.172 ` allow access firewall window defender activated .", "* one tweak installed h2ogpt code , e.g .", "` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt ` .", "* terminate app , go system tab click admin click shutdown h2ogpt .", "* startup fails , run console check error , e.g .", "kill old python process .", "* [ full window 10/11 manual installation script ] ( docs/readme_windows.md ) * single ` .bat ` file installation ( skip optional package , take 9gb filled disk ) .", "* recommend base conda env , allows doctr requires pygobject otherwise support ( except ` mysys2 ` used h2ogpt ) .", "* also allows tt package coqui , otherwise currently enabled one-click installer .", "-- -" ] ], "token": [ [ "window", "10/11", "64-bit", "full", "document", "q/a", "capability", "*", "one-click", "installer", "*", "cpu", "gpu", ":", "download", "[", "h2ogpt", "window", "installer", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/jan2024/h2ogpt_0.0.1.exe", ")", "(", "1.3gb", "file", ")", "*", "installed", ",", "feel", "free", "change", "start", "directory", "icon", "`", "%", "homedrive", "%", "\\", "%", "homepath", "%", "`", "(", "e.g", ".", ")", "`", "%", "homedrive", "%", "\\", "%", "homepath", "%", "\\h2ogpt_data", "`", "created", "file", "(", "like", "database", ")", "go", ".", "path", "saved", "relative", "path", ".", "*", "cpu", ":", "click", "h2ogpt", "icon", "start", "menu", ".", "give", "15", "second", "open", "browser", "many", "optional", "package", "included", ".", "default", ",", "browser", "launch", "actual", "local", "ip", "address", ",", "localhost", ".", "*", "gpu", ":", "starting", ",", "run", "following", "command", "(", "replace", "`", "pseud", "`", "user", ")", ":", "``", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "-m", "pip", "uninstall", "-y", "torch", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "-m", "pip", "install", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/torch-2.1.2", "%", "2bcu118-cp310-cp310-win_amd64.whl", "``", "`", "click", "h2ogpt", "icon", "start", "menu", ".", "give", "20", "second", "open", "browser", "many", "optional", "package", "included", ".", "default", ",", "browser", "launch", "actual", "local", "ip", "address", ",", "localhost", ".", "*", "user", "may", "python", "located", ":", "`", "c", ":", "\\program", "file", "(", "x86", ")", "\\h2ogpt\\python\\python.exe", "`", ".", "*", "debug", "issue", ",", "run", "following", "(", "replace", "`", "pseud", "`", "user", ")", ":", "``", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "``", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\h2ogpt.launch.pyw", "''", "``", "`", "start-up", "exception", "appended", "log", ",", "e.g", ".", "`", "c", ":", "\\users\\pseud\\h2ogpt_exception.log", "`", ".", "*", "control", "startup", ",", "tweak", "python", "startup", "file", ",", "e.g", ".", "user", "`", "pseud", "`", ":", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\pkgs\\win_run_app.py", "`", "*", "python", "code", ",", "set", "envs", "anywhere", "main_h2ogpt", "(", ")", "called", "*", "e.g", ".", "`", "os.environ", "[", "'name", "'", "]", "=", "'value", "'", "`", ",", "e.g", ".", "`", "os.environ", "[", "'n_jobs", "'", "]", "=", "'10", "'", "`", "(", "must", "always", "string", ")", ".", "*", "environment", "variable", "changed", ",", "e.g", ".", ":", "*", "`", "n_jobs", "`", ":", "number", "core", "various", "task", "*", "`", "omp_num_threads", "`", "thread", "count", "llama", "*", "`", "cuda_visible_devices", "`", "gpus", "used", ".", "recommend", "set", "single", "fast", "gpu", ",", "e.g", ".", "`", "cuda_visible_devices=0", "`", "multiple", "gpus", ".", "note", "ui", "control", "gpus", "(", "cpu", "mode", ")", "llama", "model", ".", "*", "cli", "argument", "`", "python", "generate.py", "--", "help", "`", "environment", "variable", "set", "`", "h2ogpt_x", "`", ",", "e.g", ".", "`", "h2ogpt_h2ocolors", "`", "`", "false", "`", ".", "*", "set", "env", "`", "h2ogpt_server_name", "`", "actual", "ip", "address", "lan", "see", "app", ",", "e.g", ".", "`", "h2ogpt_server_name", "`", "`", "192.168.1.172", "`", "allow", "access", "firewall", "window", "defender", "activated", ".", "*", "one", "tweak", "installed", "h2ogpt", "code", ",", "e.g", ".", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt", "`", ".", "*", "terminate", "app", ",", "go", "system", "tab", "click", "admin", "click", "shutdown", "h2ogpt", ".", "*", "startup", "fails", ",", "run", "console", "check", "error", ",", "e.g", ".", "kill", "old", "python", "process", ".", "*", "[", "full", "window", "10/11", "manual", "installation", "script", "]", "(", "docs/readme_windows.md", ")", "*", "single", "`", ".bat", "`", "file", "installation", "(", "skip", "optional", "package", ",", "take", "9gb", "filled", "disk", ")", ".", "*", "recommend", "base", "conda", "env", ",", "allows", "doctr", "requires", "pygobject", "otherwise", "support", "(", "except", "`", "mysys2", "`", "used", "h2ogpt", ")", ".", "*", "also", "allows", "tt", "package", "coqui", ",", "otherwise", "currently", "enabled", "one-click", "installer", ".", "--", "-" ], [ "window 10/11 64-bit full document q/a capability * one-click installer * cpu gpu : download [ h2ogpt window installer ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/jan2024/h2ogpt_0.0.1.exe ) ( 1.3gb file ) * installed , feel free change start directory icon ` % homedrive % \\ % homepath % ` ( e.g . )", "` % homedrive % \\ % homepath % \\h2ogpt_data ` created file ( like database ) go .", "path saved relative path .", "* cpu : click h2ogpt icon start menu .", "give 15 second open browser many optional package included .", "default , browser launch actual local ip address , localhost .", "* gpu : starting , run following command ( replace ` pseud ` user ) : `` ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe -m pip uninstall -y torch c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe -m pip install http : //h2o-release.s3.amazonaws.com/h2ogpt/torch-2.1.2 % 2bcu118-cp310-cp310-win_amd64.whl `` ` click h2ogpt icon start menu .", "give 20 second open browser many optional package included .", "default , browser launch actual local ip address , localhost .", "* user may python located : ` c : \\program file ( x86 ) \\h2ogpt\\python\\python.exe ` .", "* debug issue , run following ( replace ` pseud ` user ) : `` ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe `` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\h2ogpt.launch.pyw '' `` ` start-up exception appended log , e.g .", "` c : \\users\\pseud\\h2ogpt_exception.log ` .", "* control startup , tweak python startup file , e.g .", "user ` pseud ` : ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\pkgs\\win_run_app.py ` * python code , set envs anywhere main_h2ogpt ( ) called * e.g .", "` os.environ [ 'name ' ] = 'value ' ` , e.g .", "` os.environ [ 'n_jobs ' ] = '10 ' ` ( must always string ) .", "* environment variable changed , e.g .", ": * ` n_jobs ` : number core various task * ` omp_num_threads ` thread count llama * ` cuda_visible_devices ` gpus used .", "recommend set single fast gpu , e.g .", "` cuda_visible_devices=0 ` multiple gpus .", "note ui control gpus ( cpu mode ) llama model .", "* cli argument ` python generate.py -- help ` environment variable set ` h2ogpt_x ` , e.g .", "` h2ogpt_h2ocolors ` ` false ` .", "* set env ` h2ogpt_server_name ` actual ip address lan see app , e.g .", "` h2ogpt_server_name ` ` 192.168.1.172 ` allow access firewall window defender activated .", "* one tweak installed h2ogpt code , e.g .", "` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt ` .", "* terminate app , go system tab click admin click shutdown h2ogpt .", "* startup fails , run console check error , e.g .", "kill old python process .", "* [ full window 10/11 manual installation script ] ( docs/readme_windows.md ) * single ` .bat ` file installation ( skip optional package , take 9gb filled disk ) .", "* recommend base conda env , allows doctr requires pygobject otherwise support ( except ` mysys2 ` used h2ogpt ) .", "* also allows tt package coqui , otherwise currently enabled one-click installer .", "-- -" ] ], "level of complexity": 0 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "Linux (CPU/CUDA) with full document Q/A capability\n * [Docker Build and Run Docs](docs/README_DOCKER.md)\n * [Linux Manual Install and Run Docs](docs/README_LINUX.md)\n\n---\n\n", "sentence": [ [ "linux", "(", "cpu/cuda", ")", "full", "document", "q/a", "capability", "*", "[", "docker", "build", "run", "doc", "]", "(", "docs/readme_docker.md", ")", "*", "[", "linux", "manual", "install", "run", "doc", "]", "(", "docs/readme_linux.md", ")", "--", "-" ], [ "linux ( cpu/cuda ) full document q/a capability * [ docker build run doc ] ( docs/readme_docker.md ) * [ linux manual install run doc ] ( docs/readme_linux.md ) -- -" ] ], "token": [ [ "linux", "(", "cpu/cuda", ")", "full", "document", "q/a", "capability", "*", "[", "docker", "build", "run", "doc", "]", "(", "docs/readme_docker.md", ")", "*", "[", "linux", "manual", "install", "run", "doc", "]", "(", "docs/readme_linux.md", ")", "--", "-" ], [ "linux ( cpu/cuda ) full document q/a capability * [ docker build run doc ] ( docs/readme_docker.md ) * [ linux manual install run doc ] ( docs/readme_linux.md ) -- -" ] ], "level of complexity": -1 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "macOS (CPU/M1/M2) with full document Q/A capability\n* One-click Installers (Experimental and subject to changes)\n\n Nov 08, 2023\n - [h2ogpt-osx-m1-cpu](https://h2o-release.s3.amazonaws.com/h2ogpt/Nov2023/h2ogpt-osx-m1-cpu)\n - [h2ogpt-osx-m1-gpu](https://h2o-release.s3.amazonaws.com/h2ogpt/Nov2023/h2ogpt-osx-m1-gpu)\n \n Download the runnable file and open it from the Finder. It will take a few minutes to unpack and run the application.\n These one-click installers are experimental. Report any issues with steps to reproduce at https://github.com/h2oai/h2ogpt/issues.\n\n **Note:** The app bundle is unsigned. If you experience any issues with running the app, run the following commands:\n ```bash\n $ xattr -dr com.apple.quarantine {file-path}/h2ogpt-osx-m1-gpu\n $ chmod +x {file-path}/h2ogpt-osx-m1-gpu\n ```\n* [macOS Manual Install and Run Docs](docs/README_MACOS.md)\n\n---\n\n", "sentence": [ [ "macos", "(", "cpu/m1/m2", ")", "full", "document", "q/a", "capability", "*", "one-click", "installers", "(", "experimental", "subject", "change", ")", "nov", "08", ",", "2023", "-", "[", "h2ogpt-osx-m1-cpu", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-cpu", ")", "-", "[", "h2ogpt-osx-m1-gpu", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-gpu", ")", "download", "runnable", "file", "open", "finder", ".", "take", "minute", "unpack", "run", "application", ".", "one-click", "installers", "experimental", ".", "report", "issue", "step", "reproduce", "http", ":", "//github.com/h2oai/h2ogpt/issues", ".", "*", "*", "note", ":", "*", "*", "app", "bundle", "unsigned", ".", "experience", "issue", "running", "app", ",", "run", "following", "command", ":", "``", "`", "bash", "$", "xattr", "-dr", "com.apple.quarantine", "{", "file-path", "}", "/h2ogpt-osx-m1-gpu", "$", "chmod", "+x", "{", "file-path", "}", "/h2ogpt-osx-m1-gpu", "``", "`", "*", "[", "macos", "manual", "install", "run", "doc", "]", "(", "docs/readme_macos.md", ")", "--", "-" ], [ "macos ( cpu/m1/m2 ) full document q/a capability * one-click installers ( experimental subject change ) nov 08 , 2023 - [ h2ogpt-osx-m1-cpu ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-cpu ) - [ h2ogpt-osx-m1-gpu ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-gpu ) download runnable file open finder .", "take minute unpack run application .", "one-click installers experimental .", "report issue step reproduce http : //github.com/h2oai/h2ogpt/issues .", "* * note : * * app bundle unsigned .", "experience issue running app , run following command : `` ` bash $ xattr -dr com.apple.quarantine { file-path } /h2ogpt-osx-m1-gpu $ chmod +x { file-path } /h2ogpt-osx-m1-gpu `` ` * [ macos manual install run doc ] ( docs/readme_macos.md ) -- -" ] ], "token": [ [ "macos", "(", "cpu/m1/m2", ")", "full", "document", "q/a", "capability", "*", "one-click", "installers", "(", "experimental", "subject", "change", ")", "nov", "08", ",", "2023", "-", "[", "h2ogpt-osx-m1-cpu", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-cpu", ")", "-", "[", "h2ogpt-osx-m1-gpu", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-gpu", ")", "download", "runnable", "file", "open", "finder", ".", "take", "minute", "unpack", "run", "application", ".", "one-click", "installers", "experimental", ".", "report", "issue", "step", "reproduce", "http", ":", "//github.com/h2oai/h2ogpt/issues", ".", "*", "*", "note", ":", "*", "*", "app", "bundle", "unsigned", ".", "experience", "issue", "running", "app", ",", "run", "following", "command", ":", "``", "`", "bash", "$", "xattr", "-dr", "com.apple.quarantine", "{", "file-path", "}", "/h2ogpt-osx-m1-gpu", "$", "chmod", "+x", "{", "file-path", "}", "/h2ogpt-osx-m1-gpu", "``", "`", "*", "[", "macos", "manual", "install", "run", "doc", "]", "(", "docs/readme_macos.md", ")", "--", "-" ], [ "macos ( cpu/m1/m2 ) full document q/a capability * one-click installers ( experimental subject change ) nov 08 , 2023 - [ h2ogpt-osx-m1-cpu ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-cpu ) - [ h2ogpt-osx-m1-gpu ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-gpu ) download runnable file open finder .", "take minute unpack run application .", "one-click installers experimental .", "report issue step reproduce http : //github.com/h2oai/h2ogpt/issues .", "* * note : * * app bundle unsigned .", "experience issue running app , run following command : `` ` bash $ xattr -dr com.apple.quarantine { file-path } /h2ogpt-osx-m1-gpu $ chmod +x { file-path } /h2ogpt-osx-m1-gpu `` ` * [ macos manual install run doc ] ( docs/readme_macos.md ) -- -" ] ], "level of complexity": -1 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "Docs Guide\n\n* [Get Started](#get-started)\n * [Linux (CPU or CUDA)](docs/README_LINUX.md)\n * [macOS (CPU or M1/M2)](docs/README_MACOS.md)\n * [Windows 10/11 (CPU or CUDA)](docs/README_WINDOWS.md)\n * [GPU (CUDA, AutoGPTQ, exllama) Running Details](docs/README_GPU.md)\n * [CPU Running Details](docs/README_CPU.md)\n * [CLI chat](docs/README_CLI.md)\n * [Gradio UI](docs/README_ui.md)\n * [Client API (Gradio, OpenAI-Compliant)](docs/README_CLIENT.md)\n * [Inference Servers (HF TGI server, vLLM, Gradio, ExLLaMa, Replicate, OpenAI, Azure OpenAI)](docs/README_InferenceServers.md)\n * [Python Wheel](docs/README_WHEEL.md)\n * [Offline Installation](docs/README_offline.md)\n * [Low Memory](docs/FAQ.md#low-memory-mode)\n * [Docker](docs/README_DOCKER.md)\n* [LangChain Document Support](docs/README_LangChain.md)\n* [Compare to PrivateGPT et al.](docs/README_LangChain.md#what-is-h2ogpts-langchain-integration-like)\n* [Roadmap](#roadmap)\n* [Development](#development)\n* [Help](#help)\n * [LangChain file types supported](docs/README_LangChain.md#supported-datatypes)\n * [CLI Database control](docs/README_LangChain.md#database-creation)\n * [FAQ](docs/FAQ.md)\n * [Model Usage Notes](docs/FAQ.md#model-usage-notes)\n * [Adding LLM Models (including using GGUF and Attention Sinks)](docs/FAQ.md#adding-models)\n * [Adding Embedding Models](docs/FAQ.md#add-new-embedding-model)\n * [Adding Prompts](docs/FAQ.md#adding-prompt-templates)\n * [In-Context Learning](docs/FAQ.md#in-context-learning-via-prompt-engineering)\n * [Multiple GPUs](docs/FAQ.md#multiple-gpus)\n * [Low-Memory Usage](docs/FAQ.md#low-memory-mode)\n * [Environment Variables](docs/FAQ.md#what-envs-can-i-pass-to-control-h2ogpt)\n * [HTTPS access for server and client](docs/FAQ.md#https-access-for-server-and-client)\n * [Useful Links](docs/LINKS.md)\n * [Fine-Tuning](docs/FINETUNE.md)\n * [Triton](docs/TRITON.md)\n * [Commercial viability](docs/FAQ.md#commercial-viability)\n* [Acknowledgements](#acknowledgements)\n* [Why H2O.ai?](#why-h2oai)\n* [Disclaimer](#disclaimer)\n\n", "sentence": [ [ "doc", "guide", "<", "!", "--", "cat", "readme.md", "|", "./gh-md-toc", "-", "help", "heavily", "processed", "--", ">", "*", "[", "get", "started", "]", "(", "#", "get-started", ")", "*", "[", "linux", "(", "cpu", "cuda", ")", "]", "(", "docs/readme_linux.md", ")", "*", "[", "macos", "(", "cpu", "m1/m2", ")", "]", "(", "docs/readme_macos.md", ")", "*", "[", "window", "10/11", "(", "cpu", "cuda", ")", "]", "(", "docs/readme_windows.md", ")", "*", "[", "gpu", "(", "cuda", ",", "autogptq", ",", "exllama", ")", "running", "detail", "]", "(", "docs/readme_gpu.md", ")", "*", "[", "cpu", "running", "detail", "]", "(", "docs/readme_cpu.md", ")", "*", "[", "cli", "chat", "]", "(", "docs/readme_cli.md", ")", "*", "[", "gradio", "ui", "]", "(", "docs/readme_ui.md", ")", "*", "[", "client", "api", "(", "gradio", ",", "openai-compliant", ")", "]", "(", "docs/readme_client.md", ")", "*", "[", "inference", "server", "(", "hf", "tgi", "server", ",", "vllm", ",", "gradio", ",", "exllama", ",", "replicate", ",", "openai", ",", "azure", "openai", ")", "]", "(", "docs/readme_inferenceservers.md", ")", "*", "[", "python", "wheel", "]", "(", "docs/readme_wheel.md", ")", "*", "[", "offline", "installation", "]", "(", "docs/readme_offline.md", ")", "*", "[", "low", "memory", "]", "(", "docs/faq.md", "#", "low-memory-mode", ")", "*", "[", "docker", "]", "(", "docs/readme_docker.md", ")", "*", "[", "langchain", "document", "support", "]", "(", "docs/readme_langchain.md", ")", "*", "[", "compare", "privategpt", "et", "al", ".", "]", "(", "docs/readme_langchain.md", "#", "what-is-h2ogpts-langchain-integration-like", ")", "*", "[", "roadmap", "]", "(", "#", "roadmap", ")", "*", "[", "development", "]", "(", "#", "development", ")", "*", "[", "help", "]", "(", "#", "help", ")", "*", "[", "langchain", "file", "type", "supported", "]", "(", "docs/readme_langchain.md", "#", "supported-datatypes", ")", "*", "[", "cli", "database", "control", "]", "(", "docs/readme_langchain.md", "#", "database-creation", ")", "*", "[", "faq", "]", "(", "docs/faq.md", ")", "*", "[", "model", "usage", "note", "]", "(", "docs/faq.md", "#", "model-usage-notes", ")", "*", "[", "adding", "llm", "model", "(", "including", "using", "gguf", "attention", "sink", ")", "]", "(", "docs/faq.md", "#", "adding-models", ")", "*", "[", "adding", "embedding", "model", "]", "(", "docs/faq.md", "#", "add-new-embedding-model", ")", "*", "[", "adding", "prompt", "]", "(", "docs/faq.md", "#", "adding-prompt-templates", ")", "*", "[", "in-context", "learning", "]", "(", "docs/faq.md", "#", "in-context-learning-via-prompt-engineering", ")", "*", "[", "multiple", "gpus", "]", "(", "docs/faq.md", "#", "multiple-gpus", ")", "*", "[", "low-memory", "usage", "]", "(", "docs/faq.md", "#", "low-memory-mode", ")", "*", "[", "environment", "variable", "]", "(", "docs/faq.md", "#", "what-envs-can-i-pass-to-control-h2ogpt", ")", "*", "[", "http", "access", "server", "client", "]", "(", "docs/faq.md", "#", "https-access-for-server-and-client", ")", "*", "[", "useful", "link", "]", "(", "docs/links.md", ")", "*", "[", "fine-tuning", "]", "(", "docs/finetune.md", ")", "*", "[", "triton", "]", "(", "docs/triton.md", ")", "*", "[", "commercial", "viability", "]", "(", "docs/faq.md", "#", "commercial-viability", ")", "*", "[", "acknowledgement", "]", "(", "#", "acknowledgement", ")", "*", "[", "h2o.ai", "?", "]", "(", "#", "why-h2oai", ")", "*", "[", "disclaimer", "]", "(", "#", "disclaimer", ")" ], [ "doc guide < ! -- cat readme.md | ./gh-md-toc - help heavily processed -- > * [ get started ] ( # get-started ) * [ linux ( cpu cuda ) ] ( docs/readme_linux.md ) * [ macos ( cpu m1/m2 ) ] ( docs/readme_macos.md ) * [ window 10/11 ( cpu cuda ) ] ( docs/readme_windows.md ) * [ gpu ( cuda , autogptq , exllama ) running detail ] ( docs/readme_gpu.md ) * [ cpu running detail ] ( docs/readme_cpu.md ) * [ cli chat ] ( docs/readme_cli.md ) * [ gradio ui ] ( docs/readme_ui.md ) * [ client api ( gradio , openai-compliant ) ] ( docs/readme_client.md ) * [ inference server ( hf tgi server , vllm , gradio , exllama , replicate , openai , azure openai ) ] ( docs/readme_inferenceservers.md ) * [ python wheel ] ( docs/readme_wheel.md ) * [ offline installation ] ( docs/readme_offline.md ) * [ low memory ] ( docs/faq.md # low-memory-mode ) * [ docker ] ( docs/readme_docker.md ) * [ langchain document support ] ( docs/readme_langchain.md ) * [ compare privategpt et al .", "] ( docs/readme_langchain.md # what-is-h2ogpts-langchain-integration-like ) * [ roadmap ] ( # roadmap ) * [ development ] ( # development ) * [ help ] ( # help ) * [ langchain file type supported ] ( docs/readme_langchain.md # supported-datatypes ) * [ cli database control ] ( docs/readme_langchain.md # database-creation ) * [ faq ] ( docs/faq.md ) * [ model usage note ] ( docs/faq.md # model-usage-notes ) * [ adding llm model ( including using gguf attention sink ) ] ( docs/faq.md # adding-models ) * [ adding embedding model ] ( docs/faq.md # add-new-embedding-model ) * [ adding prompt ] ( docs/faq.md # adding-prompt-templates ) * [ in-context learning ] ( docs/faq.md # in-context-learning-via-prompt-engineering ) * [ multiple gpus ] ( docs/faq.md # multiple-gpus ) * [ low-memory usage ] ( docs/faq.md # low-memory-mode ) * [ environment variable ] ( docs/faq.md # what-envs-can-i-pass-to-control-h2ogpt ) * [ http access server client ] ( docs/faq.md # https-access-for-server-and-client ) * [ useful link ] ( docs/links.md ) * [ fine-tuning ] ( docs/finetune.md ) * [ triton ] ( docs/triton.md ) * [ commercial viability ] ( docs/faq.md # commercial-viability ) * [ acknowledgement ] ( # acknowledgement ) * [ h2o.ai ?", "] ( # why-h2oai ) * [ disclaimer ] ( # disclaimer )" ] ], "token": [ [ "doc", "guide", "<", "!", "--", "cat", "readme.md", "|", "./gh-md-toc", "-", "help", "heavily", "processed", "--", ">", "*", "[", "get", "started", "]", "(", "#", "get-started", ")", "*", "[", "linux", "(", "cpu", "cuda", ")", "]", "(", "docs/readme_linux.md", ")", "*", "[", "macos", "(", "cpu", "m1/m2", ")", "]", "(", "docs/readme_macos.md", ")", "*", "[", "window", "10/11", "(", "cpu", "cuda", ")", "]", "(", "docs/readme_windows.md", ")", "*", "[", "gpu", "(", "cuda", ",", "autogptq", ",", "exllama", ")", "running", "detail", "]", "(", "docs/readme_gpu.md", ")", "*", "[", "cpu", "running", "detail", "]", "(", "docs/readme_cpu.md", ")", "*", "[", "cli", "chat", "]", "(", "docs/readme_cli.md", ")", "*", "[", "gradio", "ui", "]", "(", "docs/readme_ui.md", ")", "*", "[", "client", "api", "(", "gradio", ",", "openai-compliant", ")", "]", "(", "docs/readme_client.md", ")", "*", "[", "inference", "server", "(", "hf", "tgi", "server", ",", "vllm", ",", "gradio", ",", "exllama", ",", "replicate", ",", "openai", ",", "azure", "openai", ")", "]", "(", "docs/readme_inferenceservers.md", ")", "*", "[", "python", "wheel", "]", "(", "docs/readme_wheel.md", ")", "*", "[", "offline", "installation", "]", "(", "docs/readme_offline.md", ")", "*", "[", "low", "memory", "]", "(", "docs/faq.md", "#", "low-memory-mode", ")", "*", "[", "docker", "]", "(", "docs/readme_docker.md", ")", "*", "[", "langchain", "document", "support", "]", "(", "docs/readme_langchain.md", ")", "*", "[", "compare", "privategpt", "et", "al", ".", "]", "(", "docs/readme_langchain.md", "#", "what-is-h2ogpts-langchain-integration-like", ")", "*", "[", "roadmap", "]", "(", "#", "roadmap", ")", "*", "[", "development", "]", "(", "#", "development", ")", "*", "[", "help", "]", "(", "#", "help", ")", "*", "[", "langchain", "file", "type", "supported", "]", "(", "docs/readme_langchain.md", "#", "supported-datatypes", ")", "*", "[", "cli", "database", "control", "]", "(", "docs/readme_langchain.md", "#", "database-creation", ")", "*", "[", "faq", "]", "(", "docs/faq.md", ")", "*", "[", "model", "usage", "note", "]", "(", "docs/faq.md", "#", "model-usage-notes", ")", "*", "[", "adding", "llm", "model", "(", "including", "using", "gguf", "attention", "sink", ")", "]", "(", "docs/faq.md", "#", "adding-models", ")", "*", "[", "adding", "embedding", "model", "]", "(", "docs/faq.md", "#", "add-new-embedding-model", ")", "*", "[", "adding", "prompt", "]", "(", "docs/faq.md", "#", "adding-prompt-templates", ")", "*", "[", "in-context", "learning", "]", "(", "docs/faq.md", "#", "in-context-learning-via-prompt-engineering", ")", "*", "[", "multiple", "gpus", "]", "(", "docs/faq.md", "#", "multiple-gpus", ")", "*", "[", "low-memory", "usage", "]", "(", "docs/faq.md", "#", "low-memory-mode", ")", "*", "[", "environment", "variable", "]", "(", "docs/faq.md", "#", "what-envs-can-i-pass-to-control-h2ogpt", ")", "*", "[", "http", "access", "server", "client", "]", "(", "docs/faq.md", "#", "https-access-for-server-and-client", ")", "*", "[", "useful", "link", "]", "(", "docs/links.md", ")", "*", "[", "fine-tuning", "]", "(", "docs/finetune.md", ")", "*", "[", "triton", "]", "(", "docs/triton.md", ")", "*", "[", "commercial", "viability", "]", "(", "docs/faq.md", "#", "commercial-viability", ")", "*", "[", "acknowledgement", "]", "(", "#", "acknowledgement", ")", "*", "[", "h2o.ai", "?", "]", "(", "#", "why-h2oai", ")", "*", "[", "disclaimer", "]", "(", "#", "disclaimer", ")" ], [ "doc guide < ! -- cat readme.md | ./gh-md-toc - help heavily processed -- > * [ get started ] ( # get-started ) * [ linux ( cpu cuda ) ] ( docs/readme_linux.md ) * [ macos ( cpu m1/m2 ) ] ( docs/readme_macos.md ) * [ window 10/11 ( cpu cuda ) ] ( docs/readme_windows.md ) * [ gpu ( cuda , autogptq , exllama ) running detail ] ( docs/readme_gpu.md ) * [ cpu running detail ] ( docs/readme_cpu.md ) * [ cli chat ] ( docs/readme_cli.md ) * [ gradio ui ] ( docs/readme_ui.md ) * [ client api ( gradio , openai-compliant ) ] ( docs/readme_client.md ) * [ inference server ( hf tgi server , vllm , gradio , exllama , replicate , openai , azure openai ) ] ( docs/readme_inferenceservers.md ) * [ python wheel ] ( docs/readme_wheel.md ) * [ offline installation ] ( docs/readme_offline.md ) * [ low memory ] ( docs/faq.md # low-memory-mode ) * [ docker ] ( docs/readme_docker.md ) * [ langchain document support ] ( docs/readme_langchain.md ) * [ compare privategpt et al .", "] ( docs/readme_langchain.md # what-is-h2ogpts-langchain-integration-like ) * [ roadmap ] ( # roadmap ) * [ development ] ( # development ) * [ help ] ( # help ) * [ langchain file type supported ] ( docs/readme_langchain.md # supported-datatypes ) * [ cli database control ] ( docs/readme_langchain.md # database-creation ) * [ faq ] ( docs/faq.md ) * [ model usage note ] ( docs/faq.md # model-usage-notes ) * [ adding llm model ( including using gguf attention sink ) ] ( docs/faq.md # adding-models ) * [ adding embedding model ] ( docs/faq.md # add-new-embedding-model ) * [ adding prompt ] ( docs/faq.md # adding-prompt-templates ) * [ in-context learning ] ( docs/faq.md # in-context-learning-via-prompt-engineering ) * [ multiple gpus ] ( docs/faq.md # multiple-gpus ) * [ low-memory usage ] ( docs/faq.md # low-memory-mode ) * [ environment variable ] ( docs/faq.md # what-envs-can-i-pass-to-control-h2ogpt ) * [ http access server client ] ( docs/faq.md # https-access-for-server-and-client ) * [ useful link ] ( docs/links.md ) * [ fine-tuning ] ( docs/finetune.md ) * [ triton ] ( docs/triton.md ) * [ commercial viability ] ( docs/faq.md # commercial-viability ) * [ acknowledgement ] ( # acknowledgement ) * [ h2o.ai ?", "] ( # why-h2oai ) * [ disclaimer ] ( # disclaimer )" ] ], "level of complexity": -1 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "Experimental features\n\nThese are not part of normal installation instructions and are experimental.\n\n* [Agents](docs/README_Agents.md) -- in Alpha testing. Optimal for OpenAI, but that also fails sometimes.\n\n", "sentence": [ [ "experimental", "feature", "part", "normal", "installation", "instruction", "experimental", ".", "*", "[", "agent", "]", "(", "docs/readme_agents.md", ")", "--", "alpha", "testing", ".", "optimal", "openai", ",", "also", "fails", "sometimes", "." ], [ "experimental feature part normal installation instruction experimental .", "* [ agent ] ( docs/readme_agents.md ) -- alpha testing .", "optimal openai , also fails sometimes ." ] ], "token": [ [ "experimental", "feature", "part", "normal", "installation", "instruction", "experimental", ".", "*", "[", "agent", "]", "(", "docs/readme_agents.md", ")", "--", "alpha", "testing", ".", "optimal", "openai", ",", "also", "fails", "sometimes", "." ], [ "experimental feature part normal installation instruction experimental .", "* [ agent ] ( docs/readme_agents.md ) -- alpha testing .", "optimal openai , also fails sometimes ." ] ], "level of complexity": -1 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "Development\n\n- To create a development environment for training and generation, follow the [installation instructions](docs/INSTALL.md).\n- To fine-tune any LLM models on your data, follow the [fine-tuning instructions](docs/FINETUNE.md).\n- To run h2oGPT tests:\n ```bash\n pip install requirements-parser pytest-instafail pytest-random-order playsound==1.3.0\n pytest --instafail -s -v tests\n ", "sentence": [ [ "development", "-", "create", "development", "environment", "training", "generation", ",", "follow", "[", "installation", "instruction", "]", "(", "docs/install.md", ")", ".", "-", "fine-tune", "llm", "model", "data", ",", "follow", "[", "fine-tuning", "instruction", "]", "(", "docs/finetune.md", ")", ".", "-", "run", "h2ogpt", "test", ":", "``", "`", "bash", "pip", "install", "requirements-parser", "pytest-instafail", "pytest-random-order", "playsound==1.3.0", "pytest", "--", "instafail", "-s", "-v", "test" ], [ "development - create development environment training generation , follow [ installation instruction ] ( docs/install.md ) .", "- fine-tune llm model data , follow [ fine-tuning instruction ] ( docs/finetune.md ) .", "- run h2ogpt test : `` ` bash pip install requirements-parser pytest-instafail pytest-random-order playsound==1.3.0 pytest -- instafail -s -v test" ] ], "token": [ [ "development", "-", "create", "development", "environment", "training", "generation", ",", "follow", "[", "installation", "instruction", "]", "(", "docs/install.md", ")", ".", "-", "fine-tune", "llm", "model", "data", ",", "follow", "[", "fine-tuning", "instruction", "]", "(", "docs/finetune.md", ")", ".", "-", "run", "h2ogpt", "test", ":", "``", "`", "bash", "pip", "install", "requirements-parser", "pytest-instafail", "pytest-random-order", "playsound==1.3.0", "pytest", "--", "instafail", "-s", "-v", "test" ], [ "development - create development environment training generation , follow [ installation instruction ] ( docs/install.md ) .", "- fine-tune llm model data , follow [ fine-tuning instruction ] ( docs/finetune.md ) .", "- run h2ogpt test : `` ` bash pip install requirements-parser pytest-instafail pytest-random-order playsound==1.3.0 pytest -- instafail -s -v test" ] ], "level of complexity": 0 }, { "url": "https://github.com/h2oai/h2ogpt", "readme_url": "https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md", "topic": [ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ], "text": "for client tests\n make -C client setup\n make -C client build\n pytest --instafail -s -v client/tests\n ", "sentence": [ [ "client", "test", "make", "-c", "client", "setup", "make", "-c", "client", "build", "pytest", "--", "instafail", "-s", "-v", "client/tests" ], [ "client test make -c client setup make -c client build pytest -- instafail -s -v client/tests" ] ], "token": [ [ "client", "test", "make", "-c", "client", "setup", "make", "-c", "client", "build", "pytest", "--", "instafail", "-s", "-v", "client/tests" ], [ "client test make -c client setup make -c client build pytest -- instafail -s -v client/tests" ] ], "level of complexity": -1 }, { "url": "https://github.com/ShishirPatil/gorilla", "readme_url": "https://raw.githubusercontent.com/ShishirPatil/gorilla/main/README.md", "topic": [ "api", "api-documentation", "chatgpt", "claude-api", "gpt-4-api", "llm", "openai-api", "openai-functions" ], "text": "Gorilla: Large Language Model Connected with Massive APIs [[Project Website](https://shishirpatil.github.io/gorilla/)]\n\n\n\n\n**:fire: Gorilla OpenFunctions** is a drop-in alternative for function calling! [Release Blog](https://gorilla.cs.berkeley.edu/blogs/4_open_functions.html)\n\n**\ud83d\udfe2 Gorilla is Apache 2.0** With Gorilla being fine-tuned on MPT, and Falcon, you can use Gorilla commercially with no obligations! :golf: \n\n**:rocket: Try Gorilla in 60s** [![Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1DEBPsccVLF_aUnmD0FwPeHFrtdC0QIUP?usp=sharing) \n\n:computer: Use [Gorilla in your CLI](https://github.com/gorilla-llm/gorilla-cli) with `pip install gorilla-cli`\n\n**:newspaper_roll: Checkout our paper!** [![arXiv](https://img.shields.io/badge/arXiv-2305.15334-.svg?style=flat-square)](https://arxiv.org/abs/2305.15334)\n\n**:wave: Join our Discord!** [![Discord](https://img.shields.io/discord/1111172801899012102?label=Discord&logo=discord&logoColor=green&style=flat-square)](https://discord.gg/SwTyuTAxX3)\n\n\n`Gorilla` enables LLMs to use tools by invoking APIs. Given a natural language query, Gorilla comes up with the semantically- and syntactically- correct API to invoke. With Gorilla, we are the first to demonstrate how to use LLMs to invoke 1,600+ (and growing) API calls accurately while reducing hallucination. We also release APIBench, the largest collection of APIs, curated and easy to be trained on! Join us, as we try to expand the largest API store and teach LLMs how to write them! Hop on our Discord, or open a PR, or email us if you would like to have your API incorporated as well.\n\n", "sentence": [ [ "gorilla", ":", "large", "language", "model", "connected", "massive", "apis", "[", "[", "project", "website", "]", "(", "http", ":", "//shishirpatil.github.io/gorilla/", ")", "]", "<", "img", "src=", "''", "http", ":", "//github.com/shishirpatil/gorilla/blob/gh-pages/assets/img/logo.png", "''", "width=50", "%", "height=50", "%", ">", "*", "*", ":", "fire", ":", "gorilla", "openfunctions", "*", "*", "drop-in", "alternative", "function", "calling", "!", "[", "release", "blog", "]", "(", "http", ":", "//gorilla.cs.berkeley.edu/blogs/4_open_functions.html", ")", "*", "*", "\ud83d\udfe2", "gorilla", "apache", "2.0", "*", "*", "gorilla", "fine-tuned", "mpt", ",", "falcon", ",", "use", "gorilla", "commercially", "obligation", "!", ":", "golf", ":", "*", "*", ":", "rocket", ":", "try", "gorilla", "60", "*", "*", "[", "!", "[", "colab", "]", "(", "http", ":", "//colab.research.google.com/assets/colab-badge.svg", ")", "]", "(", "http", ":", "//colab.research.google.com/drive/1debpsccvlf_aunmd0fwpehfrtdc0qiup", "?", "usp=sharing", ")", ":", "computer", ":", "use", "[", "gorilla", "cli", "]", "(", "http", ":", "//github.com/gorilla-llm/gorilla-cli", ")", "`", "pip", "install", "gorilla-cli", "`", "*", "*", ":", "newspaper_roll", ":", "checkout", "paper", "!", "*", "*", "[", "!", "[", "arxiv", "]", "(", "http", ":", "//img.shields.io/badge/arxiv-2305.15334-", "<", "color", ">", ".svg", "?", "style=flat-square", ")", "]", "(", "http", ":", "//arxiv.org/abs/2305.15334", ")", "*", "*", ":", "wave", ":", "join", "discord", "!", "*", "*", "[", "!", "[", "discord", "]", "(", "http", ":", "//img.shields.io/discord/1111172801899012102", "?", "label=discord", "&", "logo=discord", "&", "logocolor=green", "&", "style=flat-square", ")", "]", "(", "http", ":", "//discord.gg/swtyutaxx3", ")", "`", "gorilla", "`", "enables", "llm", "use", "tool", "invoking", "apis", ".", "given", "natural", "language", "query", ",", "gorilla", "come", "semantically-", "syntactically-", "correct", "api", "invoke", ".", "gorilla", ",", "first", "demonstrate", "use", "llm", "invoke", "1,600+", "(", "growing", ")", "api", "call", "accurately", "reducing", "hallucination", ".", "also", "release", "apibench", ",", "largest", "collection", "apis", ",", "curated", "easy", "trained", "!", "join", "u", ",", "try", "expand", "largest", "api", "store", "teach", "llm", "write", "!", "hop", "discord", ",", "open", "pr", ",", "email", "u", "would", "like", "api", "incorporated", "well", "." ], [ "gorilla : large language model connected massive apis [ [ project website ] ( http : //shishirpatil.github.io/gorilla/ ) ] < img src= '' http : //github.com/shishirpatil/gorilla/blob/gh-pages/assets/img/logo.png '' width=50 % height=50 % > * * : fire : gorilla openfunctions * * drop-in alternative function calling !", "[ release blog ] ( http : //gorilla.cs.berkeley.edu/blogs/4_open_functions.html ) * * \ud83d\udfe2 gorilla apache 2.0 * * gorilla fine-tuned mpt , falcon , use gorilla commercially obligation !", ": golf : * * : rocket : try gorilla 60 * * [ !", "[ colab ] ( http : //colab.research.google.com/assets/colab-badge.svg ) ] ( http : //colab.research.google.com/drive/1debpsccvlf_aunmd0fwpehfrtdc0qiup ? usp=sharing ) : computer : use [ gorilla cli ] ( http : //github.com/gorilla-llm/gorilla-cli ) ` pip install gorilla-cli ` * * : newspaper_roll : checkout paper !", "* * [ !", "[ arxiv ] ( http : //img.shields.io/badge/arxiv-2305.15334- < color > .svg ? style=flat-square ) ] ( http : //arxiv.org/abs/2305.15334 ) * * : wave : join discord !", "* * [ !", "[ discord ] ( http : //img.shields.io/discord/1111172801899012102 ? label=discord & logo=discord & logocolor=green & style=flat-square ) ] ( http : //discord.gg/swtyutaxx3 ) ` gorilla ` enables llm use tool invoking apis .", "given natural language query , gorilla come semantically- syntactically- correct api invoke .", "gorilla , first demonstrate use llm invoke 1,600+ ( growing ) api call accurately reducing hallucination .", "also release apibench , largest collection apis , curated easy trained !", "join u , try expand largest api store teach llm write !", "hop discord , open pr , email u would like api incorporated well ." ] ], "token": [ [ "gorilla", ":", "large", "language", "model", "connected", "massive", "apis", "[", "[", "project", "website", "]", "(", "http", ":", "//shishirpatil.github.io/gorilla/", ")", "]", "<", "img", "src=", "''", "http", ":", "//github.com/shishirpatil/gorilla/blob/gh-pages/assets/img/logo.png", "''", "width=50", "%", "height=50", "%", ">", "*", "*", ":", "fire", ":", "gorilla", "openfunctions", "*", "*", "drop-in", "alternative", "function", "calling", "!", "[", "release", "blog", "]", "(", "http", ":", "//gorilla.cs.berkeley.edu/blogs/4_open_functions.html", ")", "*", "*", "\ud83d\udfe2", "gorilla", "apache", "2.0", "*", "*", "gorilla", "fine-tuned", "mpt", ",", "falcon", ",", "use", "gorilla", "commercially", "obligation", "!", ":", "golf", ":", "*", "*", ":", "rocket", ":", "try", "gorilla", "60", "*", "*", "[", "!", "[", "colab", "]", "(", "http", ":", "//colab.research.google.com/assets/colab-badge.svg", ")", "]", "(", "http", ":", "//colab.research.google.com/drive/1debpsccvlf_aunmd0fwpehfrtdc0qiup", "?", "usp=sharing", ")", ":", "computer", ":", "use", "[", "gorilla", "cli", "]", "(", "http", ":", "//github.com/gorilla-llm/gorilla-cli", ")", "`", "pip", "install", "gorilla-cli", "`", "*", "*", ":", "newspaper_roll", ":", "checkout", "paper", "!", "*", "*", "[", "!", "[", "arxiv", "]", "(", "http", ":", "//img.shields.io/badge/arxiv-2305.15334-", "<", "color", ">", ".svg", "?", "style=flat-square", ")", "]", "(", "http", ":", "//arxiv.org/abs/2305.15334", ")", "*", "*", ":", "wave", ":", "join", "discord", "!", "*", "*", "[", "!", "[", "discord", "]", "(", "http", ":", "//img.shields.io/discord/1111172801899012102", "?", "label=discord", "&", "logo=discord", "&", "logocolor=green", "&", "style=flat-square", ")", "]", "(", "http", ":", "//discord.gg/swtyutaxx3", ")", "`", "gorilla", "`", "enables", "llm", "use", "tool", "invoking", "apis", ".", "given", "natural", "language", "query", ",", "gorilla", "come", "semantically-", "syntactically-", "correct", "api", "invoke", ".", "gorilla", ",", "first", "demonstrate", "use", "llm", "invoke", "1,600+", "(", "growing", ")", "api", "call", "accurately", "reducing", "hallucination", ".", "also", "release", "apibench", ",", "largest", "collection", "apis", ",", "curated", "easy", "trained", "!", "join", "u", ",", "try", "expand", "largest", "api", "store", "teach", "llm", "write", "!", "hop", "discord", ",", "open", "pr", ",", "email", "u", "would", "like", "api", "incorporated", "well", "." ], [ "gorilla : large language model connected massive apis [ [ project website ] ( http : //shishirpatil.github.io/gorilla/ ) ] < img src= '' http : //github.com/shishirpatil/gorilla/blob/gh-pages/assets/img/logo.png '' width=50 % height=50 % > * * : fire : gorilla openfunctions * * drop-in alternative function calling !", "[ release blog ] ( http : //gorilla.cs.berkeley.edu/blogs/4_open_functions.html ) * * \ud83d\udfe2 gorilla apache 2.0 * * gorilla fine-tuned mpt , falcon , use gorilla commercially obligation !", ": golf : * * : rocket : try gorilla 60 * * [ !", "[ colab ] ( http : //colab.research.google.com/assets/colab-badge.svg ) ] ( http : //colab.research.google.com/drive/1debpsccvlf_aunmd0fwpehfrtdc0qiup ? usp=sharing ) : computer : use [ gorilla cli ] ( http : //github.com/gorilla-llm/gorilla-cli ) ` pip install gorilla-cli ` * * : newspaper_roll : checkout paper !", "* * [ !", "[ arxiv ] ( http : //img.shields.io/badge/arxiv-2305.15334- < color > .svg ? style=flat-square ) ] ( http : //arxiv.org/abs/2305.15334 ) * * : wave : join discord !", "* * [ !", "[ discord ] ( http : //img.shields.io/discord/1111172801899012102 ? label=discord & logo=discord & logocolor=green & style=flat-square ) ] ( http : //discord.gg/swtyutaxx3 ) ` gorilla ` enables llm use tool invoking apis .", "given natural language query , gorilla come semantically- syntactically- correct api invoke .", "gorilla , first demonstrate use llm invoke 1,600+ ( growing ) api call accurately reducing hallucination .", "also release apibench , largest collection apis , curated easy trained !", "join u , try expand largest api store teach llm write !", "hop discord , open pr , email u would like api incorporated well ." ] ], "level of complexity": 0 }, { "url": "https://github.com/ShishirPatil/gorilla", "readme_url": "https://raw.githubusercontent.com/ShishirPatil/gorilla/main/README.md", "topic": [ "api", "api-documentation", "chatgpt", "claude-api", "gpt-4-api", "llm", "openai-api", "openai-functions" ], "text": "Repository Organization\n\nOur repository organization is shown below. \n\n - The `data` folder contains all the evaluation APIs `(APIBench)` and the community contributed APIs.\n - The `eval` folder contains all our evaluation code as well as the Gorilla outputs.\n - The `inference` folder contains all the inference code for running Gorilla locally.\n - [Coming Soon!] The `train` folder contains all the training code associated with Gorilla finetuning.\n\n\nFor our dataset collections, all the 1640 API documentation is in `data/api`. We also include the `APIBench` dataset created by self-instruct in `data/apibench`. For evaluation, we convert this into a LLM-friendly chat format, and the questions are in `eval/eval-data/questions`, and the corresponding responses are in `eval/eval-data/responses`. We have also included the evaluation scripts are in `eval/eval-scripts`. This would be entirely sufficient to train Gorilla yourself, and reproduce our results. Please see [evaluation](https://github.com/ShishirPatil/gorilla/tree/main/eval) for the details on how to use our evaluation pipeline.\n\nAdditionally, we have released all the model weights. `gorilla-7b-hf-v0` lets you invoke over 925 Hugging Face APIs. Similarly, `gorilla-7b-tf-v0` and `gorilla-7b-th-v0` have 626 (exhaustive) Tensorflow v2, and 94 (exhaustive) Torch Hub APIs. `gorilla-mpt-7b-hf-v0` and `gorilla-falcon-7b-hf-v0` are Apache 2.0 licensed models (commercially usable) fine-tuned on MPT-7B and Falcon-7B respectively. We will release a model with all three combined with generic chat capability and community contributed APIs as soon as we can scale our serving infrastructure. You can run Gorilla locally from instructions in the `inference/` sub-directory, or we also provide a hosted Gorilla chat completion API (see Colab)! If you have any suggestions, or if you run into any issues please feel free to reach out to us either through Discord or email or raise a Github issue.\n\n```\ngorilla\n\u251c\u2500\u2500 data\n\u2502 \u251c\u2500\u2500 api (TF/HF/TH APIs used in generating apibench)\n\u2502 \u2502 \u251c\u2500\u2500 {api_name}_api.jsonl\n\u2502 \u251c\u2500\u2500 apibench (Evaluating LLM models) v-1.0\n\u2502 \u2502 \u251c\u2500\u2500 {api_name}_train.jsonl, {api_name}_eval.jsonl\n| |\u2500\u2500 apizoo (Contributed by the community - evolving)\n\u2502 | \u251c\u2500\u2500 username1.json\n\u2502 \u2502 \u251c\u2500\u2500 username2.json\n\u2502 \u2502 \u251c\u2500\u2500 ...\n\u251c\u2500\u2500 eval\n\u2502 \u251c\u2500\u2500 README.md\n\u2502 \u251c\u2500\u2500 get_llm_responses.py\n\u2502 \u251c\u2500\u2500 eval-scripts\n\u2502 \u2502 \u251c\u2500\u2500 ast_eval_{api_name}.py\n\u2502 \u251c\u2500\u2500 eval-data\n\u2502 \u2502 \u251c\u2500\u2500 questions\n\u2502 \u2502 \u2502 \u251c\u2500\u2500 API name\n\u2502 \u2502 \u2502 \u2502 \u251c\u2500\u2500 questions_{api_name}_{eval_metric}.jsonl\n\u2502 \u2502 \u251c\u2500\u2500 responses\n\u2502 \u2502 \u2502 \u251c\u2500\u2500 API name\n\u2502 \u2502 \u2502 \u2502 \u251c\u2500\u2500 responses_{api_name}_Gorilla_FT_{eval_metric}.jsonl\n\u2502 \u2502 \u2502 \u2502 \u251c\u2500\u2500 responses_{api_name}_Gorilla_RT_{eval_metric}.jsonl\n\u251c\u2500\u2500 inference\n\u2502 \u251c\u2500\u2500 README.md\n\u2502 \u251c\u2500\u2500 serve\n\u2502 \u2502 \u251c\u2500\u2500 gorilla_cli.py\n\u2502 \u2502 \u251c\u2500\u2500 conv_template.py\n\u251c\u2500\u2500 train (Coming Soon!)\n\n```\n\n", "sentence": [ [ "repository", "organization", "repository", "organization", "shown", ".", "-", "`", "data", "`", "folder", "contains", "evaluation", "apis", "`", "(", "apibench", ")", "`", "community", "contributed", "apis", ".", "-", "`", "eval", "`", "folder", "contains", "evaluation", "code", "well", "gorilla", "output", ".", "-", "`", "inference", "`", "folder", "contains", "inference", "code", "running", "gorilla", "locally", ".", "-", "<", "span", "style=", "''", "color", ":", "hr", "''", ">", "[", "coming", "soon", "!", "]", "<", "/span", ">", "`", "train", "`", "folder", "contains", "training", "code", "associated", "gorilla", "finetuning", ".", "dataset", "collection", ",", "1640", "api", "documentation", "`", "data/api", "`", ".", "also", "include", "`", "apibench", "`", "dataset", "created", "self-instruct", "`", "data/apibench", "`", ".", "evaluation", ",", "convert", "llm-friendly", "chat", "format", ",", "question", "`", "eval/eval-data/questions", "`", ",", "corresponding", "response", "`", "eval/eval-data/responses", "`", ".", "also", "included", "evaluation", "script", "`", "eval/eval-scripts", "`", ".", "would", "entirely", "sufficient", "train", "gorilla", ",", "reproduce", "result", ".", "please", "see", "[", "evaluation", "]", "(", "http", ":", "//github.com/shishirpatil/gorilla/tree/main/eval", ")", "detail", "use", "evaluation", "pipeline", ".", "additionally", ",", "released", "model", "weight", ".", "`", "gorilla-7b-hf-v0", "`", "let", "invoke", "925", "hugging", "face", "apis", ".", "similarly", ",", "`", "gorilla-7b-tf-v0", "`", "`", "gorilla-7b-th-v0", "`", "626", "(", "exhaustive", ")", "tensorflow", "v2", ",", "94", "(", "exhaustive", ")", "torch", "hub", "apis", ".", "`", "gorilla-mpt-7b-hf-v0", "`", "`", "gorilla-falcon-7b-hf-v0", "`", "apache", "2.0", "licensed", "model", "(", "commercially", "usable", ")", "fine-tuned", "mpt-7b", "falcon-7b", "respectively", ".", "release", "model", "three", "combined", "generic", "chat", "capability", "community", "contributed", "apis", "soon", "scale", "serving", "infrastructure", ".", "run", "gorilla", "locally", "instruction", "`", "inference/", "`", "sub-directory", ",", "also", "provide", "hosted", "gorilla", "chat", "completion", "api", "(", "see", "colab", ")", "!", "suggestion", ",", "run", "issue", "please", "feel", "free", "reach", "u", "either", "discord", "email", "raise", "github", "issue", ".", "``", "`", "gorilla", "\u251c\u2500\u2500", "data", "\u2502", "\u251c\u2500\u2500", "api", "(", "tf/hf/th", "apis", "used", "generating", "apibench", ")", "\u2502", "\u2502", "\u251c\u2500\u2500", "{", "api_name", "}", "_api.jsonl", "\u2502", "\u251c\u2500\u2500", "apibench", "(", "evaluating", "llm", "model", ")", "v-1.0", "\u2502", "\u2502", "\u251c\u2500\u2500", "{", "api_name", "}", "_train.jsonl", ",", "{", "api_name", "}", "_eval.jsonl", "|", "|\u2500\u2500", "apizoo", "(", "contributed", "community", "-", "evolving", ")", "\u2502", "|", "\u251c\u2500\u2500", "username1.json", "\u2502", "\u2502", "\u251c\u2500\u2500", "username2.json", "\u2502", "\u2502", "\u251c\u2500\u2500", "...", "\u251c\u2500\u2500", "eval", "\u2502", "\u251c\u2500\u2500", "readme.md", "\u2502", "\u251c\u2500\u2500", "get_llm_responses.py", "\u2502", "\u251c\u2500\u2500", "eval-scripts", "\u2502", "\u2502", "\u251c\u2500\u2500", "ast_eval_", "{", "api_name", "}", ".py", "\u2502", "\u251c\u2500\u2500", "eval-data", "\u2502", "\u2502", "\u251c\u2500\u2500", "question", "\u2502", "\u2502", "\u2502", "\u251c\u2500\u2500", "api", "name", "\u2502", "\u2502", "\u2502", "\u2502", "\u251c\u2500\u2500", "questions_", "{", "api_name", "}", "_", "{", "eval_metric", "}", ".jsonl", "\u2502", "\u2502", "\u251c\u2500\u2500", "response", "\u2502", "\u2502", "\u2502", "\u251c\u2500\u2500", "api", "name", "\u2502", "\u2502", "\u2502", "\u2502", "\u251c\u2500\u2500", "responses_", "{", "api_name", "}", "_gorilla_ft_", "{", "eval_metric", "}", ".jsonl", "\u2502", "\u2502", "\u2502", "\u2502", "\u251c\u2500\u2500", "responses_", "{", "api_name", "}", "_gorilla_rt_", "{", "eval_metric", "}", ".jsonl", "\u251c\u2500\u2500", "inference", "\u2502", "\u251c\u2500\u2500", "readme.md", "\u2502", "\u251c\u2500\u2500", "serve", "\u2502", "\u2502", "\u251c\u2500\u2500", "gorilla_cli.py", "\u2502", "\u2502", "\u251c\u2500\u2500", "conv_template.py", "\u251c\u2500\u2500", "train", "(", "coming", "soon", "!", ")", "``", "`" ], [ "repository organization repository organization shown .", "- ` data ` folder contains evaluation apis ` ( apibench ) ` community contributed apis .", "- ` eval ` folder contains evaluation code well gorilla output .", "- ` inference ` folder contains inference code running gorilla locally .", "- < span style= '' color : hr '' > [ coming soon !", "] < /span > ` train ` folder contains training code associated gorilla finetuning .", "dataset collection , 1640 api documentation ` data/api ` .", "also include ` apibench ` dataset created self-instruct ` data/apibench ` .", "evaluation , convert llm-friendly chat format , question ` eval/eval-data/questions ` , corresponding response ` eval/eval-data/responses ` .", "also included evaluation script ` eval/eval-scripts ` .", "would entirely sufficient train gorilla , reproduce result .", "please see [ evaluation ] ( http : //github.com/shishirpatil/gorilla/tree/main/eval ) detail use evaluation pipeline .", "additionally , released model weight .", "` gorilla-7b-hf-v0 ` let invoke 925 hugging face apis .", "similarly , ` gorilla-7b-tf-v0 ` ` gorilla-7b-th-v0 ` 626 ( exhaustive ) tensorflow v2 , 94 ( exhaustive ) torch hub apis .", "` gorilla-mpt-7b-hf-v0 ` ` gorilla-falcon-7b-hf-v0 ` apache 2.0 licensed model ( commercially usable ) fine-tuned mpt-7b falcon-7b respectively .", "release model three combined generic chat capability community contributed apis soon scale serving infrastructure .", "run gorilla locally instruction ` inference/ ` sub-directory , also provide hosted gorilla chat completion api ( see colab ) !", "suggestion , run issue please feel free reach u either discord email raise github issue .", "`` ` gorilla \u251c\u2500\u2500 data \u2502 \u251c\u2500\u2500 api ( tf/hf/th apis used generating apibench ) \u2502 \u2502 \u251c\u2500\u2500 { api_name } _api.jsonl \u2502 \u251c\u2500\u2500 apibench ( evaluating llm model ) v-1.0 \u2502 \u2502 \u251c\u2500\u2500 { api_name } _train.jsonl , { api_name } _eval.jsonl | |\u2500\u2500 apizoo ( contributed community - evolving ) \u2502 | \u251c\u2500\u2500 username1.json \u2502 \u2502 \u251c\u2500\u2500 username2.json \u2502 \u2502 \u251c\u2500\u2500 ... \u251c\u2500\u2500 eval \u2502 \u251c\u2500\u2500 readme.md \u2502 \u251c\u2500\u2500 get_llm_responses.py \u2502 \u251c\u2500\u2500 eval-scripts \u2502 \u2502 \u251c\u2500\u2500 ast_eval_ { api_name } .py \u2502 \u251c\u2500\u2500 eval-data \u2502 \u2502 \u251c\u2500\u2500 question \u2502 \u2502 \u2502 \u251c\u2500\u2500 api name \u2502 \u2502 \u2502 \u2502 \u251c\u2500\u2500 questions_ { api_name } _ { eval_metric } .jsonl \u2502 \u2502 \u251c\u2500\u2500 response \u2502 \u2502 \u2502 \u251c\u2500\u2500 api name \u2502 \u2502 \u2502 \u2502 \u251c\u2500\u2500 responses_ { api_name } _gorilla_ft_ { eval_metric } .jsonl \u2502 \u2502 \u2502 \u2502 \u251c\u2500\u2500 responses_ { api_name } _gorilla_rt_ { eval_metric } .jsonl \u251c\u2500\u2500 inference \u2502 \u251c\u2500\u2500 readme.md \u2502 \u251c\u2500\u2500 serve \u2502 \u2502 \u251c\u2500\u2500 gorilla_cli.py \u2502 \u2502 \u251c\u2500\u2500 conv_template.py \u251c\u2500\u2500 train ( coming soon ! )", "`` `" ] ], "token": [ [ "repository", "organization", "repository", "organization", "shown", ".", "-", "`", "data", "`", "folder", "contains", "evaluation", "apis", "`", "(", "apibench", ")", "`", "community", "contributed", "apis", ".", "-", "`", "eval", "`", "folder", "contains", "evaluation", "code", "well", "gorilla", "output", ".", "-", "`", "inference", "`", "folder", "contains", "inference", "code", "running", "gorilla", "locally", ".", "-", "<", "span", "style=", "''", "color", ":", "hr", "''", ">", "[", "coming", "soon", "!", "]", "<", "/span", ">", "`", "train", "`", "folder", "contains", "training", "code", "associated", "gorilla", "finetuning", ".", "dataset", "collection", ",", "1640", "api", "documentation", "`", "data/api", "`", ".", "also", "include", "`", "apibench", "`", "dataset", "created", "self-instruct", "`", "data/apibench", "`", ".", "evaluation", ",", "convert", "llm-friendly", "chat", "format", ",", "question", "`", "eval/eval-data/questions", "`", ",", "corresponding", "response", "`", "eval/eval-data/responses", "`", ".", "also", "included", "evaluation", "script", "`", "eval/eval-scripts", "`", ".", "would", "entirely", "sufficient", "train", "gorilla", ",", "reproduce", "result", ".", "please", "see", "[", "evaluation", "]", "(", "http", ":", "//github.com/shishirpatil/gorilla/tree/main/eval", ")", "detail", "use", "evaluation", "pipeline", ".", "additionally", ",", "released", "model", "weight", ".", "`", "gorilla-7b-hf-v0", "`", "let", "invoke", "925", "hugging", "face", "apis", ".", "similarly", ",", "`", "gorilla-7b-tf-v0", "`", "`", "gorilla-7b-th-v0", "`", "626", "(", "exhaustive", ")", "tensorflow", "v2", ",", "94", "(", "exhaustive", ")", "torch", "hub", "apis", ".", "`", "gorilla-mpt-7b-hf-v0", "`", "`", "gorilla-falcon-7b-hf-v0", "`", "apache", "2.0", "licensed", "model", "(", "commercially", "usable", ")", "fine-tuned", "mpt-7b", "falcon-7b", "respectively", ".", "release", "model", "three", "combined", "generic", "chat", "capability", "community", "contributed", "apis", "soon", "scale", "serving", "infrastructure", ".", "run", "gorilla", "locally", "instruction", "`", "inference/", "`", "sub-directory", ",", "also", "provide", "hosted", "gorilla", "chat", "completion", "api", "(", "see", "colab", ")", "!", "suggestion", ",", "run", "issue", "please", "feel", "free", "reach", "u", "either", "discord", "email", "raise", "github", "issue", ".", "``", "`", "gorilla", "\u251c\u2500\u2500", "data", "\u2502", "\u251c\u2500\u2500", "api", "(", "tf/hf/th", "apis", "used", "generating", "apibench", ")", "\u2502", "\u2502", "\u251c\u2500\u2500", "{", "api_name", "}", "_api.jsonl", "\u2502", "\u251c\u2500\u2500", "apibench", "(", "evaluating", "llm", "model", ")", "v-1.0", "\u2502", "\u2502", "\u251c\u2500\u2500", "{", "api_name", "}", "_train.jsonl", ",", "{", "api_name", "}", "_eval.jsonl", "|", "|\u2500\u2500", "apizoo", "(", "contributed", "community", "-", "evolving", ")", "\u2502", "|", "\u251c\u2500\u2500", "username1.json", "\u2502", "\u2502", "\u251c\u2500\u2500", "username2.json", "\u2502", "\u2502", "\u251c\u2500\u2500", "...", "\u251c\u2500\u2500", "eval", "\u2502", "\u251c\u2500\u2500", "readme.md", "\u2502", "\u251c\u2500\u2500", "get_llm_responses.py", "\u2502", "\u251c\u2500\u2500", "eval-scripts", "\u2502", "\u2502", "\u251c\u2500\u2500", "ast_eval_", "{", "api_name", "}", ".py", "\u2502", "\u251c\u2500\u2500", "eval-data", "\u2502", "\u2502", "\u251c\u2500\u2500", "question", "\u2502", "\u2502", "\u2502", "\u251c\u2500\u2500", "api", "name", "\u2502", "\u2502", "\u2502", "\u2502", "\u251c\u2500\u2500", "questions_", "{", "api_name", "}", "_", "{", "eval_metric", "}", ".jsonl", "\u2502", "\u2502", "\u251c\u2500\u2500", "response", "\u2502", "\u2502", "\u2502", "\u251c\u2500\u2500", "api", "name", "\u2502", "\u2502", "\u2502", "\u2502", "\u251c\u2500\u2500", "responses_", "{", "api_name", "}", "_gorilla_ft_", "{", "eval_metric", "}", ".jsonl", "\u2502", "\u2502", "\u2502", "\u2502", "\u251c\u2500\u2500", "responses_", "{", "api_name", "}", "_gorilla_rt_", "{", "eval_metric", "}", ".jsonl", "\u251c\u2500\u2500", "inference", "\u2502", "\u251c\u2500\u2500", "readme.md", "\u2502", "\u251c\u2500\u2500", "serve", "\u2502", "\u2502", "\u251c\u2500\u2500", "gorilla_cli.py", "\u2502", "\u2502", "\u251c\u2500\u2500", "conv_template.py", "\u251c\u2500\u2500", "train", "(", "coming", "soon", "!", ")", "``", "`" ], [ "repository organization repository organization shown .", "- ` data ` folder contains evaluation apis ` ( apibench ) ` community contributed apis .", "- ` eval ` folder contains evaluation code well gorilla output .", "- ` inference ` folder contains inference code running gorilla locally .", "- < span style= '' color : hr '' > [ coming soon !", "] < /span > ` train ` folder contains training code associated gorilla finetuning .", "dataset collection , 1640 api documentation ` data/api ` .", "also include ` apibench ` dataset created self-instruct ` data/apibench ` .", "evaluation , convert llm-friendly chat format , question ` eval/eval-data/questions ` , corresponding response ` eval/eval-data/responses ` .", "also included evaluation script ` eval/eval-scripts ` .", "would entirely sufficient train gorilla , reproduce result .", "please see [ evaluation ] ( http : //github.com/shishirpatil/gorilla/tree/main/eval ) detail use evaluation pipeline .", "additionally , released model weight .", "` gorilla-7b-hf-v0 ` let invoke 925 hugging face apis .", "similarly , ` gorilla-7b-tf-v0 ` ` gorilla-7b-th-v0 ` 626 ( exhaustive ) tensorflow v2 , 94 ( exhaustive ) torch hub apis .", "` gorilla-mpt-7b-hf-v0 ` ` gorilla-falcon-7b-hf-v0 ` apache 2.0 licensed model ( commercially usable ) fine-tuned mpt-7b falcon-7b respectively .", "release model three combined generic chat capability community contributed apis soon scale serving infrastructure .", "run gorilla locally instruction ` inference/ ` sub-directory , also provide hosted gorilla chat completion api ( see colab ) !", "suggestion , run issue please feel free reach u either discord email raise github issue .", "`` ` gorilla \u251c\u2500\u2500 data \u2502 \u251c\u2500\u2500 api ( tf/hf/th apis used generating apibench ) \u2502 \u2502 \u251c\u2500\u2500 { api_name } _api.jsonl \u2502 \u251c\u2500\u2500 apibench ( evaluating llm model ) v-1.0 \u2502 \u2502 \u251c\u2500\u2500 { api_name } _train.jsonl , { api_name } _eval.jsonl | |\u2500\u2500 apizoo ( contributed community - evolving ) \u2502 | \u251c\u2500\u2500 username1.json \u2502 \u2502 \u251c\u2500\u2500 username2.json \u2502 \u2502 \u251c\u2500\u2500 ... \u251c\u2500\u2500 eval \u2502 \u251c\u2500\u2500 readme.md \u2502 \u251c\u2500\u2500 get_llm_responses.py \u2502 \u251c\u2500\u2500 eval-scripts \u2502 \u2502 \u251c\u2500\u2500 ast_eval_ { api_name } .py \u2502 \u251c\u2500\u2500 eval-data \u2502 \u2502 \u251c\u2500\u2500 question \u2502 \u2502 \u2502 \u251c\u2500\u2500 api name \u2502 \u2502 \u2502 \u2502 \u251c\u2500\u2500 questions_ { api_name } _ { eval_metric } .jsonl \u2502 \u2502 \u251c\u2500\u2500 response \u2502 \u2502 \u2502 \u251c\u2500\u2500 api name \u2502 \u2502 \u2502 \u2502 \u251c\u2500\u2500 responses_ { api_name } _gorilla_ft_ { eval_metric } .jsonl \u2502 \u2502 \u2502 \u2502 \u251c\u2500\u2500 responses_ { api_name } _gorilla_rt_ { eval_metric } .jsonl \u251c\u2500\u2500 inference \u2502 \u251c\u2500\u2500 readme.md \u2502 \u251c\u2500\u2500 serve \u2502 \u2502 \u251c\u2500\u2500 gorilla_cli.py \u2502 \u2502 \u251c\u2500\u2500 conv_template.py \u251c\u2500\u2500 train ( coming soon ! )", "`` `" ] ], "level of complexity": -1 }, { "url": "https://github.com/ShishirPatil/gorilla", "readme_url": "https://raw.githubusercontent.com/ShishirPatil/gorilla/main/README.md", "topic": [ "api", "api-documentation", "chatgpt", "claude-api", "gpt-4-api", "llm", "openai-api", "openai-functions" ], "text": "Contributing Your API\nWe aim to build an open-source, one-stop-shop for all APIs, LLMs can interact with! Any suggestions and contributions are welcome! Please see the details on [how to contribute](https://github.com/ShishirPatil/gorilla/tree/main/data/README.md). THIS WILL ALWAYS REMAIN OPEN SOURCE.\n\n\n", "sentence": [ [ "contributing", "api", "aim", "build", "open-source", ",", "one-stop-shop", "apis", ",", "llm", "interact", "!", "suggestion", "contribution", "welcome", "!", "please", "see", "detail", "[", "contribute", "]", "(", "http", ":", "//github.com/shishirpatil/gorilla/tree/main/data/readme.md", ")", ".", "always", "remain", "open", "source", "." ], [ "contributing api aim build open-source , one-stop-shop apis , llm interact !", "suggestion contribution welcome !", "please see detail [ contribute ] ( http : //github.com/shishirpatil/gorilla/tree/main/data/readme.md ) .", "always remain open source ." ] ], "token": [ [ "contributing", "api", "aim", "build", "open-source", ",", "one-stop-shop", "apis", ",", "llm", "interact", "!", "suggestion", "contribution", "welcome", "!", "please", "see", "detail", "[", "contribute", "]", "(", "http", ":", "//github.com/shishirpatil/gorilla/tree/main/data/readme.md", ")", ".", "always", "remain", "open", "source", "." ], [ "contributing api aim build open-source , one-stop-shop apis , llm interact !", "suggestion contribution welcome !", "please see detail [ contribute ] ( http : //github.com/shishirpatil/gorilla/tree/main/data/readme.md ) .", "always remain open source ." ] ], "level of complexity": -1 }, { "url": "https://github.com/ShishirPatil/gorilla", "readme_url": "https://raw.githubusercontent.com/ShishirPatil/gorilla/main/README.md", "topic": [ "api", "api-documentation", "chatgpt", "claude-api", "gpt-4-api", "llm", "openai-api", "openai-functions" ], "text": "FAQ(s)\n\n1. I would like to use Gorilla commercially. Is there going to be a Apache 2.0 licensed version?\n\nYes! We now have models that you can use commercially without any obligations.\n\n\n2. Can we use Gorilla with Langchain, Toolformer, AutoGPT etc?\n\nAbsolutely! You've highlighted a great aspect of our tools. Gorilla is an end-to-end model, specifically tailored to serve correct API calls without requiring any additional coding. It's designed to work as part of a wider ecosystem and can be flexibly integrated with other tools.\n\nLangchain, is a versatile developer tool. Its \"agents\" can efficiently swap in any LLM, Gorilla included, making it a highly adaptable solution for various needs.\n\nAutoGPT, on the other hand, concentrates on the art of prompting GPT series models. It's worth noting that Gorilla, as a fully fine-tuned model, consistently shows remarkable accuracy, and lowers hallucination, outperforming GPT-4 in making specific API calls.\n\nNow, when it comes to ToolFormer, Toolformer zeroes in on a select set of tools, providing specialized functionalities. Gorilla, in contrast, has the capacity to manage thousands of API calls, offering a broader coverage over a more extensive range of tools.\n\nThe beauty of these tools truly shines when they collaborate, complementing each other's strengths and capabilities to create an even more powerful and comprehensive solution. This is where your contribution can make a difference. We enthusiastically welcome any inputs to further refine and enhance these tools. \n\n3. How to train your own Gorilla models? \n\nWe will release the training code as soon as we can get GPUs to test and finalize the pipeline. Given the demand for our hosted end-points, we have dedicated all of our GPUs to serve the models. If you would like to help with resources get in touch!\n\n\n", "sentence": [ [ "faq", "(", ")", "1", ".", "would", "like", "use", "gorilla", "commercially", ".", "going", "apache", "2.0", "licensed", "version", "?", "yes", "!", "model", "use", "commercially", "without", "obligation", ".", "2", ".", "use", "gorilla", "langchain", ",", "toolformer", ",", "autogpt", "etc", "?", "absolutely", "!", "'ve", "highlighted", "great", "aspect", "tool", ".", "gorilla", "end-to-end", "model", ",", "specifically", "tailored", "serve", "correct", "api", "call", "without", "requiring", "additional", "coding", ".", "'s", "designed", "work", "part", "wider", "ecosystem", "flexibly", "integrated", "tool", ".", "langchain", ",", "versatile", "developer", "tool", ".", "``", "agent", "''", "efficiently", "swap", "llm", ",", "gorilla", "included", ",", "making", "highly", "adaptable", "solution", "various", "need", ".", "autogpt", ",", "hand", ",", "concentrate", "art", "prompting", "gpt", "series", "model", ".", "'s", "worth", "noting", "gorilla", ",", "fully", "fine-tuned", "model", ",", "consistently", "show", "remarkable", "accuracy", ",", "lower", "hallucination", ",", "outperforming", "gpt-4", "making", "specific", "api", "call", ".", ",", "come", "toolformer", ",", "toolformer", "zero", "select", "set", "tool", ",", "providing", "specialized", "functionality", ".", "gorilla", ",", "contrast", ",", "capacity", "manage", "thousand", "api", "call", ",", "offering", "broader", "coverage", "extensive", "range", "tool", ".", "beauty", "tool", "truly", "shine", "collaborate", ",", "complementing", "'s", "strength", "capability", "create", "even", "powerful", "comprehensive", "solution", ".", "contribution", "make", "difference", ".", "enthusiastically", "welcome", "input", "refine", "enhance", "tool", ".", "3", ".", "train", "gorilla", "model", "?", "release", "training", "code", "soon", "get", "gpus", "test", "finalize", "pipeline", ".", "given", "demand", "hosted", "end-points", ",", "dedicated", "gpus", "serve", "model", ".", "would", "like", "help", "resource", "get", "touch", "!" ], [ "faq ( ) 1 .", "would like use gorilla commercially .", "going apache 2.0 licensed version ?", "yes !", "model use commercially without obligation .", "2 .", "use gorilla langchain , toolformer , autogpt etc ?", "absolutely !", "'ve highlighted great aspect tool .", "gorilla end-to-end model , specifically tailored serve correct api call without requiring additional coding .", "'s designed work part wider ecosystem flexibly integrated tool .", "langchain , versatile developer tool .", "`` agent '' efficiently swap llm , gorilla included , making highly adaptable solution various need .", "autogpt , hand , concentrate art prompting gpt series model .", "'s worth noting gorilla , fully fine-tuned model , consistently show remarkable accuracy , lower hallucination , outperforming gpt-4 making specific api call .", ", come toolformer , toolformer zero select set tool , providing specialized functionality .", "gorilla , contrast , capacity manage thousand api call , offering broader coverage extensive range tool .", "beauty tool truly shine collaborate , complementing 's strength capability create even powerful comprehensive solution .", "contribution make difference .", "enthusiastically welcome input refine enhance tool .", "3 .", "train gorilla model ?", "release training code soon get gpus test finalize pipeline .", "given demand hosted end-points , dedicated gpus serve model .", "would like help resource get touch !" ] ], "token": [ [ "faq", "(", ")", "1", ".", "would", "like", "use", "gorilla", "commercially", ".", "going", "apache", "2.0", "licensed", "version", "?", "yes", "!", "model", "use", "commercially", "without", "obligation", ".", "2", ".", "use", "gorilla", "langchain", ",", "toolformer", ",", "autogpt", "etc", "?", "absolutely", "!", "'ve", "highlighted", "great", "aspect", "tool", ".", "gorilla", "end-to-end", "model", ",", "specifically", "tailored", "serve", "correct", "api", "call", "without", "requiring", "additional", "coding", ".", "'s", "designed", "work", "part", "wider", "ecosystem", "flexibly", "integrated", "tool", ".", "langchain", ",", "versatile", "developer", "tool", ".", "``", "agent", "''", "efficiently", "swap", "llm", ",", "gorilla", "included", ",", "making", "highly", "adaptable", "solution", "various", "need", ".", "autogpt", ",", "hand", ",", "concentrate", "art", "prompting", "gpt", "series", "model", ".", "'s", "worth", "noting", "gorilla", ",", "fully", "fine-tuned", "model", ",", "consistently", "show", "remarkable", "accuracy", ",", "lower", "hallucination", ",", "outperforming", "gpt-4", "making", "specific", "api", "call", ".", ",", "come", "toolformer", ",", "toolformer", "zero", "select", "set", "tool", ",", "providing", "specialized", "functionality", ".", "gorilla", ",", "contrast", ",", "capacity", "manage", "thousand", "api", "call", ",", "offering", "broader", "coverage", "extensive", "range", "tool", ".", "beauty", "tool", "truly", "shine", "collaborate", ",", "complementing", "'s", "strength", "capability", "create", "even", "powerful", "comprehensive", "solution", ".", "contribution", "make", "difference", ".", "enthusiastically", "welcome", "input", "refine", "enhance", "tool", ".", "3", ".", "train", "gorilla", "model", "?", "release", "training", "code", "soon", "get", "gpus", "test", "finalize", "pipeline", ".", "given", "demand", "hosted", "end-points", ",", "dedicated", "gpus", "serve", "model", ".", "would", "like", "help", "resource", "get", "touch", "!" ], [ "faq ( ) 1 .", "would like use gorilla commercially .", "going apache 2.0 licensed version ?", "yes !", "model use commercially without obligation .", "2 .", "use gorilla langchain , toolformer , autogpt etc ?", "absolutely !", "'ve highlighted great aspect tool .", "gorilla end-to-end model , specifically tailored serve correct api call without requiring additional coding .", "'s designed work part wider ecosystem flexibly integrated tool .", "langchain , versatile developer tool .", "`` agent '' efficiently swap llm , gorilla included , making highly adaptable solution various need .", "autogpt , hand , concentrate art prompting gpt series model .", "'s worth noting gorilla , fully fine-tuned model , consistently show remarkable accuracy , lower hallucination , outperforming gpt-4 making specific api call .", ", come toolformer , toolformer zero select set tool , providing specialized functionality .", "gorilla , contrast , capacity manage thousand api call , offering broader coverage extensive range tool .", "beauty tool truly shine collaborate , complementing 's strength capability create even powerful comprehensive solution .", "contribution make difference .", "enthusiastically welcome input refine enhance tool .", "3 .", "train gorilla model ?", "release training code soon get gpus test finalize pipeline .", "given demand hosted end-points , dedicated gpus serve model .", "would like help resource get touch !" ] ], "level of complexity": -1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Quickstart\n\nBelow, we provide simple examples to show how to use Qwen-Chat with \ud83e\udd16 ModelScope and \ud83e\udd17 Transformers.\n\nYou can use our pre-built docker images to skip most of the environment setup steps, see Section [\"Using Pre-built Docker Images\"](#-docker) for more details. \n\nIf not using docker, please make sure you have setup the environment and installed the required packages. Make sure you meet the above requirements, and then install the dependent libraries.\n\n```bash\npip install -r requirements.txt\n```\n\nIf your device supports fp16 or bf16, we recommend installing [flash-attention](https://github.com/Dao-AILab/flash-attention) (**we support flash attention 2 now.**) for higher efficiency and lower memory usage. (**flash-attention is optional and the project can run normally without installing it**)\n\n```bash\ngit clone https://github.com/Dao-AILab/flash-attention\ncd flash-attention && pip install .\n", "sentence": [ [ "quickstart", ",", "provide", "simple", "example", "show", "use", "qwen-chat", "\ud83e\udd16", "modelscope", "\ud83e\udd17", "transformer", ".", "use", "pre-built", "docker", "image", "skip", "environment", "setup", "step", ",", "see", "section", "[", "``", "using", "pre-built", "docker", "image", "''", "]", "(", "#", "-docker", ")", "detail", ".", "using", "docker", ",", "please", "make", "sure", "setup", "environment", "installed", "required", "package", ".", "make", "sure", "meet", "requirement", ",", "install", "dependent", "library", ".", "``", "`", "bash", "pip", "install", "-r", "requirements.txt", "``", "`", "device", "support", "fp16", "bf16", ",", "recommend", "installing", "[", "flash-attention", "]", "(", "http", ":", "//github.com/dao-ailab/flash-attention", ")", "(", "*", "*", "support", "flash", "attention", "2", ".", "*", "*", ")", "higher", "efficiency", "lower", "memory", "usage", ".", "(", "*", "*", "flash-attention", "optional", "project", "run", "normally", "without", "installing", "*", "*", ")", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/dao-ailab/flash-attention", "cd", "flash-attention", "&", "&", "pip", "install", "." ], [ "quickstart , provide simple example show use qwen-chat \ud83e\udd16 modelscope \ud83e\udd17 transformer .", "use pre-built docker image skip environment setup step , see section [ `` using pre-built docker image '' ] ( # -docker ) detail .", "using docker , please make sure setup environment installed required package .", "make sure meet requirement , install dependent library .", "`` ` bash pip install -r requirements.txt `` ` device support fp16 bf16 , recommend installing [ flash-attention ] ( http : //github.com/dao-ailab/flash-attention ) ( * * support flash attention 2 .", "* * ) higher efficiency lower memory usage .", "( * * flash-attention optional project run normally without installing * * ) `` ` bash git clone http : //github.com/dao-ailab/flash-attention cd flash-attention & & pip install ." ] ], "token": [ [ "quickstart", ",", "provide", "simple", "example", "show", "use", "qwen-chat", "\ud83e\udd16", "modelscope", "\ud83e\udd17", "transformer", ".", "use", "pre-built", "docker", "image", "skip", "environment", "setup", "step", ",", "see", "section", "[", "``", "using", "pre-built", "docker", "image", "''", "]", "(", "#", "-docker", ")", "detail", ".", "using", "docker", ",", "please", "make", "sure", "setup", "environment", "installed", "required", "package", ".", "make", "sure", "meet", "requirement", ",", "install", "dependent", "library", ".", "``", "`", "bash", "pip", "install", "-r", "requirements.txt", "``", "`", "device", "support", "fp16", "bf16", ",", "recommend", "installing", "[", "flash-attention", "]", "(", "http", ":", "//github.com/dao-ailab/flash-attention", ")", "(", "*", "*", "support", "flash", "attention", "2", ".", "*", "*", ")", "higher", "efficiency", "lower", "memory", "usage", ".", "(", "*", "*", "flash-attention", "optional", "project", "run", "normally", "without", "installing", "*", "*", ")", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/dao-ailab/flash-attention", "cd", "flash-attention", "&", "&", "pip", "install", "." ], [ "quickstart , provide simple example show use qwen-chat \ud83e\udd16 modelscope \ud83e\udd17 transformer .", "use pre-built docker image skip environment setup step , see section [ `` using pre-built docker image '' ] ( # -docker ) detail .", "using docker , please make sure setup environment installed required package .", "make sure meet requirement , install dependent library .", "`` ` bash pip install -r requirements.txt `` ` device support fp16 bf16 , recommend installing [ flash-attention ] ( http : //github.com/dao-ailab/flash-attention ) ( * * support flash attention 2 .", "* * ) higher efficiency lower memory usage .", "( * * flash-attention optional project run normally without installing * * ) `` ` bash git clone http : //github.com/dao-ailab/flash-attention cd flash-attention & & pip install ." ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Below are optional. Installing them might be slow.\n", "sentence": [ [ "optional", ".", "installing", "might", "slow", "." ], [ "optional .", "installing might slow ." ] ], "token": [ [ "optional", ".", "installing", "might", "slow", "." ], [ "optional .", "installing might slow ." ] ], "level of complexity": -1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "pip install csrc/layer_norm\n", "sentence": [ [ "pip", "install", "csrc/layer_norm" ], [ "pip install csrc/layer_norm" ] ], "token": [ [ "pip", "install", "csrc/layer_norm" ], [ "pip install csrc/layer_norm" ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "pip install csrc/rotary\n```\n\nNow you can start with ModelScope or Transformers.\n\n", "sentence": [ [ "pip", "install", "csrc/rotary", "``", "`", "start", "modelscope", "transformer", "." ], [ "pip install csrc/rotary `` ` start modelscope transformer ." ] ], "token": [ [ "pip", "install", "csrc/rotary", "``", "`", "start", "modelscope", "transformer", "." ], [ "pip install csrc/rotary `` ` start modelscope transformer ." ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "DashScope\nThe most simple way to use Qwen through APIs is DashScope API service through Alibaba Cloud. We give an introduction to the usage. Additionally, we provide a script for you to deploy an OpenAI-style API on your own servers.\n\nDashScope is the large language model API service provided by Alibaba Cloud, which now supports Qwen. Note that the models behind DashScope are in-house versions temporarily without details provided. The services include `qwen-turbo` and `qwen-plus`, where the former one runs faster and the latter achieves better performance. For more information, visit the documentation [here](https://dashscope.aliyun.com).\n\nPlease head to the official website [link](https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key?spm=a2c4g.11186623.0.0.6c2774fahtfXdn) to create a DashScope account and obtain the API key (AK). We recommend setting the AK with an environment variable:\n```bash\nexport DASHSCOPE_API_KEY=\"YOUR_DASHSCOPE_API_KEY\"\n```\nThen please install the packages and click [here](https://help.aliyun.com/zh/dashscope/developer-reference/install-dashscope-sdk) for the documentation. If you use Python, you can install DashScope with pip:\n```bash\npip install dashscope\n```\nIf you use JAVA SDK, you can install it in this way:\n```xml\n\n\n com.alibaba\n dashscope-sdk-java\n the-latest-version\n\n```\nThe simplest way to use DashScope is the usage with messages, which is similar to OpenAI API. The example is demonstrated below:\n```python\nimport random\nfrom http import HTTPStatus\nfrom dashscope import Generation\n\n\ndef call_with_messages():\n messages = [{'role': 'system', 'content': 'You are a helpful assistant.'},\n {'role': 'user', 'content': '\u5982\u4f55\u505a\u897f\u7ea2\u67ff\u9e21\u86cb\uff1f'}]\n gen = Generation()\n response = gen.call(\n Generation.Models.qwen_turbo,\n messages=messages,\n seed=random.randint(1, 10000), ", "sentence": [ [ "dashscope", "simple", "way", "use", "qwen", "apis", "dashscope", "api", "service", "alibaba", "cloud", ".", "give", "introduction", "usage", ".", "additionally", ",", "provide", "script", "deploy", "openai-style", "api", "server", ".", "dashscope", "large", "language", "model", "api", "service", "provided", "alibaba", "cloud", ",", "support", "qwen", ".", "note", "model", "behind", "dashscope", "in-house", "version", "temporarily", "without", "detail", "provided", ".", "service", "include", "`", "qwen-turbo", "`", "`", "qwen-plus", "`", ",", "former", "one", "run", "faster", "latter", "achieves", "better", "performance", ".", "information", ",", "visit", "documentation", "[", "]", "(", "http", ":", "//dashscope.aliyun.com", ")", ".", "please", "head", "official", "website", "[", "link", "]", "(", "http", ":", "//help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key", "?", "spm=a2c4g.11186623.0.0.6c2774fahtfxdn", ")", "create", "dashscope", "account", "obtain", "api", "key", "(", "ak", ")", ".", "recommend", "setting", "ak", "environment", "variable", ":", "``", "`", "bash", "export", "dashscope_api_key=", "''", "your_dashscope_api_key", "''", "``", "`", "please", "install", "package", "click", "[", "]", "(", "http", ":", "//help.aliyun.com/zh/dashscope/developer-reference/install-dashscope-sdk", ")", "documentation", ".", "use", "python", ",", "install", "dashscope", "pip", ":", "``", "`", "bash", "pip", "install", "dashscope", "``", "`", "use", "java", "sdk", ",", "install", "way", ":", "``", "`", "xml", "<", "!", "--", "http", ":", "//mvnrepository.com/artifact/com.alibaba/dashscope-sdk-java", "--", ">", "<", "dependency", ">", "<", "groupid", ">", "com.alibaba", "<", "/groupid", ">", "<", "artifactid", ">", "dashscope-sdk-java", "<", "/artifactid", ">", "<", "version", ">", "the-latest-version", "<", "/version", ">", "<", "/dependency", ">", "``", "`", "simplest", "way", "use", "dashscope", "usage", "message", ",", "similar", "openai", "api", ".", "example", "demonstrated", ":", "``", "`", "python", "import", "random", "http", "import", "httpstatus", "dashscope", "import", "generation", "def", "call_with_messages", "(", ")", ":", "message", "=", "[", "{", "'role", "'", ":", "'system", "'", ",", "'content", "'", ":", "'you", "helpful", "assistant", ".", "'", "}", ",", "{", "'role", "'", ":", "'user", "'", ",", "'content", "'", ":", "'\u5982\u4f55\u505a\u897f\u7ea2\u67ff\u9e21\u86cb\uff1f", "'", "}", "]", "gen", "=", "generation", "(", ")", "response", "=", "gen.call", "(", "generation.models.qwen_turbo", ",", "messages=messages", ",", "seed=random.randint", "(", "1", ",", "10000", ")", "," ], [ "dashscope simple way use qwen apis dashscope api service alibaba cloud .", "give introduction usage .", "additionally , provide script deploy openai-style api server .", "dashscope large language model api service provided alibaba cloud , support qwen .", "note model behind dashscope in-house version temporarily without detail provided .", "service include ` qwen-turbo ` ` qwen-plus ` , former one run faster latter achieves better performance .", "information , visit documentation [ ] ( http : //dashscope.aliyun.com ) .", "please head official website [ link ] ( http : //help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key ? spm=a2c4g.11186623.0.0.6c2774fahtfxdn ) create dashscope account obtain api key ( ak ) .", "recommend setting ak environment variable : `` ` bash export dashscope_api_key= '' your_dashscope_api_key '' `` ` please install package click [ ] ( http : //help.aliyun.com/zh/dashscope/developer-reference/install-dashscope-sdk ) documentation .", "use python , install dashscope pip : `` ` bash pip install dashscope `` ` use java sdk , install way : `` ` xml < ! -- http : //mvnrepository.com/artifact/com.alibaba/dashscope-sdk-java -- > < dependency > < groupid > com.alibaba < /groupid > < artifactid > dashscope-sdk-java < /artifactid > < version > the-latest-version < /version > < /dependency > `` ` simplest way use dashscope usage message , similar openai api .", "example demonstrated : `` ` python import random http import httpstatus dashscope import generation def call_with_messages ( ) : message = [ { 'role ' : 'system ' , 'content ' : 'you helpful assistant .", "' } , { 'role ' : 'user ' , 'content ' : '\u5982\u4f55\u505a\u897f\u7ea2\u67ff\u9e21\u86cb\uff1f ' } ] gen = generation ( ) response = gen.call ( generation.models.qwen_turbo , messages=messages , seed=random.randint ( 1 , 10000 ) ," ] ], "token": [ [ "dashscope", "simple", "way", "use", "qwen", "apis", "dashscope", "api", "service", "alibaba", "cloud", ".", "give", "introduction", "usage", ".", "additionally", ",", "provide", "script", "deploy", "openai-style", "api", "server", ".", "dashscope", "large", "language", "model", "api", "service", "provided", "alibaba", "cloud", ",", "support", "qwen", ".", "note", "model", "behind", "dashscope", "in-house", "version", "temporarily", "without", "detail", "provided", ".", "service", "include", "`", "qwen-turbo", "`", "`", "qwen-plus", "`", ",", "former", "one", "run", "faster", "latter", "achieves", "better", "performance", ".", "information", ",", "visit", "documentation", "[", "]", "(", "http", ":", "//dashscope.aliyun.com", ")", ".", "please", "head", "official", "website", "[", "link", "]", "(", "http", ":", "//help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key", "?", "spm=a2c4g.11186623.0.0.6c2774fahtfxdn", ")", "create", "dashscope", "account", "obtain", "api", "key", "(", "ak", ")", ".", "recommend", "setting", "ak", "environment", "variable", ":", "``", "`", "bash", "export", "dashscope_api_key=", "''", "your_dashscope_api_key", "''", "``", "`", "please", "install", "package", "click", "[", "]", "(", "http", ":", "//help.aliyun.com/zh/dashscope/developer-reference/install-dashscope-sdk", ")", "documentation", ".", "use", "python", ",", "install", "dashscope", "pip", ":", "``", "`", "bash", "pip", "install", "dashscope", "``", "`", "use", "java", "sdk", ",", "install", "way", ":", "``", "`", "xml", "<", "!", "--", "http", ":", "//mvnrepository.com/artifact/com.alibaba/dashscope-sdk-java", "--", ">", "<", "dependency", ">", "<", "groupid", ">", "com.alibaba", "<", "/groupid", ">", "<", "artifactid", ">", "dashscope-sdk-java", "<", "/artifactid", ">", "<", "version", ">", "the-latest-version", "<", "/version", ">", "<", "/dependency", ">", "``", "`", "simplest", "way", "use", "dashscope", "usage", "message", ",", "similar", "openai", "api", ".", "example", "demonstrated", ":", "``", "`", "python", "import", "random", "http", "import", "httpstatus", "dashscope", "import", "generation", "def", "call_with_messages", "(", ")", ":", "message", "=", "[", "{", "'role", "'", ":", "'system", "'", ",", "'content", "'", ":", "'you", "helpful", "assistant", ".", "'", "}", ",", "{", "'role", "'", ":", "'user", "'", ",", "'content", "'", ":", "'\u5982\u4f55\u505a\u897f\u7ea2\u67ff\u9e21\u86cb\uff1f", "'", "}", "]", "gen", "=", "generation", "(", ")", "response", "=", "gen.call", "(", "generation.models.qwen_turbo", ",", "messages=messages", ",", "seed=random.randint", "(", "1", ",", "10000", ")", "," ], [ "dashscope simple way use qwen apis dashscope api service alibaba cloud .", "give introduction usage .", "additionally , provide script deploy openai-style api server .", "dashscope large language model api service provided alibaba cloud , support qwen .", "note model behind dashscope in-house version temporarily without detail provided .", "service include ` qwen-turbo ` ` qwen-plus ` , former one run faster latter achieves better performance .", "information , visit documentation [ ] ( http : //dashscope.aliyun.com ) .", "please head official website [ link ] ( http : //help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key ? spm=a2c4g.11186623.0.0.6c2774fahtfxdn ) create dashscope account obtain api key ( ak ) .", "recommend setting ak environment variable : `` ` bash export dashscope_api_key= '' your_dashscope_api_key '' `` ` please install package click [ ] ( http : //help.aliyun.com/zh/dashscope/developer-reference/install-dashscope-sdk ) documentation .", "use python , install dashscope pip : `` ` bash pip install dashscope `` ` use java sdk , install way : `` ` xml < ! -- http : //mvnrepository.com/artifact/com.alibaba/dashscope-sdk-java -- > < dependency > < groupid > com.alibaba < /groupid > < artifactid > dashscope-sdk-java < /artifactid > < version > the-latest-version < /version > < /dependency > `` ` simplest way use dashscope usage message , similar openai api .", "example demonstrated : `` ` python import random http import httpstatus dashscope import generation def call_with_messages ( ) : message = [ { 'role ' : 'system ' , 'content ' : 'you helpful assistant .", "' } , { 'role ' : 'user ' , 'content ' : '\u5982\u4f55\u505a\u897f\u7ea2\u67ff\u9e21\u86cb\uff1f ' } ] gen = generation ( ) response = gen.call ( generation.models.qwen_turbo , messages=messages , seed=random.randint ( 1 , 10000 ) ," ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "GPTQ\n\nWe provide a solution based on [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ), and release the Int4 and Int8 quantized models, which achieve nearly lossless model effects but improved performance on both memory costs and inference speed.\n\nHere we demonstrate how to use our provided quantized models for inference. Before you start, make sure you meet the requirements of auto-gptq (e.g., torch 2.0 and above, transformers 4.32.0 and above, etc.) and install the required packages:\n\n```bash\npip install auto-gptq optimum\n```\n\nIf you meet problems installing `auto-gptq`, we advise you to check out the official [repo](https://github.com/PanQiWei/AutoGPTQ) to find a wheel.\n\n> Note: The pre-compiled `auto-gptq` packages strongly depend on the version of `torch` and its CUDA version. Moreover, due to recent update, \n> you may also encounter unsupported version errors from `transformers`, `optimum`, or `peft`.\n> We recommend using the latest versions meeting the following requirements:\n> - torch==2.1 auto-gptq>=0.5.1 transformers>=4.35.0 optimum>=1.14.0 peft>=0.6.1\n> - torch>=2.0,<2.1 auto-gptq<0.5.0 transformers<4.35.0 optimum<1.14.0 peft>=0.5.0,<0.6.0\n\nThen you can load the quantized model easily and run inference as same as usual:\n\n```python\n", "sentence": [ [ "gptq", "provide", "solution", "based", "[", "autogptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", ",", "release", "int4", "int8", "quantized", "model", ",", "achieve", "nearly", "lossless", "model", "effect", "improved", "performance", "memory", "cost", "inference", "speed", ".", "demonstrate", "use", "provided", "quantized", "model", "inference", ".", "start", ",", "make", "sure", "meet", "requirement", "auto-gptq", "(", "e.g.", ",", "torch", "2.0", ",", "transformer", "4.32.0", ",", "etc", ".", ")", "install", "required", "package", ":", "``", "`", "bash", "pip", "install", "auto-gptq", "optimum", "``", "`", "meet", "problem", "installing", "`", "auto-gptq", "`", ",", "advise", "check", "official", "[", "repo", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "find", "wheel", ".", ">", "note", ":", "pre-compiled", "`", "auto-gptq", "`", "package", "strongly", "depend", "version", "`", "torch", "`", "cuda", "version", ".", "moreover", ",", "due", "recent", "update", ",", ">", "may", "also", "encounter", "unsupported", "version", "error", "`", "transformer", "`", ",", "`", "optimum", "`", ",", "`", "peft", "`", ".", ">", "recommend", "using", "latest", "version", "meeting", "following", "requirement", ":", ">", "-", "torch==2.1", "auto-gptq", ">", "=0.5.1", "transformer", ">", "=4.35.0", "optimum", ">", "=1.14.0", "peft", ">", "=0.6.1", ">", "-", "torch", ">", "=2.0", ",", "<", "2.1", "auto-gptq", "<", "0.5.0", "transformer", "<", "4.35.0", "optimum", "<", "1.14.0", "peft", ">", "=0.5.0", ",", "<", "0.6.0", "load", "quantized", "model", "easily", "run", "inference", "usual", ":", "``", "`", "python" ], [ "gptq provide solution based [ autogptq ] ( http : //github.com/panqiwei/autogptq ) , release int4 int8 quantized model , achieve nearly lossless model effect improved performance memory cost inference speed .", "demonstrate use provided quantized model inference .", "start , make sure meet requirement auto-gptq ( e.g. , torch 2.0 , transformer 4.32.0 , etc . )", "install required package : `` ` bash pip install auto-gptq optimum `` ` meet problem installing ` auto-gptq ` , advise check official [ repo ] ( http : //github.com/panqiwei/autogptq ) find wheel .", "> note : pre-compiled ` auto-gptq ` package strongly depend version ` torch ` cuda version .", "moreover , due recent update , > may also encounter unsupported version error ` transformer ` , ` optimum ` , ` peft ` .", "> recommend using latest version meeting following requirement : > - torch==2.1 auto-gptq > =0.5.1 transformer > =4.35.0 optimum > =1.14.0 peft > =0.6.1 > - torch > =2.0 , < 2.1 auto-gptq < 0.5.0 transformer < 4.35.0 optimum < 1.14.0 peft > =0.5.0 , < 0.6.0 load quantized model easily run inference usual : `` ` python" ] ], "token": [ [ "gptq", "provide", "solution", "based", "[", "autogptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", ",", "release", "int4", "int8", "quantized", "model", ",", "achieve", "nearly", "lossless", "model", "effect", "improved", "performance", "memory", "cost", "inference", "speed", ".", "demonstrate", "use", "provided", "quantized", "model", "inference", ".", "start", ",", "make", "sure", "meet", "requirement", "auto-gptq", "(", "e.g.", ",", "torch", "2.0", ",", "transformer", "4.32.0", ",", "etc", ".", ")", "install", "required", "package", ":", "``", "`", "bash", "pip", "install", "auto-gptq", "optimum", "``", "`", "meet", "problem", "installing", "`", "auto-gptq", "`", ",", "advise", "check", "official", "[", "repo", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "find", "wheel", ".", ">", "note", ":", "pre-compiled", "`", "auto-gptq", "`", "package", "strongly", "depend", "version", "`", "torch", "`", "cuda", "version", ".", "moreover", ",", "due", "recent", "update", ",", ">", "may", "also", "encounter", "unsupported", "version", "error", "`", "transformer", "`", ",", "`", "optimum", "`", ",", "`", "peft", "`", ".", ">", "recommend", "using", "latest", "version", "meeting", "following", "requirement", ":", ">", "-", "torch==2.1", "auto-gptq", ">", "=0.5.1", "transformer", ">", "=4.35.0", "optimum", ">", "=1.14.0", "peft", ">", "=0.6.1", ">", "-", "torch", ">", "=2.0", ",", "<", "2.1", "auto-gptq", "<", "0.5.0", "transformer", "<", "4.35.0", "optimum", "<", "1.14.0", "peft", ">", "=0.5.0", ",", "<", "0.6.0", "load", "quantized", "model", "easily", "run", "inference", "usual", ":", "``", "`", "python" ], [ "gptq provide solution based [ autogptq ] ( http : //github.com/panqiwei/autogptq ) , release int4 int8 quantized model , achieve nearly lossless model effect improved performance memory cost inference speed .", "demonstrate use provided quantized model inference .", "start , make sure meet requirement auto-gptq ( e.g. , torch 2.0 , transformer 4.32.0 , etc . )", "install required package : `` ` bash pip install auto-gptq optimum `` ` meet problem installing ` auto-gptq ` , advise check official [ repo ] ( http : //github.com/panqiwei/autogptq ) find wheel .", "> note : pre-compiled ` auto-gptq ` package strongly depend version ` torch ` cuda version .", "moreover , due recent update , > may also encounter unsupported version error ` transformer ` , ` optimum ` , ` peft ` .", "> recommend using latest version meeting following requirement : > - torch==2.1 auto-gptq > =0.5.1 transformer > =4.35.0 optimum > =1.14.0 peft > =0.6.1 > - torch > =2.0 , < 2.1 auto-gptq < 0.5.0 transformer < 4.35.0 optimum < 1.14.0 peft > =0.5.0 , < 0.6.0 load quantized model easily run inference usual : `` ` python" ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Usage\nNow we provide the official training script, `finetune.py`, for users to finetune the pretrained model for downstream applications in a simple fashion. Additionally, we provide shell scripts to launch finetuning with no worries. This script supports the training with [DeepSpeed](https://github.com/microsoft/DeepSpeed) and [FSDP](https://engineering.fb.com/2021/07/15/open-source/fsdp/). The shell scripts that we provide use DeepSpeed (Note: this may have conflicts with the latest version of pydantic and you should use make sure `pydantic<2.0`) and Peft. You can install them by:\n```bash\npip install peft deepspeed\n```\n\nTo prepare your training data, you need to put all the samples into a list and save it to a json file. Each sample is a dictionary consisting of an id and a list for conversation. Below is a simple example list with 1 sample:\n```json\n[\n {\n \"id\": \"identity_0\",\n \"conversations\": [\n {\n \"from\": \"user\",\n \"value\": \"\u4f60\u597d\"\n },\n {\n \"from\": \"assistant\",\n \"value\": \"\u6211\u662f\u4e00\u4e2a\u8bed\u8a00\u6a21\u578b\uff0c\u6211\u53eb\u901a\u4e49\u5343\u95ee\u3002\"\n }\n ]\n }\n]\n```\n\nAfter data preparation, you can use the provided shell scripts to run finetuning. Remember to specify the path to the data file, `$DATA`.\n\nThe finetuning scripts allow you to perform:\n- Full-parameter finetuning\n- LoRA\n- Q-LoRA\n\nFull-parameter finetuning requires updating all parameters in the whole training process. To launch your training, run the following script:\n\n```bash\n", "sentence": [ [ "usage", "provide", "official", "training", "script", ",", "`", "finetune.py", "`", ",", "user", "finetune", "pretrained", "model", "downstream", "application", "simple", "fashion", ".", "additionally", ",", "provide", "shell", "script", "launch", "finetuning", "worry", ".", "script", "support", "training", "[", "deepspeed", "]", "(", "http", ":", "//github.com/microsoft/deepspeed", ")", "[", "fsdp", "]", "(", "http", ":", "//engineering.fb.com/2021/07/15/open-source/fsdp/", ")", ".", "shell", "script", "provide", "use", "deepspeed", "(", "note", ":", "may", "conflict", "latest", "version", "pydantic", "use", "make", "sure", "`", "pydantic", "<", "2.0", "`", ")", "peft", ".", "install", ":", "``", "`", "bash", "pip", "install", "peft", "deepspeed", "``", "`", "prepare", "training", "data", ",", "need", "put", "sample", "list", "save", "json", "file", ".", "sample", "dictionary", "consisting", "id", "list", "conversation", ".", "simple", "example", "list", "1", "sample", ":", "``", "`", "json", "[", "{", "``", "id", "''", ":", "``", "identity_0", "''", ",", "``", "conversation", "''", ":", "[", "{", "``", "''", ":", "``", "user", "''", ",", "``", "value", "''", ":", "``", "\u4f60\u597d", "''", "}", ",", "{", "``", "''", ":", "``", "assistant", "''", ",", "``", "value", "''", ":", "``", "\u6211\u662f\u4e00\u4e2a\u8bed\u8a00\u6a21\u578b\uff0c\u6211\u53eb\u901a\u4e49\u5343\u95ee\u3002", "''", "}", "]", "}", "]", "``", "`", "data", "preparation", ",", "use", "provided", "shell", "script", "run", "finetuning", ".", "remember", "specify", "path", "data", "file", ",", "`", "$", "data", "`", ".", "finetuning", "script", "allow", "perform", ":", "-", "full-parameter", "finetuning", "-", "lora", "-", "q-lora", "full-parameter", "finetuning", "requires", "updating", "parameter", "whole", "training", "process", ".", "launch", "training", ",", "run", "following", "script", ":", "``", "`", "bash" ], [ "usage provide official training script , ` finetune.py ` , user finetune pretrained model downstream application simple fashion .", "additionally , provide shell script launch finetuning worry .", "script support training [ deepspeed ] ( http : //github.com/microsoft/deepspeed ) [ fsdp ] ( http : //engineering.fb.com/2021/07/15/open-source/fsdp/ ) .", "shell script provide use deepspeed ( note : may conflict latest version pydantic use make sure ` pydantic < 2.0 ` ) peft .", "install : `` ` bash pip install peft deepspeed `` ` prepare training data , need put sample list save json file .", "sample dictionary consisting id list conversation .", "simple example list 1 sample : `` ` json [ { `` id '' : `` identity_0 '' , `` conversation '' : [ { `` '' : `` user '' , `` value '' : `` \u4f60\u597d '' } , { `` '' : `` assistant '' , `` value '' : `` \u6211\u662f\u4e00\u4e2a\u8bed\u8a00\u6a21\u578b\uff0c\u6211\u53eb\u901a\u4e49\u5343\u95ee\u3002 '' } ] } ] `` ` data preparation , use provided shell script run finetuning .", "remember specify path data file , ` $ data ` .", "finetuning script allow perform : - full-parameter finetuning - lora - q-lora full-parameter finetuning requires updating parameter whole training process .", "launch training , run following script : `` ` bash" ] ], "token": [ [ "usage", "provide", "official", "training", "script", ",", "`", "finetune.py", "`", ",", "user", "finetune", "pretrained", "model", "downstream", "application", "simple", "fashion", ".", "additionally", ",", "provide", "shell", "script", "launch", "finetuning", "worry", ".", "script", "support", "training", "[", "deepspeed", "]", "(", "http", ":", "//github.com/microsoft/deepspeed", ")", "[", "fsdp", "]", "(", "http", ":", "//engineering.fb.com/2021/07/15/open-source/fsdp/", ")", ".", "shell", "script", "provide", "use", "deepspeed", "(", "note", ":", "may", "conflict", "latest", "version", "pydantic", "use", "make", "sure", "`", "pydantic", "<", "2.0", "`", ")", "peft", ".", "install", ":", "``", "`", "bash", "pip", "install", "peft", "deepspeed", "``", "`", "prepare", "training", "data", ",", "need", "put", "sample", "list", "save", "json", "file", ".", "sample", "dictionary", "consisting", "id", "list", "conversation", ".", "simple", "example", "list", "1", "sample", ":", "``", "`", "json", "[", "{", "``", "id", "''", ":", "``", "identity_0", "''", ",", "``", "conversation", "''", ":", "[", "{", "``", "''", ":", "``", "user", "''", ",", "``", "value", "''", ":", "``", "\u4f60\u597d", "''", "}", ",", "{", "``", "''", ":", "``", "assistant", "''", ",", "``", "value", "''", ":", "``", "\u6211\u662f\u4e00\u4e2a\u8bed\u8a00\u6a21\u578b\uff0c\u6211\u53eb\u901a\u4e49\u5343\u95ee\u3002", "''", "}", "]", "}", "]", "``", "`", "data", "preparation", ",", "use", "provided", "shell", "script", "run", "finetuning", ".", "remember", "specify", "path", "data", "file", ",", "`", "$", "data", "`", ".", "finetuning", "script", "allow", "perform", ":", "-", "full-parameter", "finetuning", "-", "lora", "-", "q-lora", "full-parameter", "finetuning", "requires", "updating", "parameter", "whole", "training", "process", ".", "launch", "training", ",", "run", "following", "script", ":", "``", "`", "bash" ], [ "usage provide official training script , ` finetune.py ` , user finetune pretrained model downstream application simple fashion .", "additionally , provide shell script launch finetuning worry .", "script support training [ deepspeed ] ( http : //github.com/microsoft/deepspeed ) [ fsdp ] ( http : //engineering.fb.com/2021/07/15/open-source/fsdp/ ) .", "shell script provide use deepspeed ( note : may conflict latest version pydantic use make sure ` pydantic < 2.0 ` ) peft .", "install : `` ` bash pip install peft deepspeed `` ` prepare training data , need put sample list save json file .", "sample dictionary consisting id list conversation .", "simple example list 1 sample : `` ` json [ { `` id '' : `` identity_0 '' , `` conversation '' : [ { `` '' : `` user '' , `` value '' : `` \u4f60\u597d '' } , { `` '' : `` assistant '' , `` value '' : `` \u6211\u662f\u4e00\u4e2a\u8bed\u8a00\u6a21\u578b\uff0c\u6211\u53eb\u901a\u4e49\u5343\u95ee\u3002 '' } ] } ] `` ` data preparation , use provided shell script run finetuning .", "remember specify path data file , ` $ data ` .", "finetuning script allow perform : - full-parameter finetuning - lora - q-lora full-parameter finetuning requires updating parameter whole training process .", "launch training , run following script : `` ` bash" ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Distributed training. We do not provide single-GPU training script as the insufficient GPU memory will break down the training.\nbash finetune/finetune_ds.sh\n```\n\nRemember to specify the correct model name or path, the data path, as well as the output directory in the shell scripts. Another thing to notice is that we use DeepSpeed ZeRO 3 in this script. If you want to make changes, just remove the argument `--deepspeed` or make changes in the DeepSpeed configuration json file based on your requirements. Additionally, this script supports mixed-precision training, and thus you can use `--bf16 True` or `--fp16 True`. Remember to use DeepSpeed when you use fp16 due to mixed precision training. Empirically we advise you to use bf16 to make your training consistent with our pretraining and alignment if your machine supports bf16, and thus we use it by default.\n\nSimilarly, to run LoRA, use another script to run as shown below. Before you start, make sure that you have installed `peft`. Also, you need to specify your paths to your model, data, and output. We advise you to use absolute path for your pretrained model. This is because LoRA only saves the adapter and the absolute path in the adapter configuration json file is used for finding out the pretrained model to load. Also, this script support both bf16 and fp16.\n\n```bash\n", "sentence": [ [ "distributed", "training", ".", "provide", "single-gpu", "training", "script", "insufficient", "gpu", "memory", "break", "training", ".", "bash", "finetune/finetune_ds.sh", "``", "`", "remember", "specify", "correct", "model", "name", "path", ",", "data", "path", ",", "well", "output", "directory", "shell", "script", ".", "another", "thing", "notice", "use", "deepspeed", "zero", "3", "script", ".", "want", "make", "change", ",", "remove", "argument", "`", "--", "deepspeed", "`", "make", "change", "deepspeed", "configuration", "json", "file", "based", "requirement", ".", "additionally", ",", "script", "support", "mixed-precision", "training", ",", "thus", "use", "`", "--", "bf16", "true", "`", "`", "--", "fp16", "true", "`", ".", "remember", "use", "deepspeed", "use", "fp16", "due", "mixed", "precision", "training", ".", "empirically", "advise", "use", "bf16", "make", "training", "consistent", "pretraining", "alignment", "machine", "support", "bf16", ",", "thus", "use", "default", ".", "similarly", ",", "run", "lora", ",", "use", "another", "script", "run", "shown", ".", "start", ",", "make", "sure", "installed", "`", "peft", "`", ".", "also", ",", "need", "specify", "path", "model", ",", "data", ",", "output", ".", "advise", "use", "absolute", "path", "pretrained", "model", ".", "lora", "save", "adapter", "absolute", "path", "adapter", "configuration", "json", "file", "used", "finding", "pretrained", "model", "load", ".", "also", ",", "script", "support", "bf16", "fp16", ".", "``", "`", "bash" ], [ "distributed training .", "provide single-gpu training script insufficient gpu memory break training .", "bash finetune/finetune_ds.sh `` ` remember specify correct model name path , data path , well output directory shell script .", "another thing notice use deepspeed zero 3 script .", "want make change , remove argument ` -- deepspeed ` make change deepspeed configuration json file based requirement .", "additionally , script support mixed-precision training , thus use ` -- bf16 true ` ` -- fp16 true ` .", "remember use deepspeed use fp16 due mixed precision training .", "empirically advise use bf16 make training consistent pretraining alignment machine support bf16 , thus use default .", "similarly , run lora , use another script run shown .", "start , make sure installed ` peft ` .", "also , need specify path model , data , output .", "advise use absolute path pretrained model .", "lora save adapter absolute path adapter configuration json file used finding pretrained model load .", "also , script support bf16 fp16 .", "`` ` bash" ] ], "token": [ [ "distributed", "training", ".", "provide", "single-gpu", "training", "script", "insufficient", "gpu", "memory", "break", "training", ".", "bash", "finetune/finetune_ds.sh", "``", "`", "remember", "specify", "correct", "model", "name", "path", ",", "data", "path", ",", "well", "output", "directory", "shell", "script", ".", "another", "thing", "notice", "use", "deepspeed", "zero", "3", "script", ".", "want", "make", "change", ",", "remove", "argument", "`", "--", "deepspeed", "`", "make", "change", "deepspeed", "configuration", "json", "file", "based", "requirement", ".", "additionally", ",", "script", "support", "mixed-precision", "training", ",", "thus", "use", "`", "--", "bf16", "true", "`", "`", "--", "fp16", "true", "`", ".", "remember", "use", "deepspeed", "use", "fp16", "due", "mixed", "precision", "training", ".", "empirically", "advise", "use", "bf16", "make", "training", "consistent", "pretraining", "alignment", "machine", "support", "bf16", ",", "thus", "use", "default", ".", "similarly", ",", "run", "lora", ",", "use", "another", "script", "run", "shown", ".", "start", ",", "make", "sure", "installed", "`", "peft", "`", ".", "also", ",", "need", "specify", "path", "model", ",", "data", ",", "output", ".", "advise", "use", "absolute", "path", "pretrained", "model", ".", "lora", "save", "adapter", "absolute", "path", "adapter", "configuration", "json", "file", "used", "finding", "pretrained", "model", "load", ".", "also", ",", "script", "support", "bf16", "fp16", ".", "``", "`", "bash" ], [ "distributed training .", "provide single-gpu training script insufficient gpu memory break training .", "bash finetune/finetune_ds.sh `` ` remember specify correct model name path , data path , well output directory shell script .", "another thing notice use deepspeed zero 3 script .", "want make change , remove argument ` -- deepspeed ` make change deepspeed configuration json file based requirement .", "additionally , script support mixed-precision training , thus use ` -- bf16 true ` ` -- fp16 true ` .", "remember use deepspeed use fp16 due mixed precision training .", "empirically advise use bf16 make training consistent pretraining alignment machine support bf16 , thus use default .", "similarly , run lora , use another script run shown .", "start , make sure installed ` peft ` .", "also , need specify path model , data , output .", "advise use absolute path pretrained model .", "lora save adapter absolute path adapter configuration json file used finding pretrained model load .", "also , script support bf16 fp16 .", "`` ` bash" ] ], "level of complexity": -1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Distributed training\nbash finetune/finetune_lora_ds.sh\n```\n\nIn comparison with full-parameter finetuning, LoRA ([paper](https://arxiv.org/abs/2106.09685)) only updates the parameters of adapter layers but keeps the original large language model layers frozen. This allows much fewer memory costs and thus fewer computation costs. \n\nNote that if you use LoRA to finetune the base language model, e.g., Qwen-7B, instead of chat models, e.g., Qwen-7B-Chat, the script automatically switches the embedding and output layer as trainable parameters. This is because the base language model has no knowledge of special tokens brought by ChatML format. Thus these layers should be updated for the model to understand and predict the tokens. Or in another word, if your training brings in special tokens in LoRA, you should set the layers to trainable parameters by setting `modules_to_save` inside the code. Also, if we have these parameters trainable, it is not available to use ZeRO 3, and this is why we use ZeRO 2 in the script by default. If you do not have new trainable parameters, you can switch to ZeRO 3 by changing the DeepSpeed configuration file. Additionally, we find that there is a significant gap between the memory footprint of LoRA with and without these trainable parameters. Therefore, if you have trouble with memory, we advise you to LoRA finetune the chat models. Check the profile below for more information. \n\nIf you still suffer from insufficient memory, you can consider Q-LoRA ([paper](https://arxiv.org/abs/2305.14314)), which uses the quantized large language model and other techniques such as paged attention to allow even fewer memory costs. \n\nNote: to run single-GPU Q-LoRA training, you may need to install `mpi4py` through `pip` or `conda`.\n\nTo run Q-LoRA, directly run the following script:\n\n```bash\n", "sentence": [ [ "distributed", "training", "bash", "finetune/finetune_lora_ds.sh", "``", "`", "comparison", "full-parameter", "finetuning", ",", "lora", "(", "[", "paper", "]", "(", "http", ":", "//arxiv.org/abs/2106.09685", ")", ")", "update", "parameter", "adapter", "layer", "keep", "original", "large", "language", "model", "layer", "frozen", ".", "allows", "much", "fewer", "memory", "cost", "thus", "fewer", "computation", "cost", ".", "note", "use", "lora", "finetune", "base", "language", "model", ",", "e.g.", ",", "qwen-7b", ",", "instead", "chat", "model", ",", "e.g.", ",", "qwen-7b-chat", ",", "script", "automatically", "switch", "embedding", "output", "layer", "trainable", "parameter", ".", "base", "language", "model", "knowledge", "special", "token", "brought", "chatml", "format", ".", "thus", "layer", "updated", "model", "understand", "predict", "token", ".", "another", "word", ",", "training", "brings", "special", "token", "lora", ",", "set", "layer", "trainable", "parameter", "setting", "`", "modules_to_save", "`", "inside", "code", ".", "also", ",", "parameter", "trainable", ",", "available", "use", "zero", "3", ",", "use", "zero", "2", "script", "default", ".", "new", "trainable", "parameter", ",", "switch", "zero", "3", "changing", "deepspeed", "configuration", "file", ".", "additionally", ",", "find", "significant", "gap", "memory", "footprint", "lora", "without", "trainable", "parameter", ".", "therefore", ",", "trouble", "memory", ",", "advise", "lora", "finetune", "chat", "model", ".", "check", "profile", "information", ".", "still", "suffer", "insufficient", "memory", ",", "consider", "q-lora", "(", "[", "paper", "]", "(", "http", ":", "//arxiv.org/abs/2305.14314", ")", ")", ",", "us", "quantized", "large", "language", "model", "technique", "paged", "attention", "allow", "even", "fewer", "memory", "cost", ".", "note", ":", "run", "single-gpu", "q-lora", "training", ",", "may", "need", "install", "`", "mpi4py", "`", "`", "pip", "`", "`", "conda", "`", ".", "run", "q-lora", ",", "directly", "run", "following", "script", ":", "``", "`", "bash" ], [ "distributed training bash finetune/finetune_lora_ds.sh `` ` comparison full-parameter finetuning , lora ( [ paper ] ( http : //arxiv.org/abs/2106.09685 ) ) update parameter adapter layer keep original large language model layer frozen .", "allows much fewer memory cost thus fewer computation cost .", "note use lora finetune base language model , e.g. , qwen-7b , instead chat model , e.g. , qwen-7b-chat , script automatically switch embedding output layer trainable parameter .", "base language model knowledge special token brought chatml format .", "thus layer updated model understand predict token .", "another word , training brings special token lora , set layer trainable parameter setting ` modules_to_save ` inside code .", "also , parameter trainable , available use zero 3 , use zero 2 script default .", "new trainable parameter , switch zero 3 changing deepspeed configuration file .", "additionally , find significant gap memory footprint lora without trainable parameter .", "therefore , trouble memory , advise lora finetune chat model .", "check profile information .", "still suffer insufficient memory , consider q-lora ( [ paper ] ( http : //arxiv.org/abs/2305.14314 ) ) , us quantized large language model technique paged attention allow even fewer memory cost .", "note : run single-gpu q-lora training , may need install ` mpi4py ` ` pip ` ` conda ` .", "run q-lora , directly run following script : `` ` bash" ] ], "token": [ [ "distributed", "training", "bash", "finetune/finetune_lora_ds.sh", "``", "`", "comparison", "full-parameter", "finetuning", ",", "lora", "(", "[", "paper", "]", "(", "http", ":", "//arxiv.org/abs/2106.09685", ")", ")", "update", "parameter", "adapter", "layer", "keep", "original", "large", "language", "model", "layer", "frozen", ".", "allows", "much", "fewer", "memory", "cost", "thus", "fewer", "computation", "cost", ".", "note", "use", "lora", "finetune", "base", "language", "model", ",", "e.g.", ",", "qwen-7b", ",", "instead", "chat", "model", ",", "e.g.", ",", "qwen-7b-chat", ",", "script", "automatically", "switch", "embedding", "output", "layer", "trainable", "parameter", ".", "base", "language", "model", "knowledge", "special", "token", "brought", "chatml", "format", ".", "thus", "layer", "updated", "model", "understand", "predict", "token", ".", "another", "word", ",", "training", "brings", "special", "token", "lora", ",", "set", "layer", "trainable", "parameter", "setting", "`", "modules_to_save", "`", "inside", "code", ".", "also", ",", "parameter", "trainable", ",", "available", "use", "zero", "3", ",", "use", "zero", "2", "script", "default", ".", "new", "trainable", "parameter", ",", "switch", "zero", "3", "changing", "deepspeed", "configuration", "file", ".", "additionally", ",", "find", "significant", "gap", "memory", "footprint", "lora", "without", "trainable", "parameter", ".", "therefore", ",", "trouble", "memory", ",", "advise", "lora", "finetune", "chat", "model", ".", "check", "profile", "information", ".", "still", "suffer", "insufficient", "memory", ",", "consider", "q-lora", "(", "[", "paper", "]", "(", "http", ":", "//arxiv.org/abs/2305.14314", ")", ")", ",", "us", "quantized", "large", "language", "model", "technique", "paged", "attention", "allow", "even", "fewer", "memory", "cost", ".", "note", ":", "run", "single-gpu", "q-lora", "training", ",", "may", "need", "install", "`", "mpi4py", "`", "`", "pip", "`", "`", "conda", "`", ".", "run", "q-lora", ",", "directly", "run", "following", "script", ":", "``", "`", "bash" ], [ "distributed training bash finetune/finetune_lora_ds.sh `` ` comparison full-parameter finetuning , lora ( [ paper ] ( http : //arxiv.org/abs/2106.09685 ) ) update parameter adapter layer keep original large language model layer frozen .", "allows much fewer memory cost thus fewer computation cost .", "note use lora finetune base language model , e.g. , qwen-7b , instead chat model , e.g. , qwen-7b-chat , script automatically switch embedding output layer trainable parameter .", "base language model knowledge special token brought chatml format .", "thus layer updated model understand predict token .", "another word , training brings special token lora , set layer trainable parameter setting ` modules_to_save ` inside code .", "also , parameter trainable , available use zero 3 , use zero 2 script default .", "new trainable parameter , switch zero 3 changing deepspeed configuration file .", "additionally , find significant gap memory footprint lora without trainable parameter .", "therefore , trouble memory , advise lora finetune chat model .", "check profile information .", "still suffer insufficient memory , consider q-lora ( [ paper ] ( http : //arxiv.org/abs/2305.14314 ) ) , us quantized large language model technique paged attention allow even fewer memory cost .", "note : run single-gpu q-lora training , may need install ` mpi4py ` ` pip ` ` conda ` .", "run q-lora , directly run following script : `` ` bash" ] ], "level of complexity": -1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Quantize Fine-tuned Models\n\nThis section applies to full-parameter/LoRA fine-tuned models. (Note: You do not need to quantize the Q-LoRA fine-tuned model because it is already quantized.)\nIf you use LoRA, please follow the above instructions to merge your model before quantization. \n\nWe recommend using [auto_gptq](https://github.com/PanQiWei/AutoGPTQ) to quantize the finetuned model. \n\n```bash\npip install auto-gptq optimum\n```\n\nNote: Currently AutoGPTQ has a bug referred in [this issue](https://github.com/PanQiWei/AutoGPTQ/issues/370). Here is a [workaround PR](https://github.com/PanQiWei/AutoGPTQ/pull/495), and you can pull this branch and install from the source.\n\nFirst, prepare the calibration data. You can reuse the fine-tuning data, or use other data following the same format.\n\nSecond, run the following script:\n\n```bash\npython run_gptq.py \\\n --model_name_or_path $YOUR_LORA_MODEL_PATH \\\n --data_path $DATA \\\n --out_path $OUTPUT_PATH \\\n --bits 4 ", "sentence": [ [ "quantize", "fine-tuned", "model", "section", "applies", "full-parameter/lora", "fine-tuned", "model", ".", "(", "note", ":", "need", "quantize", "q-lora", "fine-tuned", "model", "already", "quantized", ".", ")", "use", "lora", ",", "please", "follow", "instruction", "merge", "model", "quantization", ".", "recommend", "using", "[", "auto_gptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "quantize", "finetuned", "model", ".", "``", "`", "bash", "pip", "install", "auto-gptq", "optimum", "``", "`", "note", ":", "currently", "autogptq", "bug", "referred", "[", "issue", "]", "(", "http", ":", "//github.com/panqiwei/autogptq/issues/370", ")", ".", "[", "workaround", "pr", "]", "(", "http", ":", "//github.com/panqiwei/autogptq/pull/495", ")", ",", "pull", "branch", "install", "source", ".", "first", ",", "prepare", "calibration", "data", ".", "reuse", "fine-tuning", "data", ",", "use", "data", "following", "format", ".", "second", ",", "run", "following", "script", ":", "``", "`", "bash", "python", "run_gptq.py", "\\", "--", "model_name_or_path", "$", "your_lora_model_path", "\\", "--", "data_path", "$", "data", "\\", "--", "out_path", "$", "output_path", "\\", "--", "bit", "4" ], [ "quantize fine-tuned model section applies full-parameter/lora fine-tuned model .", "( note : need quantize q-lora fine-tuned model already quantized . )", "use lora , please follow instruction merge model quantization .", "recommend using [ auto_gptq ] ( http : //github.com/panqiwei/autogptq ) quantize finetuned model .", "`` ` bash pip install auto-gptq optimum `` ` note : currently autogptq bug referred [ issue ] ( http : //github.com/panqiwei/autogptq/issues/370 ) .", "[ workaround pr ] ( http : //github.com/panqiwei/autogptq/pull/495 ) , pull branch install source .", "first , prepare calibration data .", "reuse fine-tuning data , use data following format .", "second , run following script : `` ` bash python run_gptq.py \\ -- model_name_or_path $ your_lora_model_path \\ -- data_path $ data \\ -- out_path $ output_path \\ -- bit 4" ] ], "token": [ [ "quantize", "fine-tuned", "model", "section", "applies", "full-parameter/lora", "fine-tuned", "model", ".", "(", "note", ":", "need", "quantize", "q-lora", "fine-tuned", "model", "already", "quantized", ".", ")", "use", "lora", ",", "please", "follow", "instruction", "merge", "model", "quantization", ".", "recommend", "using", "[", "auto_gptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "quantize", "finetuned", "model", ".", "``", "`", "bash", "pip", "install", "auto-gptq", "optimum", "``", "`", "note", ":", "currently", "autogptq", "bug", "referred", "[", "issue", "]", "(", "http", ":", "//github.com/panqiwei/autogptq/issues/370", ")", ".", "[", "workaround", "pr", "]", "(", "http", ":", "//github.com/panqiwei/autogptq/pull/495", ")", ",", "pull", "branch", "install", "source", ".", "first", ",", "prepare", "calibration", "data", ".", "reuse", "fine-tuning", "data", ",", "use", "data", "following", "format", ".", "second", ",", "run", "following", "script", ":", "``", "`", "bash", "python", "run_gptq.py", "\\", "--", "model_name_or_path", "$", "your_lora_model_path", "\\", "--", "data_path", "$", "data", "\\", "--", "out_path", "$", "output_path", "\\", "--", "bit", "4" ], [ "quantize fine-tuned model section applies full-parameter/lora fine-tuned model .", "( note : need quantize q-lora fine-tuned model already quantized . )", "use lora , please follow instruction merge model quantization .", "recommend using [ auto_gptq ] ( http : //github.com/panqiwei/autogptq ) quantize finetuned model .", "`` ` bash pip install auto-gptq optimum `` ` note : currently autogptq bug referred [ issue ] ( http : //github.com/panqiwei/autogptq/issues/370 ) .", "[ workaround pr ] ( http : //github.com/panqiwei/autogptq/pull/495 ) , pull branch install source .", "first , prepare calibration data .", "reuse fine-tuning data , use data following format .", "second , run following script : `` ` bash python run_gptq.py \\ -- model_name_or_path $ your_lora_model_path \\ -- data_path $ data \\ -- out_path $ output_path \\ -- bit 4" ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Profiling of Memory and Speed\nWe profile the GPU memory and training speed of both LoRA (LoRA (emb) refers to training the embedding and output layer, while LoRA has no trainable embedding and output layer) and Q-LoRA in the setup of single-GPU training. In this test, we experiment on a single A100-SXM4-80G GPU, and we use CUDA 11.8 and Pytorch 2.0. Flash attention 2 is applied. We uniformly use a batch size of 1 and gradient accumulation of 8. We profile the memory (GB) and speed (s/iter) of inputs of different lengths, namely 256, 512, 1024, 2048, 4096, and 8192. We also report the statistics of full-parameter finetuning with Qwen-7B on 2 A100 GPUs. We only report the statistics of 256, 512, and 1024 tokens due to the limitation of GPU memory. \n\nFor Qwen-7B, we also test the performance of multinode finetuning. We experiment using two servers, each containing two A100-SXM4-80G GPUs, and the rest of configurations are the same as other Qwen-7B experiments. The results of multinode finetuning are marked as LoRA (multinode) in the table.\n\nFor Qwen-72B, we experiment in two ways: 1) Lora fintuning + DeepSpeed ZeRO 3 on 4 A100-SXM4-80G GPUs and 2) QLora (int4) fine-tuning on a single A100-SXM4-80G GPU. Note that OOM occurs on 4 A100-SXM4-80G GPUs both with LoRA (emb) fine-tuning and LoRA fine-tuning without Deepspeed ZeRO 3 (you can pass `--deepspeed finetune/ds_config_zero3.json` to [`finetune/finetune_lora_ds.sh`](finetune/finetune_lora_ds.sh) to enable DeepSpeed ZeRO 3).\n\nThe statistics are listed below:\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
Model SizeMethod#Nodes#GPUs per nodeSequence Length
2565121024204840968192
1.8BLoRA116.7G / 1.0s/it7.4G / 1.0s/it8.4G / 1.1s/it11.0G / 1.7s/it16.2G / 3.3s/it21.8G / 6.8s/it
LoRA (emb)1113.7G / 1.0s/it14.0G / 1.0s/it14.0G / 1.1s/it15.1G / 1.8s/it19.7G / 3.4s/it27.7G / 7.0s/it
Q-LoRA115.8G / 1.4s/it6.0G / 1.4s/it6.6G / 1.4s/it7.8G / 2.0s/it10.2G / 3.4s/it15.8G / 6.5s/it
Full-parameter1143.5G / 2.1s/it43.5G / 2.2s/it43.5G / 2.2s/it43.5G / 2.3s/it47.1G / 2.8s/it48.3G / 5.6s/it
7BLoRA1120.1G / 1.2s/it20.4G / 1.5s/it21.5G / 2.8s/it23.8G / 5.2s/it29.7G / 10.1s/it36.6G / 21.3s/it
LoRA (emb)1133.7G / 1.4s/it34.1G / 1.6s/it35.2G / 2.9s/it35.1G / 5.3s/it39.2G / 10.3s/it48.5G / 21.7s/it
Q-LoRA1111.5G / 3.0s/it11.5G / 3.0s/it12.3G / 3.5s/it13.9G / 7.0s/it16.9G / 11.6s/it23.5G / 22.3s/it
Full-parameter12139.2G / 4.0s/it148.0G / 4.0s/it162.0G / 4.5s/it---
LoRA (multinode)2274.7G / 2.09s/it77.6G / 3.16s/it84.9G / 5.17s/it95.1G / 9.25s/it121.1G / 18.1s/it155.5G / 37.4s/it
14BLoRA1134.6G / 1.6s/it35.1G / 2.4s/it35.3G / 4.4s/it37.4G / 8.4s/it42.5G / 17.0s/it55.2G / 36.0s/it
LoRA (emb)1151.2 / 1.7s/it51.1G / 2.6s/it51.5G / 4.6s/it54.1G / 8.6s/it56.8G / 17.2s/it67.7G / 36.3s/it
Q-LoRA1118.7G / 5.3s/it18.4G / 6.3s/it18.9G / 8.2s/it19.9G / 11.8s/it23.0G / 20.1s/it27.9G / 38.3s/it
72BLoRA + Deepspeed Zero314215.4G / 17.6s/it217.7G / 20.5s/it222.6G / 29.4s/it228.8G / 45.7s/it249.0G / 83.4s/it289.2G / 161.5s/it
Q-LoRA1161.4G / 27.4s/it61.4G / 31.5s/it62.9G / 41.4s/it64.1G / 59.5s/it68.0G / 97.7s/it75.6G / 179.8s/it
\n\n
\n\n", "sentence": [ [ "profiling", "memory", "speed", "profile", "gpu", "memory", "training", "speed", "lora", "(", "lora", "(", "emb", ")", "refers", "training", "embedding", "output", "layer", ",", "lora", "trainable", "embedding", "output", "layer", ")", "q-lora", "setup", "single-gpu", "training", ".", "test", ",", "experiment", "single", "a100-sxm4-80g", "gpu", ",", "use", "cuda", "11.8", "pytorch", "2.0", ".", "flash", "attention", "2", "applied", ".", "uniformly", "use", "batch", "size", "1", "gradient", "accumulation", "8", ".", "profile", "memory", "(", "gb", ")", "speed", "(", "s/iter", ")", "input", "different", "length", ",", "namely", "256", ",", "512", ",", "1024", ",", "2048", ",", "4096", ",", "8192", ".", "also", "report", "statistic", "full-parameter", "finetuning", "qwen-7b", "2", "a100", "gpus", ".", "report", "statistic", "256", ",", "512", ",", "1024", "token", "due", "limitation", "gpu", "memory", ".", "qwen-7b", ",", "also", "test", "performance", "multinode", "finetuning", ".", "experiment", "using", "two", "server", ",", "containing", "two", "a100-sxm4-80g", "gpus", ",", "rest", "configuration", "qwen-7b", "experiment", ".", "result", "multinode", "finetuning", "marked", "lora", "(", "multinode", ")", "table", ".", "qwen-72b", ",", "experiment", "two", "way", ":", "1", ")", "lora", "fintuning", "+", "deepspeed", "zero", "3", "4", "a100-sxm4-80g", "gpus", "2", ")", "qlora", "(", "int4", ")", "fine-tuning", "single", "a100-sxm4-80g", "gpu", ".", "note", "oom", "occurs", "4", "a100-sxm4-80g", "gpus", "lora", "(", "emb", ")", "fine-tuning", "lora", "fine-tuning", "without", "deepspeed", "zero", "3", "(", "pas", "`", "--", "deepspeed", "finetune/ds_config_zero3.json", "`", "[", "`", "finetune/finetune_lora_ds.sh", "`", "]", "(", "finetune/finetune_lora_ds.sh", ")", "enable", "deepspeed", "zero", "3", ")", ".", "statistic", "listed", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "model", "size", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "method", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "#", "node", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "#", "gpus", "per", "node", "<", "/th", ">", "<", "th", "colspan=", "''", "6", "''", "align=", "''", "center", "''", ">", "sequence", "length", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "256", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "512", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "1024", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "2048", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "4096", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "8192", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "4", "''", ">", "1.8b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.7g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "7.4g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "8.4g", "/", "1.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.0g", "/", "1.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "16.2g", "/", "3.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.8g", "/", "6.8s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "13.7g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "14.0g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "14.0g", "/", "1.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.1g", "/", "1.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "19.7g", "/", "3.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.7g", "/", "7.0s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "5.8g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.0g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.6g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "7.8g", "/", "2.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "10.2g", "/", "3.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.8g", "/", "6.5s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "full-parameter", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "47.1g", "/", "2.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.3g", "/", "5.6s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "5", "''", ">", "7b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "20.1g", "/", "1.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "20.4g", "/", "1.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.5g", "/", "2.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.8g", "/", "5.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "29.7g", "/", "10.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "36.6g", "/", "21.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "33.7g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.1g", "/", "1.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.2g", "/", "2.9s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.1g", "/", "5.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "39.2g", "/", "10.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.5g", "/", "21.7s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.5g", "/", "3.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.5g", "/", "3.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "12.3g", "/", "3.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "13.9g", "/", "7.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "16.9g", "/", "11.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.5g", "/", "22.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "full-parameter", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "139.2g", "/", "4.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "148.0g", "/", "4.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "162.0g", "/", "4.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "multinode", ")", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.7g", "/", "2.09s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "77.6g", "/", "3.16s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "84.9g", "/", "5.17s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "95.1g", "/", "9.25s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "121.1g", "/", "18.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "155.5g", "/", "37.4s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "3", "''", ">", "14b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.6g", "/", "1.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.1g", "/", "2.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.3g", "/", "4.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "37.4g", "/", "8.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "42.5g", "/", "17.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "55.2g", "/", "36.0s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.2", "/", "1.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.1g", "/", "2.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.5g", "/", "4.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "54.1g", "/", "8.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "56.8g", "/", "17.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.7g", "/", "36.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.7g", "/", "5.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.4g", "/", "6.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.9g", "/", "8.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "19.9g", "/", "11.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.0g", "/", "20.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.9g", "/", "38.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "72b", "<", "/th", ">", "<", "td", ">", "lora", "+", "deepspeed", "zero3", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "215.4g", "/", "17.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "217.7g", "/", "20.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "222.6g", "/", "29.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "228.8g", "/", "45.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "249.0g", "/", "83.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "289.2g", "/", "161.5s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "61.4g", "/", "27.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "61.4g", "/", "31.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "62.9g", "/", "41.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "64.1g", "/", "59.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "68.0g", "/", "97.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "75.6g", "/", "179.8s/it", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "<", "br", ">" ], [ "profiling memory speed profile gpu memory training speed lora ( lora ( emb ) refers training embedding output layer , lora trainable embedding output layer ) q-lora setup single-gpu training .", "test , experiment single a100-sxm4-80g gpu , use cuda 11.8 pytorch 2.0 .", "flash attention 2 applied .", "uniformly use batch size 1 gradient accumulation 8 .", "profile memory ( gb ) speed ( s/iter ) input different length , namely 256 , 512 , 1024 , 2048 , 4096 , 8192 .", "also report statistic full-parameter finetuning qwen-7b 2 a100 gpus .", "report statistic 256 , 512 , 1024 token due limitation gpu memory .", "qwen-7b , also test performance multinode finetuning .", "experiment using two server , containing two a100-sxm4-80g gpus , rest configuration qwen-7b experiment .", "result multinode finetuning marked lora ( multinode ) table .", "qwen-72b , experiment two way : 1 ) lora fintuning + deepspeed zero 3 4 a100-sxm4-80g gpus 2 ) qlora ( int4 ) fine-tuning single a100-sxm4-80g gpu .", "note oom occurs 4 a100-sxm4-80g gpus lora ( emb ) fine-tuning lora fine-tuning without deepspeed zero 3 ( pas ` -- deepspeed finetune/ds_config_zero3.json ` [ ` finetune/finetune_lora_ds.sh ` ] ( finetune/finetune_lora_ds.sh ) enable deepspeed zero 3 ) .", "statistic listed : < table > < tr > < th rowspan= '' 2 '' > model size < /th > < th rowspan= '' 2 '' > method < /th > < th rowspan= '' 2 '' > # node < /th > < th rowspan= '' 2 '' > # gpus per node < /th > < th colspan= '' 6 '' align= '' center '' > sequence length < /th > < /tr > < tr > < th align= '' center '' > 256 < /th > < th align= '' center '' > 512 < /th > < th align= '' center '' > 1024 < /th > < th align= '' center '' > 2048 < /th > < th align= '' center '' > 4096 < /th > < th align= '' center '' > 8192 < /th > < /tr > < tr > < th rowspan= '' 4 '' > 1.8b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 6.7g / 1.0s/it < /td > < td align= '' center '' > 7.4g / 1.0s/it < /td > < td align= '' center '' > 8.4g / 1.1s/it < /td > < td align= '' center '' > 11.0g / 1.7s/it < /td > < td align= '' center '' > 16.2g / 3.3s/it < /td > < td align= '' center '' > 21.8g / 6.8s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 13.7g / 1.0s/it < /td > < td align= '' center '' > 14.0g / 1.0s/it < /td > < td align= '' center '' > 14.0g / 1.1s/it < /td > < td align= '' center '' > 15.1g / 1.8s/it < /td > < td align= '' center '' > 19.7g / 3.4s/it < /td > < td align= '' center '' > 27.7g / 7.0s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 5.8g / 1.4s/it < /td > < td align= '' center '' > 6.0g / 1.4s/it < /td > < td align= '' center '' > 6.6g / 1.4s/it < /td > < td align= '' center '' > 7.8g / 2.0s/it < /td > < td align= '' center '' > 10.2g / 3.4s/it < /td > < td align= '' center '' > 15.8g / 6.5s/it < /td > < /tr > < tr > < td > full-parameter < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 43.5g / 2.1s/it < /td > < td align= '' center '' > 43.5g / 2.2s/it < /td > < td align= '' center '' > 43.5g / 2.2s/it < /td > < td align= '' center '' > 43.5g / 2.3s/it < /td > < td align= '' center '' > 47.1g / 2.8s/it < /td > < td align= '' center '' > 48.3g / 5.6s/it < /td > < /tr > < tr > < th rowspan= '' 5 '' > 7b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 20.1g / 1.2s/it < /td > < td align= '' center '' > 20.4g / 1.5s/it < /td > < td align= '' center '' > 21.5g / 2.8s/it < /td > < td align= '' center '' > 23.8g / 5.2s/it < /td > < td align= '' center '' > 29.7g / 10.1s/it < /td > < td align= '' center '' > 36.6g / 21.3s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 33.7g / 1.4s/it < /td > < td align= '' center '' > 34.1g / 1.6s/it < /td > < td align= '' center '' > 35.2g / 2.9s/it < /td > < td align= '' center '' > 35.1g / 5.3s/it < /td > < td align= '' center '' > 39.2g / 10.3s/it < /td > < td align= '' center '' > 48.5g / 21.7s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 11.5g / 3.0s/it < /td > < td align= '' center '' > 11.5g / 3.0s/it < /td > < td align= '' center '' > 12.3g / 3.5s/it < /td > < td align= '' center '' > 13.9g / 7.0s/it < /td > < td align= '' center '' > 16.9g / 11.6s/it < /td > < td align= '' center '' > 23.5g / 22.3s/it < /td > < /tr > < tr > < td > full-parameter < /td > < td > 1 < /td > < td > 2 < /td > < td align= '' center '' > 139.2g / 4.0s/it < /td > < td align= '' center '' > 148.0g / 4.0s/it < /td > < td align= '' center '' > 162.0g / 4.5s/it < /td > < td align= '' center '' > - < /td > < td align= '' center '' > - < /td > < td align= '' center '' > - < /td > < /tr > < tr > < td > lora ( multinode ) < /td > < td > 2 < /td > < td > 2 < /td > < td align= '' center '' > 74.7g / 2.09s/it < /td > < td align= '' center '' > 77.6g / 3.16s/it < /td > < td align= '' center '' > 84.9g / 5.17s/it < /td > < td align= '' center '' > 95.1g / 9.25s/it < /td > < td align= '' center '' > 121.1g / 18.1s/it < /td > < td align= '' center '' > 155.5g / 37.4s/it < /td > < /tr > < tr > < th rowspan= '' 3 '' > 14b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 34.6g / 1.6s/it < /td > < td align= '' center '' > 35.1g / 2.4s/it < /td > < td align= '' center '' > 35.3g / 4.4s/it < /td > < td align= '' center '' > 37.4g / 8.4s/it < /td > < td align= '' center '' > 42.5g / 17.0s/it < /td > < td align= '' center '' > 55.2g / 36.0s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 51.2 / 1.7s/it < /td > < td align= '' center '' > 51.1g / 2.6s/it < /td > < td align= '' center '' > 51.5g / 4.6s/it < /td > < td align= '' center '' > 54.1g / 8.6s/it < /td > < td align= '' center '' > 56.8g / 17.2s/it < /td > < td align= '' center '' > 67.7g / 36.3s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 18.7g / 5.3s/it < /td > < td align= '' center '' > 18.4g / 6.3s/it < /td > < td align= '' center '' > 18.9g / 8.2s/it < /td > < td align= '' center '' > 19.9g / 11.8s/it < /td > < td align= '' center '' > 23.0g / 20.1s/it < /td > < td align= '' center '' > 27.9g / 38.3s/it < /td > < /tr > < tr > < th rowspan= '' 2 '' > 72b < /th > < td > lora + deepspeed zero3 < /td > < td > 1 < /td > < td > 4 < /td > < td align= '' center '' > 215.4g / 17.6s/it < /td > < td align= '' center '' > 217.7g / 20.5s/it < /td > < td align= '' center '' > 222.6g / 29.4s/it < /td > < td align= '' center '' > 228.8g / 45.7s/it < /td > < td align= '' center '' > 249.0g / 83.4s/it < /td > < td align= '' center '' > 289.2g / 161.5s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 61.4g / 27.4s/it < /td > < td align= '' center '' > 61.4g / 31.5s/it < /td > < td align= '' center '' > 62.9g / 41.4s/it < /td > < td align= '' center '' > 64.1g / 59.5s/it < /td > < td align= '' center '' > 68.0g / 97.7s/it < /td > < td align= '' center '' > 75.6g / 179.8s/it < /td > < /tr > < /table > < br >" ] ], "token": [ [ "profiling", "memory", "speed", "profile", "gpu", "memory", "training", "speed", "lora", "(", "lora", "(", "emb", ")", "refers", "training", "embedding", "output", "layer", ",", "lora", "trainable", "embedding", "output", "layer", ")", "q-lora", "setup", "single-gpu", "training", ".", "test", ",", "experiment", "single", "a100-sxm4-80g", "gpu", ",", "use", "cuda", "11.8", "pytorch", "2.0", ".", "flash", "attention", "2", "applied", ".", "uniformly", "use", "batch", "size", "1", "gradient", "accumulation", "8", ".", "profile", "memory", "(", "gb", ")", "speed", "(", "s/iter", ")", "input", "different", "length", ",", "namely", "256", ",", "512", ",", "1024", ",", "2048", ",", "4096", ",", "8192", ".", "also", "report", "statistic", "full-parameter", "finetuning", "qwen-7b", "2", "a100", "gpus", ".", "report", "statistic", "256", ",", "512", ",", "1024", "token", "due", "limitation", "gpu", "memory", ".", "qwen-7b", ",", "also", "test", "performance", "multinode", "finetuning", ".", "experiment", "using", "two", "server", ",", "containing", "two", "a100-sxm4-80g", "gpus", ",", "rest", "configuration", "qwen-7b", "experiment", ".", "result", "multinode", "finetuning", "marked", "lora", "(", "multinode", ")", "table", ".", "qwen-72b", ",", "experiment", "two", "way", ":", "1", ")", "lora", "fintuning", "+", "deepspeed", "zero", "3", "4", "a100-sxm4-80g", "gpus", "2", ")", "qlora", "(", "int4", ")", "fine-tuning", "single", "a100-sxm4-80g", "gpu", ".", "note", "oom", "occurs", "4", "a100-sxm4-80g", "gpus", "lora", "(", "emb", ")", "fine-tuning", "lora", "fine-tuning", "without", "deepspeed", "zero", "3", "(", "pas", "`", "--", "deepspeed", "finetune/ds_config_zero3.json", "`", "[", "`", "finetune/finetune_lora_ds.sh", "`", "]", "(", "finetune/finetune_lora_ds.sh", ")", "enable", "deepspeed", "zero", "3", ")", ".", "statistic", "listed", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "model", "size", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "method", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "#", "node", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "#", "gpus", "per", "node", "<", "/th", ">", "<", "th", "colspan=", "''", "6", "''", "align=", "''", "center", "''", ">", "sequence", "length", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "256", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "512", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "1024", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "2048", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "4096", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "8192", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "4", "''", ">", "1.8b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.7g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "7.4g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "8.4g", "/", "1.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.0g", "/", "1.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "16.2g", "/", "3.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.8g", "/", "6.8s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "13.7g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "14.0g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "14.0g", "/", "1.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.1g", "/", "1.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "19.7g", "/", "3.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.7g", "/", "7.0s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "5.8g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.0g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.6g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "7.8g", "/", "2.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "10.2g", "/", "3.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.8g", "/", "6.5s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "full-parameter", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "47.1g", "/", "2.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.3g", "/", "5.6s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "5", "''", ">", "7b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "20.1g", "/", "1.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "20.4g", "/", "1.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.5g", "/", "2.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.8g", "/", "5.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "29.7g", "/", "10.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "36.6g", "/", "21.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "33.7g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.1g", "/", "1.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.2g", "/", "2.9s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.1g", "/", "5.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "39.2g", "/", "10.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.5g", "/", "21.7s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.5g", "/", "3.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.5g", "/", "3.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "12.3g", "/", "3.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "13.9g", "/", "7.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "16.9g", "/", "11.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.5g", "/", "22.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "full-parameter", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "139.2g", "/", "4.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "148.0g", "/", "4.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "162.0g", "/", "4.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "multinode", ")", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.7g", "/", "2.09s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "77.6g", "/", "3.16s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "84.9g", "/", "5.17s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "95.1g", "/", "9.25s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "121.1g", "/", "18.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "155.5g", "/", "37.4s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "3", "''", ">", "14b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.6g", "/", "1.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.1g", "/", "2.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.3g", "/", "4.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "37.4g", "/", "8.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "42.5g", "/", "17.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "55.2g", "/", "36.0s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.2", "/", "1.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.1g", "/", "2.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.5g", "/", "4.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "54.1g", "/", "8.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "56.8g", "/", "17.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.7g", "/", "36.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.7g", "/", "5.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.4g", "/", "6.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.9g", "/", "8.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "19.9g", "/", "11.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.0g", "/", "20.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.9g", "/", "38.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "72b", "<", "/th", ">", "<", "td", ">", "lora", "+", "deepspeed", "zero3", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "215.4g", "/", "17.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "217.7g", "/", "20.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "222.6g", "/", "29.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "228.8g", "/", "45.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "249.0g", "/", "83.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "289.2g", "/", "161.5s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "61.4g", "/", "27.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "61.4g", "/", "31.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "62.9g", "/", "41.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "64.1g", "/", "59.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "68.0g", "/", "97.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "75.6g", "/", "179.8s/it", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "<", "br", ">" ], [ "profiling memory speed profile gpu memory training speed lora ( lora ( emb ) refers training embedding output layer , lora trainable embedding output layer ) q-lora setup single-gpu training .", "test , experiment single a100-sxm4-80g gpu , use cuda 11.8 pytorch 2.0 .", "flash attention 2 applied .", "uniformly use batch size 1 gradient accumulation 8 .", "profile memory ( gb ) speed ( s/iter ) input different length , namely 256 , 512 , 1024 , 2048 , 4096 , 8192 .", "also report statistic full-parameter finetuning qwen-7b 2 a100 gpus .", "report statistic 256 , 512 , 1024 token due limitation gpu memory .", "qwen-7b , also test performance multinode finetuning .", "experiment using two server , containing two a100-sxm4-80g gpus , rest configuration qwen-7b experiment .", "result multinode finetuning marked lora ( multinode ) table .", "qwen-72b , experiment two way : 1 ) lora fintuning + deepspeed zero 3 4 a100-sxm4-80g gpus 2 ) qlora ( int4 ) fine-tuning single a100-sxm4-80g gpu .", "note oom occurs 4 a100-sxm4-80g gpus lora ( emb ) fine-tuning lora fine-tuning without deepspeed zero 3 ( pas ` -- deepspeed finetune/ds_config_zero3.json ` [ ` finetune/finetune_lora_ds.sh ` ] ( finetune/finetune_lora_ds.sh ) enable deepspeed zero 3 ) .", "statistic listed : < table > < tr > < th rowspan= '' 2 '' > model size < /th > < th rowspan= '' 2 '' > method < /th > < th rowspan= '' 2 '' > # node < /th > < th rowspan= '' 2 '' > # gpus per node < /th > < th colspan= '' 6 '' align= '' center '' > sequence length < /th > < /tr > < tr > < th align= '' center '' > 256 < /th > < th align= '' center '' > 512 < /th > < th align= '' center '' > 1024 < /th > < th align= '' center '' > 2048 < /th > < th align= '' center '' > 4096 < /th > < th align= '' center '' > 8192 < /th > < /tr > < tr > < th rowspan= '' 4 '' > 1.8b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 6.7g / 1.0s/it < /td > < td align= '' center '' > 7.4g / 1.0s/it < /td > < td align= '' center '' > 8.4g / 1.1s/it < /td > < td align= '' center '' > 11.0g / 1.7s/it < /td > < td align= '' center '' > 16.2g / 3.3s/it < /td > < td align= '' center '' > 21.8g / 6.8s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 13.7g / 1.0s/it < /td > < td align= '' center '' > 14.0g / 1.0s/it < /td > < td align= '' center '' > 14.0g / 1.1s/it < /td > < td align= '' center '' > 15.1g / 1.8s/it < /td > < td align= '' center '' > 19.7g / 3.4s/it < /td > < td align= '' center '' > 27.7g / 7.0s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 5.8g / 1.4s/it < /td > < td align= '' center '' > 6.0g / 1.4s/it < /td > < td align= '' center '' > 6.6g / 1.4s/it < /td > < td align= '' center '' > 7.8g / 2.0s/it < /td > < td align= '' center '' > 10.2g / 3.4s/it < /td > < td align= '' center '' > 15.8g / 6.5s/it < /td > < /tr > < tr > < td > full-parameter < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 43.5g / 2.1s/it < /td > < td align= '' center '' > 43.5g / 2.2s/it < /td > < td align= '' center '' > 43.5g / 2.2s/it < /td > < td align= '' center '' > 43.5g / 2.3s/it < /td > < td align= '' center '' > 47.1g / 2.8s/it < /td > < td align= '' center '' > 48.3g / 5.6s/it < /td > < /tr > < tr > < th rowspan= '' 5 '' > 7b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 20.1g / 1.2s/it < /td > < td align= '' center '' > 20.4g / 1.5s/it < /td > < td align= '' center '' > 21.5g / 2.8s/it < /td > < td align= '' center '' > 23.8g / 5.2s/it < /td > < td align= '' center '' > 29.7g / 10.1s/it < /td > < td align= '' center '' > 36.6g / 21.3s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 33.7g / 1.4s/it < /td > < td align= '' center '' > 34.1g / 1.6s/it < /td > < td align= '' center '' > 35.2g / 2.9s/it < /td > < td align= '' center '' > 35.1g / 5.3s/it < /td > < td align= '' center '' > 39.2g / 10.3s/it < /td > < td align= '' center '' > 48.5g / 21.7s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 11.5g / 3.0s/it < /td > < td align= '' center '' > 11.5g / 3.0s/it < /td > < td align= '' center '' > 12.3g / 3.5s/it < /td > < td align= '' center '' > 13.9g / 7.0s/it < /td > < td align= '' center '' > 16.9g / 11.6s/it < /td > < td align= '' center '' > 23.5g / 22.3s/it < /td > < /tr > < tr > < td > full-parameter < /td > < td > 1 < /td > < td > 2 < /td > < td align= '' center '' > 139.2g / 4.0s/it < /td > < td align= '' center '' > 148.0g / 4.0s/it < /td > < td align= '' center '' > 162.0g / 4.5s/it < /td > < td align= '' center '' > - < /td > < td align= '' center '' > - < /td > < td align= '' center '' > - < /td > < /tr > < tr > < td > lora ( multinode ) < /td > < td > 2 < /td > < td > 2 < /td > < td align= '' center '' > 74.7g / 2.09s/it < /td > < td align= '' center '' > 77.6g / 3.16s/it < /td > < td align= '' center '' > 84.9g / 5.17s/it < /td > < td align= '' center '' > 95.1g / 9.25s/it < /td > < td align= '' center '' > 121.1g / 18.1s/it < /td > < td align= '' center '' > 155.5g / 37.4s/it < /td > < /tr > < tr > < th rowspan= '' 3 '' > 14b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 34.6g / 1.6s/it < /td > < td align= '' center '' > 35.1g / 2.4s/it < /td > < td align= '' center '' > 35.3g / 4.4s/it < /td > < td align= '' center '' > 37.4g / 8.4s/it < /td > < td align= '' center '' > 42.5g / 17.0s/it < /td > < td align= '' center '' > 55.2g / 36.0s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 51.2 / 1.7s/it < /td > < td align= '' center '' > 51.1g / 2.6s/it < /td > < td align= '' center '' > 51.5g / 4.6s/it < /td > < td align= '' center '' > 54.1g / 8.6s/it < /td > < td align= '' center '' > 56.8g / 17.2s/it < /td > < td align= '' center '' > 67.7g / 36.3s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 18.7g / 5.3s/it < /td > < td align= '' center '' > 18.4g / 6.3s/it < /td > < td align= '' center '' > 18.9g / 8.2s/it < /td > < td align= '' center '' > 19.9g / 11.8s/it < /td > < td align= '' center '' > 23.0g / 20.1s/it < /td > < td align= '' center '' > 27.9g / 38.3s/it < /td > < /tr > < tr > < th rowspan= '' 2 '' > 72b < /th > < td > lora + deepspeed zero3 < /td > < td > 1 < /td > < td > 4 < /td > < td align= '' center '' > 215.4g / 17.6s/it < /td > < td align= '' center '' > 217.7g / 20.5s/it < /td > < td align= '' center '' > 222.6g / 29.4s/it < /td > < td align= '' center '' > 228.8g / 45.7s/it < /td > < td align= '' center '' > 249.0g / 83.4s/it < /td > < td align= '' center '' > 289.2g / 161.5s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 61.4g / 27.4s/it < /td > < td align= '' center '' > 61.4g / 31.5s/it < /td > < td align= '' center '' > 62.9g / 41.4s/it < /td > < td align= '' center '' > 64.1g / 59.5s/it < /td > < td align= '' center '' > 68.0g / 97.7s/it < /td > < td align= '' center '' > 75.6g / 179.8s/it < /td > < /tr > < /table > < br >" ] ], "level of complexity": -1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "vLLM \n\nFor deployment and fast inference, we suggest using vLLM. \n\nIf you use cuda 12.1 and pytorch 2.1, you can directly use the following command to install vLLM.\n\n```bash\n", "sentence": [ [ "vllm", "deployment", "fast", "inference", ",", "suggest", "using", "vllm", ".", "use", "cuda", "12.1", "pytorch", "2.1", ",", "directly", "use", "following", "command", "install", "vllm", ".", "``", "`", "bash" ], [ "vllm deployment fast inference , suggest using vllm .", "use cuda 12.1 pytorch 2.1 , directly use following command install vllm .", "`` ` bash" ] ], "token": [ [ "vllm", "deployment", "fast", "inference", ",", "suggest", "using", "vllm", ".", "use", "cuda", "12.1", "pytorch", "2.1", ",", "directly", "use", "following", "command", "install", "vllm", ".", "``", "`", "bash" ], [ "vllm deployment fast inference , suggest using vllm .", "use cuda 12.1 pytorch 2.1 , directly use following command install vllm .", "`` ` bash" ] ], "level of complexity": -1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "pip install vllm ", "sentence": [ [ "pip", "install", "vllm" ], [ "pip install vllm" ] ], "token": [ [ "pip", "install", "vllm" ], [ "pip install vllm" ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "The below lines support int4 quantization (int8 will be supported soon). The installation are slower (~10 minutes).\ngit clone https://github.com/QwenLM/vllm-gptq\ncd vllm-gptq\npip install -e .\n```\n\nOtherwise, please refer to the official vLLM [Installation Instructions](https://docs.vllm.ai/en/latest/getting_started/installation.html), or our [vLLM repo for GPTQ quantization](https://github.com/QwenLM/vllm-gptq).\n\n", "sentence": [ [ "line", "support", "int4", "quantization", "(", "int8", "supported", "soon", ")", ".", "installation", "slower", "(", "~10", "minute", ")", ".", "git", "clone", "http", ":", "//github.com/qwenlm/vllm-gptq", "cd", "vllm-gptq", "pip", "install", "-e", ".", "``", "`", "otherwise", ",", "please", "refer", "official", "vllm", "[", "installation", "instruction", "]", "(", "http", ":", "//docs.vllm.ai/en/latest/getting_started/installation.html", ")", ",", "[", "vllm", "repo", "gptq", "quantization", "]", "(", "http", ":", "//github.com/qwenlm/vllm-gptq", ")", "." ], [ "line support int4 quantization ( int8 supported soon ) .", "installation slower ( ~10 minute ) .", "git clone http : //github.com/qwenlm/vllm-gptq cd vllm-gptq pip install -e .", "`` ` otherwise , please refer official vllm [ installation instruction ] ( http : //docs.vllm.ai/en/latest/getting_started/installation.html ) , [ vllm repo gptq quantization ] ( http : //github.com/qwenlm/vllm-gptq ) ." ] ], "token": [ [ "line", "support", "int4", "quantization", "(", "int8", "supported", "soon", ")", ".", "installation", "slower", "(", "~10", "minute", ")", ".", "git", "clone", "http", ":", "//github.com/qwenlm/vllm-gptq", "cd", "vllm-gptq", "pip", "install", "-e", ".", "``", "`", "otherwise", ",", "please", "refer", "official", "vllm", "[", "installation", "instruction", "]", "(", "http", ":", "//docs.vllm.ai/en/latest/getting_started/installation.html", ")", ",", "[", "vllm", "repo", "gptq", "quantization", "]", "(", "http", ":", "//github.com/qwenlm/vllm-gptq", ")", "." ], [ "line support int4 quantization ( int8 supported soon ) .", "installation slower ( ~10 minute ) .", "git clone http : //github.com/qwenlm/vllm-gptq cd vllm-gptq pip install -e .", "`` ` otherwise , please refer official vllm [ installation instruction ] ( http : //docs.vllm.ai/en/latest/getting_started/installation.html ) , [ vllm repo gptq quantization ] ( http : //github.com/qwenlm/vllm-gptq ) ." ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "vLLM + Web Demo / OpenAI-like API\n\nYou can use FastChat to lauch a web demo or an OpenAI API server. First, install FastChat:\n\n```bash\npip install \"fschat[model_worker,webui]\"\n```\n\nTo run Qwen with vLLM and FastChat, you need launch a controller by:\n```bash\npython -m fastchat.serve.controller\n```\n\nThen you can launch the model worker, which means loading your model for inference. For single GPU inference, you can directly run:\n```bash\npython -m fastchat.serve.vllm_worker --model-path $model_path --trust-remote-code --dtype bfloat16\n", "sentence": [ [ "vllm", "+", "web", "demo", "/", "openai-like", "api", "use", "fastchat", "lauch", "web", "demo", "openai", "api", "server", ".", "first", ",", "install", "fastchat", ":", "``", "`", "bash", "pip", "install", "``", "fschat", "[", "model_worker", ",", "webui", "]", "''", "``", "`", "run", "qwen", "vllm", "fastchat", ",", "need", "launch", "controller", ":", "``", "`", "bash", "python", "-m", "fastchat.serve.controller", "``", "`", "launch", "model", "worker", ",", "mean", "loading", "model", "inference", ".", "single", "gpu", "inference", ",", "directly", "run", ":", "``", "`", "bash", "python", "-m", "fastchat.serve.vllm_worker", "--", "model-path", "$", "model_path", "--", "trust-remote-code", "--", "dtype", "bfloat16" ], [ "vllm + web demo / openai-like api use fastchat lauch web demo openai api server .", "first , install fastchat : `` ` bash pip install `` fschat [ model_worker , webui ] '' `` ` run qwen vllm fastchat , need launch controller : `` ` bash python -m fastchat.serve.controller `` ` launch model worker , mean loading model inference .", "single gpu inference , directly run : `` ` bash python -m fastchat.serve.vllm_worker -- model-path $ model_path -- trust-remote-code -- dtype bfloat16" ] ], "token": [ [ "vllm", "+", "web", "demo", "/", "openai-like", "api", "use", "fastchat", "lauch", "web", "demo", "openai", "api", "server", ".", "first", ",", "install", "fastchat", ":", "``", "`", "bash", "pip", "install", "``", "fschat", "[", "model_worker", ",", "webui", "]", "''", "``", "`", "run", "qwen", "vllm", "fastchat", ",", "need", "launch", "controller", ":", "``", "`", "bash", "python", "-m", "fastchat.serve.controller", "``", "`", "launch", "model", "worker", ",", "mean", "loading", "model", "inference", ".", "single", "gpu", "inference", ",", "directly", "run", ":", "``", "`", "bash", "python", "-m", "fastchat.serve.vllm_worker", "--", "model-path", "$", "model_path", "--", "trust-remote-code", "--", "dtype", "bfloat16" ], [ "vllm + web demo / openai-like api use fastchat lauch web demo openai api server .", "first , install fastchat : `` ` bash pip install `` fschat [ model_worker , webui ] '' `` ` run qwen vllm fastchat , need launch controller : `` ` bash python -m fastchat.serve.controller `` ` launch model worker , mean loading model inference .", "single gpu inference , directly run : `` ` bash python -m fastchat.serve.vllm_worker -- model-path $ model_path -- trust-remote-code -- dtype bfloat16" ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Web UI\n\nWe provide code for users to build a web UI demo (thanks to @wysaid). Before you start, make sure you install the following packages:\n\n```\npip install -r requirements_web_demo.txt\n```\n\nThen run the command below and click on the generated link:\n\n```bash\npython web_demo.py\n```\n\n

\n
\n \n
\n

\n\n", "sentence": [ [ "web", "ui", "provide", "code", "user", "build", "web", "ui", "demo", "(", "thanks", "@", "wysaid", ")", ".", "start", ",", "make", "sure", "install", "following", "package", ":", "``", "`", "pip", "install", "-r", "requirements_web_demo.txt", "``", "`", "run", "command", "click", "generated", "link", ":", "``", "`", "bash", "python", "web_demo.py", "``", "`", "<", "p", "align=", "''", "center", "''", ">", "<", "br", ">", "<", "img", "src=", "''", "assets/web_demo.gif", "''", "width=", "''", "600", "''", "/", ">", "<", "br", ">", "<", "p", ">" ], [ "web ui provide code user build web ui demo ( thanks @ wysaid ) .", "start , make sure install following package : `` ` pip install -r requirements_web_demo.txt `` ` run command click generated link : `` ` bash python web_demo.py `` ` < p align= '' center '' > < br > < img src= '' assets/web_demo.gif '' width= '' 600 '' / > < br > < p >" ] ], "token": [ [ "web", "ui", "provide", "code", "user", "build", "web", "ui", "demo", "(", "thanks", "@", "wysaid", ")", ".", "start", ",", "make", "sure", "install", "following", "package", ":", "``", "`", "pip", "install", "-r", "requirements_web_demo.txt", "``", "`", "run", "command", "click", "generated", "link", ":", "``", "`", "bash", "python", "web_demo.py", "``", "`", "<", "p", "align=", "''", "center", "''", ">", "<", "br", ">", "<", "img", "src=", "''", "assets/web_demo.gif", "''", "width=", "''", "600", "''", "/", ">", "<", "br", ">", "<", "p", ">" ], [ "web ui provide code user build web ui demo ( thanks @ wysaid ) .", "start , make sure install following package : `` ` pip install -r requirements_web_demo.txt `` ` run command click generated link : `` ` bash python web_demo.py `` ` < p align= '' center '' > < br > < img src= '' assets/web_demo.gif '' width= '' 600 '' / > < br > < p >" ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "API\n\nWe provide methods to deploy local API based on OpenAI API (thanks to @hanpenggit). Before you start, install the required packages:\n\n```bash\npip install fastapi uvicorn \"openai<1.0\" pydantic sse_starlette\n```\n\nThen run the command to deploy your API:\n\n```bash\npython openai_api.py\n```\n\nYou can change your arguments, e.g., `-c` for checkpoint name or path, `--cpu-only` for CPU deployment, etc. If you meet problems launching your API deployment, updating the packages to the latest version can probably solve them.\n\nUsing the API is also simple. See the example below:\n\n```python\nimport openai\nopenai.api_base = \"http://localhost:8000/v1\"\nopenai.api_key = \"none\"\n\n", "sentence": [ [ "api", "provide", "method", "deploy", "local", "api", "based", "openai", "api", "(", "thanks", "@", "hanpenggit", ")", ".", "start", ",", "install", "required", "package", ":", "``", "`", "bash", "pip", "install", "fastapi", "uvicorn", "``", "openai", "<", "1.0", "''", "pydantic", "sse_starlette", "``", "`", "run", "command", "deploy", "api", ":", "``", "`", "bash", "python", "openai_api.py", "``", "`", "change", "argument", ",", "e.g.", ",", "`", "-c", "`", "checkpoint", "name", "path", ",", "`", "--", "cpu-only", "`", "cpu", "deployment", ",", "etc", ".", "meet", "problem", "launching", "api", "deployment", ",", "updating", "package", "latest", "version", "probably", "solve", ".", "using", "api", "also", "simple", ".", "see", "example", ":", "``", "`", "python", "import", "openai", "openai.api_base", "=", "``", "http", ":", "//localhost:8000/v1", "''", "openai.api_key", "=", "``", "none", "''" ], [ "api provide method deploy local api based openai api ( thanks @ hanpenggit ) .", "start , install required package : `` ` bash pip install fastapi uvicorn `` openai < 1.0 '' pydantic sse_starlette `` ` run command deploy api : `` ` bash python openai_api.py `` ` change argument , e.g. , ` -c ` checkpoint name path , ` -- cpu-only ` cpu deployment , etc .", "meet problem launching api deployment , updating package latest version probably solve .", "using api also simple .", "see example : `` ` python import openai openai.api_base = `` http : //localhost:8000/v1 '' openai.api_key = `` none ''" ] ], "token": [ [ "api", "provide", "method", "deploy", "local", "api", "based", "openai", "api", "(", "thanks", "@", "hanpenggit", ")", ".", "start", ",", "install", "required", "package", ":", "``", "`", "bash", "pip", "install", "fastapi", "uvicorn", "``", "openai", "<", "1.0", "''", "pydantic", "sse_starlette", "``", "`", "run", "command", "deploy", "api", ":", "``", "`", "bash", "python", "openai_api.py", "``", "`", "change", "argument", ",", "e.g.", ",", "`", "-c", "`", "checkpoint", "name", "path", ",", "`", "--", "cpu-only", "`", "cpu", "deployment", ",", "etc", ".", "meet", "problem", "launching", "api", "deployment", ",", "updating", "package", "latest", "version", "probably", "solve", ".", "using", "api", "also", "simple", ".", "see", "example", ":", "``", "`", "python", "import", "openai", "openai.api_base", "=", "``", "http", ":", "//localhost:8000/v1", "''", "openai.api_key", "=", "``", "none", "''" ], [ "api provide method deploy local api based openai api ( thanks @ hanpenggit ) .", "start , install required package : `` ` bash pip install fastapi uvicorn `` openai < 1.0 '' pydantic sse_starlette `` ` run command deploy api : `` ` bash python openai_api.py `` ` change argument , e.g. , ` -c ` checkpoint name path , ` -- cpu-only ` cpu deployment , etc .", "meet problem launching api deployment , updating package latest version probably solve .", "using api also simple .", "see example : `` ` python import openai openai.api_base = `` http : //localhost:8000/v1 '' openai.api_key = `` none ''" ] ], "level of complexity": 0 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "\ud83d\udc33 Docker\n\nTo simplify the deployment process, we provide docker images with pre-built environments: [qwenllm/qwen](https://hub.docker.com/r/qwenllm/qwen). You only need to install the driver and download model files to launch demos, deploy OpenAI API, and finetune the model.\n\n", "sentence": [ [ "\ud83d\udc33", "docker", "simplify", "deployment", "process", ",", "provide", "docker", "image", "pre-built", "environment", ":", "[", "qwenllm/qwen", "]", "(", "http", ":", "//hub.docker.com/r/qwenllm/qwen", ")", ".", "need", "install", "driver", "download", "model", "file", "launch", "demo", ",", "deploy", "openai", "api", ",", "finetune", "model", "." ], [ "\ud83d\udc33 docker simplify deployment process , provide docker image pre-built environment : [ qwenllm/qwen ] ( http : //hub.docker.com/r/qwenllm/qwen ) .", "need install driver download model file launch demo , deploy openai api , finetune model ." ] ], "token": [ [ "\ud83d\udc33", "docker", "simplify", "deployment", "process", ",", "provide", "docker", "image", "pre-built", "environment", ":", "[", "qwenllm/qwen", "]", "(", "http", ":", "//hub.docker.com/r/qwenllm/qwen", ")", ".", "need", "install", "driver", "download", "model", "file", "launch", "demo", ",", "deploy", "openai", "api", ",", "finetune", "model", "." ], [ "\ud83d\udc33 docker simplify deployment process , provide docker image pre-built environment : [ qwenllm/qwen ] ( http : //hub.docker.com/r/qwenllm/qwen ) .", "need install driver download model file launch demo , deploy openai api , finetune model ." ] ], "level of complexity": -1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Preparation\n\n1. Install the correct version of Nvidia driver depending on the image to use:\n - `qwenllm/qwen:cu117` (**recommend**): `>= 515.48.07`\n - `qwenllm/qwen:cu114` (w/o flash-attention): `>= 470.82.01`\n - `qwenllm/qwen:cu121`: `>= 530.30.02`\n - `qwenllm/qwen:latest`: same as `qwenllm/qwen:cu117`\n\n2. Install and configure [docker](https://docs.docker.com/engine/install/) and [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html):\n\n```bash\n", "sentence": [ [ "preparation", "1", ".", "install", "correct", "version", "nvidia", "driver", "depending", "image", "use", ":", "-", "`", "qwenllm/qwen", ":", "cu117", "`", "(", "*", "*", "recommend", "*", "*", ")", ":", "`", ">", "=", "515.48.07", "`", "-", "`", "qwenllm/qwen", ":", "cu114", "`", "(", "w/o", "flash-attention", ")", ":", "`", ">", "=", "470.82.01", "`", "-", "`", "qwenllm/qwen", ":", "cu121", "`", ":", "`", ">", "=", "530.30.02", "`", "-", "`", "qwenllm/qwen", ":", "latest", "`", ":", "`", "qwenllm/qwen", ":", "cu117", "`", "2", ".", "install", "configure", "[", "docker", "]", "(", "http", ":", "//docs.docker.com/engine/install/", ")", "[", "nvidia-container-toolkit", "]", "(", "http", ":", "//docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html", ")", ":", "``", "`", "bash" ], [ "preparation 1 .", "install correct version nvidia driver depending image use : - ` qwenllm/qwen : cu117 ` ( * * recommend * * ) : ` > = 515.48.07 ` - ` qwenllm/qwen : cu114 ` ( w/o flash-attention ) : ` > = 470.82.01 ` - ` qwenllm/qwen : cu121 ` : ` > = 530.30.02 ` - ` qwenllm/qwen : latest ` : ` qwenllm/qwen : cu117 ` 2 .", "install configure [ docker ] ( http : //docs.docker.com/engine/install/ ) [ nvidia-container-toolkit ] ( http : //docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html ) : `` ` bash" ] ], "token": [ [ "preparation", "1", ".", "install", "correct", "version", "nvidia", "driver", "depending", "image", "use", ":", "-", "`", "qwenllm/qwen", ":", "cu117", "`", "(", "*", "*", "recommend", "*", "*", ")", ":", "`", ">", "=", "515.48.07", "`", "-", "`", "qwenllm/qwen", ":", "cu114", "`", "(", "w/o", "flash-attention", ")", ":", "`", ">", "=", "470.82.01", "`", "-", "`", "qwenllm/qwen", ":", "cu121", "`", ":", "`", ">", "=", "530.30.02", "`", "-", "`", "qwenllm/qwen", ":", "latest", "`", ":", "`", "qwenllm/qwen", ":", "cu117", "`", "2", ".", "install", "configure", "[", "docker", "]", "(", "http", ":", "//docs.docker.com/engine/install/", ")", "[", "nvidia-container-toolkit", "]", "(", "http", ":", "//docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html", ")", ":", "``", "`", "bash" ], [ "preparation 1 .", "install correct version nvidia driver depending image use : - ` qwenllm/qwen : cu117 ` ( * * recommend * * ) : ` > = 515.48.07 ` - ` qwenllm/qwen : cu114 ` ( w/o flash-attention ) : ` > = 470.82.01 ` - ` qwenllm/qwen : cu121 ` : ` > = 530.30.02 ` - ` qwenllm/qwen : latest ` : ` qwenllm/qwen : cu117 ` 2 .", "install configure [ docker ] ( http : //docs.docker.com/engine/install/ ) [ nvidia-container-toolkit ] ( http : //docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html ) : `` ` bash" ] ], "level of complexity": 1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "test if docker is correctly installed\nsudo docker run hello-world\n\n", "sentence": [ [ "test", "docker", "correctly", "installed", "sudo", "docker", "run", "hello-world" ], [ "test docker correctly installed sudo docker run hello-world" ] ], "token": [ [ "test", "docker", "correctly", "installed", "sudo", "docker", "run", "hello-world" ], [ "test docker correctly installed sudo docker run hello-world" ] ], "level of complexity": -1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "test if nvidia-container-toolkit is correctly installed\nsudo docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi\n```\n\n3. Download model checkpoints and codes to your environment (see [here](#DownloadModel)).\n\n", "sentence": [ [ "test", "nvidia-container-toolkit", "correctly", "installed", "sudo", "docker", "run", "--", "rm", "--", "runtime=nvidia", "--", "gpus", "ubuntu", "nvidia-smi", "``", "`", "3", ".", "download", "model", "checkpoint", "code", "environment", "(", "see", "[", "]", "(", "#", "downloadmodel", ")", ")", "." ], [ "test nvidia-container-toolkit correctly installed sudo docker run -- rm -- runtime=nvidia -- gpus ubuntu nvidia-smi `` ` 3 .", "download model checkpoint code environment ( see [ ] ( # downloadmodel ) ) ." ] ], "token": [ [ "test", "nvidia-container-toolkit", "correctly", "installed", "sudo", "docker", "run", "--", "rm", "--", "runtime=nvidia", "--", "gpus", "ubuntu", "nvidia-smi", "``", "`", "3", ".", "download", "model", "checkpoint", "code", "environment", "(", "see", "[", "]", "(", "#", "downloadmodel", ")", ")", "." ], [ "test nvidia-container-toolkit correctly installed sudo docker run -- rm -- runtime=nvidia -- gpus ubuntu nvidia-smi `` ` 3 .", "download model checkpoint code environment ( see [ ] ( # downloadmodel ) ) ." ] ], "level of complexity": 1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Deployment\n\nHere we use Qwen-7B-Chat as an example. Before launching a web demo or API, you can setup the configuration as shown below:\n\n```bash\nIMAGE_NAME=qwenllm/qwen:cu117\nPORT=8901\nCHECKPOINT_PATH=/path/to/Qwen-7B-Chat ", "sentence": [ [ "deployment", "use", "qwen-7b-chat", "example", ".", "launching", "web", "demo", "api", ",", "setup", "configuration", "shown", ":", "``", "`", "bash", "image_name=qwenllm/qwen", ":", "cu117", "port=8901", "checkpoint_path=/path/to/qwen-7b-chat" ], [ "deployment use qwen-7b-chat example .", "launching web demo api , setup configuration shown : `` ` bash image_name=qwenllm/qwen : cu117 port=8901 checkpoint_path=/path/to/qwen-7b-chat" ] ], "token": [ [ "deployment", "use", "qwen-7b-chat", "example", ".", "launching", "web", "demo", "api", ",", "setup", "configuration", "shown", ":", "``", "`", "bash", "image_name=qwenllm/qwen", ":", "cu117", "port=8901", "checkpoint_path=/path/to/qwen-7b-chat" ], [ "deployment use qwen-7b-chat example .", "launching web demo api , setup configuration shown : `` ` bash image_name=qwenllm/qwen : cu117 port=8901 checkpoint_path=/path/to/qwen-7b-chat" ] ], "level of complexity": -1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Finetuning\n\nThe method of finetuning using the pre-built Docker image is basically the same as [the above chapter](#Finetuning) (we have already installed dependencies in the image):\n\nThe following is an example of single-GPU LoRA:\n```bash\nIMAGE_NAME=qwenllm/qwen:cu117\nCHECKPOINT_PATH=/path/to/Qwen-7B ", "sentence": [ [ "finetuning", "method", "finetuning", "using", "pre-built", "docker", "image", "basically", "[", "chapter", "]", "(", "#", "finetuning", ")", "(", "already", "installed", "dependency", "image", ")", ":", "following", "example", "single-gpu", "lora", ":", "``", "`", "bash", "image_name=qwenllm/qwen", ":", "cu117", "checkpoint_path=/path/to/qwen-7b" ], [ "finetuning method finetuning using pre-built docker image basically [ chapter ] ( # finetuning ) ( already installed dependency image ) : following example single-gpu lora : `` ` bash image_name=qwenllm/qwen : cu117 checkpoint_path=/path/to/qwen-7b" ] ], "token": [ [ "finetuning", "method", "finetuning", "using", "pre-built", "docker", "image", "basically", "[", "chapter", "]", "(", "#", "finetuning", ")", "(", "already", "installed", "dependency", "image", ")", ":", "following", "example", "single-gpu", "lora", ":", "``", "`", "bash", "image_name=qwenllm/qwen", ":", "cu117", "checkpoint_path=/path/to/qwen-7b" ], [ "finetuning method finetuning using pre-built docker image basically [ chapter ] ( # finetuning ) ( already installed dependency image ) : following example single-gpu lora : `` ` bash image_name=qwenllm/qwen : cu117 checkpoint_path=/path/to/qwen-7b" ] ], "level of complexity": -1 }, { "url": "https://github.com/QwenLM/Qwen", "readme_url": "https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md", "topic": [ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ], "text": "Tool Usage\n\nQwen-Chat has been optimized for tool usage and function calling capabilities. Users can develop agents, LangChain applications, and even augment Qwen with a Python Code Interpreter.\n\nWe provide documentation on how to implement tool calls based on the principle of ReAct Prompting, please refer to [the ReAct example](examples/react_prompt.md). Based on this principle, we provide support for function calling in [openai_api.py](openai_api.py).\n\nWe have tested the model's tool calling capabilities on our open-source Chinese evaluation benchmark and found that Qwen-Chat consistently performs well:\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
Chinese Tool-Use Benchmark (Version 20231206)
ModelTool Selection (Acc.\u2191)Tool Input (Rouge-L\u2191)False Positive Error\u2193
GPT-498.0%0.95323.9%
GPT-3.574.5%0.80780.6%
Qwen-1_8B-Chat85.0%0.83927.6%
Qwen-7B-Chat95.5%0.90011.6%
Qwen-14B-Chat96.9%0.9175.6%
Qwen-72B-Chat98.2%0.9271.1%
\n\nTo assess Qwen's ability to use the Python Code Interpreter for tasks such as mathematical problem solving, data visualization, and other general-purpose tasks such as file handling and web scraping, we have created and open-sourced a benchmark specifically designed for evaluating these capabilities. You can find the benchmark at this [link](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark).\n\nWe have observed that Qwen performs well in terms of code executability and result accuracy when generating code:\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
Code Interpreter Benchmark (Version 20231206)
ModelAccuracy of Code Execution Results (%)Executable Rate of Code (%)
Math\u2191Visualization-Hard\u2191Visualization-Easy\u2191General\u2191
GPT-482.866.760.882.8
GPT-3.547.333.355.774.1
LLaMA2-13B-Chat8.31.215.248.3
CodeLLaMA-13B-Instruct28.215.521.574.1
InternLM-20B-Chat34.610.725.165.5
ChatGLM3-6B54.24.815.267.1
Qwen-1.8B-Chat25.621.422.865.5
Qwen-7B-Chat41.923.838.067.2
Qwen-14B-Chat58.431.045.665.5
Qwen-72B-Chat72.741.743.082.8
\n\n

\n
\n \n
\n

\n\n
\n\n", "sentence": [ [ "tool", "usage", "qwen-chat", "optimized", "tool", "usage", "function", "calling", "capability", ".", "user", "develop", "agent", ",", "langchain", "application", ",", "even", "augment", "qwen", "python", "code", "interpreter", ".", "provide", "documentation", "implement", "tool", "call", "based", "principle", "react", "prompting", ",", "please", "refer", "[", "react", "example", "]", "(", "examples/react_prompt.md", ")", ".", "based", "principle", ",", "provide", "support", "function", "calling", "[", "openai_api.py", "]", "(", "openai_api.py", ")", ".", "tested", "model", "'s", "tool", "calling", "capability", "open-source", "chinese", "evaluation", "benchmark", "found", "qwen-chat", "consistently", "performs", "well", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "colspan=", "''", "4", "''", "align=", "''", "center", "''", ">", "chinese", "tool-use", "benchmark", "(", "version", "20231206", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "model", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "tool", "selection", "(", "acc.\u2191", ")", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "tool", "input", "(", "rouge-l\u2191", ")", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "false", "positive", "error\u2193", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "98.0", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.953", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.9", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-3.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.5", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.807", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "80.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-1_8b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "85.0", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.839", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-7b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "95.5", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.900", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-14b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "96.9", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.917", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "5.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-72b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "98.2", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.927", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "1.1", "%", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "ass", "qwen", "'s", "ability", "use", "python", "code", "interpreter", "task", "mathematical", "problem", "solving", ",", "data", "visualization", ",", "general-purpose", "task", "file", "handling", "web", "scraping", ",", "created", "open-sourced", "benchmark", "specifically", "designed", "evaluating", "capability", ".", "find", "benchmark", "[", "link", "]", "(", "http", ":", "//github.com/qwenlm/qwen-agent/tree/main/benchmark", ")", ".", "observed", "qwen", "performs", "well", "term", "code", "executability", "result", "accuracy", "generating", "code", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "colspan=", "''", "5", "''", "align=", "''", "center", "''", ">", "code", "interpreter", "benchmark", "(", "version", "20231206", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", "align=", "''", "center", "''", ">", "model", "<", "/th", ">", "<", "th", "colspan=", "''", "3", "''", "align=", "''", "center", "''", ">", "accuracy", "code", "execution", "result", "(", "%", ")", "<", "/th", ">", "<", "th", "colspan=", "''", "1", "''", "align=", "''", "center", "''", ">", "executable", "rate", "code", "(", "%", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "math\u2191", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "visualization-hard\u2191", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "visualization-easy\u2191", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "general\u2191", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "66.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "60.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-3.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "47.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "33.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "55.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "llama2-13b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "8.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "1.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.3", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "codellama-13b-instruct", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "28.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "internlm-20b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "10.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "25.1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "chatglm3-6b", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "54.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "4.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-1.8b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "25.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "22.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-7b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "41.9", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "38.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.2", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-14b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "58.4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "31.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "45.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-72b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "72.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "41.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "<", "p", "align=", "''", "center", "''", ">", "<", "br", ">", "<", "img", "src=", "''", "assets/code_interpreter_showcase_001.jpg", "''", "/", ">", "<", "br", ">", "<", "p", ">", "<", "br", ">" ], [ "tool usage qwen-chat optimized tool usage function calling capability .", "user develop agent , langchain application , even augment qwen python code interpreter .", "provide documentation implement tool call based principle react prompting , please refer [ react example ] ( examples/react_prompt.md ) .", "based principle , provide support function calling [ openai_api.py ] ( openai_api.py ) .", "tested model 's tool calling capability open-source chinese evaluation benchmark found qwen-chat consistently performs well : < table > < tr > < th colspan= '' 4 '' align= '' center '' > chinese tool-use benchmark ( version 20231206 ) < /th > < /tr > < tr > < th align= '' center '' > model < /th > < th align= '' center '' > tool selection ( acc.\u2191 ) < /th > < th align= '' center '' > tool input ( rouge-l\u2191 ) < /th > < th align= '' center '' > false positive error\u2193 < /th > < /tr > < tr > < td > gpt-4 < /td > < td align= '' center '' > 98.0 % < /td > < td align= '' center '' > 0.953 < /td > < td align= '' center '' > 23.9 % < /td > < /tr > < tr > < td > gpt-3.5 < /td > < td align= '' center '' > 74.5 % < /td > < td align= '' center '' > 0.807 < /td > < td align= '' center '' > 80.6 % < /td > < /tr > < tr > < td > qwen-1_8b-chat < /td > < td align= '' center '' > 85.0 % < /td > < td align= '' center '' > 0.839 < /td > < td align= '' center '' > 27.6 % < /td > < /tr > < tr > < td > qwen-7b-chat < /td > < td align= '' center '' > 95.5 % < /td > < td align= '' center '' > 0.900 < /td > < td align= '' center '' > 11.6 % < /td > < /tr > < tr > < td > qwen-14b-chat < /td > < td align= '' center '' > 96.9 % < /td > < td align= '' center '' > 0.917 < /td > < td align= '' center '' > 5.6 % < /td > < /tr > < tr > < td > qwen-72b-chat < /td > < td align= '' center '' > 98.2 % < /td > < td align= '' center '' > 0.927 < /td > < td align= '' center '' > 1.1 % < /td > < /tr > < /table > ass qwen 's ability use python code interpreter task mathematical problem solving , data visualization , general-purpose task file handling web scraping , created open-sourced benchmark specifically designed evaluating capability .", "find benchmark [ link ] ( http : //github.com/qwenlm/qwen-agent/tree/main/benchmark ) .", "observed qwen performs well term code executability result accuracy generating code : < table > < tr > < th colspan= '' 5 '' align= '' center '' > code interpreter benchmark ( version 20231206 ) < /th > < /tr > < tr > < th rowspan= '' 2 '' align= '' center '' > model < /th > < th colspan= '' 3 '' align= '' center '' > accuracy code execution result ( % ) < /th > < th colspan= '' 1 '' align= '' center '' > executable rate code ( % ) < /th > < /tr > < tr > < th align= '' center '' > math\u2191 < /th > < th align= '' center '' > visualization-hard\u2191 < /th > < th align= '' center '' > visualization-easy\u2191 < /th > < th align= '' center '' > general\u2191 < /th > < /tr > < tr > < td > gpt-4 < /td > < td align= '' center '' > 82.8 < /td > < td align= '' center '' > 66.7 < /td > < td align= '' center '' > 60.8 < /td > < td align= '' center '' > 82.8 < /td > < /tr > < tr > < td > gpt-3.5 < /td > < td align= '' center '' > 47.3 < /td > < td align= '' center '' > 33.3 < /td > < td align= '' center '' > 55.7 < /td > < td align= '' center '' > 74.1 < /td > < /tr > < tr > < td > llama2-13b-chat < /td > < td align= '' center '' > 8.3 < /td > < td align= '' center '' > 1.2 < /td > < td align= '' center '' > 15.2 < /td > < td align= '' center '' > 48.3 < /td > < /tr > < tr > < td > codellama-13b-instruct < /td > < td align= '' center '' > 28.2 < /td > < td align= '' center '' > 15.5 < /td > < td align= '' center '' > 21.5 < /td > < td align= '' center '' > 74.1 < /td > < /tr > < tr > < td > internlm-20b-chat < /td > < td align= '' center '' > 34.6 < /td > < td align= '' center '' > 10.7 < /td > < td align= '' center '' > 25.1 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > chatglm3-6b < /td > < td align= '' center '' > 54.2 < /td > < td align= '' center '' > 4.8 < /td > < td align= '' center '' > 15.2 < /td > < td align= '' center '' > 67.1 < /td > < /tr > < tr > < td > qwen-1.8b-chat < /td > < td align= '' center '' > 25.6 < /td > < td align= '' center '' > 21.4 < /td > < td align= '' center '' > 22.8 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > qwen-7b-chat < /td > < td align= '' center '' > 41.9 < /td > < td align= '' center '' > 23.8 < /td > < td align= '' center '' > 38.0 < /td > < td align= '' center '' > 67.2 < /td > < /tr > < tr > < td > qwen-14b-chat < /td > < td align= '' center '' > 58.4 < /td > < td align= '' center '' > 31.0 < /td > < td align= '' center '' > 45.6 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > qwen-72b-chat < /td > < td align= '' center '' > 72.7 < /td > < td align= '' center '' > 41.7 < /td > < td align= '' center '' > 43.0 < /td > < td align= '' center '' > 82.8 < /td > < /tr > < /table > < p align= '' center '' > < br > < img src= '' assets/code_interpreter_showcase_001.jpg '' / > < br > < p > < br >" ] ], "token": [ [ "tool", "usage", "qwen-chat", "optimized", "tool", "usage", "function", "calling", "capability", ".", "user", "develop", "agent", ",", "langchain", "application", ",", "even", "augment", "qwen", "python", "code", "interpreter", ".", "provide", "documentation", "implement", "tool", "call", "based", "principle", "react", "prompting", ",", "please", "refer", "[", "react", "example", "]", "(", "examples/react_prompt.md", ")", ".", "based", "principle", ",", "provide", "support", "function", "calling", "[", "openai_api.py", "]", "(", "openai_api.py", ")", ".", "tested", "model", "'s", "tool", "calling", "capability", "open-source", "chinese", "evaluation", "benchmark", "found", "qwen-chat", "consistently", "performs", "well", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "colspan=", "''", "4", "''", "align=", "''", "center", "''", ">", "chinese", "tool-use", "benchmark", "(", "version", "20231206", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "model", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "tool", "selection", "(", "acc.\u2191", ")", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "tool", "input", "(", "rouge-l\u2191", ")", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "false", "positive", "error\u2193", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "98.0", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.953", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.9", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-3.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.5", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.807", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "80.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-1_8b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "85.0", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.839", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-7b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "95.5", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.900", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-14b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "96.9", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.917", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "5.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-72b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "98.2", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.927", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "1.1", "%", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "ass", "qwen", "'s", "ability", "use", "python", "code", "interpreter", "task", "mathematical", "problem", "solving", ",", "data", "visualization", ",", "general-purpose", "task", "file", "handling", "web", "scraping", ",", "created", "open-sourced", "benchmark", "specifically", "designed", "evaluating", "capability", ".", "find", "benchmark", "[", "link", "]", "(", "http", ":", "//github.com/qwenlm/qwen-agent/tree/main/benchmark", ")", ".", "observed", "qwen", "performs", "well", "term", "code", "executability", "result", "accuracy", "generating", "code", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "colspan=", "''", "5", "''", "align=", "''", "center", "''", ">", "code", "interpreter", "benchmark", "(", "version", "20231206", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", "align=", "''", "center", "''", ">", "model", "<", "/th", ">", "<", "th", "colspan=", "''", "3", "''", "align=", "''", "center", "''", ">", "accuracy", "code", "execution", "result", "(", "%", ")", "<", "/th", ">", "<", "th", "colspan=", "''", "1", "''", "align=", "''", "center", "''", ">", "executable", "rate", "code", "(", "%", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "math\u2191", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "visualization-hard\u2191", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "visualization-easy\u2191", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "general\u2191", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "66.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "60.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-3.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "47.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "33.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "55.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "llama2-13b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "8.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "1.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.3", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "codellama-13b-instruct", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "28.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "internlm-20b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "10.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "25.1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "chatglm3-6b", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "54.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "4.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-1.8b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "25.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "22.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-7b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "41.9", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "38.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.2", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-14b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "58.4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "31.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "45.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-72b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "72.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "41.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "<", "p", "align=", "''", "center", "''", ">", "<", "br", ">", "<", "img", "src=", "''", "assets/code_interpreter_showcase_001.jpg", "''", "/", ">", "<", "br", ">", "<", "p", ">", "<", "br", ">" ], [ "tool usage qwen-chat optimized tool usage function calling capability .", "user develop agent , langchain application , even augment qwen python code interpreter .", "provide documentation implement tool call based principle react prompting , please refer [ react example ] ( examples/react_prompt.md ) .", "based principle , provide support function calling [ openai_api.py ] ( openai_api.py ) .", "tested model 's tool calling capability open-source chinese evaluation benchmark found qwen-chat consistently performs well : < table > < tr > < th colspan= '' 4 '' align= '' center '' > chinese tool-use benchmark ( version 20231206 ) < /th > < /tr > < tr > < th align= '' center '' > model < /th > < th align= '' center '' > tool selection ( acc.\u2191 ) < /th > < th align= '' center '' > tool input ( rouge-l\u2191 ) < /th > < th align= '' center '' > false positive error\u2193 < /th > < /tr > < tr > < td > gpt-4 < /td > < td align= '' center '' > 98.0 % < /td > < td align= '' center '' > 0.953 < /td > < td align= '' center '' > 23.9 % < /td > < /tr > < tr > < td > gpt-3.5 < /td > < td align= '' center '' > 74.5 % < /td > < td align= '' center '' > 0.807 < /td > < td align= '' center '' > 80.6 % < /td > < /tr > < tr > < td > qwen-1_8b-chat < /td > < td align= '' center '' > 85.0 % < /td > < td align= '' center '' > 0.839 < /td > < td align= '' center '' > 27.6 % < /td > < /tr > < tr > < td > qwen-7b-chat < /td > < td align= '' center '' > 95.5 % < /td > < td align= '' center '' > 0.900 < /td > < td align= '' center '' > 11.6 % < /td > < /tr > < tr > < td > qwen-14b-chat < /td > < td align= '' center '' > 96.9 % < /td > < td align= '' center '' > 0.917 < /td > < td align= '' center '' > 5.6 % < /td > < /tr > < tr > < td > qwen-72b-chat < /td > < td align= '' center '' > 98.2 % < /td > < td align= '' center '' > 0.927 < /td > < td align= '' center '' > 1.1 % < /td > < /tr > < /table > ass qwen 's ability use python code interpreter task mathematical problem solving , data visualization , general-purpose task file handling web scraping , created open-sourced benchmark specifically designed evaluating capability .", "find benchmark [ link ] ( http : //github.com/qwenlm/qwen-agent/tree/main/benchmark ) .", "observed qwen performs well term code executability result accuracy generating code : < table > < tr > < th colspan= '' 5 '' align= '' center '' > code interpreter benchmark ( version 20231206 ) < /th > < /tr > < tr > < th rowspan= '' 2 '' align= '' center '' > model < /th > < th colspan= '' 3 '' align= '' center '' > accuracy code execution result ( % ) < /th > < th colspan= '' 1 '' align= '' center '' > executable rate code ( % ) < /th > < /tr > < tr > < th align= '' center '' > math\u2191 < /th > < th align= '' center '' > visualization-hard\u2191 < /th > < th align= '' center '' > visualization-easy\u2191 < /th > < th align= '' center '' > general\u2191 < /th > < /tr > < tr > < td > gpt-4 < /td > < td align= '' center '' > 82.8 < /td > < td align= '' center '' > 66.7 < /td > < td align= '' center '' > 60.8 < /td > < td align= '' center '' > 82.8 < /td > < /tr > < tr > < td > gpt-3.5 < /td > < td align= '' center '' > 47.3 < /td > < td align= '' center '' > 33.3 < /td > < td align= '' center '' > 55.7 < /td > < td align= '' center '' > 74.1 < /td > < /tr > < tr > < td > llama2-13b-chat < /td > < td align= '' center '' > 8.3 < /td > < td align= '' center '' > 1.2 < /td > < td align= '' center '' > 15.2 < /td > < td align= '' center '' > 48.3 < /td > < /tr > < tr > < td > codellama-13b-instruct < /td > < td align= '' center '' > 28.2 < /td > < td align= '' center '' > 15.5 < /td > < td align= '' center '' > 21.5 < /td > < td align= '' center '' > 74.1 < /td > < /tr > < tr > < td > internlm-20b-chat < /td > < td align= '' center '' > 34.6 < /td > < td align= '' center '' > 10.7 < /td > < td align= '' center '' > 25.1 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > chatglm3-6b < /td > < td align= '' center '' > 54.2 < /td > < td align= '' center '' > 4.8 < /td > < td align= '' center '' > 15.2 < /td > < td align= '' center '' > 67.1 < /td > < /tr > < tr > < td > qwen-1.8b-chat < /td > < td align= '' center '' > 25.6 < /td > < td align= '' center '' > 21.4 < /td > < td align= '' center '' > 22.8 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > qwen-7b-chat < /td > < td align= '' center '' > 41.9 < /td > < td align= '' center '' > 23.8 < /td > < td align= '' center '' > 38.0 < /td > < td align= '' center '' > 67.2 < /td > < /tr > < tr > < td > qwen-14b-chat < /td > < td align= '' center '' > 58.4 < /td > < td align= '' center '' > 31.0 < /td > < td align= '' center '' > 45.6 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > qwen-72b-chat < /td > < td align= '' center '' > 72.7 < /td > < td align= '' center '' > 41.7 < /td > < td align= '' center '' > 43.0 < /td > < td align= '' center '' > 82.8 < /td > < /tr > < /table > < p align= '' center '' > < br > < img src= '' assets/code_interpreter_showcase_001.jpg '' / > < br > < p > < br >" ] ], "level of complexity": -1 }, { "url": "https://github.com/rasbt/LLMs-from-scratch", "readme_url": "https://raw.githubusercontent.com/rasbt/LLMs-from-scratch/main/README.md", "topic": [ "chatgpt", "gpt", "large-language-models", "llm", "python", "pytorch" ], "text": "Table of Contents\n\nPlease note that the `Readme.md` file is a Markdown (`.md`) file. If you have downloaded this code bundle from the Manning website and are viewing it on your local computer, I recommend using a Markdown editor or previewer for proper viewing. If you haven't installed a Markdown editor yet, [MarkText](https://www.marktext.cc) is a good free option.\n\nAlternatively, you can view this and other files on GitHub at [https://github.com/rasbt/LLMs-from-scratch](https://github.com/rasbt/LLMs-from-scratch).\n\n
\n
\n\n| Chapter Title | Main Code (for quick access) | All Code + Supplementary |\n|------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|-------------------------------|\n| Ch 1: Understanding Large Language Models | No code | No code |\n| Ch 2: Working with Text Data | - [ch02.ipynb](ch02/01_main-chapter-code/ch02.ipynb)
- [dataloader.ipynb](ch02/01_main-chapter-code/dataloader.ipynb) (summary)
- [exercise-solutions.ipynb](ch02/01_main-chapter-code/exercise-solutions.ipynb) | [./ch02](./ch02) |\n| Ch 3: Coding Attention Mechanisms | - [ch03.ipynb](ch03/01_main-chapter-code/ch03.ipynb)
- [multihead-attention.ipynb](ch03/01_main-chapter-code/multihead-attention.ipynb) (summary) | [./ch03](./ch03) |\n| Ch 4: Implementing a GPT Model from Scratch | - [ch04.ipynb](ch04/01_main-chapter-code/ch04.ipynb)
- [gpt.py](ch04/01_main-chapter-code/gpt.py) (summary) | [./ch04](./ch04) |\n| Ch 5: Pretraining on Unlabeled Data | Q1 2024 | ... |\n| Ch 6: Finetuning for Text Classification | Q2 2024 | ... |\n| Ch 7: Finetuning with Human Feedback | Q2 2024 | ... |\n| Ch 8: Using Large Language Models in Practice | Q2/3 2024 | ... |\n| Appendix A: Introduction to PyTorch* | - [code-part1.ipynb](appendix-A/03_main-chapter-code/code-part1.ipynb)
- [code-part2.ipynb](appendix-A/03_main-chapter-code/code-part2.ipynb)
- [DDP-script.py](appendix-A/03_main-chapter-code/DDP-script.py)
- [exercise-solutions.ipynb](appendix-A/03_main-chapter-code/exercise-solutions.ipynb) | [./appendix-A](./appendix-A) |\n\n(* Please see [this](appendix-A/01_optional-python-setup-preferences) and [this](appendix-A/02_installing-python-libraries) folder if you need more guidance on installing Python and Python packages.)\n\n\n\n
\n
\n\n\n\n(A mental model summarizing the contents covered in this book.)\n\n", "sentence": [ [ "table", "content", "please", "note", "`", "readme.md", "`", "file", "markdown", "(", "`", ".md", "`", ")", "file", ".", "downloaded", "code", "bundle", "manning", "website", "viewing", "local", "computer", ",", "recommend", "using", "markdown", "editor", "previewer", "proper", "viewing", ".", "n't", "installed", "markdown", "editor", "yet", ",", "[", "marktext", "]", "(", "http", ":", "//www.marktext.cc", ")", "good", "free", "option", ".", "alternatively", ",", "view", "file", "github", "[", "http", ":", "//github.com/rasbt/llms-from-scratch", "]", "(", "http", ":", "//github.com/rasbt/llms-from-scratch", ")", ".", "<", "br", ">", "<", "br", ">", "|", "chapter", "title", "|", "main", "code", "(", "quick", "access", ")", "|", "code", "+", "supplementary", "|", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-|", "|", "ch", "1", ":", "understanding", "large", "language", "model", "|", "code", "|", "code", "|", "|", "ch", "2", ":", "working", "text", "data", "|", "-", "[", "ch02.ipynb", "]", "(", "ch02/01_main-chapter-code/ch02.ipynb", ")", "<", "br/", ">", "-", "[", "dataloader.ipynb", "]", "(", "ch02/01_main-chapter-code/dataloader.ipynb", ")", "(", "summary", ")", "<", "br/", ">", "-", "[", "exercise-solutions.ipynb", "]", "(", "ch02/01_main-chapter-code/exercise-solutions.ipynb", ")", "|", "[", "./ch02", "]", "(", "./ch02", ")", "|", "|", "ch", "3", ":", "coding", "attention", "mechanism", "|", "-", "[", "ch03.ipynb", "]", "(", "ch03/01_main-chapter-code/ch03.ipynb", ")", "<", "br/", ">", "-", "[", "multihead-attention.ipynb", "]", "(", "ch03/01_main-chapter-code/multihead-attention.ipynb", ")", "(", "summary", ")", "|", "[", "./ch03", "]", "(", "./ch03", ")", "|", "|", "ch", "4", ":", "implementing", "gpt", "model", "scratch", "|", "-", "[", "ch04.ipynb", "]", "(", "ch04/01_main-chapter-code/ch04.ipynb", ")", "<", "br/", ">", "-", "[", "gpt.py", "]", "(", "ch04/01_main-chapter-code/gpt.py", ")", "(", "summary", ")", "|", "[", "./ch04", "]", "(", "./ch04", ")", "|", "|", "ch", "5", ":", "pretraining", "unlabeled", "data", "|", "q1", "2024", "|", "...", "|", "|", "ch", "6", ":", "finetuning", "text", "classification", "|", "q2", "2024", "|", "...", "|", "|", "ch", "7", ":", "finetuning", "human", "feedback", "|", "q2", "2024", "|", "...", "|", "|", "ch", "8", ":", "using", "large", "language", "model", "practice", "|", "q2/3", "2024", "|", "...", "|", "|", "appendix", ":", "introduction", "pytorch", "*", "|", "-", "[", "code-part1.ipynb", "]", "(", "appendix-a/03_main-chapter-code/code-part1.ipynb", ")", "<", "br/", ">", "-", "[", "code-part2.ipynb", "]", "(", "appendix-a/03_main-chapter-code/code-part2.ipynb", ")", "<", "br/", ">", "-", "[", "ddp-script.py", "]", "(", "appendix-a/03_main-chapter-code/ddp-script.py", ")", "<", "br/", ">", "-", "[", "exercise-solutions.ipynb", "]", "(", "appendix-a/03_main-chapter-code/exercise-solutions.ipynb", ")", "|", "[", "./appendix-a", "]", "(", "./appendix-a", ")", "|", "(", "*", "please", "see", "[", "]", "(", "appendix-a/01_optional-python-setup-preferences", ")", "[", "]", "(", "appendix-a/02_installing-python-libraries", ")", "folder", "need", "guidance", "installing", "python", "python", "package", ".", ")", "<", "br", ">", "<", "br", ">", "<", "img", "src=", "''", "images/mental-model.jpg", "''", "width=", "''", "600px", "''", ">", "(", "mental", "model", "summarizing", "content", "covered", "book", ".", ")" ], [ "table content please note ` readme.md ` file markdown ( ` .md ` ) file .", "downloaded code bundle manning website viewing local computer , recommend using markdown editor previewer proper viewing .", "n't installed markdown editor yet , [ marktext ] ( http : //www.marktext.cc ) good free option .", "alternatively , view file github [ http : //github.com/rasbt/llms-from-scratch ] ( http : //github.com/rasbt/llms-from-scratch ) .", "< br > < br > | chapter title | main code ( quick access ) | code + supplementary | | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -| -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -| | ch 1 : understanding large language model | code | code | | ch 2 : working text data | - [ ch02.ipynb ] ( ch02/01_main-chapter-code/ch02.ipynb ) < br/ > - [ dataloader.ipynb ] ( ch02/01_main-chapter-code/dataloader.ipynb ) ( summary ) < br/ > - [ exercise-solutions.ipynb ] ( ch02/01_main-chapter-code/exercise-solutions.ipynb ) | [ ./ch02 ] ( ./ch02 ) | | ch 3 : coding attention mechanism | - [ ch03.ipynb ] ( ch03/01_main-chapter-code/ch03.ipynb ) < br/ > - [ multihead-attention.ipynb ] ( ch03/01_main-chapter-code/multihead-attention.ipynb ) ( summary ) | [ ./ch03 ] ( ./ch03 ) | | ch 4 : implementing gpt model scratch | - [ ch04.ipynb ] ( ch04/01_main-chapter-code/ch04.ipynb ) < br/ > - [ gpt.py ] ( ch04/01_main-chapter-code/gpt.py ) ( summary ) | [ ./ch04 ] ( ./ch04 ) | | ch 5 : pretraining unlabeled data | q1 2024 | ... | | ch 6 : finetuning text classification | q2 2024 | ... | | ch 7 : finetuning human feedback | q2 2024 | ... | | ch 8 : using large language model practice | q2/3 2024 | ... | | appendix : introduction pytorch * | - [ code-part1.ipynb ] ( appendix-a/03_main-chapter-code/code-part1.ipynb ) < br/ > - [ code-part2.ipynb ] ( appendix-a/03_main-chapter-code/code-part2.ipynb ) < br/ > - [ ddp-script.py ] ( appendix-a/03_main-chapter-code/ddp-script.py ) < br/ > - [ exercise-solutions.ipynb ] ( appendix-a/03_main-chapter-code/exercise-solutions.ipynb ) | [ ./appendix-a ] ( ./appendix-a ) | ( * please see [ ] ( appendix-a/01_optional-python-setup-preferences ) [ ] ( appendix-a/02_installing-python-libraries ) folder need guidance installing python python package . )", "< br > < br > < img src= '' images/mental-model.jpg '' width= '' 600px '' > ( mental model summarizing content covered book . )" ] ], "token": [ [ "table", "content", "please", "note", "`", "readme.md", "`", "file", "markdown", "(", "`", ".md", "`", ")", "file", ".", "downloaded", "code", "bundle", "manning", "website", "viewing", "local", "computer", ",", "recommend", "using", "markdown", "editor", "previewer", "proper", "viewing", ".", "n't", "installed", "markdown", "editor", "yet", ",", "[", "marktext", "]", "(", "http", ":", "//www.marktext.cc", ")", "good", "free", "option", ".", "alternatively", ",", "view", "file", "github", "[", "http", ":", "//github.com/rasbt/llms-from-scratch", "]", "(", "http", ":", "//github.com/rasbt/llms-from-scratch", ")", ".", "<", "br", ">", "<", "br", ">", "|", "chapter", "title", "|", "main", "code", "(", "quick", "access", ")", "|", "code", "+", "supplementary", "|", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-|", "|", "ch", "1", ":", "understanding", "large", "language", "model", "|", "code", "|", "code", "|", "|", "ch", "2", ":", "working", "text", "data", "|", "-", "[", "ch02.ipynb", "]", "(", "ch02/01_main-chapter-code/ch02.ipynb", ")", "<", "br/", ">", "-", "[", "dataloader.ipynb", "]", "(", "ch02/01_main-chapter-code/dataloader.ipynb", ")", "(", "summary", ")", "<", "br/", ">", "-", "[", "exercise-solutions.ipynb", "]", "(", "ch02/01_main-chapter-code/exercise-solutions.ipynb", ")", "|", "[", "./ch02", "]", "(", "./ch02", ")", "|", "|", "ch", "3", ":", "coding", "attention", "mechanism", "|", "-", "[", "ch03.ipynb", "]", "(", "ch03/01_main-chapter-code/ch03.ipynb", ")", "<", "br/", ">", "-", "[", "multihead-attention.ipynb", "]", "(", "ch03/01_main-chapter-code/multihead-attention.ipynb", ")", "(", "summary", ")", "|", "[", "./ch03", "]", "(", "./ch03", ")", "|", "|", "ch", "4", ":", "implementing", "gpt", "model", "scratch", "|", "-", "[", "ch04.ipynb", "]", "(", "ch04/01_main-chapter-code/ch04.ipynb", ")", "<", "br/", ">", "-", "[", "gpt.py", "]", "(", "ch04/01_main-chapter-code/gpt.py", ")", "(", "summary", ")", "|", "[", "./ch04", "]", "(", "./ch04", ")", "|", "|", "ch", "5", ":", "pretraining", "unlabeled", "data", "|", "q1", "2024", "|", "...", "|", "|", "ch", "6", ":", "finetuning", "text", "classification", "|", "q2", "2024", "|", "...", "|", "|", "ch", "7", ":", "finetuning", "human", "feedback", "|", "q2", "2024", "|", "...", "|", "|", "ch", "8", ":", "using", "large", "language", "model", "practice", "|", "q2/3", "2024", "|", "...", "|", "|", "appendix", ":", "introduction", "pytorch", "*", "|", "-", "[", "code-part1.ipynb", "]", "(", "appendix-a/03_main-chapter-code/code-part1.ipynb", ")", "<", "br/", ">", "-", "[", "code-part2.ipynb", "]", "(", "appendix-a/03_main-chapter-code/code-part2.ipynb", ")", "<", "br/", ">", "-", "[", "ddp-script.py", "]", "(", "appendix-a/03_main-chapter-code/ddp-script.py", ")", "<", "br/", ">", "-", "[", "exercise-solutions.ipynb", "]", "(", "appendix-a/03_main-chapter-code/exercise-solutions.ipynb", ")", "|", "[", "./appendix-a", "]", "(", "./appendix-a", ")", "|", "(", "*", "please", "see", "[", "]", "(", "appendix-a/01_optional-python-setup-preferences", ")", "[", "]", "(", "appendix-a/02_installing-python-libraries", ")", "folder", "need", "guidance", "installing", "python", "python", "package", ".", ")", "<", "br", ">", "<", "br", ">", "<", "img", "src=", "''", "images/mental-model.jpg", "''", "width=", "''", "600px", "''", ">", "(", "mental", "model", "summarizing", "content", "covered", "book", ".", ")" ], [ "table content please note ` readme.md ` file markdown ( ` .md ` ) file .", "downloaded code bundle manning website viewing local computer , recommend using markdown editor previewer proper viewing .", "n't installed markdown editor yet , [ marktext ] ( http : //www.marktext.cc ) good free option .", "alternatively , view file github [ http : //github.com/rasbt/llms-from-scratch ] ( http : //github.com/rasbt/llms-from-scratch ) .", "< br > < br > | chapter title | main code ( quick access ) | code + supplementary | | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -| -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -| | ch 1 : understanding large language model | code | code | | ch 2 : working text data | - [ ch02.ipynb ] ( ch02/01_main-chapter-code/ch02.ipynb ) < br/ > - [ dataloader.ipynb ] ( ch02/01_main-chapter-code/dataloader.ipynb ) ( summary ) < br/ > - [ exercise-solutions.ipynb ] ( ch02/01_main-chapter-code/exercise-solutions.ipynb ) | [ ./ch02 ] ( ./ch02 ) | | ch 3 : coding attention mechanism | - [ ch03.ipynb ] ( ch03/01_main-chapter-code/ch03.ipynb ) < br/ > - [ multihead-attention.ipynb ] ( ch03/01_main-chapter-code/multihead-attention.ipynb ) ( summary ) | [ ./ch03 ] ( ./ch03 ) | | ch 4 : implementing gpt model scratch | - [ ch04.ipynb ] ( ch04/01_main-chapter-code/ch04.ipynb ) < br/ > - [ gpt.py ] ( ch04/01_main-chapter-code/gpt.py ) ( summary ) | [ ./ch04 ] ( ./ch04 ) | | ch 5 : pretraining unlabeled data | q1 2024 | ... | | ch 6 : finetuning text classification | q2 2024 | ... | | ch 7 : finetuning human feedback | q2 2024 | ... | | ch 8 : using large language model practice | q2/3 2024 | ... | | appendix : introduction pytorch * | - [ code-part1.ipynb ] ( appendix-a/03_main-chapter-code/code-part1.ipynb ) < br/ > - [ code-part2.ipynb ] ( appendix-a/03_main-chapter-code/code-part2.ipynb ) < br/ > - [ ddp-script.py ] ( appendix-a/03_main-chapter-code/ddp-script.py ) < br/ > - [ exercise-solutions.ipynb ] ( appendix-a/03_main-chapter-code/exercise-solutions.ipynb ) | [ ./appendix-a ] ( ./appendix-a ) | ( * please see [ ] ( appendix-a/01_optional-python-setup-preferences ) [ ] ( appendix-a/02_installing-python-libraries ) folder need guidance installing python python package . )", "< br > < br > < img src= '' images/mental-model.jpg '' width= '' 600px '' > ( mental model summarizing content covered book . )" ] ], "level of complexity": -1 }, { "url": "https://github.com/mlc-ai/web-llm", "readme_url": "https://raw.githubusercontent.com/mlc-ai/web-llm/main/README.md", "topic": [ "chatgpt", "deep-learning", "language-model", "llm", "tvm", "webgpu", "webml" ], "text": "Customized Model Weights\n\nWebLLM works as a companion project of [MLC LLM](https://github.com/mlc-ai/mlc-llm).\nIt reuses the model artifact and builds flow of MLC LLM, please check out\n[MLC LLM document](https://llm.mlc.ai/docs/deploy/javascript.html)\non how to add new model weights and libraries to WebLLM.\n\nHere, we go over the high-level idea. There are two elements of the WebLLM package that enables new models and weight variants.\n\n- model_url: Contains a URL to model artifacts, such as weights and meta-data.\n- model_lib_url: A URL to the web assembly library (i.e. wasm file) that contains the executables to accelerate the model computations.\n\nBoth are customizable in the WebLLM.\n\n```typescript\nasync main() {\n const myLlamaUrl = \"/url/to/my/llama\";\n const appConfig = {\n \"model_list\": [\n {\n \"model_url\": myLlamaUrl,\n \"local_id\": \"MyLlama-3b-v1-q4f32_0\"\n \"model_lib_url\": \"/url/to/myllama3b.wasm\",\n }\n ],\n };\n // override default\n const chatOpts = {\n \"repetition_penalty\": 1.01\n };\n\n const chat = new ChatModule();\n // load a prebuilt model\n // with a chat option override and app config\n // under the hood, it will load the model from myLlamaUrl\n // and cache it in the browser cache\n // The chat will also load the model library from \"/url/to/myllama3b.wasm\",\n // assuming that it is compatible to the model in myLlamaUrl.\n await chat.reload(\"MyLlama-3b-v1-q4f32_0\", chatOpts, appConfig);\n}\n```\n\nIn many cases, we only want to supply the model weight variant, but\nnot necessarily a new model (e.g. `NeuralHermes-Mistral` can reuse `Mistral`'s\nmodel library; `WizardMath` can reuse `Llama-2`'s model library). For\nan example of how a model library is shared by different model variants,\nsee `examples/simple-chat/src/gh-config.js`. We also provide\na plethora of prebuilt model libraries, including:\n\n- `Llama-2-7b-chat-hf-q4f32_1`: Llama-7b models.\n- `RedPajama-INCITE-Chat-3B-v1-q4f32_1`: RedPajama-3B variants.\n- `Mistral-7B-Instruct-v0.1-q4f16_1`: Mistral-7B variants.\n- and many more at [binary-mlc-llm-libs](https://github.com/mlc-ai/binary-mlc-llm-libs).\n\n", "sentence": [ [ "customized", "model", "weight", "webllm", "work", "companion", "project", "[", "mlc", "llm", "]", "(", "http", ":", "//github.com/mlc-ai/mlc-llm", ")", ".", "reuses", "model", "artifact", "build", "flow", "mlc", "llm", ",", "please", "check", "[", "mlc", "llm", "document", "]", "(", "http", ":", "//llm.mlc.ai/docs/deploy/javascript.html", ")", "add", "new", "model", "weight", "library", "webllm", ".", ",", "go", "high-level", "idea", ".", "two", "element", "webllm", "package", "enables", "new", "model", "weight", "variant", ".", "-", "model_url", ":", "contains", "url", "model", "artifact", ",", "weight", "meta-data", ".", "-", "model_lib_url", ":", "url", "web", "assembly", "library", "(", "i.e", ".", "wasm", "file", ")", "contains", "executables", "accelerate", "model", "computation", ".", "customizable", "webllm", ".", "``", "`", "typescript", "async", "main", "(", ")", "{", "const", "myllamaurl", "=", "``", "/url/to/my/llama", "''", ";", "const", "appconfig", "=", "{", "``", "model_list", "''", ":", "[", "{", "``", "model_url", "''", ":", "myllamaurl", ",", "``", "local_id", "''", ":", "``", "myllama-3b-v1-q4f32_0", "''", "``", "model_lib_url", "''", ":", "``", "/url/to/myllama3b.wasm", "''", ",", "}", "]", ",", "}", ";", "//", "override", "default", "const", "chatopts", "=", "{", "``", "repetition_penalty", "''", ":", "1.01", "}", ";", "const", "chat", "=", "new", "chatmodule", "(", ")", ";", "//", "load", "prebuilt", "model", "//", "chat", "option", "override", "app", "config", "//", "hood", ",", "load", "model", "myllamaurl", "//", "cache", "browser", "cache", "//", "chat", "also", "load", "model", "library", "``", "/url/to/myllama3b.wasm", "''", ",", "//", "assuming", "compatible", "model", "myllamaurl", ".", "await", "chat.reload", "(", "``", "myllama-3b-v1-q4f32_0", "''", ",", "chatopts", ",", "appconfig", ")", ";", "}", "``", "`", "many", "case", ",", "want", "supply", "model", "weight", "variant", ",", "necessarily", "new", "model", "(", "e.g", ".", "`", "neuralhermes-mistral", "`", "reuse", "`", "mistral", "`", "'s", "model", "library", ";", "`", "wizardmath", "`", "reuse", "`", "llama-2", "`", "'s", "model", "library", ")", ".", "example", "model", "library", "shared", "different", "model", "variant", ",", "see", "`", "examples/simple-chat/src/gh-config.js", "`", ".", "also", "provide", "plethora", "prebuilt", "model", "library", ",", "including", ":", "-", "`", "llama-2-7b-chat-hf-q4f32_1", "`", ":", "llama-7b", "model", ".", "-", "`", "redpajama-incite-chat-3b-v1-q4f32_1", "`", ":", "redpajama-3b", "variant", ".", "-", "`", "mistral-7b-instruct-v0.1-q4f16_1", "`", ":", "mistral-7b", "variant", ".", "-", "many", "[", "binary-mlc-llm-libs", "]", "(", "http", ":", "//github.com/mlc-ai/binary-mlc-llm-libs", ")", "." ], [ "customized model weight webllm work companion project [ mlc llm ] ( http : //github.com/mlc-ai/mlc-llm ) .", "reuses model artifact build flow mlc llm , please check [ mlc llm document ] ( http : //llm.mlc.ai/docs/deploy/javascript.html ) add new model weight library webllm .", ", go high-level idea .", "two element webllm package enables new model weight variant .", "- model_url : contains url model artifact , weight meta-data .", "- model_lib_url : url web assembly library ( i.e .", "wasm file ) contains executables accelerate model computation .", "customizable webllm .", "`` ` typescript async main ( ) { const myllamaurl = `` /url/to/my/llama '' ; const appconfig = { `` model_list '' : [ { `` model_url '' : myllamaurl , `` local_id '' : `` myllama-3b-v1-q4f32_0 '' `` model_lib_url '' : `` /url/to/myllama3b.wasm '' , } ] , } ; // override default const chatopts = { `` repetition_penalty '' : 1.01 } ; const chat = new chatmodule ( ) ; // load prebuilt model // chat option override app config // hood , load model myllamaurl // cache browser cache // chat also load model library `` /url/to/myllama3b.wasm '' , // assuming compatible model myllamaurl .", "await chat.reload ( `` myllama-3b-v1-q4f32_0 '' , chatopts , appconfig ) ; } `` ` many case , want supply model weight variant , necessarily new model ( e.g .", "` neuralhermes-mistral ` reuse ` mistral ` 's model library ; ` wizardmath ` reuse ` llama-2 ` 's model library ) .", "example model library shared different model variant , see ` examples/simple-chat/src/gh-config.js ` .", "also provide plethora prebuilt model library , including : - ` llama-2-7b-chat-hf-q4f32_1 ` : llama-7b model .", "- ` redpajama-incite-chat-3b-v1-q4f32_1 ` : redpajama-3b variant .", "- ` mistral-7b-instruct-v0.1-q4f16_1 ` : mistral-7b variant .", "- many [ binary-mlc-llm-libs ] ( http : //github.com/mlc-ai/binary-mlc-llm-libs ) ." ] ], "token": [ [ "customized", "model", "weight", "webllm", "work", "companion", "project", "[", "mlc", "llm", "]", "(", "http", ":", "//github.com/mlc-ai/mlc-llm", ")", ".", "reuses", "model", "artifact", "build", "flow", "mlc", "llm", ",", "please", "check", "[", "mlc", "llm", "document", "]", "(", "http", ":", "//llm.mlc.ai/docs/deploy/javascript.html", ")", "add", "new", "model", "weight", "library", "webllm", ".", ",", "go", "high-level", "idea", ".", "two", "element", "webllm", "package", "enables", "new", "model", "weight", "variant", ".", "-", "model_url", ":", "contains", "url", "model", "artifact", ",", "weight", "meta-data", ".", "-", "model_lib_url", ":", "url", "web", "assembly", "library", "(", "i.e", ".", "wasm", "file", ")", "contains", "executables", "accelerate", "model", "computation", ".", "customizable", "webllm", ".", "``", "`", "typescript", "async", "main", "(", ")", "{", "const", "myllamaurl", "=", "``", "/url/to/my/llama", "''", ";", "const", "appconfig", "=", "{", "``", "model_list", "''", ":", "[", "{", "``", "model_url", "''", ":", "myllamaurl", ",", "``", "local_id", "''", ":", "``", "myllama-3b-v1-q4f32_0", "''", "``", "model_lib_url", "''", ":", "``", "/url/to/myllama3b.wasm", "''", ",", "}", "]", ",", "}", ";", "//", "override", "default", "const", "chatopts", "=", "{", "``", "repetition_penalty", "''", ":", "1.01", "}", ";", "const", "chat", "=", "new", "chatmodule", "(", ")", ";", "//", "load", "prebuilt", "model", "//", "chat", "option", "override", "app", "config", "//", "hood", ",", "load", "model", "myllamaurl", "//", "cache", "browser", "cache", "//", "chat", "also", "load", "model", "library", "``", "/url/to/myllama3b.wasm", "''", ",", "//", "assuming", "compatible", "model", "myllamaurl", ".", "await", "chat.reload", "(", "``", "myllama-3b-v1-q4f32_0", "''", ",", "chatopts", ",", "appconfig", ")", ";", "}", "``", "`", "many", "case", ",", "want", "supply", "model", "weight", "variant", ",", "necessarily", "new", "model", "(", "e.g", ".", "`", "neuralhermes-mistral", "`", "reuse", "`", "mistral", "`", "'s", "model", "library", ";", "`", "wizardmath", "`", "reuse", "`", "llama-2", "`", "'s", "model", "library", ")", ".", "example", "model", "library", "shared", "different", "model", "variant", ",", "see", "`", "examples/simple-chat/src/gh-config.js", "`", ".", "also", "provide", "plethora", "prebuilt", "model", "library", ",", "including", ":", "-", "`", "llama-2-7b-chat-hf-q4f32_1", "`", ":", "llama-7b", "model", ".", "-", "`", "redpajama-incite-chat-3b-v1-q4f32_1", "`", ":", "redpajama-3b", "variant", ".", "-", "`", "mistral-7b-instruct-v0.1-q4f16_1", "`", ":", "mistral-7b", "variant", ".", "-", "many", "[", "binary-mlc-llm-libs", "]", "(", "http", ":", "//github.com/mlc-ai/binary-mlc-llm-libs", ")", "." ], [ "customized model weight webllm work companion project [ mlc llm ] ( http : //github.com/mlc-ai/mlc-llm ) .", "reuses model artifact build flow mlc llm , please check [ mlc llm document ] ( http : //llm.mlc.ai/docs/deploy/javascript.html ) add new model weight library webllm .", ", go high-level idea .", "two element webllm package enables new model weight variant .", "- model_url : contains url model artifact , weight meta-data .", "- model_lib_url : url web assembly library ( i.e .", "wasm file ) contains executables accelerate model computation .", "customizable webllm .", "`` ` typescript async main ( ) { const myllamaurl = `` /url/to/my/llama '' ; const appconfig = { `` model_list '' : [ { `` model_url '' : myllamaurl , `` local_id '' : `` myllama-3b-v1-q4f32_0 '' `` model_lib_url '' : `` /url/to/myllama3b.wasm '' , } ] , } ; // override default const chatopts = { `` repetition_penalty '' : 1.01 } ; const chat = new chatmodule ( ) ; // load prebuilt model // chat option override app config // hood , load model myllamaurl // cache browser cache // chat also load model library `` /url/to/myllama3b.wasm '' , // assuming compatible model myllamaurl .", "await chat.reload ( `` myllama-3b-v1-q4f32_0 '' , chatopts , appconfig ) ; } `` ` many case , want supply model weight variant , necessarily new model ( e.g .", "` neuralhermes-mistral ` reuse ` mistral ` 's model library ; ` wizardmath ` reuse ` llama-2 ` 's model library ) .", "example model library shared different model variant , see ` examples/simple-chat/src/gh-config.js ` .", "also provide plethora prebuilt model library , including : - ` llama-2-7b-chat-hf-q4f32_1 ` : llama-7b model .", "- ` redpajama-incite-chat-3b-v1-q4f32_1 ` : redpajama-3b variant .", "- ` mistral-7b-instruct-v0.1-q4f16_1 ` : mistral-7b variant .", "- many [ binary-mlc-llm-libs ] ( http : //github.com/mlc-ai/binary-mlc-llm-libs ) ." ] ], "level of complexity": -1 }, { "url": "https://github.com/mlc-ai/web-llm", "readme_url": "https://raw.githubusercontent.com/mlc-ai/web-llm/main/README.md", "topic": [ "chatgpt", "deep-learning", "language-model", "llm", "tvm", "webgpu", "webml" ], "text": "Build WebLLM Package From Source\n\nNOTE: you don't need to build by yourself unless you would\nlike to change the WebLLM package, follow [use WebLLM](#use-web-llm-package) instead.\n\nWebLLM package is a web runtime designed for [MLC LLM](https://github.com/mlc-ai/mlc-llm).\n\n1. Install all the prerequisites for compilation:\n 1. [emscripten](https://emscripten.org). It is an LLVM-based compiler that compiles C/C++ source code to WebAssembly.\n - Follow the [installation instruction](https://emscripten.org/docs/getting_started/downloads.html#installation-instructions-using-the-emsdk-recommended) to install the latest emsdk.\n - Source `emsdk_env.sh` by `source path/to/emsdk_env.sh`, so that `emcc` is reachable from PATH and the command `emcc` works.\n 4. Install jekyll by following the [official guides](https://jekyllrb.com/docs/installation/). It is the package we use for website. This is not needed if you're using nextjs (see next-simple-chat in the examples).\n 5. Install jekyll-remote-theme by command. Try [gem mirror](https://gems.ruby-china.com/) if install blocked.\n ```shell\n gem install jekyll-remote-theme\n ```\n We can verify the successful installation by trying out `emcc` and `jekyll` in terminal, respectively.\n\n2. Setup necessary environment\n\n Prepare all the necessary dependencies for web build:\n ```shell\n ./scripts/prep_deps.sh\n ```\n\n3. Buld WebLLM Package\n\n ```shell\n npm run build\n ```\n\n4. Validate some of the sub-packages\n\n You can then go to the subfolders in [examples](examples) to validate some of the sub-packages.\n We use Parcelv2 for bundling. Although Parcel is not very good at tracking parent directory\n changes sometimes. When you make a change in the WebLLM package, try to edit the `package.json`\n of the subfolder and save it, which will trigger Parcel to rebuild.\n\n\n", "sentence": [ [ "build", "webllm", "package", "source", "note", ":", "n't", "need", "build", "unless", "would", "like", "change", "webllm", "package", ",", "follow", "[", "use", "webllm", "]", "(", "#", "use-web-llm-package", ")", "instead", ".", "webllm", "package", "web", "runtime", "designed", "[", "mlc", "llm", "]", "(", "http", ":", "//github.com/mlc-ai/mlc-llm", ")", ".", "1", ".", "install", "prerequisite", "compilation", ":", "1", ".", "[", "emscripten", "]", "(", "http", ":", "//emscripten.org", ")", ".", "llvm-based", "compiler", "compiles", "c/c++", "source", "code", "webassembly", ".", "-", "follow", "[", "installation", "instruction", "]", "(", "http", ":", "//emscripten.org/docs/getting_started/downloads.html", "#", "installation-instructions-using-the-emsdk-recommended", ")", "install", "latest", "emsdk", ".", "-", "source", "`", "emsdk_env.sh", "`", "`", "source", "path/to/emsdk_env.sh", "`", ",", "`", "emcc", "`", "reachable", "path", "command", "`", "emcc", "`", "work", ".", "4", ".", "install", "jekyll", "following", "[", "official", "guide", "]", "(", "http", ":", "//jekyllrb.com/docs/installation/", ")", ".", "package", "use", "website", ".", "needed", "'re", "using", "nextjs", "(", "see", "next-simple-chat", "example", ")", ".", "5", ".", "install", "jekyll-remote-theme", "command", ".", "try", "[", "gem", "mirror", "]", "(", "http", ":", "//gems.ruby-china.com/", ")", "install", "blocked", ".", "``", "`", "shell", "gem", "install", "jekyll-remote-theme", "``", "`", "verify", "successful", "installation", "trying", "`", "emcc", "`", "`", "jekyll", "`", "terminal", ",", "respectively", ".", "2", ".", "setup", "necessary", "environment", "prepare", "necessary", "dependency", "web", "build", ":", "``", "`", "shell", "./scripts/prep_deps.sh", "``", "`", "3", ".", "buld", "webllm", "package", "``", "`", "shell", "npm", "run", "build", "``", "`", "4", ".", "validate", "sub-packages", "go", "subfolders", "[", "example", "]", "(", "example", ")", "validate", "sub-packages", ".", "use", "parcelv2", "bundling", ".", "although", "parcel", "good", "tracking", "parent", "directory", "change", "sometimes", ".", "make", "change", "webllm", "package", ",", "try", "edit", "`", "package.json", "`", "subfolder", "save", ",", "trigger", "parcel", "rebuild", "." ], [ "build webllm package source note : n't need build unless would like change webllm package , follow [ use webllm ] ( # use-web-llm-package ) instead .", "webllm package web runtime designed [ mlc llm ] ( http : //github.com/mlc-ai/mlc-llm ) .", "1 .", "install prerequisite compilation : 1 .", "[ emscripten ] ( http : //emscripten.org ) .", "llvm-based compiler compiles c/c++ source code webassembly .", "- follow [ installation instruction ] ( http : //emscripten.org/docs/getting_started/downloads.html # installation-instructions-using-the-emsdk-recommended ) install latest emsdk .", "- source ` emsdk_env.sh ` ` source path/to/emsdk_env.sh ` , ` emcc ` reachable path command ` emcc ` work .", "4 .", "install jekyll following [ official guide ] ( http : //jekyllrb.com/docs/installation/ ) .", "package use website .", "needed 're using nextjs ( see next-simple-chat example ) .", "5 .", "install jekyll-remote-theme command .", "try [ gem mirror ] ( http : //gems.ruby-china.com/ ) install blocked .", "`` ` shell gem install jekyll-remote-theme `` ` verify successful installation trying ` emcc ` ` jekyll ` terminal , respectively .", "2 .", "setup necessary environment prepare necessary dependency web build : `` ` shell ./scripts/prep_deps.sh `` ` 3 .", "buld webllm package `` ` shell npm run build `` ` 4 .", "validate sub-packages go subfolders [ example ] ( example ) validate sub-packages .", "use parcelv2 bundling .", "although parcel good tracking parent directory change sometimes .", "make change webllm package , try edit ` package.json ` subfolder save , trigger parcel rebuild ." ] ], "token": [ [ "build", "webllm", "package", "source", "note", ":", "n't", "need", "build", "unless", "would", "like", "change", "webllm", "package", ",", "follow", "[", "use", "webllm", "]", "(", "#", "use-web-llm-package", ")", "instead", ".", "webllm", "package", "web", "runtime", "designed", "[", "mlc", "llm", "]", "(", "http", ":", "//github.com/mlc-ai/mlc-llm", ")", ".", "1", ".", "install", "prerequisite", "compilation", ":", "1", ".", "[", "emscripten", "]", "(", "http", ":", "//emscripten.org", ")", ".", "llvm-based", "compiler", "compiles", "c/c++", "source", "code", "webassembly", ".", "-", "follow", "[", "installation", "instruction", "]", "(", "http", ":", "//emscripten.org/docs/getting_started/downloads.html", "#", "installation-instructions-using-the-emsdk-recommended", ")", "install", "latest", "emsdk", ".", "-", "source", "`", "emsdk_env.sh", "`", "`", "source", "path/to/emsdk_env.sh", "`", ",", "`", "emcc", "`", "reachable", "path", "command", "`", "emcc", "`", "work", ".", "4", ".", "install", "jekyll", "following", "[", "official", "guide", "]", "(", "http", ":", "//jekyllrb.com/docs/installation/", ")", ".", "package", "use", "website", ".", "needed", "'re", "using", "nextjs", "(", "see", "next-simple-chat", "example", ")", ".", "5", ".", "install", "jekyll-remote-theme", "command", ".", "try", "[", "gem", "mirror", "]", "(", "http", ":", "//gems.ruby-china.com/", ")", "install", "blocked", ".", "``", "`", "shell", "gem", "install", "jekyll-remote-theme", "``", "`", "verify", "successful", "installation", "trying", "`", "emcc", "`", "`", "jekyll", "`", "terminal", ",", "respectively", ".", "2", ".", "setup", "necessary", "environment", "prepare", "necessary", "dependency", "web", "build", ":", "``", "`", "shell", "./scripts/prep_deps.sh", "``", "`", "3", ".", "buld", "webllm", "package", "``", "`", "shell", "npm", "run", "build", "``", "`", "4", ".", "validate", "sub-packages", "go", "subfolders", "[", "example", "]", "(", "example", ")", "validate", "sub-packages", ".", "use", "parcelv2", "bundling", ".", "although", "parcel", "good", "tracking", "parent", "directory", "change", "sometimes", ".", "make", "change", "webllm", "package", ",", "try", "edit", "`", "package.json", "`", "subfolder", "save", ",", "trigger", "parcel", "rebuild", "." ], [ "build webllm package source note : n't need build unless would like change webllm package , follow [ use webllm ] ( # use-web-llm-package ) instead .", "webllm package web runtime designed [ mlc llm ] ( http : //github.com/mlc-ai/mlc-llm ) .", "1 .", "install prerequisite compilation : 1 .", "[ emscripten ] ( http : //emscripten.org ) .", "llvm-based compiler compiles c/c++ source code webassembly .", "- follow [ installation instruction ] ( http : //emscripten.org/docs/getting_started/downloads.html # installation-instructions-using-the-emsdk-recommended ) install latest emsdk .", "- source ` emsdk_env.sh ` ` source path/to/emsdk_env.sh ` , ` emcc ` reachable path command ` emcc ` work .", "4 .", "install jekyll following [ official guide ] ( http : //jekyllrb.com/docs/installation/ ) .", "package use website .", "needed 're using nextjs ( see next-simple-chat example ) .", "5 .", "install jekyll-remote-theme command .", "try [ gem mirror ] ( http : //gems.ruby-china.com/ ) install blocked .", "`` ` shell gem install jekyll-remote-theme `` ` verify successful installation trying ` emcc ` ` jekyll ` terminal , respectively .", "2 .", "setup necessary environment prepare necessary dependency web build : `` ` shell ./scripts/prep_deps.sh `` ` 3 .", "buld webllm package `` ` shell npm run build `` ` 4 .", "validate sub-packages go subfolders [ example ] ( example ) validate sub-packages .", "use parcelv2 bundling .", "although parcel good tracking parent directory change sometimes .", "make change webllm package , try edit ` package.json ` subfolder save , trigger parcel rebuild ." ] ], "level of complexity": -1 }, { "url": "https://github.com/nebuly-ai/nebuly", "readme_url": "https://raw.githubusercontent.com/nebuly-ai/nebuly/main/README.md", "topic": [ "ai", "analytics", "artificial-intelligence", "deeplearning", "large-language-models", "llm" ], "text": "Installation\n\nThe easiest way to install Nebuly\u2019s SDK is via\u00a0`pip`:\n\n```\npip install nebuly\n```\n\nOnce installed, authenticate to Nebuly platform and start building.\n\n", "sentence": [ [ "installation", "easiest", "way", "install", "nebuly", "\u2019", "sdk", "via", "`", "pip", "`", ":", "``", "`", "pip", "install", "nebuly", "``", "`", "installed", ",", "authenticate", "nebuly", "platform", "start", "building", "." ], [ "installation easiest way install nebuly \u2019 sdk via ` pip ` : `` ` pip install nebuly `` ` installed , authenticate nebuly platform start building ." ] ], "token": [ [ "installation", "easiest", "way", "install", "nebuly", "\u2019", "sdk", "via", "`", "pip", "`", ":", "``", "`", "pip", "install", "nebuly", "``", "`", "installed", ",", "authenticate", "nebuly", "platform", "start", "building", "." ], [ "installation easiest way install nebuly \u2019 sdk via ` pip ` : `` ` pip install nebuly `` ` installed , authenticate nebuly platform start building ." ] ], "level of complexity": 0 }, { "url": "https://github.com/LlamaFamily/Llama2-Chinese", "readme_url": "https://raw.githubusercontent.com/LlamaFamily/Llama2-Chinese/main/README.md", "topic": [ "finetune", "llama", "llama2", "llm", "lora", "pretrain" ], "text": "\ud83c\udf44 \u6a21\u578b\u91cf\u5316\n\u6211\u4eec\u5bf9\u4e2d\u6587\u5fae\u8c03\u7684\u6a21\u578b\u53c2\u6570\u8fdb\u884c\u4e86\u91cf\u5316\uff0c\u65b9\u4fbf\u4ee5\u66f4\u5c11\u7684\u8ba1\u7b97\u8d44\u6e90\u8fd0\u884c\u3002\u76ee\u524d\u5df2\u7ecf\u5728[Hugging Face](https://huggingface.co/FlagAlpha)\u4e0a\u4f20\u4e8613B\u4e2d\u6587\u5fae\u8c03\u6a21\u578b[FlagAlpha/Llama2-Chinese-13b-Chat](https://huggingface.co/FlagAlpha/Llama2-Chinese-13b-Chat)\u76844bit\u538b\u7f29\u7248\u672c[FlagAlpha/Llama2-Chinese-13b-Chat-4bit](https://huggingface.co/FlagAlpha/Llama2-Chinese-13b-Chat-4bit)\uff0c\u5177\u4f53\u8c03\u7528\u65b9\u5f0f\u5982\u4e0b\uff1a\n\n\u73af\u5883\u51c6\u5907\uff1a\n```\npip install git+https://github.com/PanQiWei/AutoGPTQ.git\n```\n\n```python\nfrom transformers import AutoTokenizer\nfrom auto_gptq import AutoGPTQForCausalLM\nmodel = AutoGPTQForCausalLM.from_quantized('FlagAlpha/Llama2-Chinese-13b-Chat-4bit', device=\"cuda:0\")\ntokenizer = AutoTokenizer.from_pretrained('FlagAlpha/Llama2-Chinese-13b-Chat-4bit',use_fast=False)\ninput_ids = tokenizer(['Human: \u600e\u4e48\u767b\u4e0a\u706b\u661f\\nAssistant: '], return_tensors=\"pt\",add_special_tokens=False).input_ids.to('cuda') \ngenerate_input = {\n \"input_ids\":input_ids,\n \"max_new_tokens\":512,\n \"do_sample\":True,\n \"top_k\":50,\n \"top_p\":0.95,\n \"temperature\":0.3,\n \"repetition_penalty\":1.3,\n \"eos_token_id\":tokenizer.eos_token_id,\n \"bos_token_id\":tokenizer.bos_token_id,\n \"pad_token_id\":tokenizer.pad_token_id\n}\ngenerate_ids = model.generate(**generate_input)\ntext = tokenizer.decode(generate_ids[0])\nprint(text)\n```\n\n", "sentence": [ [ "\ud83c\udf44", "\u6a21\u578b\u91cf\u5316", "\u6211\u4eec\u5bf9\u4e2d\u6587\u5fae\u8c03\u7684\u6a21\u578b\u53c2\u6570\u8fdb\u884c\u4e86\u91cf\u5316\uff0c\u65b9\u4fbf\u4ee5\u66f4\u5c11\u7684\u8ba1\u7b97\u8d44\u6e90\u8fd0\u884c\u3002\u76ee\u524d\u5df2\u7ecf\u5728", "[", "hugging", "face", "]", "(", "http", ":", "//huggingface.co/flagalpha", ")", "\u4e0a\u4f20\u4e8613b\u4e2d\u6587\u5fae\u8c03\u6a21\u578b", "[", "flagalpha/llama2-chinese-13b-chat", "]", "(", "http", ":", "//huggingface.co/flagalpha/llama2-chinese-13b-chat", ")", "\u76844bit\u538b\u7f29\u7248\u672c", "[", "flagalpha/llama2-chinese-13b-chat-4bit", "]", "(", "http", ":", "//huggingface.co/flagalpha/llama2-chinese-13b-chat-4bit", ")", "\uff0c\u5177\u4f53\u8c03\u7528\u65b9\u5f0f\u5982\u4e0b\uff1a", "\u73af\u5883\u51c6\u5907\uff1a", "``", "`", "pip", "install", "git+https", ":", "//github.com/panqiwei/autogptq.git", "``", "`", "``", "`", "python", "transformer", "import", "autotokenizer", "auto_gptq", "import", "autogptqforcausallm", "model", "=", "autogptqforcausallm.from_quantized", "(", "'flagalpha/llama2-chinese-13b-chat-4bit", "'", ",", "device=", "''", "cuda:0", "''", ")", "tokenizer", "=", "autotokenizer.from_pretrained", "(", "'flagalpha/llama2-chinese-13b-chat-4bit", "'", ",", "use_fast=false", ")", "input_ids", "=", "tokenizer", "(", "[", "'", "<", ">", "human", ":", "\u600e\u4e48\u767b\u4e0a\u706b\u661f\\n", "<", "/s", ">", "<", ">", "assistant", ":", "'", "]", ",", "return_tensors=", "''", "pt", "''", ",", "add_special_tokens=false", ")", ".input_ids.to", "(", "'cuda", "'", ")", "generate_input", "=", "{", "``", "input_ids", "''", ":", "input_ids", ",", "``", "max_new_tokens", "''", ":512", ",", "``", "do_sample", "''", ":", "true", ",", "``", "top_k", "''", ":50", ",", "``", "top_p", "''", ":0.95", ",", "``", "temperature", "''", ":0.3", ",", "``", "repetition_penalty", "''", ":1.3", ",", "``", "eos_token_id", "''", ":", "tokenizer.eos_token_id", ",", "``", "bos_token_id", "''", ":", "tokenizer.bos_token_id", ",", "``", "pad_token_id", "''", ":", "tokenizer.pad_token_id", "}", "generate_ids", "=", "model.generate", "(", "*", "*", "generate_input", ")", "text", "=", "tokenizer.decode", "(", "generate_ids", "[", "0", "]", ")", "print", "(", "text", ")", "``", "`" ], [ "\ud83c\udf44 \u6a21\u578b\u91cf\u5316 \u6211\u4eec\u5bf9\u4e2d\u6587\u5fae\u8c03\u7684\u6a21\u578b\u53c2\u6570\u8fdb\u884c\u4e86\u91cf\u5316\uff0c\u65b9\u4fbf\u4ee5\u66f4\u5c11\u7684\u8ba1\u7b97\u8d44\u6e90\u8fd0\u884c\u3002\u76ee\u524d\u5df2\u7ecf\u5728 [ hugging face ] ( http : //huggingface.co/flagalpha ) \u4e0a\u4f20\u4e8613b\u4e2d\u6587\u5fae\u8c03\u6a21\u578b [ flagalpha/llama2-chinese-13b-chat ] ( http : //huggingface.co/flagalpha/llama2-chinese-13b-chat ) \u76844bit\u538b\u7f29\u7248\u672c [ flagalpha/llama2-chinese-13b-chat-4bit ] ( http : //huggingface.co/flagalpha/llama2-chinese-13b-chat-4bit ) \uff0c\u5177\u4f53\u8c03\u7528\u65b9\u5f0f\u5982\u4e0b\uff1a \u73af\u5883\u51c6\u5907\uff1a `` ` pip install git+https : //github.com/panqiwei/autogptq.git `` ` `` ` python transformer import autotokenizer auto_gptq import autogptqforcausallm model = autogptqforcausallm.from_quantized ( 'flagalpha/llama2-chinese-13b-chat-4bit ' , device= '' cuda:0 '' ) tokenizer = autotokenizer.from_pretrained ( 'flagalpha/llama2-chinese-13b-chat-4bit ' , use_fast=false ) input_ids = tokenizer ( [ ' < > human : \u600e\u4e48\u767b\u4e0a\u706b\u661f\\n < /s > < > assistant : ' ] , return_tensors= '' pt '' , add_special_tokens=false ) .input_ids.to ( 'cuda ' ) generate_input = { `` input_ids '' : input_ids , `` max_new_tokens '' :512 , `` do_sample '' : true , `` top_k '' :50 , `` top_p '' :0.95 , `` temperature '' :0.3 , `` repetition_penalty '' :1.3 , `` eos_token_id '' : tokenizer.eos_token_id , `` bos_token_id '' : tokenizer.bos_token_id , `` pad_token_id '' : tokenizer.pad_token_id } generate_ids = model.generate ( * * generate_input ) text = tokenizer.decode ( generate_ids [ 0 ] ) print ( text ) `` `" ] ], "token": [ [ "\ud83c\udf44", "\u6a21\u578b\u91cf\u5316", "\u6211\u4eec\u5bf9\u4e2d\u6587\u5fae\u8c03\u7684\u6a21\u578b\u53c2\u6570\u8fdb\u884c\u4e86\u91cf\u5316\uff0c\u65b9\u4fbf\u4ee5\u66f4\u5c11\u7684\u8ba1\u7b97\u8d44\u6e90\u8fd0\u884c\u3002\u76ee\u524d\u5df2\u7ecf\u5728", "[", "hugging", "face", "]", "(", "http", ":", "//huggingface.co/flagalpha", ")", "\u4e0a\u4f20\u4e8613b\u4e2d\u6587\u5fae\u8c03\u6a21\u578b", "[", "flagalpha/llama2-chinese-13b-chat", "]", "(", "http", ":", "//huggingface.co/flagalpha/llama2-chinese-13b-chat", ")", "\u76844bit\u538b\u7f29\u7248\u672c", "[", "flagalpha/llama2-chinese-13b-chat-4bit", "]", "(", "http", ":", "//huggingface.co/flagalpha/llama2-chinese-13b-chat-4bit", ")", "\uff0c\u5177\u4f53\u8c03\u7528\u65b9\u5f0f\u5982\u4e0b\uff1a", "\u73af\u5883\u51c6\u5907\uff1a", "``", "`", "pip", "install", "git+https", ":", "//github.com/panqiwei/autogptq.git", "``", "`", "``", "`", "python", "transformer", "import", "autotokenizer", "auto_gptq", "import", "autogptqforcausallm", "model", "=", "autogptqforcausallm.from_quantized", "(", "'flagalpha/llama2-chinese-13b-chat-4bit", "'", ",", "device=", "''", "cuda:0", "''", ")", "tokenizer", "=", "autotokenizer.from_pretrained", "(", "'flagalpha/llama2-chinese-13b-chat-4bit", "'", ",", "use_fast=false", ")", "input_ids", "=", "tokenizer", "(", "[", "'", "<", ">", "human", ":", "\u600e\u4e48\u767b\u4e0a\u706b\u661f\\n", "<", "/s", ">", "<", ">", "assistant", ":", "'", "]", ",", "return_tensors=", "''", "pt", "''", ",", "add_special_tokens=false", ")", ".input_ids.to", "(", "'cuda", "'", ")", "generate_input", "=", "{", "``", "input_ids", "''", ":", "input_ids", ",", "``", "max_new_tokens", "''", ":512", ",", "``", "do_sample", "''", ":", "true", ",", "``", "top_k", "''", ":50", ",", "``", "top_p", "''", ":0.95", ",", "``", "temperature", "''", ":0.3", ",", "``", "repetition_penalty", "''", ":1.3", ",", "``", "eos_token_id", "''", ":", "tokenizer.eos_token_id", ",", "``", "bos_token_id", "''", ":", "tokenizer.bos_token_id", ",", "``", "pad_token_id", "''", ":", "tokenizer.pad_token_id", "}", "generate_ids", "=", "model.generate", "(", "*", "*", "generate_input", ")", "text", "=", "tokenizer.decode", "(", "generate_ids", "[", "0", "]", ")", "print", "(", "text", ")", "``", "`" ], [ "\ud83c\udf44 \u6a21\u578b\u91cf\u5316 \u6211\u4eec\u5bf9\u4e2d\u6587\u5fae\u8c03\u7684\u6a21\u578b\u53c2\u6570\u8fdb\u884c\u4e86\u91cf\u5316\uff0c\u65b9\u4fbf\u4ee5\u66f4\u5c11\u7684\u8ba1\u7b97\u8d44\u6e90\u8fd0\u884c\u3002\u76ee\u524d\u5df2\u7ecf\u5728 [ hugging face ] ( http : //huggingface.co/flagalpha ) \u4e0a\u4f20\u4e8613b\u4e2d\u6587\u5fae\u8c03\u6a21\u578b [ flagalpha/llama2-chinese-13b-chat ] ( http : //huggingface.co/flagalpha/llama2-chinese-13b-chat ) \u76844bit\u538b\u7f29\u7248\u672c [ flagalpha/llama2-chinese-13b-chat-4bit ] ( http : //huggingface.co/flagalpha/llama2-chinese-13b-chat-4bit ) \uff0c\u5177\u4f53\u8c03\u7528\u65b9\u5f0f\u5982\u4e0b\uff1a \u73af\u5883\u51c6\u5907\uff1a `` ` pip install git+https : //github.com/panqiwei/autogptq.git `` ` `` ` python transformer import autotokenizer auto_gptq import autogptqforcausallm model = autogptqforcausallm.from_quantized ( 'flagalpha/llama2-chinese-13b-chat-4bit ' , device= '' cuda:0 '' ) tokenizer = autotokenizer.from_pretrained ( 'flagalpha/llama2-chinese-13b-chat-4bit ' , use_fast=false ) input_ids = tokenizer ( [ ' < > human : \u600e\u4e48\u767b\u4e0a\u706b\u661f\\n < /s > < > assistant : ' ] , return_tensors= '' pt '' , add_special_tokens=false ) .input_ids.to ( 'cuda ' ) generate_input = { `` input_ids '' : input_ids , `` max_new_tokens '' :512 , `` do_sample '' : true , `` top_k '' :50 , `` top_p '' :0.95 , `` temperature '' :0.3 , `` repetition_penalty '' :1.3 , `` eos_token_id '' : tokenizer.eos_token_id , `` bos_token_id '' : tokenizer.bos_token_id , `` pad_token_id '' : tokenizer.pad_token_id } generate_ids = model.generate ( * * generate_input ) text = tokenizer.decode ( generate_ids [ 0 ] ) print ( text ) `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Docker\n\nWe provide a docker container that helps you start running OpenLLM:\n\n```bash\ndocker run --rm -it -p 3000:3000 ghcr.io/bentoml/openllm start facebook/opt-1.3b --backend pt\n```\n\n> [!NOTE]\n> Given you have access to GPUs and have setup [nvidia-docker](https://github.com/NVIDIA/nvidia-container-toolkit), you can additionally pass in `--gpus`\n> to use GPU for faster inference and optimization\n>```bash\n> docker run --rm --gpus all -p 3000:3000 -it ghcr.io/bentoml/openllm start HuggingFaceH4/zephyr-7b-beta --backend vllm\n> ```\n\n\n", "sentence": [ [ "docker", "provide", "docker", "container", "help", "start", "running", "openllm", ":", "``", "`", "bash", "docker", "run", "--", "rm", "-it", "-p", "3000:3000", "ghcr.io/bentoml/openllm", "start", "facebook/opt-1.3b", "--", "backend", "pt", "``", "`", ">", "[", "!", "note", "]", ">", "given", "access", "gpus", "setup", "[", "nvidia-docker", "]", "(", "http", ":", "//github.com/nvidia/nvidia-container-toolkit", ")", ",", "additionally", "pas", "`", "--", "gpus", "`", ">", "use", "gpu", "faster", "inference", "optimization", ">", "``", "`", "bash", ">", "docker", "run", "--", "rm", "--", "gpus", "-p", "3000:3000", "-it", "ghcr.io/bentoml/openllm", "start", "huggingfaceh4/zephyr-7b-beta", "--", "backend", "vllm", ">", "``", "`" ], [ "docker provide docker container help start running openllm : `` ` bash docker run -- rm -it -p 3000:3000 ghcr.io/bentoml/openllm start facebook/opt-1.3b -- backend pt `` ` > [ ! note ] > given access gpus setup [ nvidia-docker ] ( http : //github.com/nvidia/nvidia-container-toolkit ) , additionally pas ` -- gpus ` > use gpu faster inference optimization > `` ` bash > docker run -- rm -- gpus -p 3000:3000 -it ghcr.io/bentoml/openllm start huggingfaceh4/zephyr-7b-beta -- backend vllm > `` `" ] ], "token": [ [ "docker", "provide", "docker", "container", "help", "start", "running", "openllm", ":", "``", "`", "bash", "docker", "run", "--", "rm", "-it", "-p", "3000:3000", "ghcr.io/bentoml/openllm", "start", "facebook/opt-1.3b", "--", "backend", "pt", "``", "`", ">", "[", "!", "note", "]", ">", "given", "access", "gpus", "setup", "[", "nvidia-docker", "]", "(", "http", ":", "//github.com/nvidia/nvidia-container-toolkit", ")", ",", "additionally", "pas", "`", "--", "gpus", "`", ">", "use", "gpu", "faster", "inference", "optimization", ">", "``", "`", "bash", ">", "docker", "run", "--", "rm", "--", "gpus", "-p", "3000:3000", "-it", "ghcr.io/bentoml/openllm", "start", "huggingfaceh4/zephyr-7b-beta", "--", "backend", "vllm", ">", "``", "`" ], [ "docker provide docker container help start running openllm : `` ` bash docker run -- rm -it -p 3000:3000 ghcr.io/bentoml/openllm start facebook/opt-1.3b -- backend pt `` ` > [ ! note ] > given access gpus setup [ nvidia-docker ] ( http : //github.com/nvidia/nvidia-container-toolkit ) , additionally pas ` -- gpus ` > use gpu faster inference optimization > `` ` bash > docker run -- rm -- gpus -p 3000:3000 -it ghcr.io/bentoml/openllm start huggingfaceh4/zephyr-7b-beta -- backend vllm > `` `" ] ], "level of complexity": 1 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "\ud83c\udfc3 Get started\n\nThe following provides instructions for how to get started with OpenLLM locally.\n", "sentence": [ [ "\ud83c\udfc3", "get", "started", "following", "provides", "instruction", "get", "started", "openllm", "locally", "." ], [ "\ud83c\udfc3 get started following provides instruction get started openllm locally ." ] ], "token": [ [ "\ud83c\udfc3", "get", "started", "following", "provides", "instruction", "get", "started", "openllm", "locally", "." ], [ "\ud83c\udfc3 get started following provides instruction get started openllm locally ." ] ], "level of complexity": -1 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Prerequisites\n\nYou have installed Python 3.8 (or later) and\u00a0`pip`. We highly recommend using a [Virtual Environment](https://docs.python.org/3/library/venv.html) to prevent package conflicts.\n\n", "sentence": [ [ "prerequisite", "installed", "python", "3.8", "(", "later", ")", "`", "pip", "`", ".", "highly", "recommend", "using", "[", "virtual", "environment", "]", "(", "http", ":", "//docs.python.org/3/library/venv.html", ")", "prevent", "package", "conflict", "." ], [ "prerequisite installed python 3.8 ( later ) ` pip ` .", "highly recommend using [ virtual environment ] ( http : //docs.python.org/3/library/venv.html ) prevent package conflict ." ] ], "token": [ [ "prerequisite", "installed", "python", "3.8", "(", "later", ")", "`", "pip", "`", ".", "highly", "recommend", "using", "[", "virtual", "environment", "]", "(", "http", ":", "//docs.python.org/3/library/venv.html", ")", "prevent", "package", "conflict", "." ], [ "prerequisite installed python 3.8 ( later ) ` pip ` .", "highly recommend using [ virtual environment ] ( http : //docs.python.org/3/library/venv.html ) prevent package conflict ." ] ], "level of complexity": -1 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Install OpenLLM\n\nInstall OpenLLM by using `pip` as follows:\n\n```bash\npip install openllm\n```\n\nTo verify the installation, run:\n\n```bash\n$ openllm -h\n\nUsage: openllm [OPTIONS] COMMAND [ARGS]...\n\n \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2557 \u2588\u2588\u2557\u2588\u2588\u2557 \u2588\u2588\u2557 \u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2557\n \u2588\u2588\u2554\u2550\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255d\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2551\n \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2554\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2554\u2588\u2588\u2588\u2588\u2554\u2588\u2588\u2551\n \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2550\u255d \u2588\u2588\u2554\u2550\u2550\u255d \u2588\u2588\u2551\u255a\u2588\u2588\u2557\u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u255a\u2588\u2588\u2554\u255d\u2588\u2588\u2551\n \u255a\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2551 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551 \u255a\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551 \u255a\u2550\u255d \u2588\u2588\u2551\n \u255a\u2550\u2550\u2550\u2550\u2550\u255d \u255a\u2550\u255d \u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d \u255a\u2550\u2550\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d \u255a\u2550\u255d.\n\n An open platform for operating large language models in production.\n Fine-tune, serve, deploy, and monitor any LLMs with ease.\n\nOptions:\n -v, --version Show the version and exit.\n -h, --help Show this message and exit.\n\nCommands:\n build Package a given models into a BentoLLM.\n import Setup LLM interactively.\n models List all supported models.\n prune Remove all saved models, (and optionally bentos) built with OpenLLM locally.\n query Query a LLM interactively, from a terminal.\n start Start a LLMServer for any supported LLM.\n start-grpc Start a gRPC LLMServer for any supported LLM.\n\nExtensions:\n build-base-container Base image builder for BentoLLM.\n dive-bentos Dive into a BentoLLM.\n get-containerfile Return Containerfile of any given Bento.\n get-prompt Get the default prompt used by OpenLLM.\n list-bentos List available bentos built by OpenLLM.\n list-models This is equivalent to openllm models...\n playground OpenLLM Playground.\n```\n\n", "sentence": [ [ "install", "openllm", "install", "openllm", "using", "`", "pip", "`", "follows", ":", "``", "`", "bash", "pip", "install", "openllm", "``", "`", "verify", "installation", ",", "run", ":", "``", "`", "bash", "$", "openllm", "-h", "usage", ":", "openllm", "[", "option", "]", "command", "[", "args", "]", "...", "\u2588\u2588\u2588\u2588\u2588\u2588\u2557", "\u2588\u2588\u2588\u2588\u2588\u2588\u2557", "\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2557", "\u2588\u2588\u2557\u2588\u2588\u2557", "\u2588\u2588\u2557", "\u2588\u2588\u2588\u2557", "\u2588\u2588\u2588\u2557", "\u2588\u2588\u2554\u2550\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255d\u2588\u2588\u2588\u2588\u2557", "\u2588\u2588\u2551\u2588\u2588\u2551", "\u2588\u2588\u2551", "\u2588\u2588\u2588\u2588\u2557", "\u2588\u2588\u2588\u2588\u2551", "\u2588\u2588\u2551", "\u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2588\u2588\u2588\u2557", "\u2588\u2588\u2554\u2588\u2588\u2557", "\u2588\u2588\u2551\u2588\u2588\u2551", "\u2588\u2588\u2551", "\u2588\u2588\u2554\u2588\u2588\u2588\u2588\u2554\u2588\u2588\u2551", "\u2588\u2588\u2551", "\u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2550\u255d", "\u2588\u2588\u2554\u2550\u2550\u255d", "\u2588\u2588\u2551\u255a\u2588\u2588\u2557\u2588\u2588\u2551\u2588\u2588\u2551", "\u2588\u2588\u2551", "\u2588\u2588\u2551\u255a\u2588\u2588\u2554\u255d\u2588\u2588\u2551", "\u255a\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2551", "\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551", "\u255a\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551", "\u255a\u2550\u255d", "\u2588\u2588\u2551", "\u255a\u2550\u2550\u2550\u2550\u2550\u255d", "\u255a\u2550\u255d", "\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d", "\u255a\u2550\u2550\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d", "\u255a\u2550\u255d", ".", "open", "platform", "operating", "large", "language", "model", "production", ".", "fine-tune", ",", "serve", ",", "deploy", ",", "monitor", "llm", "ease", ".", "option", ":", "-v", ",", "--", "version", "show", "version", "exit", ".", "-h", ",", "--", "help", "show", "message", "exit", ".", "command", ":", "build", "package", "given", "model", "bentollm", ".", "import", "setup", "llm", "interactively", ".", "model", "list", "supported", "model", ".", "prune", "remove", "saved", "model", ",", "(", "optionally", "bentos", ")", "built", "openllm", "locally", ".", "query", "query", "llm", "interactively", ",", "terminal", ".", "start", "start", "llmserver", "supported", "llm", ".", "start-grpc", "start", "grpc", "llmserver", "supported", "llm", ".", "extension", ":", "build-base-container", "base", "image", "builder", "bentollm", ".", "dive-bentos", "dive", "bentollm", ".", "get-containerfile", "return", "containerfile", "given", "bento", ".", "get-prompt", "get", "default", "prompt", "used", "openllm", ".", "list-bentos", "list", "available", "bentos", "built", "openllm", ".", "list-models", "equivalent", "openllm", "model", "...", "playground", "openllm", "playground", ".", "``", "`" ], [ "install openllm install openllm using ` pip ` follows : `` ` bash pip install openllm `` ` verify installation , run : `` ` bash $ openllm -h usage : openllm [ option ] command [ args ] ... \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2557 \u2588\u2588\u2557\u2588\u2588\u2557 \u2588\u2588\u2557 \u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2557 \u2588\u2588\u2554\u2550\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255d\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2554\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2554\u2588\u2588\u2588\u2588\u2554\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2550\u255d \u2588\u2588\u2554\u2550\u2550\u255d \u2588\u2588\u2551\u255a\u2588\u2588\u2557\u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u255a\u2588\u2588\u2554\u255d\u2588\u2588\u2551 \u255a\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2551 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551 \u255a\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551 \u255a\u2550\u255d \u2588\u2588\u2551 \u255a\u2550\u2550\u2550\u2550\u2550\u255d \u255a\u2550\u255d \u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d \u255a\u2550\u2550\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d \u255a\u2550\u255d .", "open platform operating large language model production .", "fine-tune , serve , deploy , monitor llm ease .", "option : -v , -- version show version exit .", "-h , -- help show message exit .", "command : build package given model bentollm .", "import setup llm interactively .", "model list supported model .", "prune remove saved model , ( optionally bentos ) built openllm locally .", "query query llm interactively , terminal .", "start start llmserver supported llm .", "start-grpc start grpc llmserver supported llm .", "extension : build-base-container base image builder bentollm .", "dive-bentos dive bentollm .", "get-containerfile return containerfile given bento .", "get-prompt get default prompt used openllm .", "list-bentos list available bentos built openllm .", "list-models equivalent openllm model ... playground openllm playground .", "`` `" ] ], "token": [ [ "install", "openllm", "install", "openllm", "using", "`", "pip", "`", "follows", ":", "``", "`", "bash", "pip", "install", "openllm", "``", "`", "verify", "installation", ",", "run", ":", "``", "`", "bash", "$", "openllm", "-h", "usage", ":", "openllm", "[", "option", "]", "command", "[", "args", "]", "...", "\u2588\u2588\u2588\u2588\u2588\u2588\u2557", "\u2588\u2588\u2588\u2588\u2588\u2588\u2557", "\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2557", "\u2588\u2588\u2557\u2588\u2588\u2557", "\u2588\u2588\u2557", "\u2588\u2588\u2588\u2557", "\u2588\u2588\u2588\u2557", "\u2588\u2588\u2554\u2550\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255d\u2588\u2588\u2588\u2588\u2557", "\u2588\u2588\u2551\u2588\u2588\u2551", "\u2588\u2588\u2551", "\u2588\u2588\u2588\u2588\u2557", "\u2588\u2588\u2588\u2588\u2551", "\u2588\u2588\u2551", "\u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2588\u2588\u2588\u2557", "\u2588\u2588\u2554\u2588\u2588\u2557", "\u2588\u2588\u2551\u2588\u2588\u2551", "\u2588\u2588\u2551", "\u2588\u2588\u2554\u2588\u2588\u2588\u2588\u2554\u2588\u2588\u2551", "\u2588\u2588\u2551", "\u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2550\u255d", "\u2588\u2588\u2554\u2550\u2550\u255d", "\u2588\u2588\u2551\u255a\u2588\u2588\u2557\u2588\u2588\u2551\u2588\u2588\u2551", "\u2588\u2588\u2551", "\u2588\u2588\u2551\u255a\u2588\u2588\u2554\u255d\u2588\u2588\u2551", "\u255a\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2551", "\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551", "\u255a\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551", "\u255a\u2550\u255d", "\u2588\u2588\u2551", "\u255a\u2550\u2550\u2550\u2550\u2550\u255d", "\u255a\u2550\u255d", "\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d", "\u255a\u2550\u2550\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d", "\u255a\u2550\u255d", ".", "open", "platform", "operating", "large", "language", "model", "production", ".", "fine-tune", ",", "serve", ",", "deploy", ",", "monitor", "llm", "ease", ".", "option", ":", "-v", ",", "--", "version", "show", "version", "exit", ".", "-h", ",", "--", "help", "show", "message", "exit", ".", "command", ":", "build", "package", "given", "model", "bentollm", ".", "import", "setup", "llm", "interactively", ".", "model", "list", "supported", "model", ".", "prune", "remove", "saved", "model", ",", "(", "optionally", "bentos", ")", "built", "openllm", "locally", ".", "query", "query", "llm", "interactively", ",", "terminal", ".", "start", "start", "llmserver", "supported", "llm", ".", "start-grpc", "start", "grpc", "llmserver", "supported", "llm", ".", "extension", ":", "build-base-container", "base", "image", "builder", "bentollm", ".", "dive-bentos", "dive", "bentollm", ".", "get-containerfile", "return", "containerfile", "given", "bento", ".", "get-prompt", "get", "default", "prompt", "used", "openllm", ".", "list-bentos", "list", "available", "bentos", "built", "openllm", ".", "list-models", "equivalent", "openllm", "model", "...", "playground", "openllm", "playground", ".", "``", "`" ], [ "install openllm install openllm using ` pip ` follows : `` ` bash pip install openllm `` ` verify installation , run : `` ` bash $ openllm -h usage : openllm [ option ] command [ args ] ... \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2557 \u2588\u2588\u2557\u2588\u2588\u2557 \u2588\u2588\u2557 \u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2557 \u2588\u2588\u2554\u2550\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255d\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2554\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2554\u2588\u2588\u2588\u2588\u2554\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2550\u255d \u2588\u2588\u2554\u2550\u2550\u255d \u2588\u2588\u2551\u255a\u2588\u2588\u2557\u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u255a\u2588\u2588\u2554\u255d\u2588\u2588\u2551 \u255a\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2551 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551 \u255a\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551 \u255a\u2550\u255d \u2588\u2588\u2551 \u255a\u2550\u2550\u2550\u2550\u2550\u255d \u255a\u2550\u255d \u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d \u255a\u2550\u2550\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d \u255a\u2550\u255d .", "open platform operating large language model production .", "fine-tune , serve , deploy , monitor llm ease .", "option : -v , -- version show version exit .", "-h , -- help show message exit .", "command : build package given model bentollm .", "import setup llm interactively .", "model list supported model .", "prune remove saved model , ( optionally bentos ) built openllm locally .", "query query llm interactively , terminal .", "start start llmserver supported llm .", "start-grpc start grpc llmserver supported llm .", "extension : build-base-container base image builder bentollm .", "dive-bentos dive bentollm .", "get-containerfile return containerfile given bento .", "get-prompt get default prompt used openllm .", "list-bentos list available bentos built openllm .", "list-models equivalent openllm model ... playground openllm playground .", "`` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "\ud83e\udde9 Supported models\n\nOpenLLM currently supports the following models. By default, OpenLLM doesn't include dependencies to run all models. The extra model-specific dependencies can be installed with the instructions below.\n\n\n

\n\nBaichuan\n\n\n", "sentence": [ [ "\ud83e\udde9", "supported", "model", "openllm", "currently", "support", "following", "model", ".", "default", ",", "openllm", "n't", "include", "dependency", "run", "model", ".", "extra", "model-specific", "dependency", "installed", "instruction", ".", "<", "!", "--", "update-readme.py", ":", "start", "--", ">", "<", "detail", ">", "<", "summary", ">", "baichuan", "<", "/summary", ">" ], [ "\ud83e\udde9 supported model openllm currently support following model .", "default , openllm n't include dependency run model .", "extra model-specific dependency installed instruction .", "< ! -- update-readme.py : start -- > < detail > < summary > baichuan < /summary >" ] ], "token": [ [ "\ud83e\udde9", "supported", "model", "openllm", "currently", "support", "following", "model", ".", "default", ",", "openllm", "n't", "include", "dependency", "run", "model", ".", "extra", "model-specific", "dependency", "installed", "instruction", ".", "<", "!", "--", "update-readme.py", ":", "start", "--", ">", "<", "detail", ">", "<", "summary", ">", "baichuan", "<", "/summary", ">" ], [ "\ud83e\udde9 supported model openllm currently support following model .", "default , openllm n't include dependency run model .", "extra model-specific dependency installed instruction .", "< ! -- update-readme.py : start -- > < detail > < summary > baichuan < /summary >" ] ], "level of complexity": -1 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Quickstart\n\n\n\n> **Note:** Baichuan requires to install with:\n> ```bash\n> pip install \"openllm[baichuan]\"\n> ```\n\n\nRun the following command to quickly spin up a Baichuan server:\n\n```bash\nTRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan-7b\n```\nIn a different terminal, run the following command to interact with the server:\n\n```bash\nexport OPENLLM_ENDPOINT=http://localhost:3000\nopenllm query 'What are large language models?'\n```\n\n\n> **Note:** Any Baichuan variants can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=baichuan) to see more Baichuan-compatible models.\n\n\n\n", "sentence": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "baichuan", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "baichuan", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "baichuan", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan-7b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "baichuan", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=baichuan", ")", "see", "baichuan-compatible", "model", "." ], [ "quickstart > * * note : * * baichuan requires install : > `` ` bash > pip install `` openllm [ baichuan ] '' > `` ` run following command quickly spin baichuan server : `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan-7b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * baichuan variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=baichuan ) see baichuan-compatible model ." ] ], "token": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "baichuan", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "baichuan", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "baichuan", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan-7b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "baichuan", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=baichuan", ")", "see", "baichuan-compatible", "model", "." ], [ "quickstart > * * note : * * baichuan requires install : > `` ` bash > pip install `` openllm [ baichuan ] '' > `` ` run following command quickly spin baichuan server : `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan-7b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * baichuan variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=baichuan ) see baichuan-compatible model ." ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nTRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan2-7b-base --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nTRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan2-7b-base --backend pt\n```\n\n
\n\n
\n\nChatGLM\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan2-7b-base", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan2-7b-base", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "chatglm", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan2-7b-base -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan2-7b-base -- backend pt `` ` < /details > < detail > < summary > chatglm < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan2-7b-base", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan2-7b-base", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "chatglm", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan2-7b-base -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan2-7b-base -- backend pt `` ` < /details > < detail > < summary > chatglm < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Quickstart\n\n\n\n> **Note:** ChatGLM requires to install with:\n> ```bash\n> pip install \"openllm[chatglm]\"\n> ```\n\n\nRun the following command to quickly spin up a ChatGLM server:\n\n```bash\nTRUST_REMOTE_CODE=True openllm start thudm/chatglm-6b\n```\nIn a different terminal, run the following command to interact with the server:\n\n```bash\nexport OPENLLM_ENDPOINT=http://localhost:3000\nopenllm query 'What are large language models?'\n```\n\n\n> **Note:** Any ChatGLM variants can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=chatglm) to see more ChatGLM-compatible models.\n\n\n\n", "sentence": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "chatglm", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "chatglm", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "chatglm", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "chatglm", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=chatglm", ")", "see", "chatglm-compatible", "model", "." ], [ "quickstart > * * note : * * chatglm requires install : > `` ` bash > pip install `` openllm [ chatglm ] '' > `` ` run following command quickly spin chatglm server : `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * chatglm variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=chatglm ) see chatglm-compatible model ." ] ], "token": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "chatglm", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "chatglm", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "chatglm", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "chatglm", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=chatglm", ")", "see", "chatglm-compatible", "model", "." ], [ "quickstart > * * note : * * chatglm requires install : > `` ` bash > pip install `` openllm [ chatglm ] '' > `` ` run following command quickly spin chatglm server : `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * chatglm variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=chatglm ) see chatglm-compatible model ." ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nTRUST_REMOTE_CODE=True openllm start thudm/chatglm-6b --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nTRUST_REMOTE_CODE=True openllm start thudm/chatglm-6b --backend pt\n```\n\n
\n\n
\n\nDollyV2\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "dollyv2", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b -- backend pt `` ` < /details > < detail > < summary > dollyv2 < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "dollyv2", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b -- backend pt `` ` < /details > < detail > < summary > dollyv2 < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nopenllm start databricks/dolly-v2-3b --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nopenllm start databricks/dolly-v2-3b --backend pt\n```\n\n
\n\n
\n\nFalcon\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "databricks/dolly-v2-3b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "databricks/dolly-v2-3b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "falcon", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start databricks/dolly-v2-3b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start databricks/dolly-v2-3b -- backend pt `` ` < /details > < detail > < summary > falcon < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "databricks/dolly-v2-3b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "databricks/dolly-v2-3b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "falcon", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start databricks/dolly-v2-3b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start databricks/dolly-v2-3b -- backend pt `` ` < /details > < detail > < summary > falcon < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Quickstart\n\n\n\n> **Note:** Falcon requires to install with:\n> ```bash\n> pip install \"openllm[falcon]\"\n> ```\n\n\nRun the following command to quickly spin up a Falcon server:\n\n```bash\nopenllm start tiiuae/falcon-7b\n```\nIn a different terminal, run the following command to interact with the server:\n\n```bash\nexport OPENLLM_ENDPOINT=http://localhost:3000\nopenllm query 'What are large language models?'\n```\n\n\n> **Note:** Any Falcon variants can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=falcon) to see more Falcon-compatible models.\n\n\n\n", "sentence": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "falcon", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "falcon", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "falcon", "server", ":", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "falcon", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=falcon", ")", "see", "falcon-compatible", "model", "." ], [ "quickstart > * * note : * * falcon requires install : > `` ` bash > pip install `` openllm [ falcon ] '' > `` ` run following command quickly spin falcon server : `` ` bash openllm start tiiuae/falcon-7b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * falcon variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=falcon ) see falcon-compatible model ." ] ], "token": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "falcon", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "falcon", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "falcon", "server", ":", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "falcon", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=falcon", ")", "see", "falcon-compatible", "model", "." ], [ "quickstart > * * note : * * falcon requires install : > `` ` bash > pip install `` openllm [ falcon ] '' > `` ` run following command quickly spin falcon server : `` ` bash openllm start tiiuae/falcon-7b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * falcon variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=falcon ) see falcon-compatible model ." ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nopenllm start tiiuae/falcon-7b --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nopenllm start tiiuae/falcon-7b --backend pt\n```\n\n
\n\n
\n\nFlanT5\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "flant5", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start tiiuae/falcon-7b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start tiiuae/falcon-7b -- backend pt `` ` < /details > < detail > < summary > flant5 < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "flant5", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start tiiuae/falcon-7b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start tiiuae/falcon-7b -- backend pt `` ` < /details > < detail > < summary > flant5 < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nopenllm start eleutherai/gpt-neox-20b --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nopenllm start eleutherai/gpt-neox-20b --backend pt\n```\n\n
\n\n
\n\nLlama\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "eleutherai/gpt-neox-20b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "eleutherai/gpt-neox-20b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "llama", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start eleutherai/gpt-neox-20b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start eleutherai/gpt-neox-20b -- backend pt `` ` < /details > < detail > < summary > llama < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "eleutherai/gpt-neox-20b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "eleutherai/gpt-neox-20b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "llama", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start eleutherai/gpt-neox-20b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start eleutherai/gpt-neox-20b -- backend pt `` ` < /details > < detail > < summary > llama < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nopenllm start meta-llama/Llama-2-70b-chat-hf --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nopenllm start meta-llama/Llama-2-70b-chat-hf --backend pt\n```\n\n
\n\n
\n\nMistral\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "meta-llama/llama-2-70b-chat-hf", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "meta-llama/llama-2-70b-chat-hf", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mistral", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start meta-llama/llama-2-70b-chat-hf -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start meta-llama/llama-2-70b-chat-hf -- backend pt `` ` < /details > < detail > < summary > mistral < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "meta-llama/llama-2-70b-chat-hf", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "meta-llama/llama-2-70b-chat-hf", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mistral", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start meta-llama/llama-2-70b-chat-hf -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start meta-llama/llama-2-70b-chat-hf -- backend pt `` ` < /details > < detail > < summary > mistral < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nopenllm start HuggingFaceH4/zephyr-7b-alpha --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nopenllm start HuggingFaceH4/zephyr-7b-alpha --backend pt\n```\n\n
\n\n
\n\nMixtral\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "huggingfaceh4/zephyr-7b-alpha", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "huggingfaceh4/zephyr-7b-alpha", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mixtral", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start huggingfaceh4/zephyr-7b-alpha -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start huggingfaceh4/zephyr-7b-alpha -- backend pt `` ` < /details > < detail > < summary > mixtral < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "huggingfaceh4/zephyr-7b-alpha", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "huggingfaceh4/zephyr-7b-alpha", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mixtral", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start huggingfaceh4/zephyr-7b-alpha -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start huggingfaceh4/zephyr-7b-alpha -- backend pt `` ` < /details > < detail > < summary > mixtral < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nopenllm start mistralai/Mixtral-8x7B-Instruct-v0.1 --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nopenllm start mistralai/Mixtral-8x7B-Instruct-v0.1 --backend pt\n```\n\n
\n\n
\n\nMPT\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "mistralai/mixtral-8x7b-instruct-v0.1", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "mistralai/mixtral-8x7b-instruct-v0.1", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mpt", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start mistralai/mixtral-8x7b-instruct-v0.1 -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start mistralai/mixtral-8x7b-instruct-v0.1 -- backend pt `` ` < /details > < detail > < summary > mpt < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "mistralai/mixtral-8x7b-instruct-v0.1", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "mistralai/mixtral-8x7b-instruct-v0.1", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mpt", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start mistralai/mixtral-8x7b-instruct-v0.1 -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start mistralai/mixtral-8x7b-instruct-v0.1 -- backend pt `` ` < /details > < detail > < summary > mpt < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Quickstart\n\n\n\n> **Note:** MPT requires to install with:\n> ```bash\n> pip install \"openllm[mpt]\"\n> ```\n\n\nRun the following command to quickly spin up a MPT server:\n\n```bash\nTRUST_REMOTE_CODE=True openllm start mosaicml/mpt-7b-instruct\n```\nIn a different terminal, run the following command to interact with the server:\n\n```bash\nexport OPENLLM_ENDPOINT=http://localhost:3000\nopenllm query 'What are large language models?'\n```\n\n\n> **Note:** Any MPT variants can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=mpt) to see more MPT-compatible models.\n\n\n\n", "sentence": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "mpt", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "mpt", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "mpt", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b-instruct", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "mpt", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=mpt", ")", "see", "mpt-compatible", "model", "." ], [ "quickstart > * * note : * * mpt requires install : > `` ` bash > pip install `` openllm [ mpt ] '' > `` ` run following command quickly spin mpt server : `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b-instruct `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * mpt variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=mpt ) see mpt-compatible model ." ] ], "token": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "mpt", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "mpt", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "mpt", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b-instruct", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "mpt", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=mpt", ")", "see", "mpt-compatible", "model", "." ], [ "quickstart > * * note : * * mpt requires install : > `` ` bash > pip install `` openllm [ mpt ] '' > `` ` run following command quickly spin mpt server : `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b-instruct `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * mpt variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=mpt ) see mpt-compatible model ." ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nTRUST_REMOTE_CODE=True openllm start mosaicml/mpt-7b --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nTRUST_REMOTE_CODE=True openllm start mosaicml/mpt-7b --backend pt\n```\n\n
\n\n
\n\nOPT\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "opt", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b -- backend pt `` ` < /details > < detail > < summary > opt < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "opt", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b -- backend pt `` ` < /details > < detail > < summary > opt < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nopenllm start facebook/opt-125m --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nopenllm start facebook/opt-125m --backend pt\n```\n\n
\n\n
\n\nPhi\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "facebook/opt-125m", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "facebook/opt-125m", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "phi", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start facebook/opt-125m -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start facebook/opt-125m -- backend pt `` ` < /details > < detail > < summary > phi < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "facebook/opt-125m", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "facebook/opt-125m", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "phi", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start facebook/opt-125m -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start facebook/opt-125m -- backend pt `` ` < /details > < detail > < summary > phi < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nTRUST_REMOTE_CODE=True openllm start microsoft/phi-1_5 --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nTRUST_REMOTE_CODE=True openllm start microsoft/phi-1_5 --backend pt\n```\n\n
\n\n
\n\nQwen\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-1_5", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-1_5", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "qwen", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start microsoft/phi-1_5 -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start microsoft/phi-1_5 -- backend pt `` ` < /details > < detail > < summary > qwen < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-1_5", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-1_5", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "qwen", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start microsoft/phi-1_5 -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start microsoft/phi-1_5 -- backend pt `` ` < /details > < detail > < summary > qwen < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Quickstart\n\n\n\n> **Note:** Qwen requires to install with:\n> ```bash\n> pip install \"openllm[qwen]\"\n> ```\n\n\nRun the following command to quickly spin up a Qwen server:\n\n```bash\nTRUST_REMOTE_CODE=True openllm start qwen/Qwen-7B-Chat\n```\nIn a different terminal, run the following command to interact with the server:\n\n```bash\nexport OPENLLM_ENDPOINT=http://localhost:3000\nopenllm query 'What are large language models?'\n```\n\n\n> **Note:** Any Qwen variants can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=qwen) to see more Qwen-compatible models.\n\n\n\n", "sentence": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "qwen", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "qwen", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "qwen", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "qwen", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=qwen", ")", "see", "qwen-compatible", "model", "." ], [ "quickstart > * * note : * * qwen requires install : > `` ` bash > pip install `` openllm [ qwen ] '' > `` ` run following command quickly spin qwen server : `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * qwen variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=qwen ) see qwen-compatible model ." ] ], "token": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "qwen", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "qwen", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "qwen", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "qwen", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=qwen", ")", "see", "qwen-compatible", "model", "." ], [ "quickstart > * * note : * * qwen requires install : > `` ` bash > pip install `` openllm [ qwen ] '' > `` ` run following command quickly spin qwen server : `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * qwen variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=qwen ) see qwen-compatible model ." ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nTRUST_REMOTE_CODE=True openllm start qwen/Qwen-7B-Chat --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nTRUST_REMOTE_CODE=True openllm start qwen/Qwen-7B-Chat --backend pt\n```\n\n
\n\n
\n\nStableLM\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "stablelm", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat -- backend pt `` ` < /details > < detail > < summary > stablelm < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "stablelm", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat -- backend pt `` ` < /details > < detail > < summary > stablelm < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nopenllm start stabilityai/stablelm-tuned-alpha-3b --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nopenllm start stabilityai/stablelm-tuned-alpha-3b --backend pt\n```\n\n
\n\n
\n\nStarCoder\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "stabilityai/stablelm-tuned-alpha-3b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "stabilityai/stablelm-tuned-alpha-3b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "starcoder", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start stabilityai/stablelm-tuned-alpha-3b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start stabilityai/stablelm-tuned-alpha-3b -- backend pt `` ` < /details > < detail > < summary > starcoder < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "stabilityai/stablelm-tuned-alpha-3b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "stabilityai/stablelm-tuned-alpha-3b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "starcoder", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start stabilityai/stablelm-tuned-alpha-3b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start stabilityai/stablelm-tuned-alpha-3b -- backend pt `` ` < /details > < detail > < summary > starcoder < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Quickstart\n\n\n\n> **Note:** StarCoder requires to install with:\n> ```bash\n> pip install \"openllm[starcoder]\"\n> ```\n\n\nRun the following command to quickly spin up a StarCoder server:\n\n```bash\nopenllm start bigcode/starcoder\n```\nIn a different terminal, run the following command to interact with the server:\n\n```bash\nexport OPENLLM_ENDPOINT=http://localhost:3000\nopenllm query 'What are large language models?'\n```\n\n\n> **Note:** Any StarCoder variants can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=starcoder) to see more StarCoder-compatible models.\n\n\n\n", "sentence": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "starcoder", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "starcoder", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "starcoder", "server", ":", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "starcoder", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=starcoder", ")", "see", "starcoder-compatible", "model", "." ], [ "quickstart > * * note : * * starcoder requires install : > `` ` bash > pip install `` openllm [ starcoder ] '' > `` ` run following command quickly spin starcoder server : `` ` bash openllm start bigcode/starcoder `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * starcoder variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=starcoder ) see starcoder-compatible model ." ] ], "token": [ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "starcoder", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "starcoder", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "starcoder", "server", ":", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "starcoder", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=starcoder", ")", "see", "starcoder-compatible", "model", "." ], [ "quickstart > * * note : * * starcoder requires install : > `` ` bash > pip install `` openllm [ starcoder ] '' > `` ` run following command quickly spin starcoder server : `` ` bash openllm start bigcode/starcoder `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * starcoder variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=starcoder ) see starcoder-compatible model ." ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nopenllm start bigcode/starcoder --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nopenllm start bigcode/starcoder --backend pt\n```\n\n
\n\n
\n\nYi\n\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "yi", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start bigcode/starcoder -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start bigcode/starcoder -- backend pt `` ` < /details > < detail > < summary > yi < /summary >" ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "yi", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start bigcode/starcoder -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start bigcode/starcoder -- backend pt `` ` < /details > < detail > < summary > yi < /summary >" ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Supported backends\n\nOpenLLM will support vLLM and PyTorch as default backend. By default, it will use vLLM if vLLM is available, otherwise fallback to PyTorch.\n\n\n\n> **Important:** We recommend user to explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`.\n\n\n\n- vLLM (Recommended):\n\n\nTo install vLLM, run `pip install \"openllm[vllm]\"`\n\n```bash\nTRUST_REMOTE_CODE=True openllm start 01-ai/Yi-6B --backend vllm\n```\n\n\n> **Important:** Using vLLM requires a GPU that has architecture newer than 8.0 to get the best performance for serving. It is recommended that for all serving usecase in production, you should choose vLLM for serving.\n\n\n\n> **Note:** Currently, adapters are yet to be supported with vLLM.\n\n\n- PyTorch:\n\n\n```bash\nTRUST_REMOTE_CODE=True openllm start 01-ai/Yi-6B --backend pt\n```\n\n
\n\n\n\nMore models will be integrated with OpenLLM and we welcome your contributions if you want to incorporate your custom LLMs into the ecosystem. Check out [Adding a New Model Guide](https://github.com/bentoml/OpenLLM/blob/main/ADDING_NEW_MODEL.md) to learn more.\n\n", "sentence": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "01-ai/yi-6b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "01-ai/yi-6b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "!", "--", "update-readme.py", ":", "stop", "--", ">", "model", "integrated", "openllm", "welcome", "contribution", "want", "incorporate", "custom", "llm", "ecosystem", ".", "check", "[", "adding", "new", "model", "guide", "]", "(", "http", ":", "//github.com/bentoml/openllm/blob/main/adding_new_model.md", ")", "learn", "." ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start 01-ai/yi-6b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start 01-ai/yi-6b -- backend pt `` ` < /details > < ! -- update-readme.py : stop -- > model integrated openllm welcome contribution want incorporate custom llm ecosystem .", "check [ adding new model guide ] ( http : //github.com/bentoml/openllm/blob/main/adding_new_model.md ) learn ." ] ], "token": [ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "01-ai/yi-6b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "01-ai/yi-6b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "!", "--", "update-readme.py", ":", "stop", "--", ">", "model", "integrated", "openllm", "welcome", "contribution", "want", "incorporate", "custom", "llm", "ecosystem", ".", "check", "[", "adding", "new", "model", "guide", "]", "(", "http", ":", "//github.com/bentoml/openllm/blob/main/adding_new_model.md", ")", "learn", "." ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start 01-ai/yi-6b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start 01-ai/yi-6b -- backend pt `` ` < /details > < ! -- update-readme.py : stop -- > model integrated openllm welcome contribution want incorporate custom llm ecosystem .", "check [ adding new model guide ] ( http : //github.com/bentoml/openllm/blob/main/adding_new_model.md ) learn ." ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "PyTorch backend\n\nWith PyTorch backend, OpenLLM supports `int8`, `int4`, and `gptq`.\n\nFor using int8 and int4 quantization through `bitsandbytes`, you can use the following command:\n\n```bash\nTRUST_REMOTE_CODE=True openllm start microsoft/phi-2 --quantize int8\n```\n\nTo run inference with\u00a0`gptq`, simply pass\u00a0`--quantize gptq`:\n\n```bash\nopenllm start TheBloke/Llama-2-7B-Chat-GPTQ --quantize gptq\n```\n\n> [!NOTE]\n> In order to run GPTQ, make sure you run\u00a0`pip install \"openllm[gptq]\"`\n> first to install the dependency. From the GPTQ paper, it is recommended to quantized the weights before serving.\n> See\u00a0[AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)\u00a0for more information on GPTQ quantization.\n\n", "sentence": [ [ "pytorch", "backend", "pytorch", "backend", ",", "openllm", "support", "`", "int8", "`", ",", "`", "int4", "`", ",", "`", "gptq", "`", ".", "using", "int8", "int4", "quantization", "`", "bitsandbytes", "`", ",", "use", "following", "command", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-2", "--", "quantize", "int8", "``", "`", "run", "inference", "`", "gptq", "`", ",", "simply", "pas", "`", "--", "quantize", "gptq", "`", ":", "``", "`", "bash", "openllm", "start", "thebloke/llama-2-7b-chat-gptq", "--", "quantize", "gptq", "``", "`", ">", "[", "!", "note", "]", ">", "order", "run", "gptq", ",", "make", "sure", "run", "`", "pip", "install", "``", "openllm", "[", "gptq", "]", "''", "`", ">", "first", "install", "dependency", ".", "gptq", "paper", ",", "recommended", "quantized", "weight", "serving", ".", ">", "see", "[", "autogptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "information", "gptq", "quantization", "." ], [ "pytorch backend pytorch backend , openllm support ` int8 ` , ` int4 ` , ` gptq ` .", "using int8 int4 quantization ` bitsandbytes ` , use following command : `` ` bash trust_remote_code=true openllm start microsoft/phi-2 -- quantize int8 `` ` run inference ` gptq ` , simply pas ` -- quantize gptq ` : `` ` bash openllm start thebloke/llama-2-7b-chat-gptq -- quantize gptq `` ` > [ ! note ] > order run gptq , make sure run ` pip install `` openllm [ gptq ] '' ` > first install dependency .", "gptq paper , recommended quantized weight serving .", "> see [ autogptq ] ( http : //github.com/panqiwei/autogptq ) information gptq quantization ." ] ], "token": [ [ "pytorch", "backend", "pytorch", "backend", ",", "openllm", "support", "`", "int8", "`", ",", "`", "int4", "`", ",", "`", "gptq", "`", ".", "using", "int8", "int4", "quantization", "`", "bitsandbytes", "`", ",", "use", "following", "command", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-2", "--", "quantize", "int8", "``", "`", "run", "inference", "`", "gptq", "`", ",", "simply", "pas", "`", "--", "quantize", "gptq", "`", ":", "``", "`", "bash", "openllm", "start", "thebloke/llama-2-7b-chat-gptq", "--", "quantize", "gptq", "``", "`", ">", "[", "!", "note", "]", ">", "order", "run", "gptq", ",", "make", "sure", "run", "`", "pip", "install", "``", "openllm", "[", "gptq", "]", "''", "`", ">", "first", "install", "dependency", ".", "gptq", "paper", ",", "recommended", "quantized", "weight", "serving", ".", ">", "see", "[", "autogptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "information", "gptq", "quantization", "." ], [ "pytorch backend pytorch backend , openllm support ` int8 ` , ` int4 ` , ` gptq ` .", "using int8 int4 quantization ` bitsandbytes ` , use following command : `` ` bash trust_remote_code=true openllm start microsoft/phi-2 -- quantize int8 `` ` run inference ` gptq ` , simply pas ` -- quantize gptq ` : `` ` bash openllm start thebloke/llama-2-7b-chat-gptq -- quantize gptq `` ` > [ ! note ] > order run gptq , make sure run ` pip install `` openllm [ gptq ] '' ` > first install dependency .", "gptq paper , recommended quantized weight serving .", "> see [ autogptq ] ( http : //github.com/panqiwei/autogptq ) information gptq quantization ." ] ], "level of complexity": 0 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "\ud83d\udc0d Python SDK\n\nEach LLM can be instantiated with `openllm.LLM`:\n\n```python\nimport openllm\n\nllm = openllm.LLM('microsoft/phi-2')\n```\n\nThe main inference API is the streaming `generate_iterator` method:\n\n```python\nasync for generation in llm.generate_iterator('What is the meaning of life?'):\n print(generation.outputs[0].text)\n```\n\n> [!NOTE]\n> The motivation behind making `llm.generate_iterator` an async generator is to provide support for Continuous batching with vLLM backend. By having the async endpoints, each prompt\n> will be added correctly to the request queue to process with vLLM backend.\n\nThere is also a _one-shot_ `generate` method:\n\n```python\nawait llm.generate('What is the meaning of life?')\n```\n\nThis method is easy to use for one-shot generation use case, but merely served as an example how to use `llm.generate_iterator` as it uses `generate_iterator` under the hood.\n\n> [!IMPORTANT]\n> If you need to call your code in a synchronous context, you can use `asyncio.run` that wraps an async function:\n>\n> ```python\n> import asyncio\n> async def generate(prompt, **attrs): return await llm.generate(prompt, **attrs)\n> asyncio.run(generate(\"The meaning of life is\", temperature=0.23))\n> ```\n\n", "sentence": [ [ "\ud83d\udc0d", "python", "sdk", "llm", "instantiated", "`", "openllm.llm", "`", ":", "``", "`", "python", "import", "openllm", "llm", "=", "openllm.llm", "(", "'microsoft/phi-2", "'", ")", "``", "`", "main", "inference", "api", "streaming", "`", "generate_iterator", "`", "method", ":", "``", "`", "python", "async", "generation", "llm.generate_iterator", "(", "'what", "meaning", "life", "?", "'", ")", ":", "print", "(", "generation.outputs", "[", "0", "]", ".text", ")", "``", "`", ">", "[", "!", "note", "]", ">", "motivation", "behind", "making", "`", "llm.generate_iterator", "`", "async", "generator", "provide", "support", "continuous", "batching", "vllm", "backend", ".", "async", "endpoint", ",", "prompt", ">", "added", "correctly", "request", "queue", "process", "vllm", "backend", ".", "also", "_one-shot_", "`", "generate", "`", "method", ":", "``", "`", "python", "await", "llm.generate", "(", "'what", "meaning", "life", "?", "'", ")", "``", "`", "method", "easy", "use", "one-shot", "generation", "use", "case", ",", "merely", "served", "example", "use", "`", "llm.generate_iterator", "`", "us", "`", "generate_iterator", "`", "hood", ".", ">", "[", "!", "important", "]", ">", "need", "call", "code", "synchronous", "context", ",", "use", "`", "asyncio.run", "`", "wrap", "async", "function", ":", ">", ">", "``", "`", "python", ">", "import", "asyncio", ">", "async", "def", "generate", "(", "prompt", ",", "*", "*", "attrs", ")", ":", "return", "await", "llm.generate", "(", "prompt", ",", "*", "*", "attrs", ")", ">", "asyncio.run", "(", "generate", "(", "``", "meaning", "life", "''", ",", "temperature=0.23", ")", ")", ">", "``", "`" ], [ "\ud83d\udc0d python sdk llm instantiated ` openllm.llm ` : `` ` python import openllm llm = openllm.llm ( 'microsoft/phi-2 ' ) `` ` main inference api streaming ` generate_iterator ` method : `` ` python async generation llm.generate_iterator ( 'what meaning life ?", "' ) : print ( generation.outputs [ 0 ] .text ) `` ` > [ ! note ] > motivation behind making ` llm.generate_iterator ` async generator provide support continuous batching vllm backend .", "async endpoint , prompt > added correctly request queue process vllm backend .", "also _one-shot_ ` generate ` method : `` ` python await llm.generate ( 'what meaning life ? ' )", "`` ` method easy use one-shot generation use case , merely served example use ` llm.generate_iterator ` us ` generate_iterator ` hood .", "> [ ! important ] > need call code synchronous context , use ` asyncio.run ` wrap async function : > > `` ` python > import asyncio > async def generate ( prompt , * * attrs ) : return await llm.generate ( prompt , * * attrs ) > asyncio.run ( generate ( `` meaning life '' , temperature=0.23 ) ) > `` `" ] ], "token": [ [ "\ud83d\udc0d", "python", "sdk", "llm", "instantiated", "`", "openllm.llm", "`", ":", "``", "`", "python", "import", "openllm", "llm", "=", "openllm.llm", "(", "'microsoft/phi-2", "'", ")", "``", "`", "main", "inference", "api", "streaming", "`", "generate_iterator", "`", "method", ":", "``", "`", "python", "async", "generation", "llm.generate_iterator", "(", "'what", "meaning", "life", "?", "'", ")", ":", "print", "(", "generation.outputs", "[", "0", "]", ".text", ")", "``", "`", ">", "[", "!", "note", "]", ">", "motivation", "behind", "making", "`", "llm.generate_iterator", "`", "async", "generator", "provide", "support", "continuous", "batching", "vllm", "backend", ".", "async", "endpoint", ",", "prompt", ">", "added", "correctly", "request", "queue", "process", "vllm", "backend", ".", "also", "_one-shot_", "`", "generate", "`", "method", ":", "``", "`", "python", "await", "llm.generate", "(", "'what", "meaning", "life", "?", "'", ")", "``", "`", "method", "easy", "use", "one-shot", "generation", "use", "case", ",", "merely", "served", "example", "use", "`", "llm.generate_iterator", "`", "us", "`", "generate_iterator", "`", "hood", ".", ">", "[", "!", "important", "]", ">", "need", "call", "code", "synchronous", "context", ",", "use", "`", "asyncio.run", "`", "wrap", "async", "function", ":", ">", ">", "``", "`", "python", ">", "import", "asyncio", ">", "async", "def", "generate", "(", "prompt", ",", "*", "*", "attrs", ")", ":", "return", "await", "llm.generate", "(", "prompt", ",", "*", "*", "attrs", ")", ">", "asyncio.run", "(", "generate", "(", "``", "meaning", "life", "''", ",", "temperature=0.23", ")", ")", ">", "``", "`" ], [ "\ud83d\udc0d python sdk llm instantiated ` openllm.llm ` : `` ` python import openllm llm = openllm.llm ( 'microsoft/phi-2 ' ) `` ` main inference api streaming ` generate_iterator ` method : `` ` python async generation llm.generate_iterator ( 'what meaning life ?", "' ) : print ( generation.outputs [ 0 ] .text ) `` ` > [ ! note ] > motivation behind making ` llm.generate_iterator ` async generator provide support continuous batching vllm backend .", "async endpoint , prompt > added correctly request queue process vllm backend .", "also _one-shot_ ` generate ` method : `` ` python await llm.generate ( 'what meaning life ? ' )", "`` ` method easy use one-shot generation use case , merely served example use ` llm.generate_iterator ` us ` generate_iterator ` hood .", "> [ ! important ] > need call code synchronous context , use ` asyncio.run ` wrap async function : > > `` ` python > import asyncio > async def generate ( prompt , * * attrs ) : return await llm.generate ( prompt , * * attrs ) > asyncio.run ( generate ( `` meaning life '' , temperature=0.23 ) ) > `` `" ] ], "level of complexity": -1 }, { "url": "https://github.com/bentoml/OpenLLM", "readme_url": "https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md", "topic": [ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ], "text": "Transformers Agents\n\nOpenLLM seamlessly integrates with\n[Transformers Agents](https://huggingface.co/docs/transformers/transformers_agents).\n\n> [!WARNING]\n> The Transformers Agent is still at an experimental stage. It is\n> recommended to install OpenLLM with `pip install -r nightly-requirements.txt`\n> to get the latest API update for HuggingFace agent.\n\n```python\nimport transformers\n\nagent = transformers.HfAgent('http://localhost:3000/hf/agent') ", "sentence": [ [ "transformer", "agent", "openllm", "seamlessly", "integrates", "[", "transformer", "agent", "]", "(", "http", ":", "//huggingface.co/docs/transformers/transformers_agents", ")", ".", ">", "[", "!", "warning", "]", ">", "transformer", "agent", "still", "experimental", "stage", ".", ">", "recommended", "install", "openllm", "`", "pip", "install", "-r", "nightly-requirements.txt", "`", ">", "get", "latest", "api", "update", "huggingface", "agent", ".", "``", "`", "python", "import", "transformer", "agent", "=", "transformers.hfagent", "(", "'http", ":", "//localhost:3000/hf/agent", "'", ")" ], [ "transformer agent openllm seamlessly integrates [ transformer agent ] ( http : //huggingface.co/docs/transformers/transformers_agents ) .", "> [ ! warning ] > transformer agent still experimental stage .", "> recommended install openllm ` pip install -r nightly-requirements.txt ` > get latest api update huggingface agent .", "`` ` python import transformer agent = transformers.hfagent ( 'http : //localhost:3000/hf/agent ' )" ] ], "token": [ [ "transformer", "agent", "openllm", "seamlessly", "integrates", "[", "transformer", "agent", "]", "(", "http", ":", "//huggingface.co/docs/transformers/transformers_agents", ")", ".", ">", "[", "!", "warning", "]", ">", "transformer", "agent", "still", "experimental", "stage", ".", ">", "recommended", "install", "openllm", "`", "pip", "install", "-r", "nightly-requirements.txt", "`", ">", "get", "latest", "api", "update", "huggingface", "agent", ".", "``", "`", "python", "import", "transformer", "agent", "=", "transformers.hfagent", "(", "'http", ":", "//localhost:3000/hf/agent", "'", ")" ], [ "transformer agent openllm seamlessly integrates [ transformer agent ] ( http : //huggingface.co/docs/transformers/transformers_agents ) .", "> [ ! warning ] > transformer agent still experimental stage .", "> recommended install openllm ` pip install -r nightly-requirements.txt ` > get latest api update huggingface agent .", "`` ` python import transformer agent = transformers.hfagent ( 'http : //localhost:3000/hf/agent ' )" ] ], "level of complexity": 0 }, { "url": "https://github.com/cpacker/MemGPT", "readme_url": "https://raw.githubusercontent.com/cpacker/MemGPT/main/README.md", "topic": [ "chat", "chatbot", "gpt", "gpt-4", "llm", "llm-agent" ], "text": "Quick setup\r\nJoin Discord and message the MemGPT bot (in the `#memgpt` channel). Then run the following commands (messaged to \"MemGPT Bot\"):\r\n* `/profile` (to create your profile)\r\n* `/key` (to enter your OpenAI key)\r\n* `/create` (to create a MemGPT chatbot)\r\n\r\nMake sure your privacy settings on this server are open so that MemGPT Bot can DM you: \\\r\nMemGPT \u2192 Privacy Settings \u2192 Direct Messages set to ON\r\n
\r\n \"set\r\n
\r\n\r\nYou can see the full list of available commands when you enter `/` into the message box.\r\n
\r\n \"MemGPT\r\n
\r\n\r\n", "sentence": [ [ "quick", "setup", "join", "<", "href=", "''", "http", ":", "//discord.gg/9geqrxmvye", "''", ">", "discord", "<", "/a", ">", "<", "/strong", ">", "message", "memgpt", "bot", "(", "`", "#", "memgpt", "`", "channel", ")", ".", "run", "following", "command", "(", "messaged", "``", "memgpt", "bot", "''", ")", ":", "*", "`", "/profile", "`", "(", "create", "profile", ")", "*", "`", "/key", "`", "(", "enter", "openai", "key", ")", "*", "`", "/create", "`", "(", "create", "memgpt", "chatbot", ")", "make", "sure", "privacy", "setting", "server", "open", "memgpt", "bot", "dm", ":", "\\", "memgpt", "\u2192", "privacy", "setting", "\u2192", "direct", "message", "set", "<", "div", "align=", "''", "center", "''", ">", "<", "img", "src=", "''", "http", ":", "//research.memgpt.ai/assets/img/discord/dm_settings.png", "''", "alt=", "''", "set", "dm", "setting", "memgpt", "server", "open", "memgpt", "memgpt", "bot", "message", "''", "width=", "''", "400", "''", ">", "<", "/div", ">", "see", "full", "list", "available", "command", "enter", "`", "/", "`", "message", "box", ".", "<", "div", "align=", "''", "center", "''", ">", "<", "img", "src=", "''", "http", ":", "//research.memgpt.ai/assets/img/discord/slash_commands.png", "''", "alt=", "''", "memgpt", "bot", "slash", "command", "''", "width=", "''", "400", "''", ">", "<", "/div", ">" ], [ "quick setup join < href= '' http : //discord.gg/9geqrxmvye '' > discord < /a > < /strong > message memgpt bot ( ` # memgpt ` channel ) .", "run following command ( messaged `` memgpt bot '' ) : * ` /profile ` ( create profile ) * ` /key ` ( enter openai key ) * ` /create ` ( create memgpt chatbot ) make sure privacy setting server open memgpt bot dm : \\ memgpt \u2192 privacy setting \u2192 direct message set < div align= '' center '' > < img src= '' http : //research.memgpt.ai/assets/img/discord/dm_settings.png '' alt= '' set dm setting memgpt server open memgpt memgpt bot message '' width= '' 400 '' > < /div > see full list available command enter ` / ` message box .", "< div align= '' center '' > < img src= '' http : //research.memgpt.ai/assets/img/discord/slash_commands.png '' alt= '' memgpt bot slash command '' width= '' 400 '' > < /div >" ] ], "token": [ [ "quick", "setup", "join", "<", "href=", "''", "http", ":", "//discord.gg/9geqrxmvye", "''", ">", "discord", "<", "/a", ">", "<", "/strong", ">", "message", "memgpt", "bot", "(", "`", "#", "memgpt", "`", "channel", ")", ".", "run", "following", "command", "(", "messaged", "``", "memgpt", "bot", "''", ")", ":", "*", "`", "/profile", "`", "(", "create", "profile", ")", "*", "`", "/key", "`", "(", "enter", "openai", "key", ")", "*", "`", "/create", "`", "(", "create", "memgpt", "chatbot", ")", "make", "sure", "privacy", "setting", "server", "open", "memgpt", "bot", "dm", ":", "\\", "memgpt", "\u2192", "privacy", "setting", "\u2192", "direct", "message", "set", "<", "div", "align=", "''", "center", "''", ">", "<", "img", "src=", "''", "http", ":", "//research.memgpt.ai/assets/img/discord/dm_settings.png", "''", "alt=", "''", "set", "dm", "setting", "memgpt", "server", "open", "memgpt", "memgpt", "bot", "message", "''", "width=", "''", "400", "''", ">", "<", "/div", ">", "see", "full", "list", "available", "command", "enter", "`", "/", "`", "message", "box", ".", "<", "div", "align=", "''", "center", "''", ">", "<", "img", "src=", "''", "http", ":", "//research.memgpt.ai/assets/img/discord/slash_commands.png", "''", "alt=", "''", "memgpt", "bot", "slash", "command", "''", "width=", "''", "400", "''", ">", "<", "/div", ">" ], [ "quick setup join < href= '' http : //discord.gg/9geqrxmvye '' > discord < /a > < /strong > message memgpt bot ( ` # memgpt ` channel ) .", "run following command ( messaged `` memgpt bot '' ) : * ` /profile ` ( create profile ) * ` /key ` ( enter openai key ) * ` /create ` ( create memgpt chatbot ) make sure privacy setting server open memgpt bot dm : \\ memgpt \u2192 privacy setting \u2192 direct message set < div align= '' center '' > < img src= '' http : //research.memgpt.ai/assets/img/discord/dm_settings.png '' alt= '' set dm setting memgpt server open memgpt memgpt bot message '' width= '' 400 '' > < /div > see full list available command enter ` / ` message box .", "< div align= '' center '' > < img src= '' http : //research.memgpt.ai/assets/img/discord/slash_commands.png '' alt= '' memgpt bot slash command '' width= '' 400 '' > < /div >" ] ], "level of complexity": -1 }, { "url": "https://github.com/cpacker/MemGPT", "readme_url": "https://raw.githubusercontent.com/cpacker/MemGPT/main/README.md", "topic": [ "chat", "chatbot", "gpt", "gpt-4", "llm", "llm-agent" ], "text": "Running MemGPT locally\r\nInstall MemGPT:\r\n```sh\r\npip install -U pymemgpt\r\n```\r\n\r\nNow, you can run MemGPT and start chatting with a MemGPT agent with:\r\n```sh\r\nmemgpt run\r\n```\r\n\r\nIf you're running MemGPT for the first time, you'll see two quickstart options:\r\n\r\n1. **OpenAI**: select this if you'd like to run MemGPT with OpenAI models like GPT-4 (requires an OpenAI API key)\r\n2. **MemGPT Free Endpoint**: select this if you'd like to try MemGPT on a top open LLM for free (currently variants of Mixtral 8x7b!)\r\n\r\nNeither of these options require you to have an LLM running on your own machine. If you'd like to run MemGPT with your custom LLM setup (or on OpenAI Azure), select **Other** to proceed to the advanced setup.\r\n\r\n", "sentence": [ [ "running", "memgpt", "locally", "install", "memgpt", ":", "``", "`", "sh", "pip", "install", "-u", "pymemgpt", "``", "`", ",", "run", "memgpt", "start", "chatting", "memgpt", "agent", ":", "``", "`", "sh", "memgpt", "run", "``", "`", "'re", "running", "memgpt", "first", "time", ",", "'ll", "see", "two", "quickstart", "option", ":", "1", ".", "*", "*", "openai", "*", "*", ":", "select", "'d", "like", "run", "memgpt", "openai", "model", "like", "gpt-4", "(", "requires", "openai", "api", "key", ")", "2", ".", "*", "*", "memgpt", "free", "endpoint", "*", "*", ":", "select", "'d", "like", "try", "memgpt", "top", "open", "llm", "free", "(", "currently", "variant", "mixtral", "8x7b", "!", ")", "neither", "option", "require", "llm", "running", "machine", ".", "'d", "like", "run", "memgpt", "custom", "llm", "setup", "(", "openai", "azure", ")", ",", "select", "*", "*", "*", "*", "proceed", "advanced", "setup", "." ], [ "running memgpt locally install memgpt : `` ` sh pip install -u pymemgpt `` ` , run memgpt start chatting memgpt agent : `` ` sh memgpt run `` ` 're running memgpt first time , 'll see two quickstart option : 1 .", "* * openai * * : select 'd like run memgpt openai model like gpt-4 ( requires openai api key ) 2 .", "* * memgpt free endpoint * * : select 'd like try memgpt top open llm free ( currently variant mixtral 8x7b ! )", "neither option require llm running machine .", "'d like run memgpt custom llm setup ( openai azure ) , select * * * * proceed advanced setup ." ] ], "token": [ [ "running", "memgpt", "locally", "install", "memgpt", ":", "``", "`", "sh", "pip", "install", "-u", "pymemgpt", "``", "`", ",", "run", "memgpt", "start", "chatting", "memgpt", "agent", ":", "``", "`", "sh", "memgpt", "run", "``", "`", "'re", "running", "memgpt", "first", "time", ",", "'ll", "see", "two", "quickstart", "option", ":", "1", ".", "*", "*", "openai", "*", "*", ":", "select", "'d", "like", "run", "memgpt", "openai", "model", "like", "gpt-4", "(", "requires", "openai", "api", "key", ")", "2", ".", "*", "*", "memgpt", "free", "endpoint", "*", "*", ":", "select", "'d", "like", "try", "memgpt", "top", "open", "llm", "free", "(", "currently", "variant", "mixtral", "8x7b", "!", ")", "neither", "option", "require", "llm", "running", "machine", ".", "'d", "like", "run", "memgpt", "custom", "llm", "setup", "(", "openai", "azure", ")", ",", "select", "*", "*", "*", "*", "proceed", "advanced", "setup", "." ], [ "running memgpt locally install memgpt : `` ` sh pip install -u pymemgpt `` ` , run memgpt start chatting memgpt agent : `` ` sh memgpt run `` ` 're running memgpt first time , 'll see two quickstart option : 1 .", "* * openai * * : select 'd like run memgpt openai model like gpt-4 ( requires openai api key ) 2 .", "* * memgpt free endpoint * * : select 'd like try memgpt top open llm free ( currently variant mixtral 8x7b ! )", "neither option require llm running machine .", "'d like run memgpt custom llm setup ( openai azure ) , select * * * * proceed advanced setup ." ] ], "level of complexity": 0 }, { "url": "https://github.com/cpacker/MemGPT", "readme_url": "https://raw.githubusercontent.com/cpacker/MemGPT/main/README.md", "topic": [ "chat", "chatbot", "gpt", "gpt-4", "llm", "llm-agent" ], "text": "Advanced setup\r\nYou can reconfigure MemGPT's default settings by running:\r\n```sh\r\nmemgpt configure\r\n```\r\n\r\n", "sentence": [ [ "advanced", "setup", "reconfigure", "memgpt", "'s", "default", "setting", "running", ":", "``", "`", "sh", "memgpt", "configure", "``", "`" ], [ "advanced setup reconfigure memgpt 's default setting running : `` ` sh memgpt configure `` `" ] ], "token": [ [ "advanced", "setup", "reconfigure", "memgpt", "'s", "default", "setting", "running", ":", "``", "`", "sh", "memgpt", "configure", "``", "`" ], [ "advanced setup reconfigure memgpt 's default setting running : `` ` sh memgpt configure `` `" ] ], "level of complexity": -1 }, { "url": "https://github.com/cpacker/MemGPT", "readme_url": "https://raw.githubusercontent.com/cpacker/MemGPT/main/README.md", "topic": [ "chat", "chatbot", "gpt", "gpt-4", "llm", "llm-agent" ], "text": "Installing from source\r\nTo install MemGPT from source, start by cloning the repo:\r\n```sh\r\ngit clone git@github.com:cpacker/MemGPT.git\r\n```\r\n\r\nThen navigate to the main `MemGPT` directory, and do:\r\n```sh\r\npip install -e .\r\n```\r\n\r\nNow, you should be able to run `memgpt` from the command-line using the downloaded source code.\r\n\r\nIf you are having dependency issues using `pip install -e .`, we recommend you install the package using Poetry (see below). Installing MemGPT from source using Poetry will ensure that you are using exact package versions that have been tested for the production build.\r\n\r\n
\r\n \r\n Installing from source (using Poetry)\r\n \r\n\r\nFirst, install Poetry using [the official instructions here](https://python-poetry.org/docs/#installing-with-the-official-installer).\r\n\r\nThen, you can install MemGPT from source with:\r\n```sh\r\ngit clone git@github.com:cpacker/MemGPT.git\r\npoetry shell\r\npoetry install\r\n```\r\n
\r\n\r\n", "sentence": [ [ "installing", "source", "install", "memgpt", "source", ",", "start", "cloning", "repo", ":", "``", "`", "sh", "git", "clone", "git", "@", "github.com", ":", "cpacker/memgpt.git", "``", "`", "navigate", "main", "`", "memgpt", "`", "directory", ",", ":", "``", "`", "sh", "pip", "install", "-e", ".", "``", "`", ",", "able", "run", "`", "memgpt", "`", "command-line", "using", "downloaded", "source", "code", ".", "dependency", "issue", "using", "`", "pip", "install", "-e", ".", "`", ",", "recommend", "install", "package", "using", "poetry", "(", "see", ")", ".", "installing", "memgpt", "source", "using", "poetry", "ensure", "using", "exact", "package", "version", "tested", "production", "build", ".", "<", "detail", ">", "<", "summary", ">", "<", "strong", ">", "installing", "source", "(", "using", "poetry", ")", "<", "/strong", ">", "<", "/summary", ">", "first", ",", "install", "poetry", "using", "[", "official", "instruction", "]", "(", "http", ":", "//python-poetry.org/docs/", "#", "installing-with-the-official-installer", ")", ".", ",", "install", "memgpt", "source", ":", "``", "`", "sh", "git", "clone", "git", "@", "github.com", ":", "cpacker/memgpt.git", "poetry", "shell", "poetry", "install", "``", "`", "<", "/details", ">" ], [ "installing source install memgpt source , start cloning repo : `` ` sh git clone git @ github.com : cpacker/memgpt.git `` ` navigate main ` memgpt ` directory , : `` ` sh pip install -e .", "`` ` , able run ` memgpt ` command-line using downloaded source code .", "dependency issue using ` pip install -e . ` , recommend install package using poetry ( see ) .", "installing memgpt source using poetry ensure using exact package version tested production build .", "< detail > < summary > < strong > installing source ( using poetry ) < /strong > < /summary > first , install poetry using [ official instruction ] ( http : //python-poetry.org/docs/ # installing-with-the-official-installer ) .", ", install memgpt source : `` ` sh git clone git @ github.com : cpacker/memgpt.git poetry shell poetry install `` ` < /details >" ] ], "token": [ [ "installing", "source", "install", "memgpt", "source", ",", "start", "cloning", "repo", ":", "``", "`", "sh", "git", "clone", "git", "@", "github.com", ":", "cpacker/memgpt.git", "``", "`", "navigate", "main", "`", "memgpt", "`", "directory", ",", ":", "``", "`", "sh", "pip", "install", "-e", ".", "``", "`", ",", "able", "run", "`", "memgpt", "`", "command-line", "using", "downloaded", "source", "code", ".", "dependency", "issue", "using", "`", "pip", "install", "-e", ".", "`", ",", "recommend", "install", "package", "using", "poetry", "(", "see", ")", ".", "installing", "memgpt", "source", "using", "poetry", "ensure", "using", "exact", "package", "version", "tested", "production", "build", ".", "<", "detail", ">", "<", "summary", ">", "<", "strong", ">", "installing", "source", "(", "using", "poetry", ")", "<", "/strong", ">", "<", "/summary", ">", "first", ",", "install", "poetry", "using", "[", "official", "instruction", "]", "(", "http", ":", "//python-poetry.org/docs/", "#", "installing-with-the-official-installer", ")", ".", ",", "install", "memgpt", "source", ":", "``", "`", "sh", "git", "clone", "git", "@", "github.com", ":", "cpacker/memgpt.git", "poetry", "shell", "poetry", "install", "``", "`", "<", "/details", ">" ], [ "installing source install memgpt source , start cloning repo : `` ` sh git clone git @ github.com : cpacker/memgpt.git `` ` navigate main ` memgpt ` directory , : `` ` sh pip install -e .", "`` ` , able run ` memgpt ` command-line using downloaded source code .", "dependency issue using ` pip install -e . ` , recommend install package using poetry ( see ) .", "installing memgpt source using poetry ensure using exact package version tested production build .", "< detail > < summary > < strong > installing source ( using poetry ) < /strong > < /summary > first , install poetry using [ official instruction ] ( http : //python-poetry.org/docs/ # installing-with-the-official-installer ) .", ", install memgpt source : `` ` sh git clone git @ github.com : cpacker/memgpt.git poetry shell poetry install `` ` < /details >" ] ], "level of complexity": 0 }, { "url": "https://github.com/embedchain/embedchain", "readme_url": "https://raw.githubusercontent.com/embedchain/embedchain/main/README.md", "topic": [ "ai", "application", "chatbots", "chatgpt", "embeddings", "llm", "python", "rag", "vector-database" ], "text": "\ud83d\udd27 Quick install\n\n", "sentence": [ [ "\ud83d\udd27", "quick", "install" ], [ "\ud83d\udd27 quick install" ] ], "token": [ [ "\ud83d\udd27", "quick", "install" ], [ "\ud83d\udd27 quick install" ] ], "level of complexity": -1 }, { "url": "https://github.com/embedchain/embedchain", "readme_url": "https://raw.githubusercontent.com/embedchain/embedchain/main/README.md", "topic": [ "ai", "application", "chatbots", "chatgpt", "embeddings", "llm", "python", "rag", "vector-database" ], "text": "Python API\n\n```bash\npip install embedchain\n```\n\n", "sentence": [ [ "python", "api", "``", "`", "bash", "pip", "install", "embedchain", "``", "`" ], [ "python api `` ` bash pip install embedchain `` `" ] ], "token": [ [ "python", "api", "``", "`", "bash", "pip", "install", "embedchain", "``", "`" ], [ "python api `` ` bash pip install embedchain `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/embedchain/embedchain", "readme_url": "https://raw.githubusercontent.com/embedchain/embedchain/main/README.md", "topic": [ "ai", "application", "chatbots", "chatgpt", "embeddings", "llm", "python", "rag", "vector-database" ], "text": "\ud83d\udcd6 Documentation\nComprehensive guides and API documentation are available to help you get the most out of Embedchain:\n\n- [Introduction](https://docs.embedchain.ai/get-started/introduction#what-is-embedchain)\n- [Getting Started](https://docs.embedchain.ai/get-started/quickstart)\n- [Examples](https://docs.embedchain.ai/examples)\n- [Supported data types](https://docs.embedchain.ai/components/data-sources/overview)\n\n", "sentence": [ [ "\ud83d\udcd6", "documentation", "comprehensive", "guide", "api", "documentation", "available", "help", "get", "embedchain", ":", "-", "[", "introduction", "]", "(", "http", ":", "//docs.embedchain.ai/get-started/introduction", "#", "what-is-embedchain", ")", "-", "[", "getting", "started", "]", "(", "http", ":", "//docs.embedchain.ai/get-started/quickstart", ")", "-", "[", "example", "]", "(", "http", ":", "//docs.embedchain.ai/examples", ")", "-", "[", "supported", "data", "type", "]", "(", "http", ":", "//docs.embedchain.ai/components/data-sources/overview", ")" ], [ "\ud83d\udcd6 documentation comprehensive guide api documentation available help get embedchain : - [ introduction ] ( http : //docs.embedchain.ai/get-started/introduction # what-is-embedchain ) - [ getting started ] ( http : //docs.embedchain.ai/get-started/quickstart ) - [ example ] ( http : //docs.embedchain.ai/examples ) - [ supported data type ] ( http : //docs.embedchain.ai/components/data-sources/overview )" ] ], "token": [ [ "\ud83d\udcd6", "documentation", "comprehensive", "guide", "api", "documentation", "available", "help", "get", "embedchain", ":", "-", "[", "introduction", "]", "(", "http", ":", "//docs.embedchain.ai/get-started/introduction", "#", "what-is-embedchain", ")", "-", "[", "getting", "started", "]", "(", "http", ":", "//docs.embedchain.ai/get-started/quickstart", ")", "-", "[", "example", "]", "(", "http", ":", "//docs.embedchain.ai/examples", ")", "-", "[", "supported", "data", "type", "]", "(", "http", ":", "//docs.embedchain.ai/components/data-sources/overview", ")" ], [ "\ud83d\udcd6 documentation comprehensive guide api documentation available help get embedchain : - [ introduction ] ( http : //docs.embedchain.ai/get-started/introduction # what-is-embedchain ) - [ getting started ] ( http : //docs.embedchain.ai/get-started/quickstart ) - [ example ] ( http : //docs.embedchain.ai/examples ) - [ supported data type ] ( http : //docs.embedchain.ai/components/data-sources/overview )" ] ], "level of complexity": -1 }, { "url": "https://github.com/activeloopai/deeplake", "readme_url": "https://raw.githubusercontent.com/activeloopai/deeplake/main/README.md", "topic": [ "ai", "computer-vision", "cv", "data-science", "data-version-control", "datalake", "datasets", "deep-learning", "image-processing", "langchain", "large-language-models", "llm", "machine-learning", "ml", "mlops", "python", "pytorch", "tensorflow", "vector-database", "vector-search" ], "text": "\ud83d\ude80 How to install Deep Lake\nDeep Lake can be installed using pip:\n```sh\npip3 install deeplake\n```\n**By default, Deep Lake does not install dependencies for audio, video, google-cloud, and other features. Details on all installation options are [available here](https://docs.deeplake.ai/en/latest/Installation.html).**\n\n", "sentence": [ [ "\ud83d\ude80", "install", "deep", "lake", "deep", "lake", "installed", "using", "pip", ":", "``", "`", "sh", "pip3", "install", "deeplake", "``", "`", "*", "*", "default", ",", "deep", "lake", "install", "dependency", "audio", ",", "video", ",", "google-cloud", ",", "feature", ".", "detail", "installation", "option", "[", "available", "]", "(", "http", ":", "//docs.deeplake.ai/en/latest/installation.html", ")", ".", "*", "*" ], [ "\ud83d\ude80 install deep lake deep lake installed using pip : `` ` sh pip3 install deeplake `` ` * * default , deep lake install dependency audio , video , google-cloud , feature .", "detail installation option [ available ] ( http : //docs.deeplake.ai/en/latest/installation.html ) .", "* *" ] ], "token": [ [ "\ud83d\ude80", "install", "deep", "lake", "deep", "lake", "installed", "using", "pip", ":", "``", "`", "sh", "pip3", "install", "deeplake", "``", "`", "*", "*", "default", ",", "deep", "lake", "install", "dependency", "audio", ",", "video", ",", "google-cloud", ",", "feature", ".", "detail", "installation", "option", "[", "available", "]", "(", "http", ":", "//docs.deeplake.ai/en/latest/installation.html", ")", ".", "*", "*" ], [ "\ud83d\ude80 install deep lake deep lake installed using pip : `` ` sh pip3 install deeplake `` ` * * default , deep lake install dependency audio , video , google-cloud , feature .", "detail installation option [ available ] ( http : //docs.deeplake.ai/en/latest/installation.html ) .", "* *" ] ], "level of complexity": -1 }, { "url": "https://github.com/activeloopai/deeplake", "readme_url": "https://raw.githubusercontent.com/activeloopai/deeplake/main/README.md", "topic": [ "ai", "computer-vision", "cv", "data-science", "data-version-control", "datalake", "datasets", "deep-learning", "image-processing", "langchain", "large-language-models", "llm", "machine-learning", "ml", "mlops", "python", "pytorch", "tensorflow", "vector-database", "vector-search" ], "text": "- [Vector Store Getting Started Guide](https://docs.activeloop.ai/getting-started/vector-store)\n", "sentence": [ [ "-", "[", "vector", "store", "getting", "started", "guide", "]", "(", "http", ":", "//docs.activeloop.ai/getting-started/vector-store", ")" ], [ "- [ vector store getting started guide ] ( http : //docs.activeloop.ai/getting-started/vector-store )" ] ], "token": [ [ "-", "[", "vector", "store", "getting", "started", "guide", "]", "(", "http", ":", "//docs.activeloop.ai/getting-started/vector-store", ")" ], [ "- [ vector store getting started guide ] ( http : //docs.activeloop.ai/getting-started/vector-store )" ] ], "level of complexity": -1 }, { "url": "https://github.com/activeloopai/deeplake", "readme_url": "https://raw.githubusercontent.com/activeloopai/deeplake/main/README.md", "topic": [ "ai", "computer-vision", "cv", "data-science", "data-version-control", "datalake", "datasets", "deep-learning", "image-processing", "langchain", "large-language-models", "llm", "machine-learning", "ml", "mlops", "python", "pytorch", "tensorflow", "vector-database", "vector-search" ], "text": "- [Deep Learning Getting Started Guide](https://docs.activeloop.ai/getting-started/deep-learning)\n", "sentence": [ [ "-", "[", "deep", "learning", "getting", "started", "guide", "]", "(", "http", ":", "//docs.activeloop.ai/getting-started/deep-learning", ")" ], [ "- [ deep learning getting started guide ] ( http : //docs.activeloop.ai/getting-started/deep-learning )" ] ], "token": [ [ "-", "[", "deep", "learning", "getting", "started", "guide", "]", "(", "http", ":", "//docs.activeloop.ai/getting-started/deep-learning", ")" ], [ "- [ deep learning getting started guide ] ( http : //docs.activeloop.ai/getting-started/deep-learning )" ] ], "level of complexity": -1 }, { "url": "https://github.com/activeloopai/deeplake", "readme_url": "https://raw.githubusercontent.com/activeloopai/deeplake/main/README.md", "topic": [ "ai", "computer-vision", "cv", "data-science", "data-version-control", "datalake", "datasets", "deep-learning", "image-processing", "langchain", "large-language-models", "llm", "machine-learning", "ml", "mlops", "python", "pytorch", "tensorflow", "vector-database", "vector-search" ], "text": "\ud83d\udcda Documentation\n\nGetting started guides, examples, tutorials, API reference, and other useful information can be found on our [documentation page](http://docs.activeloop.ai/?utm_source=github&utm_medium=repo&utm_campaign=readme).\n\n", "sentence": [ [ "\ud83d\udcda", "documentation", "getting", "started", "guide", ",", "example", ",", "tutorial", ",", "api", "reference", ",", "useful", "information", "found", "[", "documentation", "page", "]", "(", "http", ":", "//docs.activeloop.ai/", "?", "utm_source=github", "&", "utm_medium=repo", "&", "utm_campaign=readme", ")", "." ], [ "\ud83d\udcda documentation getting started guide , example , tutorial , api reference , useful information found [ documentation page ] ( http : //docs.activeloop.ai/ ? utm_source=github & utm_medium=repo & utm_campaign=readme ) ." ] ], "token": [ [ "\ud83d\udcda", "documentation", "getting", "started", "guide", ",", "example", ",", "tutorial", ",", "api", "reference", ",", "useful", "information", "found", "[", "documentation", "page", "]", "(", "http", ":", "//docs.activeloop.ai/", "?", "utm_source=github", "&", "utm_medium=repo", "&", "utm_campaign=readme", ")", "." ], [ "\ud83d\udcda documentation getting started guide , example , tutorial , api reference , useful information found [ documentation page ] ( http : //docs.activeloop.ai/ ? utm_source=github & utm_medium=repo & utm_campaign=readme ) ." ] ], "level of complexity": -1 }, { "url": "https://github.com/activeloopai/deeplake", "readme_url": "https://raw.githubusercontent.com/activeloopai/deeplake/main/README.md", "topic": [ "ai", "computer-vision", "cv", "data-science", "data-version-control", "datalake", "datasets", "deep-learning", "image-processing", "langchain", "large-language-models", "llm", "machine-learning", "ml", "mlops", "python", "pytorch", "tensorflow", "vector-database", "vector-search" ], "text": "Disclaimers\n\n
\n Dataset Licenses\n \nDeep Lake users may have access to a variety of publicly available datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have a license to use the datasets. It is your responsibility to determine whether you have permission to use the datasets under their license.\n\nIf you're a dataset owner and do not want your dataset to be included in this library, please get in touch through a [GitHub issue](https://github.com/activeloopai/deeplake/issues/new). Thank you for your contribution to the ML community!\n\n
\n\n
\n Usage Tracking\n\nBy default, we collect usage data using Bugout (here's the [code](https://github.com/activeloopai/deeplake/blob/853456a314b4fb5623c936c825601097b0685119/deeplake/__init__.py#L24) that does it). It does not collect user data other than anonymized IP address data, and it only logs the Deep Lake library's own actions. This helps our team understand how the tool is used and how to build features that matter to you! After you register with Activeloop, data is no longer anonymous. You can always opt-out of reporting by setting an environmental variable ```BUGGER_OFF``` to ```True```:\n\n
\n\n", "sentence": [ [ "disclaimer", "<", "detail", ">", "<", "summary", ">", "<", "b", ">", "dataset", "license", "<", "/b", ">", "<", "/summary", ">", "deep", "lake", "user", "may", "access", "variety", "publicly", "available", "datasets", ".", "host", "distribute", "datasets", ",", "vouch", "quality", "fairness", ",", "claim", "license", "use", "datasets", ".", "responsibility", "determine", "whether", "permission", "use", "datasets", "license", ".", "'re", "dataset", "owner", "want", "dataset", "included", "library", ",", "please", "get", "touch", "[", "github", "issue", "]", "(", "http", ":", "//github.com/activeloopai/deeplake/issues/new", ")", ".", "thank", "contribution", "ml", "community", "!", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "<", "b", ">", "usage", "tracking", "<", "/b", ">", "<", "/summary", ">", "default", ",", "collect", "usage", "data", "using", "bugout", "(", "'s", "[", "code", "]", "(", "http", ":", "//github.com/activeloopai/deeplake/blob/853456a314b4fb5623c936c825601097b0685119/deeplake/__init__.py", "#", "l24", ")", ")", ".", "collect", "user", "data", "anonymized", "ip", "address", "data", ",", "log", "deep", "lake", "library", "'s", "action", ".", "help", "team", "understand", "tool", "used", "build", "feature", "matter", "!", "register", "activeloop", ",", "data", "longer", "anonymous", ".", "always", "opt-out", "reporting", "setting", "environmental", "variable", "``", "`", "bugger_off", "``", "`", "``", "`", "true", "``", "`", ":", "<", "/details", ">" ], [ "disclaimer < detail > < summary > < b > dataset license < /b > < /summary > deep lake user may access variety publicly available datasets .", "host distribute datasets , vouch quality fairness , claim license use datasets .", "responsibility determine whether permission use datasets license .", "'re dataset owner want dataset included library , please get touch [ github issue ] ( http : //github.com/activeloopai/deeplake/issues/new ) .", "thank contribution ml community !", "< /details > < detail > < summary > < b > usage tracking < /b > < /summary > default , collect usage data using bugout ( 's [ code ] ( http : //github.com/activeloopai/deeplake/blob/853456a314b4fb5623c936c825601097b0685119/deeplake/__init__.py # l24 ) ) .", "collect user data anonymized ip address data , log deep lake library 's action .", "help team understand tool used build feature matter !", "register activeloop , data longer anonymous .", "always opt-out reporting setting environmental variable `` ` bugger_off `` ` `` ` true `` ` : < /details >" ] ], "token": [ [ "disclaimer", "<", "detail", ">", "<", "summary", ">", "<", "b", ">", "dataset", "license", "<", "/b", ">", "<", "/summary", ">", "deep", "lake", "user", "may", "access", "variety", "publicly", "available", "datasets", ".", "host", "distribute", "datasets", ",", "vouch", "quality", "fairness", ",", "claim", "license", "use", "datasets", ".", "responsibility", "determine", "whether", "permission", "use", "datasets", "license", ".", "'re", "dataset", "owner", "want", "dataset", "included", "library", ",", "please", "get", "touch", "[", "github", "issue", "]", "(", "http", ":", "//github.com/activeloopai/deeplake/issues/new", ")", ".", "thank", "contribution", "ml", "community", "!", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "<", "b", ">", "usage", "tracking", "<", "/b", ">", "<", "/summary", ">", "default", ",", "collect", "usage", "data", "using", "bugout", "(", "'s", "[", "code", "]", "(", "http", ":", "//github.com/activeloopai/deeplake/blob/853456a314b4fb5623c936c825601097b0685119/deeplake/__init__.py", "#", "l24", ")", ")", ".", "collect", "user", "data", "anonymized", "ip", "address", "data", ",", "log", "deep", "lake", "library", "'s", "action", ".", "help", "team", "understand", "tool", "used", "build", "feature", "matter", "!", "register", "activeloop", ",", "data", "longer", "anonymous", ".", "always", "opt-out", "reporting", "setting", "environmental", "variable", "``", "`", "bugger_off", "``", "`", "``", "`", "true", "``", "`", ":", "<", "/details", ">" ], [ "disclaimer < detail > < summary > < b > dataset license < /b > < /summary > deep lake user may access variety publicly available datasets .", "host distribute datasets , vouch quality fairness , claim license use datasets .", "responsibility determine whether permission use datasets license .", "'re dataset owner want dataset included library , please get touch [ github issue ] ( http : //github.com/activeloopai/deeplake/issues/new ) .", "thank contribution ml community !", "< /details > < detail > < summary > < b > usage tracking < /b > < /summary > default , collect usage data using bugout ( 's [ code ] ( http : //github.com/activeloopai/deeplake/blob/853456a314b4fb5623c936c825601097b0685119/deeplake/__init__.py # l24 ) ) .", "collect user data anonymized ip address data , log deep lake library 's action .", "help team understand tool used build feature matter !", "register activeloop , data longer anonymous .", "always opt-out reporting setting environmental variable `` ` bugger_off `` ` `` ` true `` ` : < /details >" ] ], "level of complexity": -1 }, { "url": "https://github.com/microsoft/TypeChat", "readme_url": "https://raw.githubusercontent.com/microsoft/TypeChat/main/README.md", "topic": [ "ai", "llm", "natural-language", "types" ], "text": "Getting Started\n\nInstall TypeChat:\n\n```\nnpm install typechat\n```\n\nYou can also build TypeChat from source:\n\n```\ngit clone https://github.com/microsoft/TypeChat\ncd TypeChat\nnpm run build\n```\n\nTo see TypeChat in action, we recommend exploring the [TypeChat example projects](./examples). You can try them on your local machine or in a GitHub Codespace.\n\nTo learn more about TypeChat, visit the [documentation](https://microsoft.github.io/TypeChat) which includes more information on TypeChat and how to get started.\n\n", "sentence": [ [ "getting", "started", "install", "typechat", ":", "``", "`", "npm", "install", "typechat", "``", "`", "also", "build", "typechat", "source", ":", "``", "`", "git", "clone", "http", ":", "//github.com/microsoft/typechat", "cd", "typechat", "npm", "run", "build", "``", "`", "see", "typechat", "action", ",", "recommend", "exploring", "[", "typechat", "example", "project", "]", "(", "./examples", ")", ".", "try", "local", "machine", "github", "codespace", ".", "learn", "typechat", ",", "visit", "[", "documentation", "]", "(", "http", ":", "//microsoft.github.io/typechat", ")", "includes", "information", "typechat", "get", "started", "." ], [ "getting started install typechat : `` ` npm install typechat `` ` also build typechat source : `` ` git clone http : //github.com/microsoft/typechat cd typechat npm run build `` ` see typechat action , recommend exploring [ typechat example project ] ( ./examples ) .", "try local machine github codespace .", "learn typechat , visit [ documentation ] ( http : //microsoft.github.io/typechat ) includes information typechat get started ." ] ], "token": [ [ "getting", "started", "install", "typechat", ":", "``", "`", "npm", "install", "typechat", "``", "`", "also", "build", "typechat", "source", ":", "``", "`", "git", "clone", "http", ":", "//github.com/microsoft/typechat", "cd", "typechat", "npm", "run", "build", "``", "`", "see", "typechat", "action", ",", "recommend", "exploring", "[", "typechat", "example", "project", "]", "(", "./examples", ")", ".", "try", "local", "machine", "github", "codespace", ".", "learn", "typechat", ",", "visit", "[", "documentation", "]", "(", "http", ":", "//microsoft.github.io/typechat", ")", "includes", "information", "typechat", "get", "started", "." ], [ "getting started install typechat : `` ` npm install typechat `` ` also build typechat source : `` ` git clone http : //github.com/microsoft/typechat cd typechat npm run build `` ` see typechat action , recommend exploring [ typechat example project ] ( ./examples ) .", "try local machine github codespace .", "learn typechat , visit [ documentation ] ( http : //microsoft.github.io/typechat ) includes information typechat get started ." ] ], "level of complexity": 2 }, { "url": "https://github.com/mistralai/mistral-src", "readme_url": "https://raw.githubusercontent.com/mistralai/mistral-src/main/README.md", "topic": [ "llm", "llm-inference", "mistralai" ], "text": "Installation\n\n```\npip install -r requirements.txt\n```\n\n", "sentence": [ [ "installation", "``", "`", "pip", "install", "-r", "requirements.txt", "``", "`" ], [ "installation `` ` pip install -r requirements.txt `` `" ] ], "token": [ [ "installation", "``", "`", "pip", "install", "-r", "requirements.txt", "``", "`" ], [ "installation `` ` pip install -r requirements.txt `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Llama 2 Fine-tuning / Inference Recipes, Examples, Benchmarks and Demo Apps\n\n**[Update Feb. 5, 2024] We added support for Code Llama 70B instruct in our example [inference script](./examples/code_llama/code_instruct_example.py). For details on formatting the prompt for Code Llama 70B instruct model please refer to [this document](./docs/inference.md)**.\n\n**[Update Dec. 28, 2023] We added support for Llama Guard as a safety checker for our example inference script and also with standalone inference with an example script and prompt formatting. More details [here](./examples/llama_guard/README.md). For details on formatting data for fine tuning Llama Guard, we provide a script and sample usage [here](./src/llama_recipes/data/llama_guard/README.md).**\n\n**[Update Dec 14, 2023] We recently released a series of Llama 2 demo apps [here](./demo_apps). These apps show how to run Llama (locally, in the cloud, or on-prem), how to use Azure Llama 2 API (Model-as-a-Service), how to ask Llama questions in general or about custom data (PDF, DB, or live), how to integrate Llama with WhatsApp and Messenger, and how to implement an end-to-end chatbot with RAG (Retrieval Augmented Generation).**\n\nThe 'llama-recipes' repository is a companion to the [Llama 2 model](https://github.com/facebookresearch/llama). The goal of this repository is to provide examples to quickly get started with fine-tuning for domain adaptation and how to run inference for the fine-tuned models. For ease of use, the examples use Hugging Face converted versions of the models. See steps for conversion of the model [here](#model-conversion-to-hugging-face).\n\nIn addition, we also provide a number of demo apps, to showcase the Llama 2 usage along with other ecosystem solutions to run Llama 2 locally, in the cloud, and on-prem.\n\nLlama 2 is a new technology that carries potential risks with use. Testing conducted to date has not \u2014 and could not \u2014 cover all scenarios. In order to help developers address these risks, we have created the [Responsible Use Guide](https://github.com/facebookresearch/llama/blob/main/Responsible-Use-Guide.pdf). More details can be found in our research paper as well. For downloading the models, follow the instructions on [Llama 2 repo](https://github.com/facebookresearch/llama).\n\n\n", "sentence": [ [ "llama", "2", "fine-tuning", "/", "inference", "recipe", ",", "example", ",", "benchmark", "demo", "apps", "*", "*", "[", "update", "feb.", "5", ",", "2024", "]", "added", "support", "code", "llama", "70b", "instruct", "example", "[", "inference", "script", "]", "(", "./examples/code_llama/code_instruct_example.py", ")", ".", "detail", "formatting", "prompt", "code", "llama", "70b", "instruct", "model", "please", "refer", "[", "document", "]", "(", "./docs/inference.md", ")", "*", "*", ".", "*", "*", "[", "update", "dec.", "28", ",", "2023", "]", "added", "support", "llama", "guard", "safety", "checker", "example", "inference", "script", "also", "standalone", "inference", "example", "script", "prompt", "formatting", ".", "detail", "[", "]", "(", "./examples/llama_guard/readme.md", ")", ".", "detail", "formatting", "data", "fine", "tuning", "llama", "guard", ",", "provide", "script", "sample", "usage", "[", "]", "(", "./src/llama_recipes/data/llama_guard/readme.md", ")", ".", "*", "*", "*", "*", "[", "update", "dec", "14", ",", "2023", "]", "recently", "released", "series", "llama", "2", "demo", "apps", "[", "]", "(", "./demo_apps", ")", ".", "apps", "show", "run", "llama", "(", "locally", ",", "cloud", ",", "on-prem", ")", ",", "use", "azure", "llama", "2", "api", "(", "model-as-a-service", ")", ",", "ask", "llama", "question", "general", "custom", "data", "(", "pdf", ",", "db", ",", "live", ")", ",", "integrate", "llama", "whatsapp", "messenger", ",", "implement", "end-to-end", "chatbot", "rag", "(", "retrieval", "augmented", "generation", ")", ".", "*", "*", "'llama-recipes", "'", "repository", "companion", "[", "llama", "2", "model", "]", "(", "http", ":", "//github.com/facebookresearch/llama", ")", ".", "goal", "repository", "provide", "example", "quickly", "get", "started", "fine-tuning", "domain", "adaptation", "run", "inference", "fine-tuned", "model", ".", "ease", "use", ",", "example", "use", "hugging", "face", "converted", "version", "model", ".", "see", "step", "conversion", "model", "[", "]", "(", "#", "model-conversion-to-hugging-face", ")", ".", "addition", ",", "also", "provide", "number", "demo", "apps", ",", "showcase", "llama", "2", "usage", "along", "ecosystem", "solution", "run", "llama", "2", "locally", ",", "cloud", ",", "on-prem", ".", "llama", "2", "new", "technology", "carry", "potential", "risk", "use", ".", "testing", "conducted", "date", "\u2014", "could", "\u2014", "cover", "scenario", ".", "order", "help", "developer", "address", "risk", ",", "created", "[", "responsible", "use", "guide", "]", "(", "http", ":", "//github.com/facebookresearch/llama/blob/main/responsible-use-guide.pdf", ")", ".", "detail", "found", "research", "paper", "well", ".", "downloading", "model", ",", "follow", "instruction", "[", "llama", "2", "repo", "]", "(", "http", ":", "//github.com/facebookresearch/llama", ")", "." ], [ "llama 2 fine-tuning / inference recipe , example , benchmark demo apps * * [ update feb. 5 , 2024 ] added support code llama 70b instruct example [ inference script ] ( ./examples/code_llama/code_instruct_example.py ) .", "detail formatting prompt code llama 70b instruct model please refer [ document ] ( ./docs/inference.md ) * * .", "* * [ update dec. 28 , 2023 ] added support llama guard safety checker example inference script also standalone inference example script prompt formatting .", "detail [ ] ( ./examples/llama_guard/readme.md ) .", "detail formatting data fine tuning llama guard , provide script sample usage [ ] ( ./src/llama_recipes/data/llama_guard/readme.md ) .", "* * * * [ update dec 14 , 2023 ] recently released series llama 2 demo apps [ ] ( ./demo_apps ) .", "apps show run llama ( locally , cloud , on-prem ) , use azure llama 2 api ( model-as-a-service ) , ask llama question general custom data ( pdf , db , live ) , integrate llama whatsapp messenger , implement end-to-end chatbot rag ( retrieval augmented generation ) .", "* * 'llama-recipes ' repository companion [ llama 2 model ] ( http : //github.com/facebookresearch/llama ) .", "goal repository provide example quickly get started fine-tuning domain adaptation run inference fine-tuned model .", "ease use , example use hugging face converted version model .", "see step conversion model [ ] ( # model-conversion-to-hugging-face ) .", "addition , also provide number demo apps , showcase llama 2 usage along ecosystem solution run llama 2 locally , cloud , on-prem .", "llama 2 new technology carry potential risk use .", "testing conducted date \u2014 could \u2014 cover scenario .", "order help developer address risk , created [ responsible use guide ] ( http : //github.com/facebookresearch/llama/blob/main/responsible-use-guide.pdf ) .", "detail found research paper well .", "downloading model , follow instruction [ llama 2 repo ] ( http : //github.com/facebookresearch/llama ) ." ] ], "token": [ [ "llama", "2", "fine-tuning", "/", "inference", "recipe", ",", "example", ",", "benchmark", "demo", "apps", "*", "*", "[", "update", "feb.", "5", ",", "2024", "]", "added", "support", "code", "llama", "70b", "instruct", "example", "[", "inference", "script", "]", "(", "./examples/code_llama/code_instruct_example.py", ")", ".", "detail", "formatting", "prompt", "code", "llama", "70b", "instruct", "model", "please", "refer", "[", "document", "]", "(", "./docs/inference.md", ")", "*", "*", ".", "*", "*", "[", "update", "dec.", "28", ",", "2023", "]", "added", "support", "llama", "guard", "safety", "checker", "example", "inference", "script", "also", "standalone", "inference", "example", "script", "prompt", "formatting", ".", "detail", "[", "]", "(", "./examples/llama_guard/readme.md", ")", ".", "detail", "formatting", "data", "fine", "tuning", "llama", "guard", ",", "provide", "script", "sample", "usage", "[", "]", "(", "./src/llama_recipes/data/llama_guard/readme.md", ")", ".", "*", "*", "*", "*", "[", "update", "dec", "14", ",", "2023", "]", "recently", "released", "series", "llama", "2", "demo", "apps", "[", "]", "(", "./demo_apps", ")", ".", "apps", "show", "run", "llama", "(", "locally", ",", "cloud", ",", "on-prem", ")", ",", "use", "azure", "llama", "2", "api", "(", "model-as-a-service", ")", ",", "ask", "llama", "question", "general", "custom", "data", "(", "pdf", ",", "db", ",", "live", ")", ",", "integrate", "llama", "whatsapp", "messenger", ",", "implement", "end-to-end", "chatbot", "rag", "(", "retrieval", "augmented", "generation", ")", ".", "*", "*", "'llama-recipes", "'", "repository", "companion", "[", "llama", "2", "model", "]", "(", "http", ":", "//github.com/facebookresearch/llama", ")", ".", "goal", "repository", "provide", "example", "quickly", "get", "started", "fine-tuning", "domain", "adaptation", "run", "inference", "fine-tuned", "model", ".", "ease", "use", ",", "example", "use", "hugging", "face", "converted", "version", "model", ".", "see", "step", "conversion", "model", "[", "]", "(", "#", "model-conversion-to-hugging-face", ")", ".", "addition", ",", "also", "provide", "number", "demo", "apps", ",", "showcase", "llama", "2", "usage", "along", "ecosystem", "solution", "run", "llama", "2", "locally", ",", "cloud", ",", "on-prem", ".", "llama", "2", "new", "technology", "carry", "potential", "risk", "use", ".", "testing", "conducted", "date", "\u2014", "could", "\u2014", "cover", "scenario", ".", "order", "help", "developer", "address", "risk", ",", "created", "[", "responsible", "use", "guide", "]", "(", "http", ":", "//github.com/facebookresearch/llama/blob/main/responsible-use-guide.pdf", ")", ".", "detail", "found", "research", "paper", "well", ".", "downloading", "model", ",", "follow", "instruction", "[", "llama", "2", "repo", "]", "(", "http", ":", "//github.com/facebookresearch/llama", ")", "." ], [ "llama 2 fine-tuning / inference recipe , example , benchmark demo apps * * [ update feb. 5 , 2024 ] added support code llama 70b instruct example [ inference script ] ( ./examples/code_llama/code_instruct_example.py ) .", "detail formatting prompt code llama 70b instruct model please refer [ document ] ( ./docs/inference.md ) * * .", "* * [ update dec. 28 , 2023 ] added support llama guard safety checker example inference script also standalone inference example script prompt formatting .", "detail [ ] ( ./examples/llama_guard/readme.md ) .", "detail formatting data fine tuning llama guard , provide script sample usage [ ] ( ./src/llama_recipes/data/llama_guard/readme.md ) .", "* * * * [ update dec 14 , 2023 ] recently released series llama 2 demo apps [ ] ( ./demo_apps ) .", "apps show run llama ( locally , cloud , on-prem ) , use azure llama 2 api ( model-as-a-service ) , ask llama question general custom data ( pdf , db , live ) , integrate llama whatsapp messenger , implement end-to-end chatbot rag ( retrieval augmented generation ) .", "* * 'llama-recipes ' repository companion [ llama 2 model ] ( http : //github.com/facebookresearch/llama ) .", "goal repository provide example quickly get started fine-tuning domain adaptation run inference fine-tuned model .", "ease use , example use hugging face converted version model .", "see step conversion model [ ] ( # model-conversion-to-hugging-face ) .", "addition , also provide number demo apps , showcase llama 2 usage along ecosystem solution run llama 2 locally , cloud , on-prem .", "llama 2 new technology carry potential risk use .", "testing conducted date \u2014 could \u2014 cover scenario .", "order help developer address risk , created [ responsible use guide ] ( http : //github.com/facebookresearch/llama/blob/main/responsible-use-guide.pdf ) .", "detail found research paper well .", "downloading model , follow instruction [ llama 2 repo ] ( http : //github.com/facebookresearch/llama ) ." ] ], "level of complexity": -1 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Table of Contents\n1. [Quick start](#quick-start)\n2. [Model Conversion](#model-conversion-to-hugging-face)\n3. [Fine-tuning](#fine-tuning)\n - [Single GPU](#single-gpu)\n - [Multi GPU One Node](#multiple-gpus-one-node)\n - [Multi GPU Multi Node](#multi-gpu-multi-node)\n4. [Inference](./docs/inference.md)\n5. [Demo Apps](#demo-apps)\n6. [Repository Organization](#repository-organization)\n7. [License and Acceptable Use Policy](#license)\n\n", "sentence": [ [ "table", "content", "1", ".", "[", "quick", "start", "]", "(", "#", "quick-start", ")", "2", ".", "[", "model", "conversion", "]", "(", "#", "model-conversion-to-hugging-face", ")", "3", ".", "[", "fine-tuning", "]", "(", "#", "fine-tuning", ")", "-", "[", "single", "gpu", "]", "(", "#", "single-gpu", ")", "-", "[", "multi", "gpu", "one", "node", "]", "(", "#", "multiple-gpus-one-node", ")", "-", "[", "multi", "gpu", "multi", "node", "]", "(", "#", "multi-gpu-multi-node", ")", "4", ".", "[", "inference", "]", "(", "./docs/inference.md", ")", "5", ".", "[", "demo", "apps", "]", "(", "#", "demo-apps", ")", "6", ".", "[", "repository", "organization", "]", "(", "#", "repository-organization", ")", "7", ".", "[", "license", "acceptable", "use", "policy", "]", "(", "#", "license", ")" ], [ "table content 1 .", "[ quick start ] ( # quick-start ) 2 .", "[ model conversion ] ( # model-conversion-to-hugging-face ) 3 .", "[ fine-tuning ] ( # fine-tuning ) - [ single gpu ] ( # single-gpu ) - [ multi gpu one node ] ( # multiple-gpus-one-node ) - [ multi gpu multi node ] ( # multi-gpu-multi-node ) 4 .", "[ inference ] ( ./docs/inference.md ) 5 .", "[ demo apps ] ( # demo-apps ) 6 .", "[ repository organization ] ( # repository-organization ) 7 .", "[ license acceptable use policy ] ( # license )" ] ], "token": [ [ "table", "content", "1", ".", "[", "quick", "start", "]", "(", "#", "quick-start", ")", "2", ".", "[", "model", "conversion", "]", "(", "#", "model-conversion-to-hugging-face", ")", "3", ".", "[", "fine-tuning", "]", "(", "#", "fine-tuning", ")", "-", "[", "single", "gpu", "]", "(", "#", "single-gpu", ")", "-", "[", "multi", "gpu", "one", "node", "]", "(", "#", "multiple-gpus-one-node", ")", "-", "[", "multi", "gpu", "multi", "node", "]", "(", "#", "multi-gpu-multi-node", ")", "4", ".", "[", "inference", "]", "(", "./docs/inference.md", ")", "5", ".", "[", "demo", "apps", "]", "(", "#", "demo-apps", ")", "6", ".", "[", "repository", "organization", "]", "(", "#", "repository-organization", ")", "7", ".", "[", "license", "acceptable", "use", "policy", "]", "(", "#", "license", ")" ], [ "table content 1 .", "[ quick start ] ( # quick-start ) 2 .", "[ model conversion ] ( # model-conversion-to-hugging-face ) 3 .", "[ fine-tuning ] ( # fine-tuning ) - [ single gpu ] ( # single-gpu ) - [ multi gpu one node ] ( # multiple-gpus-one-node ) - [ multi gpu multi node ] ( # multi-gpu-multi-node ) 4 .", "[ inference ] ( ./docs/inference.md ) 5 .", "[ demo apps ] ( # demo-apps ) 6 .", "[ repository organization ] ( # repository-organization ) 7 .", "[ license acceptable use policy ] ( # license )" ] ], "level of complexity": -1 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Quick Start\n\n[Llama 2 Jupyter Notebook](./examples/quickstart.ipynb): This jupyter notebook steps you through how to finetune a Llama 2 model on the text summarization task using the [samsum](https://huggingface.co/datasets/samsum). The notebook uses parameter efficient finetuning (PEFT) and int8 quantization to finetune a 7B on a single GPU like an A10 with 24GB gpu memory.\n\n", "sentence": [ [ "quick", "start", "[", "llama", "2", "jupyter", "notebook", "]", "(", "./examples/quickstart.ipynb", ")", ":", "jupyter", "notebook", "step", "finetune", "llama", "2", "model", "text", "summarization", "task", "using", "[", "samsum", "]", "(", "http", ":", "//huggingface.co/datasets/samsum", ")", ".", "notebook", "us", "parameter", "efficient", "finetuning", "(", "peft", ")", "int8", "quantization", "finetune", "7b", "single", "gpu", "like", "a10", "24gb", "gpu", "memory", "." ], [ "quick start [ llama 2 jupyter notebook ] ( ./examples/quickstart.ipynb ) : jupyter notebook step finetune llama 2 model text summarization task using [ samsum ] ( http : //huggingface.co/datasets/samsum ) .", "notebook us parameter efficient finetuning ( peft ) int8 quantization finetune 7b single gpu like a10 24gb gpu memory ." ] ], "token": [ [ "quick", "start", "[", "llama", "2", "jupyter", "notebook", "]", "(", "./examples/quickstart.ipynb", ")", ":", "jupyter", "notebook", "step", "finetune", "llama", "2", "model", "text", "summarization", "task", "using", "[", "samsum", "]", "(", "http", ":", "//huggingface.co/datasets/samsum", ")", ".", "notebook", "us", "parameter", "efficient", "finetuning", "(", "peft", ")", "int8", "quantization", "finetune", "7b", "single", "gpu", "like", "a10", "24gb", "gpu", "memory", "." ], [ "quick start [ llama 2 jupyter notebook ] ( ./examples/quickstart.ipynb ) : jupyter notebook step finetune llama 2 model text summarization task using [ samsum ] ( http : //huggingface.co/datasets/samsum ) .", "notebook us parameter efficient finetuning ( peft ) int8 quantization finetune 7b single gpu like a10 24gb gpu memory ." ] ], "level of complexity": -1 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Installation\nLlama-recipes provides a pip distribution for easy install and usage in other projects. Alternatively, it can be installed from source.\n\n", "sentence": [ [ "installation", "llama-recipes", "provides", "pip", "distribution", "easy", "install", "usage", "project", ".", "alternatively", ",", "installed", "source", "." ], [ "installation llama-recipes provides pip distribution easy install usage project .", "alternatively , installed source ." ] ], "token": [ [ "installation", "llama-recipes", "provides", "pip", "distribution", "easy", "install", "usage", "project", ".", "alternatively", ",", "installed", "source", "." ], [ "installation llama-recipes provides pip distribution easy install usage project .", "alternatively , installed source ." ] ], "level of complexity": 2 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Install with pip\n```\npip install --extra-index-url https://download.pytorch.org/whl/test/cu118 llama-recipes\n```\n\n", "sentence": [ [ "install", "pip", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "``", "`" ], [ "install pip `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes `` `" ] ], "token": [ [ "install", "pip", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "``", "`" ], [ "install pip `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Install with optional dependencies\nLlama-recipes offers the installation of optional packages. There are three optional dependency groups.\nTo run the unit tests we can install the required dependencies with:\n```\npip install --extra-index-url https://download.pytorch.org/whl/test/cu118 llama-recipes[tests]\n```\nFor the vLLM example we need additional requirements that can be installed with:\n```\npip install --extra-index-url https://download.pytorch.org/whl/test/cu118 llama-recipes[vllm]\n```\nTo use the sensitive topics safety checker install with:\n```\npip install --extra-index-url https://download.pytorch.org/whl/test/cu118 llama-recipes[auditnlg]\n```\nOptional dependencies can also be combines with [option1,option2].\n\n", "sentence": [ [ "install", "optional", "dependency", "llama-recipes", "offer", "installation", "optional", "package", ".", "three", "optional", "dependency", "group", ".", "run", "unit", "test", "install", "required", "dependency", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "test", "]", "``", "`", "vllm", "example", "need", "additional", "requirement", "installed", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "vllm", "]", "``", "`", "use", "sensitive", "topic", "safety", "checker", "install", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "auditnlg", "]", "``", "`", "optional", "dependency", "also", "combine", "[", "option1", ",", "option2", "]", "." ], [ "install optional dependency llama-recipes offer installation optional package .", "three optional dependency group .", "run unit test install required dependency : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ test ] `` ` vllm example need additional requirement installed : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ vllm ] `` ` use sensitive topic safety checker install : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ auditnlg ] `` ` optional dependency also combine [ option1 , option2 ] ." ] ], "token": [ [ "install", "optional", "dependency", "llama-recipes", "offer", "installation", "optional", "package", ".", "three", "optional", "dependency", "group", ".", "run", "unit", "test", "install", "required", "dependency", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "test", "]", "``", "`", "vllm", "example", "need", "additional", "requirement", "installed", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "vllm", "]", "``", "`", "use", "sensitive", "topic", "safety", "checker", "install", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "auditnlg", "]", "``", "`", "optional", "dependency", "also", "combine", "[", "option1", ",", "option2", "]", "." ], [ "install optional dependency llama-recipes offer installation optional package .", "three optional dependency group .", "run unit test install required dependency : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ test ] `` ` vllm example need additional requirement installed : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ vllm ] `` ` use sensitive topic safety checker install : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ auditnlg ] `` ` optional dependency also combine [ option1 , option2 ] ." ] ], "level of complexity": 0 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Install from source\nTo install from source e.g. for development use these commands. We're using hatchling as our build backend which requires an up-to-date pip as well as setuptools package.\n```\ngit clone git@github.com:facebookresearch/llama-recipes.git\ncd llama-recipes\npip install -U pip setuptools\npip install --extra-index-url https://download.pytorch.org/whl/test/cu118 -e .\n```\nFor development and contributing to llama-recipes please install all optional dependencies:\n```\ngit clone git@github.com:facebookresearch/llama-recipes.git\ncd llama-recipes\npip install -U pip setuptools\npip install --extra-index-url https://download.pytorch.org/whl/test/cu118 -e .[tests,auditnlg,vllm]\n```\n\n\u26a0\ufe0f **Note** \u26a0\ufe0f Some features (especially fine-tuning with FSDP + PEFT) currently require PyTorch nightlies to be installed. Please make sure to install the nightlies if you're using these features following [this guide](https://pytorch.org/get-started/locally/).\n\n**Note** All the setting defined in [config files](src/llama_recipes/configs/) can be passed as args through CLI when running the script, there is no need to change from config files directly.\n\n**For more in depth information checkout the following:**\n\n* [Single GPU Fine-tuning](./docs/single_gpu.md)\n* [Multi-GPU Fine-tuning](./docs/multi_gpu.md)\n* [LLM Fine-tuning](./docs/LLM_finetuning.md)\n* [Adding custom datasets](./docs/Dataset.md)\n* [Inference](./docs/inference.md)\n* [Evaluation Harness](./eval/README.md)\n* [FAQs](./docs/FAQ.md)\n\n", "sentence": [ [ "install", "source", "install", "source", "e.g", ".", "development", "use", "command", ".", "'re", "using", "hatchling", "build", "backend", "requires", "up-to-date", "pip", "well", "setuptools", "package", ".", "``", "`", "git", "clone", "git", "@", "github.com", ":", "facebookresearch/llama-recipes.git", "cd", "llama-recipes", "pip", "install", "-u", "pip", "setuptools", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "-e", ".", "``", "`", "development", "contributing", "llama-recipes", "please", "install", "optional", "dependency", ":", "``", "`", "git", "clone", "git", "@", "github.com", ":", "facebookresearch/llama-recipes.git", "cd", "llama-recipes", "pip", "install", "-u", "pip", "setuptools", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "-e", ".", "[", "test", ",", "auditnlg", ",", "vllm", "]", "``", "`", "\u26a0\ufe0f", "*", "*", "note", "*", "*", "\u26a0\ufe0f", "feature", "(", "especially", "fine-tuning", "fsdp", "+", "peft", ")", "currently", "require", "pytorch", "nightlies", "installed", ".", "please", "make", "sure", "install", "nightlies", "'re", "using", "feature", "following", "[", "guide", "]", "(", "http", ":", "//pytorch.org/get-started/locally/", ")", ".", "*", "*", "note", "*", "*", "setting", "defined", "[", "config", "file", "]", "(", "src/llama_recipes/configs/", ")", "passed", "args", "cli", "running", "script", ",", "need", "change", "config", "file", "directly", ".", "*", "*", "depth", "information", "checkout", "following", ":", "*", "*", "*", "[", "single", "gpu", "fine-tuning", "]", "(", "./docs/single_gpu.md", ")", "*", "[", "multi-gpu", "fine-tuning", "]", "(", "./docs/multi_gpu.md", ")", "*", "[", "llm", "fine-tuning", "]", "(", "./docs/llm_finetuning.md", ")", "*", "[", "adding", "custom", "datasets", "]", "(", "./docs/dataset.md", ")", "*", "[", "inference", "]", "(", "./docs/inference.md", ")", "*", "[", "evaluation", "harness", "]", "(", "./eval/readme.md", ")", "*", "[", "faq", "]", "(", "./docs/faq.md", ")" ], [ "install source install source e.g .", "development use command .", "'re using hatchling build backend requires up-to-date pip well setuptools package .", "`` ` git clone git @ github.com : facebookresearch/llama-recipes.git cd llama-recipes pip install -u pip setuptools pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 -e .", "`` ` development contributing llama-recipes please install optional dependency : `` ` git clone git @ github.com : facebookresearch/llama-recipes.git cd llama-recipes pip install -u pip setuptools pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 -e .", "[ test , auditnlg , vllm ] `` ` \u26a0\ufe0f * * note * * \u26a0\ufe0f feature ( especially fine-tuning fsdp + peft ) currently require pytorch nightlies installed .", "please make sure install nightlies 're using feature following [ guide ] ( http : //pytorch.org/get-started/locally/ ) .", "* * note * * setting defined [ config file ] ( src/llama_recipes/configs/ ) passed args cli running script , need change config file directly .", "* * depth information checkout following : * * * [ single gpu fine-tuning ] ( ./docs/single_gpu.md ) * [ multi-gpu fine-tuning ] ( ./docs/multi_gpu.md ) * [ llm fine-tuning ] ( ./docs/llm_finetuning.md ) * [ adding custom datasets ] ( ./docs/dataset.md ) * [ inference ] ( ./docs/inference.md ) * [ evaluation harness ] ( ./eval/readme.md ) * [ faq ] ( ./docs/faq.md )" ] ], "token": [ [ "install", "source", "install", "source", "e.g", ".", "development", "use", "command", ".", "'re", "using", "hatchling", "build", "backend", "requires", "up-to-date", "pip", "well", "setuptools", "package", ".", "``", "`", "git", "clone", "git", "@", "github.com", ":", "facebookresearch/llama-recipes.git", "cd", "llama-recipes", "pip", "install", "-u", "pip", "setuptools", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "-e", ".", "``", "`", "development", "contributing", "llama-recipes", "please", "install", "optional", "dependency", ":", "``", "`", "git", "clone", "git", "@", "github.com", ":", "facebookresearch/llama-recipes.git", "cd", "llama-recipes", "pip", "install", "-u", "pip", "setuptools", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "-e", ".", "[", "test", ",", "auditnlg", ",", "vllm", "]", "``", "`", "\u26a0\ufe0f", "*", "*", "note", "*", "*", "\u26a0\ufe0f", "feature", "(", "especially", "fine-tuning", "fsdp", "+", "peft", ")", "currently", "require", "pytorch", "nightlies", "installed", ".", "please", "make", "sure", "install", "nightlies", "'re", "using", "feature", "following", "[", "guide", "]", "(", "http", ":", "//pytorch.org/get-started/locally/", ")", ".", "*", "*", "note", "*", "*", "setting", "defined", "[", "config", "file", "]", "(", "src/llama_recipes/configs/", ")", "passed", "args", "cli", "running", "script", ",", "need", "change", "config", "file", "directly", ".", "*", "*", "depth", "information", "checkout", "following", ":", "*", "*", "*", "[", "single", "gpu", "fine-tuning", "]", "(", "./docs/single_gpu.md", ")", "*", "[", "multi-gpu", "fine-tuning", "]", "(", "./docs/multi_gpu.md", ")", "*", "[", "llm", "fine-tuning", "]", "(", "./docs/llm_finetuning.md", ")", "*", "[", "adding", "custom", "datasets", "]", "(", "./docs/dataset.md", ")", "*", "[", "inference", "]", "(", "./docs/inference.md", ")", "*", "[", "evaluation", "harness", "]", "(", "./eval/readme.md", ")", "*", "[", "faq", "]", "(", "./docs/faq.md", ")" ], [ "install source install source e.g .", "development use command .", "'re using hatchling build backend requires up-to-date pip well setuptools package .", "`` ` git clone git @ github.com : facebookresearch/llama-recipes.git cd llama-recipes pip install -u pip setuptools pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 -e .", "`` ` development contributing llama-recipes please install optional dependency : `` ` git clone git @ github.com : facebookresearch/llama-recipes.git cd llama-recipes pip install -u pip setuptools pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 -e .", "[ test , auditnlg , vllm ] `` ` \u26a0\ufe0f * * note * * \u26a0\ufe0f feature ( especially fine-tuning fsdp + peft ) currently require pytorch nightlies installed .", "please make sure install nightlies 're using feature following [ guide ] ( http : //pytorch.org/get-started/locally/ ) .", "* * note * * setting defined [ config file ] ( src/llama_recipes/configs/ ) passed args cli running script , need change config file directly .", "* * depth information checkout following : * * * [ single gpu fine-tuning ] ( ./docs/single_gpu.md ) * [ multi-gpu fine-tuning ] ( ./docs/multi_gpu.md ) * [ llm fine-tuning ] ( ./docs/llm_finetuning.md ) * [ adding custom datasets ] ( ./docs/dataset.md ) * [ inference ] ( ./docs/inference.md ) * [ evaluation harness ] ( ./eval/readme.md ) * [ faq ] ( ./docs/faq.md )" ] ], "level of complexity": 0 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Model conversion to Hugging Face\nThe recipes and notebooks in this folder are using the Llama 2 model definition provided by Hugging Face's transformers library.\n\nGiven that the original checkpoint resides under models/7B you can install all requirements and convert the checkpoint with:\n\n```bash\n", "sentence": [ [ "model", "conversion", "hugging", "face", "recipe", "notebook", "folder", "using", "llama", "2", "model", "definition", "provided", "hugging", "face", "'s", "transformer", "library", ".", "given", "original", "checkpoint", "resides", "models/7b", "install", "requirement", "convert", "checkpoint", ":", "``", "`", "bash" ], [ "model conversion hugging face recipe notebook folder using llama 2 model definition provided hugging face 's transformer library .", "given original checkpoint resides models/7b install requirement convert checkpoint : `` ` bash" ] ], "token": [ [ "model", "conversion", "hugging", "face", "recipe", "notebook", "folder", "using", "llama", "2", "model", "definition", "provided", "hugging", "face", "'s", "transformer", "library", ".", "given", "original", "checkpoint", "resides", "models/7b", "install", "requirement", "convert", "checkpoint", ":", "``", "`", "bash" ], [ "model conversion hugging face recipe notebook folder using llama 2 model definition provided hugging face 's transformer library .", "given original checkpoint resides models/7b install requirement convert checkpoint : `` ` bash" ] ], "level of complexity": -1 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Install Hugging Face Transformers from source\npip freeze | grep transformers ", "sentence": [ [ "install", "hugging", "face", "transformer", "source", "pip", "freeze", "|", "grep", "transformer" ], [ "install hugging face transformer source pip freeze | grep transformer" ] ], "token": [ [ "install", "hugging", "face", "transformer", "source", "pip", "freeze", "|", "grep", "transformer" ], [ "install hugging face transformer source pip freeze | grep transformer" ] ], "level of complexity": 2 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "verify it is version 4.31.0 or higher\n\ngit clone git@github.com:huggingface/transformers.git\ncd transformers\npip install protobuf\npython src/transformers/models/llama/convert_llama_weights_to_hf.py \\\n --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path\n```\n\n", "sentence": [ [ "verify", "version", "4.31.0", "higher", "git", "clone", "git", "@", "github.com", ":", "huggingface/transformers.git", "cd", "transformer", "pip", "install", "protobuf", "python", "src/transformers/models/llama/convert_llama_weights_to_hf.py", "\\", "--", "input_dir", "/path/to/downloaded/llama/weights", "--", "model_size", "7b", "--", "output_dir", "/output/path", "``", "`" ], [ "verify version 4.31.0 higher git clone git @ github.com : huggingface/transformers.git cd transformer pip install protobuf python src/transformers/models/llama/convert_llama_weights_to_hf.py \\ -- input_dir /path/to/downloaded/llama/weights -- model_size 7b -- output_dir /output/path `` `" ] ], "token": [ [ "verify", "version", "4.31.0", "higher", "git", "clone", "git", "@", "github.com", ":", "huggingface/transformers.git", "cd", "transformer", "pip", "install", "protobuf", "python", "src/transformers/models/llama/convert_llama_weights_to_hf.py", "\\", "--", "input_dir", "/path/to/downloaded/llama/weights", "--", "model_size", "7b", "--", "output_dir", "/output/path", "``", "`" ], [ "verify version 4.31.0 higher git clone git @ github.com : huggingface/transformers.git cd transformer pip install protobuf python src/transformers/models/llama/convert_llama_weights_to_hf.py \\ -- input_dir /path/to/downloaded/llama/weights -- model_size 7b -- output_dir /output/path `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Single and Multi GPU Finetune\n\nIf you want to dive right into single or multi GPU fine-tuning, run the examples below on a single GPU like A10, T4, V100, A100 etc.\nAll the parameters in the examples and recipes below need to be further tuned to have desired results based on the model, method, data and task at hand.\n\n**Note:**\n* To change the dataset in the commands below pass the `dataset` arg. Current options for integrated dataset are `grammar_dataset`, `alpaca_dataset`and `samsum_dataset`. Additionally, we integrate the OpenAssistant/oasst1 dataset as an [example for a custom dataset](./examples/custom_dataset.py). A description of how to use your own dataset and how to add custom datasets can be found in [Dataset.md](./docs/Dataset.md#using-custom-datasets). For `grammar_dataset`, `alpaca_dataset` please make sure you use the suggested instructions from [here](./docs/single_gpu.md#how-to-run-with-different-datasets) to set them up.\n\n* Default dataset and other LORA config has been set to `samsum_dataset`.\n\n* Make sure to set the right path to the model in the [training config](src/llama_recipes/configs/training.py).\n\n* To save the loss and perplexity metrics for evaluation, enable this by passing `--save_metrics` to the finetuning script. The file can be plotted using the [plot_metrics.py](./examples/plot_metrics.py) script, `python examples/plot_metrics.py --file_path path/to/metrics.json`\n\n", "sentence": [ [ "single", "multi", "gpu", "finetune", "want", "dive", "right", "single", "multi", "gpu", "fine-tuning", ",", "run", "example", "single", "gpu", "like", "a10", ",", "t4", ",", "v100", ",", "a100", "etc", ".", "parameter", "example", "recipe", "need", "tuned", "desired", "result", "based", "model", ",", "method", ",", "data", "task", "hand", ".", "*", "*", "note", ":", "*", "*", "*", "change", "dataset", "command", "pas", "`", "dataset", "`", "arg", ".", "current", "option", "integrated", "dataset", "`", "grammar_dataset", "`", ",", "`", "alpaca_dataset", "`", "`", "samsum_dataset", "`", ".", "additionally", ",", "integrate", "openassistant/oasst1", "dataset", "[", "example", "custom", "dataset", "]", "(", "./examples/custom_dataset.py", ")", ".", "description", "use", "dataset", "add", "custom", "datasets", "found", "[", "dataset.md", "]", "(", "./docs/dataset.md", "#", "using-custom-datasets", ")", ".", "`", "grammar_dataset", "`", ",", "`", "alpaca_dataset", "`", "please", "make", "sure", "use", "suggested", "instruction", "[", "]", "(", "./docs/single_gpu.md", "#", "how-to-run-with-different-datasets", ")", "set", ".", "*", "default", "dataset", "lora", "config", "set", "`", "samsum_dataset", "`", ".", "*", "make", "sure", "set", "right", "path", "model", "[", "training", "config", "]", "(", "src/llama_recipes/configs/training.py", ")", ".", "*", "save", "loss", "perplexity", "metric", "evaluation", ",", "enable", "passing", "`", "--", "save_metrics", "`", "finetuning", "script", ".", "file", "plotted", "using", "[", "plot_metrics.py", "]", "(", "./examples/plot_metrics.py", ")", "script", ",", "`", "python", "examples/plot_metrics.py", "--", "file_path", "path/to/metrics.json", "`" ], [ "single multi gpu finetune want dive right single multi gpu fine-tuning , run example single gpu like a10 , t4 , v100 , a100 etc .", "parameter example recipe need tuned desired result based model , method , data task hand .", "* * note : * * * change dataset command pas ` dataset ` arg .", "current option integrated dataset ` grammar_dataset ` , ` alpaca_dataset ` ` samsum_dataset ` .", "additionally , integrate openassistant/oasst1 dataset [ example custom dataset ] ( ./examples/custom_dataset.py ) .", "description use dataset add custom datasets found [ dataset.md ] ( ./docs/dataset.md # using-custom-datasets ) .", "` grammar_dataset ` , ` alpaca_dataset ` please make sure use suggested instruction [ ] ( ./docs/single_gpu.md # how-to-run-with-different-datasets ) set .", "* default dataset lora config set ` samsum_dataset ` .", "* make sure set right path model [ training config ] ( src/llama_recipes/configs/training.py ) .", "* save loss perplexity metric evaluation , enable passing ` -- save_metrics ` finetuning script .", "file plotted using [ plot_metrics.py ] ( ./examples/plot_metrics.py ) script , ` python examples/plot_metrics.py -- file_path path/to/metrics.json `" ] ], "token": [ [ "single", "multi", "gpu", "finetune", "want", "dive", "right", "single", "multi", "gpu", "fine-tuning", ",", "run", "example", "single", "gpu", "like", "a10", ",", "t4", ",", "v100", ",", "a100", "etc", ".", "parameter", "example", "recipe", "need", "tuned", "desired", "result", "based", "model", ",", "method", ",", "data", "task", "hand", ".", "*", "*", "note", ":", "*", "*", "*", "change", "dataset", "command", "pas", "`", "dataset", "`", "arg", ".", "current", "option", "integrated", "dataset", "`", "grammar_dataset", "`", ",", "`", "alpaca_dataset", "`", "`", "samsum_dataset", "`", ".", "additionally", ",", "integrate", "openassistant/oasst1", "dataset", "[", "example", "custom", "dataset", "]", "(", "./examples/custom_dataset.py", ")", ".", "description", "use", "dataset", "add", "custom", "datasets", "found", "[", "dataset.md", "]", "(", "./docs/dataset.md", "#", "using-custom-datasets", ")", ".", "`", "grammar_dataset", "`", ",", "`", "alpaca_dataset", "`", "please", "make", "sure", "use", "suggested", "instruction", "[", "]", "(", "./docs/single_gpu.md", "#", "how-to-run-with-different-datasets", ")", "set", ".", "*", "default", "dataset", "lora", "config", "set", "`", "samsum_dataset", "`", ".", "*", "make", "sure", "set", "right", "path", "model", "[", "training", "config", "]", "(", "src/llama_recipes/configs/training.py", ")", ".", "*", "save", "loss", "perplexity", "metric", "evaluation", ",", "enable", "passing", "`", "--", "save_metrics", "`", "finetuning", "script", ".", "file", "plotted", "using", "[", "plot_metrics.py", "]", "(", "./examples/plot_metrics.py", ")", "script", ",", "`", "python", "examples/plot_metrics.py", "--", "file_path", "path/to/metrics.json", "`" ], [ "single multi gpu finetune want dive right single multi gpu fine-tuning , run example single gpu like a10 , t4 , v100 , a100 etc .", "parameter example recipe need tuned desired result based model , method , data task hand .", "* * note : * * * change dataset command pas ` dataset ` arg .", "current option integrated dataset ` grammar_dataset ` , ` alpaca_dataset ` ` samsum_dataset ` .", "additionally , integrate openassistant/oasst1 dataset [ example custom dataset ] ( ./examples/custom_dataset.py ) .", "description use dataset add custom datasets found [ dataset.md ] ( ./docs/dataset.md # using-custom-datasets ) .", "` grammar_dataset ` , ` alpaca_dataset ` please make sure use suggested instruction [ ] ( ./docs/single_gpu.md # how-to-run-with-different-datasets ) set .", "* default dataset lora config set ` samsum_dataset ` .", "* make sure set right path model [ training config ] ( src/llama_recipes/configs/training.py ) .", "* save loss perplexity metric evaluation , enable passing ` -- save_metrics ` finetuning script .", "file plotted using [ plot_metrics.py ] ( ./examples/plot_metrics.py ) script , ` python examples/plot_metrics.py -- file_path path/to/metrics.json `" ] ], "level of complexity": -1 }, { "url": "https://github.com/facebookresearch/llama-recipes", "readme_url": "https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md", "topic": [ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ], "text": "Repository Organization\nThis repository is organized in the following way:\n[benchmarks](./benchmarks): Contains a series of benchmark scripts for Llama 2 models inference on various backends.\n\n[configs](src/llama_recipes/configs/): Contains the configuration files for PEFT methods, FSDP, Datasets.\n\n[docs](docs/): Example recipes for single and multi-gpu fine-tuning recipes.\n\n[datasets](src/llama_recipes/datasets/): Contains individual scripts for each dataset to download and process. Note: Use of any of the datasets should be in compliance with the dataset's underlying licenses (including but not limited to non-commercial uses)\n\n[demo_apps](./demo_apps): Contains a series of Llama2-powered apps, from quickstart deployments to how to ask Llama questions about unstructured data, structured data, live data, and video summary.\n\n[examples](./examples/): Contains examples script for finetuning and inference of the Llama 2 model as well as how to use them safely.\n\n[inference](src/llama_recipes/inference/): Includes modules for inference for the fine-tuned models.\n\n[model_checkpointing](src/llama_recipes/model_checkpointing/): Contains FSDP checkpoint handlers.\n\n[policies](src/llama_recipes/policies/): Contains FSDP scripts to provide different policies, such as mixed precision, transformer wrapping policy and activation checkpointing along with any precision optimizer (used for running FSDP with pure bf16 mode).\n\n[utils](src/llama_recipes/utils/): Utility files for:\n\n- `train_utils.py` provides training/eval loop and more train utils.\n\n- `dataset_utils.py` to get preprocessed datasets.\n\n- `config_utils.py` to override the configs received from CLI.\n\n- `fsdp_utils.py` provides FSDP wrapping policy for PEFT methods.\n\n- `memory_utils.py` context manager to track different memory stats in train loop.\n\n", "sentence": [ [ "repository", "organization", "repository", "organized", "following", "way", ":", "[", "benchmark", "]", "(", "./benchmarks", ")", ":", "contains", "series", "benchmark", "script", "llama", "2", "model", "inference", "various", "backends", ".", "[", "configs", "]", "(", "src/llama_recipes/configs/", ")", ":", "contains", "configuration", "file", "peft", "method", ",", "fsdp", ",", "datasets", ".", "[", "doc", "]", "(", "docs/", ")", ":", "example", "recipe", "single", "multi-gpu", "fine-tuning", "recipe", ".", "[", "datasets", "]", "(", "src/llama_recipes/datasets/", ")", ":", "contains", "individual", "script", "dataset", "download", "process", ".", "note", ":", "use", "datasets", "compliance", "dataset", "'s", "underlying", "license", "(", "including", "limited", "non-commercial", "us", ")", "[", "demo_apps", "]", "(", "./demo_apps", ")", ":", "contains", "series", "llama2-powered", "apps", ",", "quickstart", "deployment", "ask", "llama", "question", "unstructured", "data", ",", "structured", "data", ",", "live", "data", ",", "video", "summary", ".", "[", "example", "]", "(", "./examples/", ")", ":", "contains", "example", "script", "finetuning", "inference", "llama", "2", "model", "well", "use", "safely", ".", "[", "inference", "]", "(", "src/llama_recipes/inference/", ")", ":", "includes", "module", "inference", "fine-tuned", "model", ".", "[", "model_checkpointing", "]", "(", "src/llama_recipes/model_checkpointing/", ")", ":", "contains", "fsdp", "checkpoint", "handler", ".", "[", "policy", "]", "(", "src/llama_recipes/policies/", ")", ":", "contains", "fsdp", "script", "provide", "different", "policy", ",", "mixed", "precision", ",", "transformer", "wrapping", "policy", "activation", "checkpointing", "along", "precision", "optimizer", "(", "used", "running", "fsdp", "pure", "bf16", "mode", ")", ".", "[", "utils", "]", "(", "src/llama_recipes/utils/", ")", ":", "utility", "file", ":", "-", "`", "train_utils.py", "`", "provides", "training/eval", "loop", "train", "utils", ".", "-", "`", "dataset_utils.py", "`", "get", "preprocessed", "datasets", ".", "-", "`", "config_utils.py", "`", "override", "configs", "received", "cli", ".", "-", "`", "fsdp_utils.py", "`", "provides", "fsdp", "wrapping", "policy", "peft", "method", ".", "-", "`", "memory_utils.py", "`", "context", "manager", "track", "different", "memory", "stats", "train", "loop", "." ], [ "repository organization repository organized following way : [ benchmark ] ( ./benchmarks ) : contains series benchmark script llama 2 model inference various backends .", "[ configs ] ( src/llama_recipes/configs/ ) : contains configuration file peft method , fsdp , datasets .", "[ doc ] ( docs/ ) : example recipe single multi-gpu fine-tuning recipe .", "[ datasets ] ( src/llama_recipes/datasets/ ) : contains individual script dataset download process .", "note : use datasets compliance dataset 's underlying license ( including limited non-commercial us ) [ demo_apps ] ( ./demo_apps ) : contains series llama2-powered apps , quickstart deployment ask llama question unstructured data , structured data , live data , video summary .", "[ example ] ( ./examples/ ) : contains example script finetuning inference llama 2 model well use safely .", "[ inference ] ( src/llama_recipes/inference/ ) : includes module inference fine-tuned model .", "[ model_checkpointing ] ( src/llama_recipes/model_checkpointing/ ) : contains fsdp checkpoint handler .", "[ policy ] ( src/llama_recipes/policies/ ) : contains fsdp script provide different policy , mixed precision , transformer wrapping policy activation checkpointing along precision optimizer ( used running fsdp pure bf16 mode ) .", "[ utils ] ( src/llama_recipes/utils/ ) : utility file : - ` train_utils.py ` provides training/eval loop train utils .", "- ` dataset_utils.py ` get preprocessed datasets .", "- ` config_utils.py ` override configs received cli .", "- ` fsdp_utils.py ` provides fsdp wrapping policy peft method .", "- ` memory_utils.py ` context manager track different memory stats train loop ." ] ], "token": [ [ "repository", "organization", "repository", "organized", "following", "way", ":", "[", "benchmark", "]", "(", "./benchmarks", ")", ":", "contains", "series", "benchmark", "script", "llama", "2", "model", "inference", "various", "backends", ".", "[", "configs", "]", "(", "src/llama_recipes/configs/", ")", ":", "contains", "configuration", "file", "peft", "method", ",", "fsdp", ",", "datasets", ".", "[", "doc", "]", "(", "docs/", ")", ":", "example", "recipe", "single", "multi-gpu", "fine-tuning", "recipe", ".", "[", "datasets", "]", "(", "src/llama_recipes/datasets/", ")", ":", "contains", "individual", "script", "dataset", "download", "process", ".", "note", ":", "use", "datasets", "compliance", "dataset", "'s", "underlying", "license", "(", "including", "limited", "non-commercial", "us", ")", "[", "demo_apps", "]", "(", "./demo_apps", ")", ":", "contains", "series", "llama2-powered", "apps", ",", "quickstart", "deployment", "ask", "llama", "question", "unstructured", "data", ",", "structured", "data", ",", "live", "data", ",", "video", "summary", ".", "[", "example", "]", "(", "./examples/", ")", ":", "contains", "example", "script", "finetuning", "inference", "llama", "2", "model", "well", "use", "safely", ".", "[", "inference", "]", "(", "src/llama_recipes/inference/", ")", ":", "includes", "module", "inference", "fine-tuned", "model", ".", "[", "model_checkpointing", "]", "(", "src/llama_recipes/model_checkpointing/", ")", ":", "contains", "fsdp", "checkpoint", "handler", ".", "[", "policy", "]", "(", "src/llama_recipes/policies/", ")", ":", "contains", "fsdp", "script", "provide", "different", "policy", ",", "mixed", "precision", ",", "transformer", "wrapping", "policy", "activation", "checkpointing", "along", "precision", "optimizer", "(", "used", "running", "fsdp", "pure", "bf16", "mode", ")", ".", "[", "utils", "]", "(", "src/llama_recipes/utils/", ")", ":", "utility", "file", ":", "-", "`", "train_utils.py", "`", "provides", "training/eval", "loop", "train", "utils", ".", "-", "`", "dataset_utils.py", "`", "get", "preprocessed", "datasets", ".", "-", "`", "config_utils.py", "`", "override", "configs", "received", "cli", ".", "-", "`", "fsdp_utils.py", "`", "provides", "fsdp", "wrapping", "policy", "peft", "method", ".", "-", "`", "memory_utils.py", "`", "context", "manager", "track", "different", "memory", "stats", "train", "loop", "." ], [ "repository organization repository organized following way : [ benchmark ] ( ./benchmarks ) : contains series benchmark script llama 2 model inference various backends .", "[ configs ] ( src/llama_recipes/configs/ ) : contains configuration file peft method , fsdp , datasets .", "[ doc ] ( docs/ ) : example recipe single multi-gpu fine-tuning recipe .", "[ datasets ] ( src/llama_recipes/datasets/ ) : contains individual script dataset download process .", "note : use datasets compliance dataset 's underlying license ( including limited non-commercial us ) [ demo_apps ] ( ./demo_apps ) : contains series llama2-powered apps , quickstart deployment ask llama question unstructured data , structured data , live data , video summary .", "[ example ] ( ./examples/ ) : contains example script finetuning inference llama 2 model well use safely .", "[ inference ] ( src/llama_recipes/inference/ ) : includes module inference fine-tuned model .", "[ model_checkpointing ] ( src/llama_recipes/model_checkpointing/ ) : contains fsdp checkpoint handler .", "[ policy ] ( src/llama_recipes/policies/ ) : contains fsdp script provide different policy , mixed precision , transformer wrapping policy activation checkpointing along precision optimizer ( used running fsdp pure bf16 mode ) .", "[ utils ] ( src/llama_recipes/utils/ ) : utility file : - ` train_utils.py ` provides training/eval loop train utils .", "- ` dataset_utils.py ` get preprocessed datasets .", "- ` config_utils.py ` override configs received cli .", "- ` fsdp_utils.py ` provides fsdp wrapping policy peft method .", "- ` memory_utils.py ` context manager track different memory stats train loop ." ] ], "level of complexity": -1 }, { "url": "https://github.com/microsoft/promptflow", "readme_url": "https://raw.githubusercontent.com/microsoft/promptflow/main/README.md", "topic": [ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ], "text": "Installation\n\nTo get started quickly, you can use a pre-built development environment. **Click the button below** to open the repo in GitHub Codespaces, and then continue the readme!\n\n[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/microsoft/promptflow?quickstart=1)\n\nIf you want to get started in your local environment, first install the packages:\n\nEnsure you have a python environment, `python=3.9` is recommended.\n\n```sh\npip install promptflow promptflow-tools\n```\n\n", "sentence": [ [ "installation", "get", "started", "quickly", ",", "use", "pre-built", "development", "environment", ".", "*", "*", "click", "button", "*", "*", "open", "repo", "github", "codespaces", ",", "continue", "readme", "!", "[", "!", "[", "open", "github", "codespaces", "]", "(", "http", ":", "//github.com/codespaces/badge.svg", ")", "]", "(", "http", ":", "//codespaces.new/microsoft/promptflow", "?", "quickstart=1", ")", "want", "get", "started", "local", "environment", ",", "first", "install", "package", ":", "ensure", "python", "environment", ",", "`", "python=3.9", "`", "recommended", ".", "``", "`", "sh", "pip", "install", "promptflow", "promptflow-tools", "``", "`" ], [ "installation get started quickly , use pre-built development environment .", "* * click button * * open repo github codespaces , continue readme !", "[ !", "[ open github codespaces ] ( http : //github.com/codespaces/badge.svg ) ] ( http : //codespaces.new/microsoft/promptflow ? quickstart=1 ) want get started local environment , first install package : ensure python environment , ` python=3.9 ` recommended .", "`` ` sh pip install promptflow promptflow-tools `` `" ] ], "token": [ [ "installation", "get", "started", "quickly", ",", "use", "pre-built", "development", "environment", ".", "*", "*", "click", "button", "*", "*", "open", "repo", "github", "codespaces", ",", "continue", "readme", "!", "[", "!", "[", "open", "github", "codespaces", "]", "(", "http", ":", "//github.com/codespaces/badge.svg", ")", "]", "(", "http", ":", "//codespaces.new/microsoft/promptflow", "?", "quickstart=1", ")", "want", "get", "started", "local", "environment", ",", "first", "install", "package", ":", "ensure", "python", "environment", ",", "`", "python=3.9", "`", "recommended", ".", "``", "`", "sh", "pip", "install", "promptflow", "promptflow-tools", "``", "`" ], [ "installation get started quickly , use pre-built development environment .", "* * click button * * open repo github codespaces , continue readme !", "[ !", "[ open github codespaces ] ( http : //github.com/codespaces/badge.svg ) ] ( http : //codespaces.new/microsoft/promptflow ? quickstart=1 ) want get started local environment , first install package : ensure python environment , ` python=3.9 ` recommended .", "`` ` sh pip install promptflow promptflow-tools `` `" ] ], "level of complexity": 0 }, { "url": "https://github.com/microsoft/promptflow", "readme_url": "https://raw.githubusercontent.com/microsoft/promptflow/main/README.md", "topic": [ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ], "text": "Quick Start \u26a1\n\n**Create a chatbot with prompt flow**\n\nRun the command to initiate a prompt flow from a chat template, it creates folder named `my_chatbot` and generates required files within it:\n\n```sh\npf flow init --flow ./my_chatbot --type chat\n```\n\n**Setup a connection for your API key**\n\nFor OpenAI key, establish a connection by running the command, using the `openai.yaml` file in the `my_chatbot` folder, which stores your OpenAI key (override keys and name with --set to avoid yaml file changes):\n\n```sh\npf connection create --file ./my_chatbot/openai.yaml --set api_key= --name open_ai_connection\n```\n\nFor Azure OpenAI key, establish the connection by running the command, using the `azure_openai.yaml` file:\n\n```sh\npf connection create --file ./my_chatbot/azure_openai.yaml --set api_key= api_base= --name open_ai_connection\n```\n\n**Chat with your flow**\n\nIn the `my_chatbot` folder, there's a `flow.dag.yaml` file that outlines the flow, including inputs/outputs, nodes, connection, and the LLM model, etc\n\n> Note that in the `chat` node, we're using a connection named `open_ai_connection` (specified in `connection` field) and the `gpt-35-turbo` model (specified in `deployment_name` field). The deployment_name filed is to specify the OpenAI model, or the Azure OpenAI deployment resource.\n\nInteract with your chatbot by running: (press `Ctrl + C` to end the session)\n\n```sh\npf flow test --flow ./my_chatbot --interactive\n```\n\n**Core value: ensuring \"High Quality\u201d from prototype to production**\n\nExplore our [**15-minute tutorial**](examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md) that guides you through prompt tuning \u27a1 batch testing \u27a1 evaluation, all designed to ensure high quality ready for production.\n\nNext Step! Continue with the **Tutorial** \ud83d\udc47 section to delve deeper into prompt flow.\n\n", "sentence": [ [ "quick", "start", "\u26a1", "*", "*", "create", "chatbot", "prompt", "flow", "*", "*", "run", "command", "initiate", "prompt", "flow", "chat", "template", ",", "creates", "folder", "named", "`", "my_chatbot", "`", "generates", "required", "file", "within", ":", "``", "`", "sh", "pf", "flow", "init", "--", "flow", "./my_chatbot", "--", "type", "chat", "``", "`", "*", "*", "setup", "connection", "api", "key", "*", "*", "openai", "key", ",", "establish", "connection", "running", "command", ",", "using", "`", "openai.yaml", "`", "file", "`", "my_chatbot", "`", "folder", ",", "store", "openai", "key", "(", "override", "key", "name", "--", "set", "avoid", "yaml", "file", "change", ")", ":", "``", "`", "sh", "pf", "connection", "create", "--", "file", "./my_chatbot/openai.yaml", "--", "set", "api_key=", "<", "your_api_key", ">", "--", "name", "open_ai_connection", "``", "`", "azure", "openai", "key", ",", "establish", "connection", "running", "command", ",", "using", "`", "azure_openai.yaml", "`", "file", ":", "``", "`", "sh", "pf", "connection", "create", "--", "file", "./my_chatbot/azure_openai.yaml", "--", "set", "api_key=", "<", "your_api_key", ">", "api_base=", "<", "your_api_base", ">", "--", "name", "open_ai_connection", "``", "`", "*", "*", "chat", "flow", "*", "*", "`", "my_chatbot", "`", "folder", ",", "'s", "`", "flow.dag.yaml", "`", "file", "outline", "flow", ",", "including", "inputs/outputs", ",", "node", ",", "connection", ",", "llm", "model", ",", "etc", ">", "note", "`", "chat", "`", "node", ",", "'re", "using", "connection", "named", "`", "open_ai_connection", "`", "(", "specified", "`", "connection", "`", "field", ")", "`", "gpt-35-turbo", "`", "model", "(", "specified", "`", "deployment_name", "`", "field", ")", ".", "deployment_name", "filed", "specify", "openai", "model", ",", "azure", "openai", "deployment", "resource", ".", "interact", "chatbot", "running", ":", "(", "press", "`", "ctrl", "+", "c", "`", "end", "session", ")", "``", "`", "sh", "pf", "flow", "test", "--", "flow", "./my_chatbot", "--", "interactive", "``", "`", "*", "*", "core", "value", ":", "ensuring", "``", "high", "quality", "\u201d", "prototype", "production", "*", "*", "explore", "[", "*", "*", "15-minute", "tutorial", "*", "*", "]", "(", "examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md", ")", "guide", "prompt", "tuning", "\u27a1", "batch", "testing", "\u27a1", "evaluation", ",", "designed", "ensure", "high", "quality", "ready", "production", ".", "next", "step", "!", "continue", "*", "*", "tutorial", "*", "*", "\ud83d\udc47", "section", "delve", "deeper", "prompt", "flow", "." ], [ "quick start \u26a1 * * create chatbot prompt flow * * run command initiate prompt flow chat template , creates folder named ` my_chatbot ` generates required file within : `` ` sh pf flow init -- flow ./my_chatbot -- type chat `` ` * * setup connection api key * * openai key , establish connection running command , using ` openai.yaml ` file ` my_chatbot ` folder , store openai key ( override key name -- set avoid yaml file change ) : `` ` sh pf connection create -- file ./my_chatbot/openai.yaml -- set api_key= < your_api_key > -- name open_ai_connection `` ` azure openai key , establish connection running command , using ` azure_openai.yaml ` file : `` ` sh pf connection create -- file ./my_chatbot/azure_openai.yaml -- set api_key= < your_api_key > api_base= < your_api_base > -- name open_ai_connection `` ` * * chat flow * * ` my_chatbot ` folder , 's ` flow.dag.yaml ` file outline flow , including inputs/outputs , node , connection , llm model , etc > note ` chat ` node , 're using connection named ` open_ai_connection ` ( specified ` connection ` field ) ` gpt-35-turbo ` model ( specified ` deployment_name ` field ) .", "deployment_name filed specify openai model , azure openai deployment resource .", "interact chatbot running : ( press ` ctrl + c ` end session ) `` ` sh pf flow test -- flow ./my_chatbot -- interactive `` ` * * core value : ensuring `` high quality \u201d prototype production * * explore [ * * 15-minute tutorial * * ] ( examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md ) guide prompt tuning \u27a1 batch testing \u27a1 evaluation , designed ensure high quality ready production .", "next step !", "continue * * tutorial * * \ud83d\udc47 section delve deeper prompt flow ." ] ], "token": [ [ "quick", "start", "\u26a1", "*", "*", "create", "chatbot", "prompt", "flow", "*", "*", "run", "command", "initiate", "prompt", "flow", "chat", "template", ",", "creates", "folder", "named", "`", "my_chatbot", "`", "generates", "required", "file", "within", ":", "``", "`", "sh", "pf", "flow", "init", "--", "flow", "./my_chatbot", "--", "type", "chat", "``", "`", "*", "*", "setup", "connection", "api", "key", "*", "*", "openai", "key", ",", "establish", "connection", "running", "command", ",", "using", "`", "openai.yaml", "`", "file", "`", "my_chatbot", "`", "folder", ",", "store", "openai", "key", "(", "override", "key", "name", "--", "set", "avoid", "yaml", "file", "change", ")", ":", "``", "`", "sh", "pf", "connection", "create", "--", "file", "./my_chatbot/openai.yaml", "--", "set", "api_key=", "<", "your_api_key", ">", "--", "name", "open_ai_connection", "``", "`", "azure", "openai", "key", ",", "establish", "connection", "running", "command", ",", "using", "`", "azure_openai.yaml", "`", "file", ":", "``", "`", "sh", "pf", "connection", "create", "--", "file", "./my_chatbot/azure_openai.yaml", "--", "set", "api_key=", "<", "your_api_key", ">", "api_base=", "<", "your_api_base", ">", "--", "name", "open_ai_connection", "``", "`", "*", "*", "chat", "flow", "*", "*", "`", "my_chatbot", "`", "folder", ",", "'s", "`", "flow.dag.yaml", "`", "file", "outline", "flow", ",", "including", "inputs/outputs", ",", "node", ",", "connection", ",", "llm", "model", ",", "etc", ">", "note", "`", "chat", "`", "node", ",", "'re", "using", "connection", "named", "`", "open_ai_connection", "`", "(", "specified", "`", "connection", "`", "field", ")", "`", "gpt-35-turbo", "`", "model", "(", "specified", "`", "deployment_name", "`", "field", ")", ".", "deployment_name", "filed", "specify", "openai", "model", ",", "azure", "openai", "deployment", "resource", ".", "interact", "chatbot", "running", ":", "(", "press", "`", "ctrl", "+", "c", "`", "end", "session", ")", "``", "`", "sh", "pf", "flow", "test", "--", "flow", "./my_chatbot", "--", "interactive", "``", "`", "*", "*", "core", "value", ":", "ensuring", "``", "high", "quality", "\u201d", "prototype", "production", "*", "*", "explore", "[", "*", "*", "15-minute", "tutorial", "*", "*", "]", "(", "examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md", ")", "guide", "prompt", "tuning", "\u27a1", "batch", "testing", "\u27a1", "evaluation", ",", "designed", "ensure", "high", "quality", "ready", "production", ".", "next", "step", "!", "continue", "*", "*", "tutorial", "*", "*", "\ud83d\udc47", "section", "delve", "deeper", "prompt", "flow", "." ], [ "quick start \u26a1 * * create chatbot prompt flow * * run command initiate prompt flow chat template , creates folder named ` my_chatbot ` generates required file within : `` ` sh pf flow init -- flow ./my_chatbot -- type chat `` ` * * setup connection api key * * openai key , establish connection running command , using ` openai.yaml ` file ` my_chatbot ` folder , store openai key ( override key name -- set avoid yaml file change ) : `` ` sh pf connection create -- file ./my_chatbot/openai.yaml -- set api_key= < your_api_key > -- name open_ai_connection `` ` azure openai key , establish connection running command , using ` azure_openai.yaml ` file : `` ` sh pf connection create -- file ./my_chatbot/azure_openai.yaml -- set api_key= < your_api_key > api_base= < your_api_base > -- name open_ai_connection `` ` * * chat flow * * ` my_chatbot ` folder , 's ` flow.dag.yaml ` file outline flow , including inputs/outputs , node , connection , llm model , etc > note ` chat ` node , 're using connection named ` open_ai_connection ` ( specified ` connection ` field ) ` gpt-35-turbo ` model ( specified ` deployment_name ` field ) .", "deployment_name filed specify openai model , azure openai deployment resource .", "interact chatbot running : ( press ` ctrl + c ` end session ) `` ` sh pf flow test -- flow ./my_chatbot -- interactive `` ` * * core value : ensuring `` high quality \u201d prototype production * * explore [ * * 15-minute tutorial * * ] ( examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md ) guide prompt tuning \u27a1 batch testing \u27a1 evaluation , designed ensure high quality ready production .", "next step !", "continue * * tutorial * * \ud83d\udc47 section delve deeper prompt flow ." ] ], "level of complexity": -1 }, { "url": "https://github.com/microsoft/promptflow", "readme_url": "https://raw.githubusercontent.com/microsoft/promptflow/main/README.md", "topic": [ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ], "text": "VS Code Extension\n\nWe also offer a VS Code extension (a flow designer) for an interactive flow development experience with UI.\n\n\"vsc\"\n\nYou can install it from the visualstudio marketplace.\n\n", "sentence": [ [ "v", "code", "extension", "also", "offer", "v", "code", "extension", "(", "flow", "designer", ")", "interactive", "flow", "development", "experience", "ui", ".", "<", "img", "src=", "''", "examples/tutorials/quick-start/media/vsc.png", "''", "alt=", "''", "vsc", "''", "width=", "''", "1000", "''", "/", ">", "install", "<", "href=", "''", "http", ":", "//marketplace.visualstudio.com/items", "?", "itemname=prompt-flow.prompt-flow", "''", ">", "visualstudio", "marketplace", "<", "/a", ">", "." ], [ "v code extension also offer v code extension ( flow designer ) interactive flow development experience ui .", "< img src= '' examples/tutorials/quick-start/media/vsc.png '' alt= '' vsc '' width= '' 1000 '' / > install < href= '' http : //marketplace.visualstudio.com/items ? itemname=prompt-flow.prompt-flow '' > visualstudio marketplace < /a > ." ] ], "token": [ [ "v", "code", "extension", "also", "offer", "v", "code", "extension", "(", "flow", "designer", ")", "interactive", "flow", "development", "experience", "ui", ".", "<", "img", "src=", "''", "examples/tutorials/quick-start/media/vsc.png", "''", "alt=", "''", "vsc", "''", "width=", "''", "1000", "''", "/", ">", "install", "<", "href=", "''", "http", ":", "//marketplace.visualstudio.com/items", "?", "itemname=prompt-flow.prompt-flow", "''", ">", "visualstudio", "marketplace", "<", "/a", ">", "." ], [ "v code extension also offer v code extension ( flow designer ) interactive flow development experience ui .", "< img src= '' examples/tutorials/quick-start/media/vsc.png '' alt= '' vsc '' width= '' 1000 '' / > install < href= '' http : //marketplace.visualstudio.com/items ? itemname=prompt-flow.prompt-flow '' > visualstudio marketplace < /a > ." ] ], "level of complexity": -1 }, { "url": "https://github.com/microsoft/promptflow", "readme_url": "https://raw.githubusercontent.com/microsoft/promptflow/main/README.md", "topic": [ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ], "text": "Deep delve into flow development\n\n[Getting started with prompt flow](./docs/cloud/azureai/quick-start/index.md): A step by step guidance to invoke your first flow run.\n\n", "sentence": [ [ "deep", "delve", "flow", "development", "[", "getting", "started", "prompt", "flow", "]", "(", "./docs/cloud/azureai/quick-start/index.md", ")", ":", "step", "step", "guidance", "invoke", "first", "flow", "run", "." ], [ "deep delve flow development [ getting started prompt flow ] ( ./docs/cloud/azureai/quick-start/index.md ) : step step guidance invoke first flow run ." ] ], "token": [ [ "deep", "delve", "flow", "development", "[", "getting", "started", "prompt", "flow", "]", "(", "./docs/cloud/azureai/quick-start/index.md", ")", ":", "step", "step", "guidance", "invoke", "first", "flow", "run", "." ], [ "deep delve flow development [ getting started prompt flow ] ( ./docs/cloud/azureai/quick-start/index.md ) : step step guidance invoke first flow run ." ] ], "level of complexity": -1 }, { "url": "https://github.com/microsoft/promptflow", "readme_url": "https://raw.githubusercontent.com/microsoft/promptflow/main/README.md", "topic": [ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ], "text": "Learn from use cases\n\n[Tutorial: Chat with PDF](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md): An end-to-end tutorial on how to build a high quality chat application with prompt flow, including flow development and evaluation with metrics.\n> More examples can be found [here](https://microsoft.github.io/promptflow/tutorials/index.html#samples). We welcome contributions of new use cases!\n\n", "sentence": [ [ "learn", "use", "case", "[", "tutorial", ":", "chat", "pdf", "]", "(", "http", ":", "//github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md", ")", ":", "end-to-end", "tutorial", "build", "high", "quality", "chat", "application", "prompt", "flow", ",", "including", "flow", "development", "evaluation", "metric", ".", ">", "example", "found", "[", "]", "(", "http", ":", "//microsoft.github.io/promptflow/tutorials/index.html", "#", "sample", ")", ".", "welcome", "contribution", "new", "use", "case", "!" ], [ "learn use case [ tutorial : chat pdf ] ( http : //github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md ) : end-to-end tutorial build high quality chat application prompt flow , including flow development evaluation metric .", "> example found [ ] ( http : //microsoft.github.io/promptflow/tutorials/index.html # sample ) .", "welcome contribution new use case !" ] ], "token": [ [ "learn", "use", "case", "[", "tutorial", ":", "chat", "pdf", "]", "(", "http", ":", "//github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md", ")", ":", "end-to-end", "tutorial", "build", "high", "quality", "chat", "application", "prompt", "flow", ",", "including", "flow", "development", "evaluation", "metric", ".", ">", "example", "found", "[", "]", "(", "http", ":", "//microsoft.github.io/promptflow/tutorials/index.html", "#", "sample", ")", ".", "welcome", "contribution", "new", "use", "case", "!" ], [ "learn use case [ tutorial : chat pdf ] ( http : //github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md ) : end-to-end tutorial build high quality chat application prompt flow , including flow development evaluation metric .", "> example found [ ] ( http : //microsoft.github.io/promptflow/tutorials/index.html # sample ) .", "welcome contribution new use case !" ] ], "level of complexity": 2 }, { "url": "https://github.com/microsoft/promptflow", "readme_url": "https://raw.githubusercontent.com/microsoft/promptflow/main/README.md", "topic": [ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ], "text": "Setup for contributors\n\nIf you're interested in contributing, please start with our dev setup guide: [dev_setup.md](./docs/dev/dev_setup.md).\n\nNext Step! Continue with the **Contributing** \ud83d\udc47 section to contribute to prompt flow.\n\n", "sentence": [ [ "setup", "contributor", "'re", "interested", "contributing", ",", "please", "start", "dev", "setup", "guide", ":", "[", "dev_setup.md", "]", "(", "./docs/dev/dev_setup.md", ")", ".", "next", "step", "!", "continue", "*", "*", "contributing", "*", "*", "\ud83d\udc47", "section", "contribute", "prompt", "flow", "." ], [ "setup contributor 're interested contributing , please start dev setup guide : [ dev_setup.md ] ( ./docs/dev/dev_setup.md ) .", "next step !", "continue * * contributing * * \ud83d\udc47 section contribute prompt flow ." ] ], "token": [ [ "setup", "contributor", "'re", "interested", "contributing", ",", "please", "start", "dev", "setup", "guide", ":", "[", "dev_setup.md", "]", "(", "./docs/dev/dev_setup.md", ")", ".", "next", "step", "!", "continue", "*", "*", "contributing", "*", "*", "\ud83d\udc47", "section", "contribute", "prompt", "flow", "." ], [ "setup contributor 're interested contributing , please start dev setup guide : [ dev_setup.md ] ( ./docs/dev/dev_setup.md ) .", "next step !", "continue * * contributing * * \ud83d\udc47 section contribute prompt flow ." ] ], "level of complexity": -1 }, { "url": "https://github.com/TheR1D/shell_gpt", "readme_url": "https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md", "topic": [ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ], "text": "Installation\n```shell\npip install shell-gpt\n```\n\nYou'll need an OpenAI API key, you can generate one [here](https://beta.openai.com/account/api-keys). \nYou will be prompted for your key which will then be stored in `~/.config/shell_gpt/.sgptrc`. \n\n", "sentence": [ [ "installation", "``", "`", "shell", "pip", "install", "shell-gpt", "``", "`", "'ll", "need", "openai", "api", "key", ",", "generate", "one", "[", "]", "(", "http", ":", "//beta.openai.com/account/api-keys", ")", ".", "prompted", "key", "stored", "`", "~/.config/shell_gpt/.sgptrc", "`", "." ], [ "installation `` ` shell pip install shell-gpt `` ` 'll need openai api key , generate one [ ] ( http : //beta.openai.com/account/api-keys ) .", "prompted key stored ` ~/.config/shell_gpt/.sgptrc ` ." ] ], "token": [ [ "installation", "``", "`", "shell", "pip", "install", "shell-gpt", "``", "`", "'ll", "need", "openai", "api", "key", ",", "generate", "one", "[", "]", "(", "http", ":", "//beta.openai.com/account/api-keys", ")", ".", "prompted", "key", "stored", "`", "~/.config/shell_gpt/.sgptrc", "`", "." ], [ "installation `` ` shell pip install shell-gpt `` ` 'll need openai api key , generate one [ ] ( http : //beta.openai.com/account/api-keys ) .", "prompted key stored ` ~/.config/shell_gpt/.sgptrc ` ." ] ], "level of complexity": 0 }, { "url": "https://github.com/TheR1D/shell_gpt", "readme_url": "https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md", "topic": [ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ], "text": "Shell integration\nThis is a **very handy feature**, which allows you to use `sgpt` shell completions directly in your terminal, without the need to type `sgpt` with prompt and arguments. Shell integration enables the use of ShellGPT with hotkeys in your terminal, supported by both Bash and ZSH shells. This feature puts `sgpt` completions directly into terminal buffer (input line), allowing for immediate editing of suggested commands.\n\nhttps://github.com/TheR1D/shell_gpt/assets/16740832/bead0dab-0dd9-436d-88b7-6abfb2c556c1\n\nTo install shell integration, run `sgpt --install-integration` and restart your terminal to apply changes. This will add few lines to your `.bashrc` or `.zshrc` file. After that, you can use `Ctrl+l` (by default) to invoke ShellGPT. When you press `Ctrl+l` it will replace you current input line (buffer) with suggested command. You can then edit it and just press `Enter` to execute.\n\n", "sentence": [ [ "shell", "integration", "*", "*", "handy", "feature", "*", "*", ",", "allows", "use", "`", "sgpt", "`", "shell", "completion", "directly", "terminal", ",", "without", "need", "type", "`", "sgpt", "`", "prompt", "argument", ".", "shell", "integration", "enables", "use", "shellgpt", "hotkeys", "terminal", ",", "supported", "bash", "zsh", "shell", ".", "feature", "put", "`", "sgpt", "`", "completion", "directly", "terminal", "buffer", "(", "input", "line", ")", ",", "allowing", "immediate", "editing", "suggested", "command", ".", "http", ":", "//github.com/ther1d/shell_gpt/assets/16740832/bead0dab-0dd9-436d-88b7-6abfb2c556c1", "install", "shell", "integration", ",", "run", "`", "sgpt", "--", "install-integration", "`", "restart", "terminal", "apply", "change", ".", "add", "line", "`", ".bashrc", "`", "`", ".zshrc", "`", "file", ".", ",", "use", "`", "ctrl+l", "`", "(", "default", ")", "invoke", "shellgpt", ".", "press", "`", "ctrl+l", "`", "replace", "current", "input", "line", "(", "buffer", ")", "suggested", "command", ".", "edit", "press", "`", "enter", "`", "execute", "." ], [ "shell integration * * handy feature * * , allows use ` sgpt ` shell completion directly terminal , without need type ` sgpt ` prompt argument .", "shell integration enables use shellgpt hotkeys terminal , supported bash zsh shell .", "feature put ` sgpt ` completion directly terminal buffer ( input line ) , allowing immediate editing suggested command .", "http : //github.com/ther1d/shell_gpt/assets/16740832/bead0dab-0dd9-436d-88b7-6abfb2c556c1 install shell integration , run ` sgpt -- install-integration ` restart terminal apply change .", "add line ` .bashrc ` ` .zshrc ` file .", ", use ` ctrl+l ` ( default ) invoke shellgpt .", "press ` ctrl+l ` replace current input line ( buffer ) suggested command .", "edit press ` enter ` execute ." ] ], "token": [ [ "shell", "integration", "*", "*", "handy", "feature", "*", "*", ",", "allows", "use", "`", "sgpt", "`", "shell", "completion", "directly", "terminal", ",", "without", "need", "type", "`", "sgpt", "`", "prompt", "argument", ".", "shell", "integration", "enables", "use", "shellgpt", "hotkeys", "terminal", ",", "supported", "bash", "zsh", "shell", ".", "feature", "put", "`", "sgpt", "`", "completion", "directly", "terminal", "buffer", "(", "input", "line", ")", ",", "allowing", "immediate", "editing", "suggested", "command", ".", "http", ":", "//github.com/ther1d/shell_gpt/assets/16740832/bead0dab-0dd9-436d-88b7-6abfb2c556c1", "install", "shell", "integration", ",", "run", "`", "sgpt", "--", "install-integration", "`", "restart", "terminal", "apply", "change", ".", "add", "line", "`", ".bashrc", "`", "`", ".zshrc", "`", "file", ".", ",", "use", "`", "ctrl+l", "`", "(", "default", ")", "invoke", "shellgpt", ".", "press", "`", "ctrl+l", "`", "replace", "current", "input", "line", "(", "buffer", ")", "suggested", "command", ".", "edit", "press", "`", "enter", "`", "execute", "." ], [ "shell integration * * handy feature * * , allows use ` sgpt ` shell completion directly terminal , without need type ` sgpt ` prompt argument .", "shell integration enables use shellgpt hotkeys terminal , supported bash zsh shell .", "feature put ` sgpt ` completion directly terminal buffer ( input line ) , allowing immediate editing suggested command .", "http : //github.com/ther1d/shell_gpt/assets/16740832/bead0dab-0dd9-436d-88b7-6abfb2c556c1 install shell integration , run ` sgpt -- install-integration ` restart terminal apply change .", "add line ` .bashrc ` ` .zshrc ` file .", ", use ` ctrl+l ` ( default ) invoke shellgpt .", "press ` ctrl+l ` replace current input line ( buffer ) suggested command .", "edit press ` enter ` execute ." ] ], "level of complexity": -1 }, { "url": "https://github.com/TheR1D/shell_gpt", "readme_url": "https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md", "topic": [ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ], "text": "Function calling \n[Function calls](https://platform.openai.com/docs/guides/function-calling) is a powerful feature OpenAI provides. It allows LLM to execute functions in your system, which can be used to accomplish a variety of tasks. To install [default functions](https://github.com/TheR1D/shell_gpt/tree/main/sgpt/default_functions/) run:\n```shell\nsgpt --install-functions\n```\n\nShellGPT has a convenient way to define functions and use them. In order to create your custom function, navigate to `~/.config/shell_gpt/functions` and create a new .py file with the function name. Inside this file, you can define your function using the following syntax:\n```python\n", "sentence": [ [ "function", "calling", "[", "function", "call", "]", "(", "http", ":", "//platform.openai.com/docs/guides/function-calling", ")", "powerful", "feature", "openai", "provides", ".", "allows", "llm", "execute", "function", "system", ",", "used", "accomplish", "variety", "task", ".", "install", "[", "default", "function", "]", "(", "http", ":", "//github.com/ther1d/shell_gpt/tree/main/sgpt/default_functions/", ")", "run", ":", "``", "`", "shell", "sgpt", "--", "install-functions", "``", "`", "shellgpt", "convenient", "way", "define", "function", "use", ".", "order", "create", "custom", "function", ",", "navigate", "`", "~/.config/shell_gpt/functions", "`", "create", "new", ".py", "file", "function", "name", ".", "inside", "file", ",", "define", "function", "using", "following", "syntax", ":", "``", "`", "python" ], [ "function calling [ function call ] ( http : //platform.openai.com/docs/guides/function-calling ) powerful feature openai provides .", "allows llm execute function system , used accomplish variety task .", "install [ default function ] ( http : //github.com/ther1d/shell_gpt/tree/main/sgpt/default_functions/ ) run : `` ` shell sgpt -- install-functions `` ` shellgpt convenient way define function use .", "order create custom function , navigate ` ~/.config/shell_gpt/functions ` create new .py file function name .", "inside file , define function using following syntax : `` ` python" ] ], "token": [ [ "function", "calling", "[", "function", "call", "]", "(", "http", ":", "//platform.openai.com/docs/guides/function-calling", ")", "powerful", "feature", "openai", "provides", ".", "allows", "llm", "execute", "function", "system", ",", "used", "accomplish", "variety", "task", ".", "install", "[", "default", "function", "]", "(", "http", ":", "//github.com/ther1d/shell_gpt/tree/main/sgpt/default_functions/", ")", "run", ":", "``", "`", "shell", "sgpt", "--", "install-functions", "``", "`", "shellgpt", "convenient", "way", "define", "function", "use", ".", "order", "create", "custom", "function", ",", "navigate", "`", "~/.config/shell_gpt/functions", "`", "create", "new", ".py", "file", "function", "name", ".", "inside", "file", ",", "define", "function", "using", "following", "syntax", ":", "``", "`", "python" ], [ "function calling [ function call ] ( http : //platform.openai.com/docs/guides/function-calling ) powerful feature openai provides .", "allows llm execute function system , used accomplish variety task .", "install [ default function ] ( http : //github.com/ther1d/shell_gpt/tree/main/sgpt/default_functions/ ) run : `` ` shell sgpt -- install-functions `` ` shellgpt convenient way define function use .", "order create custom function , navigate ` ~/.config/shell_gpt/functions ` create new .py file function name .", "inside file , define function using following syntax : `` ` python" ] ], "level of complexity": -1 }, { "url": "https://github.com/TheR1D/shell_gpt", "readme_url": "https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md", "topic": [ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ], "text": "-> test.json\n```\n\nNote that if for some reason the function (execute_shell_command) will return an error, LLM might try to accomplish the task based on the output. Let's say we don't have installed `jq` in our system, and we ask LLM to parse JSON file:\n```shell\nsgpt \"parse /tmp/test.json file using jq and return only email value\"\n", "sentence": [ [ "-", ">", "test.json", "``", "`", "note", "reason", "function", "(", "execute_shell_command", ")", "return", "error", ",", "llm", "might", "try", "accomplish", "task", "based", "output", ".", "let", "'s", "say", "n't", "installed", "`", "jq", "`", "system", ",", "ask", "llm", "parse", "json", "file", ":", "``", "`", "shell", "sgpt", "``", "parse", "/tmp/test.json", "file", "using", "jq", "return", "email", "value", "''" ], [ "- > test.json `` ` note reason function ( execute_shell_command ) return error , llm might try accomplish task based output .", "let 's say n't installed ` jq ` system , ask llm parse json file : `` ` shell sgpt `` parse /tmp/test.json file using jq return email value ''" ] ], "token": [ [ "-", ">", "test.json", "``", "`", "note", "reason", "function", "(", "execute_shell_command", ")", "return", "error", ",", "llm", "might", "try", "accomplish", "task", "based", "output", ".", "let", "'s", "say", "n't", "installed", "`", "jq", "`", "system", ",", "ask", "llm", "parse", "json", "file", ":", "``", "`", "shell", "sgpt", "``", "parse", "/tmp/test.json", "file", "using", "jq", "return", "email", "value", "''" ], [ "- > test.json `` ` note reason function ( execute_shell_command ) return error , llm might try accomplish task based output .", "let 's say n't installed ` jq ` system , ask llm parse json file : `` ` shell sgpt `` parse /tmp/test.json file using jq return email value ''" ] ], "level of complexity": -1 }, { "url": "https://github.com/TheR1D/shell_gpt", "readme_url": "https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md", "topic": [ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ], "text": "-> It appears that jq is not installed on the system. Let me try to install it using brew.\n", "sentence": [ [ "-", ">", "appears", "jq", "installed", "system", ".", "let", "try", "install", "using", "brew", "." ], [ "- > appears jq installed system .", "let try install using brew ." ] ], "token": [ [ "-", ">", "appears", "jq", "installed", "system", ".", "let", "try", "install", "using", "brew", "." ], [ "- > appears jq installed system .", "let try install using brew ." ] ], "level of complexity": -1 }, { "url": "https://github.com/TheR1D/shell_gpt", "readme_url": "https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md", "topic": [ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ], "text": "-> @FunctionCall execute_shell_command(shell_command=\"brew install jq\")\n", "sentence": [ [ "-", ">", "@", "functioncall", "execute_shell_command", "(", "shell_command=", "''", "brew", "install", "jq", "''", ")" ], [ "- > @ functioncall execute_shell_command ( shell_command= '' brew install jq '' )" ] ], "token": [ [ "-", ">", "@", "functioncall", "execute_shell_command", "(", "shell_command=", "''", "brew", "install", "jq", "''", ")" ], [ "- > @ functioncall execute_shell_command ( shell_command= '' brew install jq '' )" ] ], "level of complexity": -1 }, { "url": "https://github.com/TheR1D/shell_gpt", "readme_url": "https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md", "topic": [ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ], "text": "-> jq has been successfully installed. Let me try to parse the file again.\n", "sentence": [ [ "-", ">", "jq", "successfully", "installed", ".", "let", "try", "parse", "file", "." ], [ "- > jq successfully installed .", "let try parse file ." ] ], "token": [ [ "-", ">", "jq", "successfully", "installed", ".", "let", "try", "parse", "file", "." ], [ "- > jq successfully installed .", "let try parse file ." ] ], "level of complexity": -1 }, { "url": "https://github.com/TheR1D/shell_gpt", "readme_url": "https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md", "topic": [ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ], "text": "Runtime configuration file\nYou can setup some parameters in runtime configuration file `~/.config/shell_gpt/.sgptrc`:\n```text\n", "sentence": [ [ "runtime", "configuration", "file", "setup", "parameter", "runtime", "configuration", "file", "`", "~/.config/shell_gpt/.sgptrc", "`", ":", "``", "`", "text" ], [ "runtime configuration file setup parameter runtime configuration file ` ~/.config/shell_gpt/.sgptrc ` : `` ` text" ] ], "token": [ [ "runtime", "configuration", "file", "setup", "parameter", "runtime", "configuration", "file", "`", "~/.config/shell_gpt/.sgptrc", "`", ":", "``", "`", "text" ], [ "runtime configuration file setup parameter runtime configuration file ` ~/.config/shell_gpt/.sgptrc ` : `` ` text" ] ], "level of complexity": -1 }, { "url": "https://github.com/continuedev/continue", "readme_url": "https://raw.githubusercontent.com/continuedev/continue/main/README.md", "topic": [ "ai", "chatgpt", "copilot", "developer-tools", "intellij", "jetbrains", "llm", "open-source", "openai", "pycharm", "software-development", "visual-studio-code", "vscode" ], "text": "Getting Started\n\n", "sentence": [ [ "getting", "started" ], [ "getting started" ] ], "token": [ [ "getting", "started" ], [ "getting started" ] ], "level of complexity": -1 }, { "url": "https://github.com/continuedev/continue", "readme_url": "https://raw.githubusercontent.com/continuedev/continue/main/README.md", "topic": [ "ai", "chatgpt", "copilot", "developer-tools", "intellij", "jetbrains", "llm", "open-source", "openai", "pycharm", "software-development", "visual-studio-code", "vscode" ], "text": "Download for [VS Code](https://marketplace.visualstudio.com/items?itemName=Continue.continue) and [JetBrains](https://plugins.jetbrains.com/plugin/22707-continue-extension)\n\nYou can try out Continue for free using a proxy server that securely makes calls with our API key to models like GPT-4, Gemini Pro, and Phind CodeLlama via OpenAI, Google, and Together respectively.\n\nOnce you're ready to use your own API key or a different model / provider, press the `+` button in the bottom left to add a new model to your `config.json`. Learn more about the models and providers [here](https://continue.dev/docs/model-setup/overview).\n\n", "sentence": [ [ "download", "[", "v", "code", "]", "(", "http", ":", "//marketplace.visualstudio.com/items", "?", "itemname=continue.continue", ")", "[", "jetbrains", "]", "(", "http", ":", "//plugins.jetbrains.com/plugin/22707-continue-extension", ")", "try", "continue", "free", "using", "proxy", "server", "securely", "make", "call", "api", "key", "model", "like", "gpt-4", ",", "gemini", "pro", ",", "phind", "codellama", "via", "openai", ",", "google", ",", "together", "respectively", ".", "'re", "ready", "use", "api", "key", "different", "model", "/", "provider", ",", "press", "`", "+", "`", "button", "bottom", "left", "add", "new", "model", "`", "config.json", "`", ".", "learn", "model", "provider", "[", "]", "(", "http", ":", "//continue.dev/docs/model-setup/overview", ")", "." ], [ "download [ v code ] ( http : //marketplace.visualstudio.com/items ? itemname=continue.continue ) [ jetbrains ] ( http : //plugins.jetbrains.com/plugin/22707-continue-extension ) try continue free using proxy server securely make call api key model like gpt-4 , gemini pro , phind codellama via openai , google , together respectively .", "'re ready use api key different model / provider , press ` + ` button bottom left add new model ` config.json ` .", "learn model provider [ ] ( http : //continue.dev/docs/model-setup/overview ) ." ] ], "token": [ [ "download", "[", "v", "code", "]", "(", "http", ":", "//marketplace.visualstudio.com/items", "?", "itemname=continue.continue", ")", "[", "jetbrains", "]", "(", "http", ":", "//plugins.jetbrains.com/plugin/22707-continue-extension", ")", "try", "continue", "free", "using", "proxy", "server", "securely", "make", "call", "api", "key", "model", "like", "gpt-4", ",", "gemini", "pro", ",", "phind", "codellama", "via", "openai", ",", "google", ",", "together", "respectively", ".", "'re", "ready", "use", "api", "key", "different", "model", "/", "provider", ",", "press", "`", "+", "`", "button", "bottom", "left", "add", "new", "model", "`", "config.json", "`", ".", "learn", "model", "provider", "[", "]", "(", "http", ":", "//continue.dev/docs/model-setup/overview", ")", "." ], [ "download [ v code ] ( http : //marketplace.visualstudio.com/items ? itemname=continue.continue ) [ jetbrains ] ( http : //plugins.jetbrains.com/plugin/22707-continue-extension ) try continue free using proxy server securely make call api key model like gpt-4 , gemini pro , phind codellama via openai , google , together respectively .", "'re ready use api key different model / provider , press ` + ` button bottom left add new model ` config.json ` .", "learn model provider [ ] ( http : //continue.dev/docs/model-setup/overview ) ." ] ], "level of complexity": -1 } ]