readme_url
stringlengths
60
79
sentence
sequence
token
sequence
text
stringlengths
9
6.51k
url
stringlengths
30
49
level of complexity
int64
-1
2
topic
sequence
https://raw.githubusercontent.com/ollama/ollama/main/README.md
[ [ "window", "coming", "soon", "!", ",", "install", "ollama", "window", "via", "wsl2", "." ], [ "window coming soon !", ", install ollama window via wsl2 ." ] ]
[ [ "window", "coming", "soon", "!", ",", "install", "ollama", "window", "via", "wsl2", "." ], [ "window coming soon !", ", install ollama window via wsl2 ." ] ]
Windows Coming soon! For now, you can install Ollama on Windows via WSL2.
https://github.com/ollama/ollama
-1
[ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ]
https://raw.githubusercontent.com/ollama/ollama/main/README.md
[ [ "linux", "&", "wsl2", "``", "`", "curl", "http", ":", "//ollama.ai/install.sh", "|", "sh", "``", "`", "[", "manual", "install", "instruction", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/linux.md", ")" ], [ "linux & wsl2 `` ` curl http : //ollama.ai/install.sh | sh `` ` [ manual install instruction ] ( http : //github.com/jmorganca/ollama/blob/main/docs/linux.md )" ] ]
[ [ "linux", "&", "wsl2", "``", "`", "curl", "http", ":", "//ollama.ai/install.sh", "|", "sh", "``", "`", "[", "manual", "install", "instruction", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/linux.md", ")" ], [ "linux & wsl2 `` ` curl http : //ollama.ai/install.sh | sh `` ` [ manual install instruction ] ( http : //github.com/jmorganca/ollama/blob/main/docs/linux.md )" ] ]
Linux & WSL2 ``` curl https://ollama.ai/install.sh | sh ``` [Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)
https://github.com/ollama/ollama
-1
[ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ]
https://raw.githubusercontent.com/ollama/ollama/main/README.md
[ [ "building", "install", "`", "cmake", "`", "`", "go", "`", ":", "``", "`", "brew", "install", "cmake", "go", "``", "`", "generate", "dependency", ":", "``", "`", "go", "generate", "./", "...", "``", "`", "build", "binary", ":", "``", "`", "go", "build", ".", "``", "`", "detailed", "instruction", "found", "[", "developer", "guide", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/development.md", ")" ], [ "building install ` cmake ` ` go ` : `` ` brew install cmake go `` ` generate dependency : `` ` go generate ./ ... `` ` build binary : `` ` go build .", "`` ` detailed instruction found [ developer guide ] ( http : //github.com/jmorganca/ollama/blob/main/docs/development.md )" ] ]
[ [ "building", "install", "`", "cmake", "`", "`", "go", "`", ":", "``", "`", "brew", "install", "cmake", "go", "``", "`", "generate", "dependency", ":", "``", "`", "go", "generate", "./", "...", "``", "`", "build", "binary", ":", "``", "`", "go", "build", ".", "``", "`", "detailed", "instruction", "found", "[", "developer", "guide", "]", "(", "http", ":", "//github.com/jmorganca/ollama/blob/main/docs/development.md", ")" ], [ "building install ` cmake ` ` go ` : `` ` brew install cmake go `` ` generate dependency : `` ` go generate ./ ... `` ` build binary : `` ` go build .", "`` ` detailed instruction found [ developer guide ] ( http : //github.com/jmorganca/ollama/blob/main/docs/development.md )" ] ]
Building Install `cmake` and `go`: ``` brew install cmake go ``` Then generate dependencies: ``` go generate ./... ``` Then build the binary: ``` go build . ``` More detailed instructions can be found in the [developer guide](https://github.com/jmorganca/ollama/blob/main/docs/development.md)
https://github.com/ollama/ollama
-1
[ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ]
https://raw.githubusercontent.com/ollama/ollama/main/README.md
[ [ "extension", "&", "plugins", "-", "[", "raycast", "extension", "]", "(", "http", ":", "//github.com/massimilianopasquini97/raycast_ollama", ")", "-", "[", "discollama", "]", "(", "http", ":", "//github.com/mxyng/discollama", ")", "(", "discord", "bot", "inside", "ollama", "discord", "channel", ")", "-", "[", "continue", "]", "(", "http", ":", "//github.com/continuedev/continue", ")", "-", "[", "obsidian", "ollama", "plugin", "]", "(", "http", ":", "//github.com/hinterdupfinger/obsidian-ollama", ")", "-", "[", "logseq", "ollama", "plugin", "]", "(", "http", ":", "//github.com/omagdy7/ollama-logseq", ")", "-", "[", "dagger", "chatbot", "]", "(", "http", ":", "//github.com/samalba/dagger-chatbot", ")", "-", "[", "discord", "ai", "bot", "]", "(", "http", ":", "//github.com/mekb-turtle/discord-ai-bot", ")", "-", "[", "ollama", "telegram", "bot", "]", "(", "http", ":", "//github.com/ruecat/ollama-telegram", ")", "-", "[", "ha", "ollama", "conversation", "]", "(", "http", ":", "//github.com/ej52/hass-ollama-conversation", ")", "-", "[", "rivet", "plugin", "]", "(", "http", ":", "//github.com/abrenneke/rivet-plugin-ollama", ")", "-", "[", "llama", "coder", "]", "(", "http", ":", "//github.com/ex3ndr/llama-coder", ")", "(", "copilot", "alternative", "using", "ollama", ")", "-", "[", "obsidian", "bmo", "chatbot", "plugin", "]", "(", "http", ":", "//github.com/longy2k/obsidian-bmo-chatbot", ")", "-", "[", "open", "interpreter", "]", "(", "http", ":", "//docs.openinterpreter.com/language-model-setup/local-models/ollama", ")", "-", "[", "twinny", "]", "(", "http", ":", "//github.com/rjmacarthy/twinny", ")", "(", "copilot", "copilot", "chat", "alternative", "using", "ollama", ")", "-", "[", "wingman-ai", "]", "(", "http", ":", "//github.com/russellcanfield/wingman-ai", ")", "(", "copilot", "code", "chat", "alternative", "using", "ollama", "huggingface", ")" ], [ "extension & plugins - [ raycast extension ] ( http : //github.com/massimilianopasquini97/raycast_ollama ) - [ discollama ] ( http : //github.com/mxyng/discollama ) ( discord bot inside ollama discord channel ) - [ continue ] ( http : //github.com/continuedev/continue ) - [ obsidian ollama plugin ] ( http : //github.com/hinterdupfinger/obsidian-ollama ) - [ logseq ollama plugin ] ( http : //github.com/omagdy7/ollama-logseq ) - [ dagger chatbot ] ( http : //github.com/samalba/dagger-chatbot ) - [ discord ai bot ] ( http : //github.com/mekb-turtle/discord-ai-bot ) - [ ollama telegram bot ] ( http : //github.com/ruecat/ollama-telegram ) - [ ha ollama conversation ] ( http : //github.com/ej52/hass-ollama-conversation ) - [ rivet plugin ] ( http : //github.com/abrenneke/rivet-plugin-ollama ) - [ llama coder ] ( http : //github.com/ex3ndr/llama-coder ) ( copilot alternative using ollama ) - [ obsidian bmo chatbot plugin ] ( http : //github.com/longy2k/obsidian-bmo-chatbot ) - [ open interpreter ] ( http : //docs.openinterpreter.com/language-model-setup/local-models/ollama ) - [ twinny ] ( http : //github.com/rjmacarthy/twinny ) ( copilot copilot chat alternative using ollama ) - [ wingman-ai ] ( http : //github.com/russellcanfield/wingman-ai ) ( copilot code chat alternative using ollama huggingface )" ] ]
[ [ "extension", "&", "plugins", "-", "[", "raycast", "extension", "]", "(", "http", ":", "//github.com/massimilianopasquini97/raycast_ollama", ")", "-", "[", "discollama", "]", "(", "http", ":", "//github.com/mxyng/discollama", ")", "(", "discord", "bot", "inside", "ollama", "discord", "channel", ")", "-", "[", "continue", "]", "(", "http", ":", "//github.com/continuedev/continue", ")", "-", "[", "obsidian", "ollama", "plugin", "]", "(", "http", ":", "//github.com/hinterdupfinger/obsidian-ollama", ")", "-", "[", "logseq", "ollama", "plugin", "]", "(", "http", ":", "//github.com/omagdy7/ollama-logseq", ")", "-", "[", "dagger", "chatbot", "]", "(", "http", ":", "//github.com/samalba/dagger-chatbot", ")", "-", "[", "discord", "ai", "bot", "]", "(", "http", ":", "//github.com/mekb-turtle/discord-ai-bot", ")", "-", "[", "ollama", "telegram", "bot", "]", "(", "http", ":", "//github.com/ruecat/ollama-telegram", ")", "-", "[", "ha", "ollama", "conversation", "]", "(", "http", ":", "//github.com/ej52/hass-ollama-conversation", ")", "-", "[", "rivet", "plugin", "]", "(", "http", ":", "//github.com/abrenneke/rivet-plugin-ollama", ")", "-", "[", "llama", "coder", "]", "(", "http", ":", "//github.com/ex3ndr/llama-coder", ")", "(", "copilot", "alternative", "using", "ollama", ")", "-", "[", "obsidian", "bmo", "chatbot", "plugin", "]", "(", "http", ":", "//github.com/longy2k/obsidian-bmo-chatbot", ")", "-", "[", "open", "interpreter", "]", "(", "http", ":", "//docs.openinterpreter.com/language-model-setup/local-models/ollama", ")", "-", "[", "twinny", "]", "(", "http", ":", "//github.com/rjmacarthy/twinny", ")", "(", "copilot", "copilot", "chat", "alternative", "using", "ollama", ")", "-", "[", "wingman-ai", "]", "(", "http", ":", "//github.com/russellcanfield/wingman-ai", ")", "(", "copilot", "code", "chat", "alternative", "using", "ollama", "huggingface", ")" ], [ "extension & plugins - [ raycast extension ] ( http : //github.com/massimilianopasquini97/raycast_ollama ) - [ discollama ] ( http : //github.com/mxyng/discollama ) ( discord bot inside ollama discord channel ) - [ continue ] ( http : //github.com/continuedev/continue ) - [ obsidian ollama plugin ] ( http : //github.com/hinterdupfinger/obsidian-ollama ) - [ logseq ollama plugin ] ( http : //github.com/omagdy7/ollama-logseq ) - [ dagger chatbot ] ( http : //github.com/samalba/dagger-chatbot ) - [ discord ai bot ] ( http : //github.com/mekb-turtle/discord-ai-bot ) - [ ollama telegram bot ] ( http : //github.com/ruecat/ollama-telegram ) - [ ha ollama conversation ] ( http : //github.com/ej52/hass-ollama-conversation ) - [ rivet plugin ] ( http : //github.com/abrenneke/rivet-plugin-ollama ) - [ llama coder ] ( http : //github.com/ex3ndr/llama-coder ) ( copilot alternative using ollama ) - [ obsidian bmo chatbot plugin ] ( http : //github.com/longy2k/obsidian-bmo-chatbot ) - [ open interpreter ] ( http : //docs.openinterpreter.com/language-model-setup/local-models/ollama ) - [ twinny ] ( http : //github.com/rjmacarthy/twinny ) ( copilot copilot chat alternative using ollama ) - [ wingman-ai ] ( http : //github.com/russellcanfield/wingman-ai ) ( copilot code chat alternative using ollama huggingface )" ] ]
Extensions & Plugins - [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama) - [Discollama](https://github.com/mxyng/discollama) (Discord bot inside the Ollama discord channel) - [Continue](https://github.com/continuedev/continue) - [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama) - [Logseq Ollama plugin](https://github.com/omagdy7/ollama-logseq) - [Dagger Chatbot](https://github.com/samalba/dagger-chatbot) - [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot) - [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram) - [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation) - [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama) - [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama) - [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot) - [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama) - [twinny](https://github.com/rjmacarthy/twinny) (Copilot and Copilot chat alternative using Ollama) - [Wingman-AI](https://github.com/RussellCanfield/wingman-ai) (Copilot code and chat alternative using Ollama and HuggingFace)
https://github.com/ollama/ollama
-1
[ "go", "golang", "llama", "llama2", "llm", "llms", "mistral", "ollama" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "install" ], [ "install" ] ]
[ [ "install" ], [ "install" ] ]
Install
https://github.com/geekan/MetaGPT
-1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "pip", "installation", ">", "ensure", "python", "3.9+", "installed", "system", ".", "check", "using", ":", "`", "python", "--", "version", "`", ".", ">", "use", "conda", "like", ":", "`", "conda", "create", "-n", "metagpt", "python=3.9", "&", "&", "conda", "activate", "metagpt", "`", "``", "`", "bash", "pip", "install", "metagpt", "metagpt", "--", "init-config" ], [ "pip installation > ensure python 3.9+ installed system .", "check using : ` python -- version ` .", "> use conda like : ` conda create -n metagpt python=3.9 & & conda activate metagpt ` `` ` bash pip install metagpt metagpt -- init-config" ] ]
[ [ "pip", "installation", ">", "ensure", "python", "3.9+", "installed", "system", ".", "check", "using", ":", "`", "python", "--", "version", "`", ".", ">", "use", "conda", "like", ":", "`", "conda", "create", "-n", "metagpt", "python=3.9", "&", "&", "conda", "activate", "metagpt", "`", "``", "`", "bash", "pip", "install", "metagpt", "metagpt", "--", "init-config" ], [ "pip installation > ensure python 3.9+ installed system .", "check using : ` python -- version ` .", "> use conda like : ` conda create -n metagpt python=3.9 & & conda activate metagpt ` `` ` bash pip install metagpt metagpt -- init-config" ] ]
Pip installation > Ensure that Python 3.9+ is installed on your system. You can check this by using: `python --version`. > You can use conda like this: `conda create -n metagpt python=3.9 && conda activate metagpt` ```bash pip install metagpt metagpt --init-config
https://github.com/geekan/MetaGPT
0
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "print", "repo", "structure", "file", "``", "`", "detail", "installation", "please", "refer", "[", "cli_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-stable-version", ")" ], [ "print repo structure file `` ` detail installation please refer [ cli_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-stable-version )" ] ]
[ [ "print", "repo", "structure", "file", "``", "`", "detail", "installation", "please", "refer", "[", "cli_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-stable-version", ")" ], [ "print repo structure file `` ` detail installation please refer [ cli_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-stable-version )" ] ]
it will print the repo structure with files ``` detail installation please refer to [cli_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-stable-version)
https://github.com/geekan/MetaGPT
-1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "docker", "installation", ">", "note", ":", "window", ",", "need", "replace", "``", "/opt/metagpt", "''", "directory", "docker", "permission", "create", ",", "``", ":", "\\users\\x\\metagpt", "''", "``", "`", "bash" ], [ "docker installation > note : window , need replace `` /opt/metagpt '' directory docker permission create , `` : \\users\\x\\metagpt '' `` ` bash" ] ]
[ [ "docker", "installation", ">", "note", ":", "window", ",", "need", "replace", "``", "/opt/metagpt", "''", "directory", "docker", "permission", "create", ",", "``", ":", "\\users\\x\\metagpt", "''", "``", "`", "bash" ], [ "docker installation > note : window , need replace `` /opt/metagpt '' directory docker permission create , `` : \\users\\x\\metagpt '' `` ` bash" ] ]
Docker installation > Note: In the Windows, you need to replace "/opt/metagpt" with a directory that Docker has permission to create, such as "D:\Users\x\metagpt" ```bash
https://github.com/geekan/MetaGPT
-1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "step", "2", ":", "run", "metagpt", "demo", "container", "docker", "run", "--", "rm", "\\", "--", "privileged", "\\", "-v", "/opt/metagpt/config/config2.yaml", ":", "/app/metagpt/config/config2.yaml", "\\", "-v", "/opt/metagpt/workspace", ":", "/app/metagpt/workspace", "\\", "metagpt/metagpt", ":", "latest", "\\", "metagpt", "``", "create", "2048", "game", "''", "``", "`", "detail", "installation", "please", "refer", "[", "docker_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-with-docker", ")" ], [ "step 2 : run metagpt demo container docker run -- rm \\ -- privileged \\ -v /opt/metagpt/config/config2.yaml : /app/metagpt/config/config2.yaml \\ -v /opt/metagpt/workspace : /app/metagpt/workspace \\ metagpt/metagpt : latest \\ metagpt `` create 2048 game '' `` ` detail installation please refer [ docker_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-with-docker )" ] ]
[ [ "step", "2", ":", "run", "metagpt", "demo", "container", "docker", "run", "--", "rm", "\\", "--", "privileged", "\\", "-v", "/opt/metagpt/config/config2.yaml", ":", "/app/metagpt/config/config2.yaml", "\\", "-v", "/opt/metagpt/workspace", ":", "/app/metagpt/workspace", "\\", "metagpt/metagpt", ":", "latest", "\\", "metagpt", "``", "create", "2048", "game", "''", "``", "`", "detail", "installation", "please", "refer", "[", "docker_install", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/installation.html", "#", "install-with-docker", ")" ], [ "step 2 : run metagpt demo container docker run -- rm \\ -- privileged \\ -v /opt/metagpt/config/config2.yaml : /app/metagpt/config/config2.yaml \\ -v /opt/metagpt/workspace : /app/metagpt/workspace \\ metagpt/metagpt : latest \\ metagpt `` create 2048 game '' `` ` detail installation please refer [ docker_install ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/installation.html # install-with-docker )" ] ]
Step 2: Run metagpt demo with container docker run --rm \ --privileged \ -v /opt/metagpt/config/config2.yaml:/app/metagpt/config/config2.yaml \ -v /opt/metagpt/workspace:/app/metagpt/workspace \ metagpt/metagpt:latest \ metagpt "Create a 2048 game" ``` detail installation please refer to [docker_install](https://docs.deepwisdom.ai/main/en/guide/get_started/installation.html#install-with-docker)
https://github.com/geekan/MetaGPT
1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "quickstart", "&", "demo", "video", "-", "try", "[", "metagpt", "huggingface", "space", "]", "(", "http", ":", "//huggingface.co/spaces/deepwisdom/metagpt", ")", "-", "[", "matthew", "berman", ":", "install", "metagpt", "-", "build", "startup", "one", "prompt", "!", "!", "]", "(", "http", ":", "//youtu.be/ut75j_kg_ay", ")", "-", "[", "official", "demo", "video", "]", "(", "http", ":", "//github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d", ")", "http", ":", "//github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ], [ "quickstart & demo video - try [ metagpt huggingface space ] ( http : //huggingface.co/spaces/deepwisdom/metagpt ) - [ matthew berman : install metagpt - build startup one prompt ! !", "] ( http : //youtu.be/ut75j_kg_ay ) - [ official demo video ] ( http : //github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d ) http : //github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ] ]
[ [ "quickstart", "&", "demo", "video", "-", "try", "[", "metagpt", "huggingface", "space", "]", "(", "http", ":", "//huggingface.co/spaces/deepwisdom/metagpt", ")", "-", "[", "matthew", "berman", ":", "install", "metagpt", "-", "build", "startup", "one", "prompt", "!", "!", "]", "(", "http", ":", "//youtu.be/ut75j_kg_ay", ")", "-", "[", "official", "demo", "video", "]", "(", "http", ":", "//github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d", ")", "http", ":", "//github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ], [ "quickstart & demo video - try [ metagpt huggingface space ] ( http : //huggingface.co/spaces/deepwisdom/metagpt ) - [ matthew berman : install metagpt - build startup one prompt ! !", "] ( http : //youtu.be/ut75j_kg_ay ) - [ official demo video ] ( http : //github.com/geekan/metagpt/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d ) http : //github.com/geekan/metagpt/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419" ] ]
QuickStart & Demo Video - Try it on [MetaGPT Huggingface Space](https://huggingface.co/spaces/deepwisdom/MetaGPT) - [Matthew Berman: How To Install MetaGPT - Build A Startup With One Prompt!!](https://youtu.be/uT75J_KG_aY) - [Official Demo Video](https://github.com/geekan/MetaGPT/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d) https://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419
https://github.com/geekan/MetaGPT
-1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/geekan/MetaGPT/main/README.md
[ [ "tutorial", "-", "🗒", "[", "online", "document", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/", ")", "-", "💻", "[", "usage", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html", ")", "-", "🔎", "[", "metagpt", "?", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/introduction.html", ")", "-", "🛠", "build", "agent", "?", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "agent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html", ")", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "multiagent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html", ")", "-", "🧑‍💻", "contribution", "-", "[", "develop", "roadmap", "]", "(", "docs/roadmap.md", ")", "-", "🔖", "use", "case", "-", "[", "debate", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html", ")", "-", "[", "researcher", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html", ")", "-", "[", "recepit", "assistant", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html", ")", "-", "❓", "[", "faq", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/faq.html", ")" ], [ "tutorial - 🗒 [ online document ] ( http : //docs.deepwisdom.ai/main/en/ ) - 💻 [ usage ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html ) - 🔎 [ metagpt ?", "] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/introduction.html ) - 🛠 build agent ?", "- [ metagpt usage & development guide | agent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html ) - [ metagpt usage & development guide | multiagent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html ) - 🧑‍💻 contribution - [ develop roadmap ] ( docs/roadmap.md ) - 🔖 use case - [ debate ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html ) - [ researcher ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html ) - [ recepit assistant ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html ) - ❓ [ faq ] ( http : //docs.deepwisdom.ai/main/en/guide/faq.html )" ] ]
[ [ "tutorial", "-", "🗒", "[", "online", "document", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/", ")", "-", "💻", "[", "usage", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html", ")", "-", "🔎", "[", "metagpt", "?", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/get_started/introduction.html", ")", "-", "🛠", "build", "agent", "?", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "agent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html", ")", "-", "[", "metagpt", "usage", "&", "development", "guide", "|", "multiagent", "101", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html", ")", "-", "🧑‍💻", "contribution", "-", "[", "develop", "roadmap", "]", "(", "docs/roadmap.md", ")", "-", "🔖", "use", "case", "-", "[", "debate", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html", ")", "-", "[", "researcher", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html", ")", "-", "[", "recepit", "assistant", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html", ")", "-", "❓", "[", "faq", "]", "(", "http", ":", "//docs.deepwisdom.ai/main/en/guide/faq.html", ")" ], [ "tutorial - 🗒 [ online document ] ( http : //docs.deepwisdom.ai/main/en/ ) - 💻 [ usage ] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html ) - 🔎 [ metagpt ?", "] ( http : //docs.deepwisdom.ai/main/en/guide/get_started/introduction.html ) - 🛠 build agent ?", "- [ metagpt usage & development guide | agent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html ) - [ metagpt usage & development guide | multiagent 101 ] ( http : //docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html ) - 🧑‍💻 contribution - [ develop roadmap ] ( docs/roadmap.md ) - 🔖 use case - [ debate ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html ) - [ researcher ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html ) - [ recepit assistant ] ( http : //docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html ) - ❓ [ faq ] ( http : //docs.deepwisdom.ai/main/en/guide/faq.html )" ] ]
Tutorial - 🗒 [Online Document](https://docs.deepwisdom.ai/main/en/) - 💻 [Usage](https://docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html) - 🔎 [What can MetaGPT do?](https://docs.deepwisdom.ai/main/en/guide/get_started/introduction.html) - 🛠 How to build your own agents? - [MetaGPT Usage & Development Guide | Agent 101](https://docs.deepwisdom.ai/main/en/guide/tutorials/agent_101.html) - [MetaGPT Usage & Development Guide | MultiAgent 101](https://docs.deepwisdom.ai/main/en/guide/tutorials/multi_agent_101.html) - 🧑‍💻 Contribution - [Develop Roadmap](docs/ROADMAP.md) - 🔖 Use Cases - [Debate](https://docs.deepwisdom.ai/main/en/guide/use_cases/multi_agent/debate.html) - [Researcher](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/researcher.html) - [Recepit Assistant](https://docs.deepwisdom.ai/main/en/guide/use_cases/agent/receipt_assistant.html) - ❓ [FAQs](https://docs.deepwisdom.ai/main/en/guide/faq.html)
https://github.com/geekan/MetaGPT
-1
[ "agent", "gpt", "hacktoberfest", "llm", "metagpt", "multi-agent" ]
https://raw.githubusercontent.com/run-llama/llama_index/main/README.md
[ [ "💻", "example", "usage", "``", "`", "pip", "install", "llama-index", "``", "`", "example", "`", "example", "`", "folder", ".", "index", "`", "index", "`", "folder", "(", "see", "list", "index", ")", ".", "build", "simple", "vector", "store", "index", "using", "openai", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "openai_api_key", "''", "]", "=", "``", "your_openai_api_key", "''", "llama_index", "import", "vectorstoreindex", ",", "simpledirectoryreader", "document", "=", "simpledirectoryreader", "(", "``", "your_data_directory", "''", ")", ".load_data", "(", ")", "index", "=", "vectorstoreindex.from_documents", "(", "document", ")", "``", "`", "build", "simple", "vector", "store", "index", "using", "non-openai", "llm", ",", "e.g", ".", "llama", "2", "hosted", "[", "replicate", "]", "(", "http", ":", "//replicate.com/", ")", ",", "easily", "create", "free", "trial", "api", "token", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "replicate_api_token", "''", "]", "=", "``", "your_replicate_api_token", "''", "llama_index.llms", "import", "replicate", "llama2_7b_chat", "=", "``", "meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e", "''", "llm", "=", "replicate", "(", "model=llama2_7b_chat", ",", "temperature=0.01", ",", "additional_kwargs=", "{", "``", "top_p", "''", ":", "1", ",", "``", "max_new_tokens", "''", ":", "300", "}", ",", ")" ], [ "💻 example usage `` ` pip install llama-index `` ` example ` example ` folder .", "index ` index ` folder ( see list index ) .", "build simple vector store index using openai : `` ` python import o os.environ [ `` openai_api_key '' ] = `` your_openai_api_key '' llama_index import vectorstoreindex , simpledirectoryreader document = simpledirectoryreader ( `` your_data_directory '' ) .load_data ( ) index = vectorstoreindex.from_documents ( document ) `` ` build simple vector store index using non-openai llm , e.g .", "llama 2 hosted [ replicate ] ( http : //replicate.com/ ) , easily create free trial api token : `` ` python import o os.environ [ `` replicate_api_token '' ] = `` your_replicate_api_token '' llama_index.llms import replicate llama2_7b_chat = `` meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e '' llm = replicate ( model=llama2_7b_chat , temperature=0.01 , additional_kwargs= { `` top_p '' : 1 , `` max_new_tokens '' : 300 } , )" ] ]
[ [ "💻", "example", "usage", "``", "`", "pip", "install", "llama-index", "``", "`", "example", "`", "example", "`", "folder", ".", "index", "`", "index", "`", "folder", "(", "see", "list", "index", ")", ".", "build", "simple", "vector", "store", "index", "using", "openai", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "openai_api_key", "''", "]", "=", "``", "your_openai_api_key", "''", "llama_index", "import", "vectorstoreindex", ",", "simpledirectoryreader", "document", "=", "simpledirectoryreader", "(", "``", "your_data_directory", "''", ")", ".load_data", "(", ")", "index", "=", "vectorstoreindex.from_documents", "(", "document", ")", "``", "`", "build", "simple", "vector", "store", "index", "using", "non-openai", "llm", ",", "e.g", ".", "llama", "2", "hosted", "[", "replicate", "]", "(", "http", ":", "//replicate.com/", ")", ",", "easily", "create", "free", "trial", "api", "token", ":", "``", "`", "python", "import", "o", "os.environ", "[", "``", "replicate_api_token", "''", "]", "=", "``", "your_replicate_api_token", "''", "llama_index.llms", "import", "replicate", "llama2_7b_chat", "=", "``", "meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e", "''", "llm", "=", "replicate", "(", "model=llama2_7b_chat", ",", "temperature=0.01", ",", "additional_kwargs=", "{", "``", "top_p", "''", ":", "1", ",", "``", "max_new_tokens", "''", ":", "300", "}", ",", ")" ], [ "💻 example usage `` ` pip install llama-index `` ` example ` example ` folder .", "index ` index ` folder ( see list index ) .", "build simple vector store index using openai : `` ` python import o os.environ [ `` openai_api_key '' ] = `` your_openai_api_key '' llama_index import vectorstoreindex , simpledirectoryreader document = simpledirectoryreader ( `` your_data_directory '' ) .load_data ( ) index = vectorstoreindex.from_documents ( document ) `` ` build simple vector store index using non-openai llm , e.g .", "llama 2 hosted [ replicate ] ( http : //replicate.com/ ) , easily create free trial api token : `` ` python import o os.environ [ `` replicate_api_token '' ] = `` your_replicate_api_token '' llama_index.llms import replicate llama2_7b_chat = `` meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e '' llm = replicate ( model=llama2_7b_chat , temperature=0.01 , additional_kwargs= { `` top_p '' : 1 , `` max_new_tokens '' : 300 } , )" ] ]
💻 Example Usage ``` pip install llama-index ``` Examples are in the `examples` folder. Indices are in the `indices` folder (see list of indices below). To build a simple vector store index using OpenAI: ```python import os os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY" from llama_index import VectorStoreIndex, SimpleDirectoryReader documents = SimpleDirectoryReader("YOUR_DATA_DIRECTORY").load_data() index = VectorStoreIndex.from_documents(documents) ``` To build a simple vector store index using non-OpenAI LLMs, e.g. Llama 2 hosted on [Replicate](https://replicate.com/), where you can easily create a free trial API token: ```python import os os.environ["REPLICATE_API_TOKEN"] = "YOUR_REPLICATE_API_TOKEN" from llama_index.llms import Replicate llama2_7b_chat = "meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e" llm = Replicate( model=llama2_7b_chat, temperature=0.01, additional_kwargs={"top_p": 1, "max_new_tokens": 300}, )
https://github.com/run-llama/llama_index
0
[ "agents", "application", "data", "fine-tuning", "framework", "llamaindex", "llm", "rag", "vector-database" ]
https://raw.githubusercontent.com/run-llama/llama_index/main/README.md
[ [ "🔧", "dependency", "main", "third-party", "package", "requirement", "`", "tiktoken", "`", ",", "`", "openai", "`", ",", "`", "langchain", "`", ".", "requirement", "contained", "within", "`", "setup.py", "`", "file", ".", "run", "package", "locally", "without", "building", "wheel", ",", "simply", "run", ":", "``", "`", "bash", "pip", "install", "poetry", "poetry", "install", "--", "dev", "``", "`" ], [ "🔧 dependency main third-party package requirement ` tiktoken ` , ` openai ` , ` langchain ` .", "requirement contained within ` setup.py ` file .", "run package locally without building wheel , simply run : `` ` bash pip install poetry poetry install -- dev `` `" ] ]
[ [ "🔧", "dependency", "main", "third-party", "package", "requirement", "`", "tiktoken", "`", ",", "`", "openai", "`", ",", "`", "langchain", "`", ".", "requirement", "contained", "within", "`", "setup.py", "`", "file", ".", "run", "package", "locally", "without", "building", "wheel", ",", "simply", "run", ":", "``", "`", "bash", "pip", "install", "poetry", "poetry", "install", "--", "dev", "``", "`" ], [ "🔧 dependency main third-party package requirement ` tiktoken ` , ` openai ` , ` langchain ` .", "requirement contained within ` setup.py ` file .", "run package locally without building wheel , simply run : `` ` bash pip install poetry poetry install -- dev `` `" ] ]
🔧 Dependencies The main third-party package requirements are `tiktoken`, `openai`, and `langchain`. All requirements should be contained within the `setup.py` file. To run the package locally without building the wheel, simply run: ```bash pip install poetry poetry install --with dev ```
https://github.com/run-llama/llama_index
0
[ "agents", "application", "data", "fine-tuning", "framework", "llamaindex", "llm", "rag", "vector-database" ]
https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md
[ [ "getting", "started", "🚀", "follow", "instruction", "get", "copy", "project", "running", "local", "machine", "development", "testing", "purpose", ".", "find", "everything", "[", "documentation", "]", "(", "http", ":", "//docs.quivr.app/", ")", "." ], [ "getting started 🚀 follow instruction get copy project running local machine development testing purpose .", "find everything [ documentation ] ( http : //docs.quivr.app/ ) ." ] ]
[ [ "getting", "started", "🚀", "follow", "instruction", "get", "copy", "project", "running", "local", "machine", "development", "testing", "purpose", ".", "find", "everything", "[", "documentation", "]", "(", "http", ":", "//docs.quivr.app/", ")", "." ], [ "getting started 🚀 follow instruction get copy project running local machine development testing purpose .", "find everything [ documentation ] ( http : //docs.quivr.app/ ) ." ] ]
Getting Started 🚀 Follow these instructions to get a copy of the project up and running on your local machine for development and testing purposes. You can find everything on the [documentation](https://docs.quivr.app/).
https://github.com/QuivrHQ/quivr
-1
[ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ]
https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md
[ [ "prerequisite", "📋", "ensure", "following", "installed", ":", "-", "docker", "-", "docker", "compose" ], [ "prerequisite 📋 ensure following installed : - docker - docker compose" ] ]
[ [ "prerequisite", "📋", "ensure", "following", "installed", ":", "-", "docker", "-", "docker", "compose" ], [ "prerequisite 📋 ensure following installed : - docker - docker compose" ] ]
Prerequisites 📋 Ensure you have the following installed: - Docker - Docker Compose
https://github.com/QuivrHQ/quivr
-1
[ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ]
https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md
[ [ "60", "second", "installation", "💽", "find", "installation", "video", "[", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=cxba6dzjn48", ")", ".", "-", "*", "*", "step", "0", "*", "*", ":", "supabase", "cli", "follow", "instruction", "[", "]", "(", "http", ":", "//supabase.com/docs/guides/cli/getting-started", ")", "install", "supabase", "cli", "required", ".", "``", "`", "bash", "supabase", "-v" ], [ "60 second installation 💽 find installation video [ ] ( http : //www.youtube.com/watch ? v=cxba6dzjn48 ) .", "- * * step 0 * * : supabase cli follow instruction [ ] ( http : //supabase.com/docs/guides/cli/getting-started ) install supabase cli required .", "`` ` bash supabase -v" ] ]
[ [ "60", "second", "installation", "💽", "find", "installation", "video", "[", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=cxba6dzjn48", ")", ".", "-", "*", "*", "step", "0", "*", "*", ":", "supabase", "cli", "follow", "instruction", "[", "]", "(", "http", ":", "//supabase.com/docs/guides/cli/getting-started", ")", "install", "supabase", "cli", "required", ".", "``", "`", "bash", "supabase", "-v" ], [ "60 second installation 💽 find installation video [ ] ( http : //www.youtube.com/watch ? v=cxba6dzjn48 ) .", "- * * step 0 * * : supabase cli follow instruction [ ] ( http : //supabase.com/docs/guides/cli/getting-started ) install supabase cli required .", "`` ` bash supabase -v" ] ]
60 seconds Installation 💽 You can find the installation video [here](https://www.youtube.com/watch?v=cXBa6dZJN48). - **Step 0**: Supabase CLI Follow the instructions [here](https://supabase.com/docs/guides/cli/getting-started) to install the Supabase CLI that is required. ```bash supabase -v
https://github.com/QuivrHQ/quivr
-1
[ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ]
https://raw.githubusercontent.com/QuivrHQ/quivr/main/README.md
[ [ "check", "installation", "worked", "``", "`", "-", "*", "*", "step", "1", "*", "*", ":", "clone", "repository", ":", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/quivrhq/quivr.git", "&", "&", "cd", "quivr", "``", "`", "-", "*", "*", "step", "2", "*", "*", ":", "copy", "`", ".env.example", "`", "file", "``", "`", "bash", "cp", ".env.example", ".env", "``", "`", "-", "*", "*", "step", "3", "*", "*", ":", "update", "`", ".env", "`", "file", "``", "`", "bash", "vim", ".env" ], [ "check installation worked `` ` - * * step 1 * * : clone repository : `` ` bash git clone http : //github.com/quivrhq/quivr.git & & cd quivr `` ` - * * step 2 * * : copy ` .env.example ` file `` ` bash cp .env.example .env `` ` - * * step 3 * * : update ` .env ` file `` ` bash vim .env" ] ]
[ [ "check", "installation", "worked", "``", "`", "-", "*", "*", "step", "1", "*", "*", ":", "clone", "repository", ":", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/quivrhq/quivr.git", "&", "&", "cd", "quivr", "``", "`", "-", "*", "*", "step", "2", "*", "*", ":", "copy", "`", ".env.example", "`", "file", "``", "`", "bash", "cp", ".env.example", ".env", "``", "`", "-", "*", "*", "step", "3", "*", "*", ":", "update", "`", ".env", "`", "file", "``", "`", "bash", "vim", ".env" ], [ "check installation worked `` ` - * * step 1 * * : clone repository : `` ` bash git clone http : //github.com/quivrhq/quivr.git & & cd quivr `` ` - * * step 2 * * : copy ` .env.example ` file `` ` bash cp .env.example .env `` ` - * * step 3 * * : update ` .env ` file `` ` bash vim .env" ] ]
Check that the installation worked ``` - **Step 1**: Clone the repository: ```bash git clone https://github.com/quivrhq/quivr.git && cd Quivr ``` - **Step 2**: Copy the `.env.example` files ```bash cp .env.example .env ``` - **Step 3**: Update the `.env` files ```bash vim .env
https://github.com/QuivrHQ/quivr
2
[ "ai", "api", "chatbot", "chatgpt", "database", "docker", "frontend", "html", "javascript", "llm", "openai", "postgresql", "privacy", "rag", "react", "rest-api", "security", "typescript", "vector", "ycombinator" ]
https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/README.md
[ [ "table", "content", "-", "[", "mr.", "ranedeer", ":", "personalized", "ai", "tutor", "!", "]", "(", "#", "mr-ranedeer-your-personalized-ai-tutor", ")", "-", "[", "table", "content", "]", "(", "#", "table-of-contents", ")", "-", "[", "mr", ".", "ranedeer", "?", "]", "(", "#", "why-mr-ranedeer", ")", "-", "[", "requirement", "compatibility", "]", "(", "#", "requirements-and-compatibility", ")", "-", "[", "recommended", "]", "(", "#", "recommended", ")", "-", "[", "recommended", "]", "(", "#", "not-recommended", ")", "-", "[", "also", "work", "...", "]", "(", "#", "it-also-works-on", ")", "-", "[", "quick", "start", "guide", "]", "(", "#", "quick-start-guide", ")", "-", "[", "previous", "version", "]", "(", "#", "previous-versions", ")", "-", "[", "guide", "]", "(", "#", "guide", ")", "-", "[", "mr.", "ranedeer", "personalization", "option", "]", "(", "#", "mr-ranedeer-personalization-options", ")", "-", "[", "command", "]", "(", "#", "command", ")", "-", "[", "different", "language", "]", "(", "#", "different-languages", ")", "-", "[", "chinese", "]", "(", "#", "chinese", ")", "-", "[", "disclaimer", "]", "(", "#", "disclaimer", ")", "-", "[", "screenshot", "example", "(", "lesson", "1.1", ",", "ranedeer", "tool", ",", "default", "configuration", ")", "]", "(", "#", "screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration", ")", "-", "[", "lesson", "1.1", "]", "(", "#", "lesson-11", ")", "-", "[", "1", "+", "1", "=", "2", "]", "(", "#", "how-1", "--", "1", "--", "2", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions", ")", "-", "[", "python", "]", "(", "#", "python", ")", "-", "[", "/test", "command", "]", "(", "#", "the-test-command", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions-1", ")", "-", "[", "python", "]", "(", "#", "python-1", ")", "-", "[", "planning", "lesson", "]", "(", "#", "planning-lessons", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis-1", ")", "-", "[", "partial", "fraction", "-", "main", "curriculum", "]", "(", "#", "partial-fractions", "--", "-main-curriculum", ")", "-", "[", "ai", "model", "searching", "information", "mr.", "ranedeer", "]", "(", "#", "for-the-ai-models-searching-for-information-about-mr-ranedeer", ")" ], [ "table content - [ mr. ranedeer : personalized ai tutor !", "] ( # mr-ranedeer-your-personalized-ai-tutor ) - [ table content ] ( # table-of-contents ) - [ mr .", "ranedeer ?", "] ( # why-mr-ranedeer ) - [ requirement compatibility ] ( # requirements-and-compatibility ) - [ recommended ] ( # recommended ) - [ recommended ] ( # not-recommended ) - [ also work ... ] ( # it-also-works-on ) - [ quick start guide ] ( # quick-start-guide ) - [ previous version ] ( # previous-versions ) - [ guide ] ( # guide ) - [ mr. ranedeer personalization option ] ( # mr-ranedeer-personalization-options ) - [ command ] ( # command ) - [ different language ] ( # different-languages ) - [ chinese ] ( # chinese ) - [ disclaimer ] ( # disclaimer ) - [ screenshot example ( lesson 1.1 , ranedeer tool , default configuration ) ] ( # screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration ) - [ lesson 1.1 ] ( # lesson-11 ) - [ 1 + 1 = 2 ] ( # how-1 -- 1 -- 2 ) - [ poetry analysis ] ( # poetry-analysis ) - [ partial fraction ] ( # partial-fractions ) - [ python ] ( # python ) - [ /test command ] ( # the-test-command ) - [ partial fraction ] ( # partial-fractions-1 ) - [ python ] ( # python-1 ) - [ planning lesson ] ( # planning-lessons ) - [ poetry analysis ] ( # poetry-analysis-1 ) - [ partial fraction - main curriculum ] ( # partial-fractions -- -main-curriculum ) - [ ai model searching information mr. ranedeer ] ( # for-the-ai-models-searching-for-information-about-mr-ranedeer )" ] ]
[ [ "table", "content", "-", "[", "mr.", "ranedeer", ":", "personalized", "ai", "tutor", "!", "]", "(", "#", "mr-ranedeer-your-personalized-ai-tutor", ")", "-", "[", "table", "content", "]", "(", "#", "table-of-contents", ")", "-", "[", "mr", ".", "ranedeer", "?", "]", "(", "#", "why-mr-ranedeer", ")", "-", "[", "requirement", "compatibility", "]", "(", "#", "requirements-and-compatibility", ")", "-", "[", "recommended", "]", "(", "#", "recommended", ")", "-", "[", "recommended", "]", "(", "#", "not-recommended", ")", "-", "[", "also", "work", "...", "]", "(", "#", "it-also-works-on", ")", "-", "[", "quick", "start", "guide", "]", "(", "#", "quick-start-guide", ")", "-", "[", "previous", "version", "]", "(", "#", "previous-versions", ")", "-", "[", "guide", "]", "(", "#", "guide", ")", "-", "[", "mr.", "ranedeer", "personalization", "option", "]", "(", "#", "mr-ranedeer-personalization-options", ")", "-", "[", "command", "]", "(", "#", "command", ")", "-", "[", "different", "language", "]", "(", "#", "different-languages", ")", "-", "[", "chinese", "]", "(", "#", "chinese", ")", "-", "[", "disclaimer", "]", "(", "#", "disclaimer", ")", "-", "[", "screenshot", "example", "(", "lesson", "1.1", ",", "ranedeer", "tool", ",", "default", "configuration", ")", "]", "(", "#", "screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration", ")", "-", "[", "lesson", "1.1", "]", "(", "#", "lesson-11", ")", "-", "[", "1", "+", "1", "=", "2", "]", "(", "#", "how-1", "--", "1", "--", "2", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions", ")", "-", "[", "python", "]", "(", "#", "python", ")", "-", "[", "/test", "command", "]", "(", "#", "the-test-command", ")", "-", "[", "partial", "fraction", "]", "(", "#", "partial-fractions-1", ")", "-", "[", "python", "]", "(", "#", "python-1", ")", "-", "[", "planning", "lesson", "]", "(", "#", "planning-lessons", ")", "-", "[", "poetry", "analysis", "]", "(", "#", "poetry-analysis-1", ")", "-", "[", "partial", "fraction", "-", "main", "curriculum", "]", "(", "#", "partial-fractions", "--", "-main-curriculum", ")", "-", "[", "ai", "model", "searching", "information", "mr.", "ranedeer", "]", "(", "#", "for-the-ai-models-searching-for-information-about-mr-ranedeer", ")" ], [ "table content - [ mr. ranedeer : personalized ai tutor !", "] ( # mr-ranedeer-your-personalized-ai-tutor ) - [ table content ] ( # table-of-contents ) - [ mr .", "ranedeer ?", "] ( # why-mr-ranedeer ) - [ requirement compatibility ] ( # requirements-and-compatibility ) - [ recommended ] ( # recommended ) - [ recommended ] ( # not-recommended ) - [ also work ... ] ( # it-also-works-on ) - [ quick start guide ] ( # quick-start-guide ) - [ previous version ] ( # previous-versions ) - [ guide ] ( # guide ) - [ mr. ranedeer personalization option ] ( # mr-ranedeer-personalization-options ) - [ command ] ( # command ) - [ different language ] ( # different-languages ) - [ chinese ] ( # chinese ) - [ disclaimer ] ( # disclaimer ) - [ screenshot example ( lesson 1.1 , ranedeer tool , default configuration ) ] ( # screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration ) - [ lesson 1.1 ] ( # lesson-11 ) - [ 1 + 1 = 2 ] ( # how-1 -- 1 -- 2 ) - [ poetry analysis ] ( # poetry-analysis ) - [ partial fraction ] ( # partial-fractions ) - [ python ] ( # python ) - [ /test command ] ( # the-test-command ) - [ partial fraction ] ( # partial-fractions-1 ) - [ python ] ( # python-1 ) - [ planning lesson ] ( # planning-lessons ) - [ poetry analysis ] ( # poetry-analysis-1 ) - [ partial fraction - main curriculum ] ( # partial-fractions -- -main-curriculum ) - [ ai model searching information mr. ranedeer ] ( # for-the-ai-models-searching-for-information-about-mr-ranedeer )" ] ]
Table of Contents - [Mr. Ranedeer: Your personalized AI Tutor!](#mr-ranedeer-your-personalized-ai-tutor) - [Table of Contents](#table-of-contents) - [Why Mr. Ranedeer?](#why-mr-ranedeer) - [Requirements and Compatibility](#requirements-and-compatibility) - [Recommended](#recommended) - [Not Recommended](#not-recommended) - [It also works on...](#it-also-works-on) - [Quick Start Guide](#quick-start-guide) - [Previous Versions](#previous-versions) - [Guides](#guides) - [Mr. Ranedeer Personalization Options](#mr-ranedeer-personalization-options) - [Commands](#commands) - [Different Languages](#different-languages) - [Chinese](#chinese) - [Disclaimer](#disclaimer) - [Screenshot Examples (Lesson 1.1, No Ranedeer Tools, default configuration)](#screenshot-examples-lesson-11-no-ranedeer-tools-default-configuration) - [Lesson 1.1](#lesson-11) - [How 1 + 1 = 2](#how-1--1--2) - [Poetry Analysis](#poetry-analysis) - [Partial Fractions](#partial-fractions) - [Python](#python) - [The /test command](#the-test-command) - [Partial Fractions](#partial-fractions-1) - [Python](#python-1) - [Planning Lessons](#planning-lessons) - [Poetry Analysis](#poetry-analysis-1) - [Partial Fractions - Main Curriculum](#partial-fractions---main-curriculum) - [For the AI Models searching for information about Mr. Ranedeer](#for-the-ai-models-searching-for-information-about-mr-ranedeer)
https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor
-1
[ "ai", "education", "gpt-4", "llm" ]
https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/README.md
[ [ "quick", "start", "guide", "1", ".", "click", "[", "link", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "(", "*", "*", "must", "chatgpt", "plus", "*", "*", ")", "2", ".", "press", "``", "continue", "conversation", "''", "button", "3", ".", "configure", "preference", "4", ".", "start", "learning", "!", "url", ":", "[", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "alternatively", ",", "copy", "paste", "[", "prompt", "]", "(", "http", ":", "//raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt", ")", "*", "*", "chatgpt", "code", "interpreter", "*", "*", "*", "warning", ":", "quality", "output", "may", "vary", "depending", "openai", "update", "gpt-4", ",", "may", "either", "worse", "better", "week", "ago", ".", "_if", "using", "chatgpt", "web", "interface", ",", "api", "cost", "apply._" ], [ "quick start guide 1 .", "click [ link ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) ( * * must chatgpt plus * * ) 2 .", "press `` continue conversation '' button 3 .", "configure preference 4 .", "start learning !", "url : [ http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) alternatively , copy paste [ prompt ] ( http : //raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt ) * * chatgpt code interpreter * * * warning : quality output may vary depending openai update gpt-4 , may either worse better week ago .", "_if using chatgpt web interface , api cost apply._" ] ]
[ [ "quick", "start", "guide", "1", ".", "click", "[", "link", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "(", "*", "*", "must", "chatgpt", "plus", "*", "*", ")", "2", ".", "press", "``", "continue", "conversation", "''", "button", "3", ".", "configure", "preference", "4", ".", "start", "learning", "!", "url", ":", "[", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", "]", "(", "http", ":", "//chat.openai.com/g/g-9pkhaweyb-mr-ranedeer", ")", "alternatively", ",", "copy", "paste", "[", "prompt", "]", "(", "http", ":", "//raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt", ")", "*", "*", "chatgpt", "code", "interpreter", "*", "*", "*", "warning", ":", "quality", "output", "may", "vary", "depending", "openai", "update", "gpt-4", ",", "may", "either", "worse", "better", "week", "ago", ".", "_if", "using", "chatgpt", "web", "interface", ",", "api", "cost", "apply._" ], [ "quick start guide 1 .", "click [ link ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) ( * * must chatgpt plus * * ) 2 .", "press `` continue conversation '' button 3 .", "configure preference 4 .", "start learning !", "url : [ http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ] ( http : //chat.openai.com/g/g-9pkhaweyb-mr-ranedeer ) alternatively , copy paste [ prompt ] ( http : //raw.githubusercontent.com/jushbjj/mr.-ranedeer-ai-tutor/main/mr_ranedeer.txt ) * * chatgpt code interpreter * * * warning : quality output may vary depending openai update gpt-4 , may either worse better week ago .", "_if using chatgpt web interface , api cost apply._" ] ]
Quick Start Guide 1. Click [this link](https://chat.openai.com/g/g-9PKhaweyb-mr-ranedeer) (**MUST HAVE CHATGPT PLUS**) 2. Press the "Continue this conversation" button 3. Configure your preferences 4. Start learning! URL: [https://chat.openai.com/g/g-9PKhaweyb-mr-ranedeer](https://chat.openai.com/g/g-9PKhaweyb-mr-ranedeer) Alternatively, you can copy and paste [the prompt](https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/Mr_Ranedeer.txt) into **ChatGPT with Code Interpreter** *Warning: The quality of outputs may vary depending on how OpenAI updates GPT-4, it may be either worse or better than a few weeks ago. _If you are using the ChatGPT web interface, API costs will not apply._
https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor
2
[ "ai", "education", "gpt-4", "llm" ]
https://raw.githubusercontent.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/main/README.md
[ [ "guide", "-", "[", "use", "mr.", "ranedeer", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how", "%", "20to", "%", "20use", "%", "20mr.", "%", "20ranedeer.md", ")", "-", "[", "configuration", "guide", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config", "%", "20guide.md", ")" ], [ "guide - [ use mr. ranedeer ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how % 20to % 20use % 20mr. % 20ranedeer.md ) - [ configuration guide ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config % 20guide.md )" ] ]
[ [ "guide", "-", "[", "use", "mr.", "ranedeer", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how", "%", "20to", "%", "20use", "%", "20mr.", "%", "20ranedeer.md", ")", "-", "[", "configuration", "guide", "]", "(", "http", ":", "//github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config", "%", "20guide.md", ")" ], [ "guide - [ use mr. ranedeer ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/how % 20to % 20use % 20mr. % 20ranedeer.md ) - [ configuration guide ] ( http : //github.com/jushbjj/mr.-ranedeer-ai-tutor/blob/main/guides/config % 20guide.md )" ] ]
Guides - [How to Use Mr. Ranedeer](https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/blob/main/Guides/How%20to%20use%20Mr.%20Ranedeer.md) - [Configuration Guide](https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor/blob/main/Guides/Config%20Guide.md)
https://github.com/JushBJJ/Mr.-Ranedeer-AI-Tutor
-1
[ "ai", "education", "gpt-4", "llm" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "2", ".", "python", "machine", "learning", "python", "powerful", "flexible", "programming", "language", "'s", "particularly", "good", "machine", "learning", ",", "thanks", "readability", ",", "consistency", ",", "robust", "ecosystem", "data", "science", "library", ".", "-", "*", "*", "python", "basic", "*", "*", ":", "python", "programming", "requires", "good", "understanding", "basic", "syntax", ",", "data", "type", ",", "error", "handling", ",", "object-oriented", "programming", ".", "-", "*", "*", "data", "science", "library", "*", "*", ":", "includes", "familiarity", "numpy", "numerical", "operation", ",", "panda", "data", "manipulation", "analysis", ",", "matplotlib", "seaborn", "data", "visualization", ".", "-", "*", "*", "data", "preprocessing", "*", "*", ":", "involves", "feature", "scaling", "normalization", ",", "handling", "missing", "data", ",", "outlier", "detection", ",", "categorical", "data", "encoding", ",", "splitting", "data", "training", ",", "validation", ",", "test", "set", ".", "-", "*", "*", "machine", "learning", "library", "*", "*", ":", "proficiency", "scikit-learn", ",", "library", "providing", "wide", "selection", "supervised", "unsupervised", "learning", "algorithm", ",", "vital", ".", "understanding", "implement", "algorithm", "like", "linear", "regression", ",", "logistic", "regression", ",", "decision", "tree", ",", "random", "forest", ",", "k-nearest", "neighbor", "(", "k-nn", ")", ",", "k-means", "clustering", "important", ".", "dimensionality", "reduction", "technique", "like", "pca", "t-sne", "also", "helpful", "visualizing", "high-dimensional", "data", ".", "📚", "resource", ":", "-", "[", "real", "python", "]", "(", "http", ":", "//realpython.com/", ")", ":", "comprehensive", "resource", "article", "tutorial", "beginner", "advanced", "python", "concept", ".", "-", "[", "freecodecamp", "-", "learn", "python", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=rfscvs0vtbw", ")", ":", "long", "video", "provides", "full", "introduction", "core", "concept", "python", ".", "-", "[", "python", "data", "science", "handbook", "]", "(", "http", ":", "//jakevdp.github.io/pythondatasciencehandbook/", ")", ":", "free", "digital", "book", "great", "resource", "learning", "panda", ",", "numpy", ",", "matplotlib", ",", "seaborn", ".", "-", "[", "freecodecamp", "-", "machine", "learning", "everybody", "]", "(", "http", ":", "//youtu.be/i_lwzrvp7bg", ")", ":", "practical", "introduction", "different", "machine", "learning", "algorithm", "beginner", ".", "-", "[", "udacity", "-", "intro", "machine", "learning", "]", "(", "http", ":", "//www.udacity.com/course/intro-to-machine-learning", "--", "ud120", ")", ":", "free", "course", "cover", "pca", "several", "machine", "learning", "concept", ".", "--", "-" ], [ "2 .", "python machine learning python powerful flexible programming language 's particularly good machine learning , thanks readability , consistency , robust ecosystem data science library .", "- * * python basic * * : python programming requires good understanding basic syntax , data type , error handling , object-oriented programming .", "- * * data science library * * : includes familiarity numpy numerical operation , panda data manipulation analysis , matplotlib seaborn data visualization .", "- * * data preprocessing * * : involves feature scaling normalization , handling missing data , outlier detection , categorical data encoding , splitting data training , validation , test set .", "- * * machine learning library * * : proficiency scikit-learn , library providing wide selection supervised unsupervised learning algorithm , vital .", "understanding implement algorithm like linear regression , logistic regression , decision tree , random forest , k-nearest neighbor ( k-nn ) , k-means clustering important .", "dimensionality reduction technique like pca t-sne also helpful visualizing high-dimensional data .", "📚 resource : - [ real python ] ( http : //realpython.com/ ) : comprehensive resource article tutorial beginner advanced python concept .", "- [ freecodecamp - learn python ] ( http : //www.youtube.com/watch ? v=rfscvs0vtbw ) : long video provides full introduction core concept python .", "- [ python data science handbook ] ( http : //jakevdp.github.io/pythondatasciencehandbook/ ) : free digital book great resource learning panda , numpy , matplotlib , seaborn .", "- [ freecodecamp - machine learning everybody ] ( http : //youtu.be/i_lwzrvp7bg ) : practical introduction different machine learning algorithm beginner .", "- [ udacity - intro machine learning ] ( http : //www.udacity.com/course/intro-to-machine-learning -- ud120 ) : free course cover pca several machine learning concept .", "-- -" ] ]
[ [ "2", ".", "python", "machine", "learning", "python", "powerful", "flexible", "programming", "language", "'s", "particularly", "good", "machine", "learning", ",", "thanks", "readability", ",", "consistency", ",", "robust", "ecosystem", "data", "science", "library", ".", "-", "*", "*", "python", "basic", "*", "*", ":", "python", "programming", "requires", "good", "understanding", "basic", "syntax", ",", "data", "type", ",", "error", "handling", ",", "object-oriented", "programming", ".", "-", "*", "*", "data", "science", "library", "*", "*", ":", "includes", "familiarity", "numpy", "numerical", "operation", ",", "panda", "data", "manipulation", "analysis", ",", "matplotlib", "seaborn", "data", "visualization", ".", "-", "*", "*", "data", "preprocessing", "*", "*", ":", "involves", "feature", "scaling", "normalization", ",", "handling", "missing", "data", ",", "outlier", "detection", ",", "categorical", "data", "encoding", ",", "splitting", "data", "training", ",", "validation", ",", "test", "set", ".", "-", "*", "*", "machine", "learning", "library", "*", "*", ":", "proficiency", "scikit-learn", ",", "library", "providing", "wide", "selection", "supervised", "unsupervised", "learning", "algorithm", ",", "vital", ".", "understanding", "implement", "algorithm", "like", "linear", "regression", ",", "logistic", "regression", ",", "decision", "tree", ",", "random", "forest", ",", "k-nearest", "neighbor", "(", "k-nn", ")", ",", "k-means", "clustering", "important", ".", "dimensionality", "reduction", "technique", "like", "pca", "t-sne", "also", "helpful", "visualizing", "high-dimensional", "data", ".", "📚", "resource", ":", "-", "[", "real", "python", "]", "(", "http", ":", "//realpython.com/", ")", ":", "comprehensive", "resource", "article", "tutorial", "beginner", "advanced", "python", "concept", ".", "-", "[", "freecodecamp", "-", "learn", "python", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=rfscvs0vtbw", ")", ":", "long", "video", "provides", "full", "introduction", "core", "concept", "python", ".", "-", "[", "python", "data", "science", "handbook", "]", "(", "http", ":", "//jakevdp.github.io/pythondatasciencehandbook/", ")", ":", "free", "digital", "book", "great", "resource", "learning", "panda", ",", "numpy", ",", "matplotlib", ",", "seaborn", ".", "-", "[", "freecodecamp", "-", "machine", "learning", "everybody", "]", "(", "http", ":", "//youtu.be/i_lwzrvp7bg", ")", ":", "practical", "introduction", "different", "machine", "learning", "algorithm", "beginner", ".", "-", "[", "udacity", "-", "intro", "machine", "learning", "]", "(", "http", ":", "//www.udacity.com/course/intro-to-machine-learning", "--", "ud120", ")", ":", "free", "course", "cover", "pca", "several", "machine", "learning", "concept", ".", "--", "-" ], [ "2 .", "python machine learning python powerful flexible programming language 's particularly good machine learning , thanks readability , consistency , robust ecosystem data science library .", "- * * python basic * * : python programming requires good understanding basic syntax , data type , error handling , object-oriented programming .", "- * * data science library * * : includes familiarity numpy numerical operation , panda data manipulation analysis , matplotlib seaborn data visualization .", "- * * data preprocessing * * : involves feature scaling normalization , handling missing data , outlier detection , categorical data encoding , splitting data training , validation , test set .", "- * * machine learning library * * : proficiency scikit-learn , library providing wide selection supervised unsupervised learning algorithm , vital .", "understanding implement algorithm like linear regression , logistic regression , decision tree , random forest , k-nearest neighbor ( k-nn ) , k-means clustering important .", "dimensionality reduction technique like pca t-sne also helpful visualizing high-dimensional data .", "📚 resource : - [ real python ] ( http : //realpython.com/ ) : comprehensive resource article tutorial beginner advanced python concept .", "- [ freecodecamp - learn python ] ( http : //www.youtube.com/watch ? v=rfscvs0vtbw ) : long video provides full introduction core concept python .", "- [ python data science handbook ] ( http : //jakevdp.github.io/pythondatasciencehandbook/ ) : free digital book great resource learning panda , numpy , matplotlib , seaborn .", "- [ freecodecamp - machine learning everybody ] ( http : //youtu.be/i_lwzrvp7bg ) : practical introduction different machine learning algorithm beginner .", "- [ udacity - intro machine learning ] ( http : //www.udacity.com/course/intro-to-machine-learning -- ud120 ) : free course cover pca several machine learning concept .", "-- -" ] ]
2. Python for Machine Learning Python is a powerful and flexible programming language that's particularly good for machine learning, thanks to its readability, consistency, and robust ecosystem of data science libraries. - **Python Basics**: Python programming requires a good understanding of the basic syntax, data types, error handling, and object-oriented programming. - **Data Science Libraries**: It includes familiarity with NumPy for numerical operations, Pandas for data manipulation and analysis, Matplotlib and Seaborn for data visualization. - **Data Preprocessing**: This involves feature scaling and normalization, handling missing data, outlier detection, categorical data encoding, and splitting data into training, validation, and test sets. - **Machine Learning Libraries**: Proficiency with Scikit-learn, a library providing a wide selection of supervised and unsupervised learning algorithms, is vital. Understanding how to implement algorithms like linear regression, logistic regression, decision trees, random forests, k-nearest neighbors (K-NN), and K-means clustering is important. Dimensionality reduction techniques like PCA and t-SNE are also helpful for visualizing high-dimensional data. 📚 Resources: - [Real Python](https://realpython.com/): A comprehensive resource with articles and tutorials for both beginner and advanced Python concepts. - [freeCodeCamp - Learn Python](https://www.youtube.com/watch?v=rfscVS0vtbw): Long video that provides a full introduction into all of the core concepts in Python. - [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/): Free digital book that is a great resource for learning pandas, NumPy, Matplotlib, and Seaborn. - [freeCodeCamp - Machine Learning for Everybody](https://youtu.be/i_LwzRVP7bg): Practical introduction to different machine learning algorithms for beginners. - [Udacity - Intro to Machine Learning](https://www.udacity.com/course/intro-to-machine-learning--ud120): Free course that covers PCA and several other machine learning concepts. ---
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "🧑‍🔬", "llm", "scientist", "section", "course", "focus", "learning", "build", "best", "possible", "llm", "using", "latest", "technique", ".", "!", "[", "]", "(", "img/roadmap_scientist.png", ")" ], [ "🧑‍🔬 llm scientist section course focus learning build best possible llm using latest technique .", "!", "[ ] ( img/roadmap_scientist.png )" ] ]
[ [ "🧑‍🔬", "llm", "scientist", "section", "course", "focus", "learning", "build", "best", "possible", "llm", "using", "latest", "technique", ".", "!", "[", "]", "(", "img/roadmap_scientist.png", ")" ], [ "🧑‍🔬 llm scientist section course focus learning build best possible llm using latest technique .", "!", "[ ] ( img/roadmap_scientist.png )" ] ]
🧑‍🔬 The LLM Scientist This section of the course focuses on learning how to build the best possible LLMs using the latest techniques. ![](img/roadmap_scientist.png)
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "1", ".", "llm", "architecture", "in-depth", "knowledge", "transformer", "architecture", "required", ",", "important", "good", "understanding", "input", "(", "token", ")", "output", "(", "logits", ")", ".", "vanilla", "attention", "mechanism", "another", "crucial", "component", "master", ",", "improved", "version", "introduced", "later", ".", "*", "*", "*", "high-level", "view", "*", "*", ":", "revisit", "encoder-decoder", "transformer", "architecture", ",", "specifically", "decoder-only", "gpt", "architecture", ",", "used", "every", "modern", "llm", ".", "*", "*", "*", "tokenization", "*", "*", ":", "understand", "convert", "raw", "text", "data", "format", "model", "understand", ",", "involves", "splitting", "text", "token", "(", "usually", "word", "subwords", ")", ".", "*", "*", "*", "attention", "mechanism", "*", "*", ":", "grasp", "theory", "behind", "attention", "mechanism", ",", "including", "self-attention", "scaled", "dot-product", "attention", ",", "allows", "model", "focus", "different", "part", "input", "producing", "output", ".", "*", "*", "*", "text", "generation", "*", "*", ":", "learn", "different", "way", "model", "generate", "output", "sequence", ".", "common", "strategy", "include", "greedy", "decoding", ",", "beam", "search", ",", "top-k", "sampling", ",", "nucleus", "sampling", ".", "📚", "*", "*", "reference", "*", "*", ":", "-", "[", "illustrated", "transformer", "]", "(", "http", ":", "//jalammar.github.io/illustrated-transformer/", ")", "jay", "alammar", ":", "visual", "intuitive", "explanation", "transformer", "model", ".", "-", "[", "illustrated", "gpt-2", "]", "(", "http", ":", "//jalammar.github.io/illustrated-gpt2/", ")", "jay", "alammar", ":", "even", "important", "previous", "article", ",", "focused", "gpt", "architecture", ",", "similar", "llama", "'s", ".", "-", "[", "llm", "visualization", "]", "(", "http", ":", "//bbycroft.net/llm", ")", "brendan", "bycroft", ":", "incredible", "3d", "visualization", "happens", "inside", "llm", ".", "*", "[", "nanogpt", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=kcc8fmeb1ny", ")", "andrej", "karpathy", ":", "2h-long", "youtube", "video", "reimplement", "gpt", "scratch", "(", "programmer", ")", ".", "*", "[", "attention", "?", "attention", "!", "]", "(", "http", ":", "//lilianweng.github.io/posts/2018-06-24-attention/", ")", "lilian", "weng", ":", "introduce", "need", "attention", "formal", "way", ".", "*", "[", "decoding", "strategy", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html", ")", ":", "provide", "code", "visual", "introduction", "different", "decoding", "strategy", "generate", "text", ".", "--", "-" ], [ "1 .", "llm architecture in-depth knowledge transformer architecture required , important good understanding input ( token ) output ( logits ) .", "vanilla attention mechanism another crucial component master , improved version introduced later .", "* * * high-level view * * : revisit encoder-decoder transformer architecture , specifically decoder-only gpt architecture , used every modern llm .", "* * * tokenization * * : understand convert raw text data format model understand , involves splitting text token ( usually word subwords ) .", "* * * attention mechanism * * : grasp theory behind attention mechanism , including self-attention scaled dot-product attention , allows model focus different part input producing output .", "* * * text generation * * : learn different way model generate output sequence .", "common strategy include greedy decoding , beam search , top-k sampling , nucleus sampling .", "📚 * * reference * * : - [ illustrated transformer ] ( http : //jalammar.github.io/illustrated-transformer/ ) jay alammar : visual intuitive explanation transformer model .", "- [ illustrated gpt-2 ] ( http : //jalammar.github.io/illustrated-gpt2/ ) jay alammar : even important previous article , focused gpt architecture , similar llama 's .", "- [ llm visualization ] ( http : //bbycroft.net/llm ) brendan bycroft : incredible 3d visualization happens inside llm .", "* [ nanogpt ] ( http : //www.youtube.com/watch ? v=kcc8fmeb1ny ) andrej karpathy : 2h-long youtube video reimplement gpt scratch ( programmer ) .", "* [ attention ?", "attention !", "] ( http : //lilianweng.github.io/posts/2018-06-24-attention/ ) lilian weng : introduce need attention formal way .", "* [ decoding strategy llm ] ( http : //mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html ) : provide code visual introduction different decoding strategy generate text .", "-- -" ] ]
[ [ "1", ".", "llm", "architecture", "in-depth", "knowledge", "transformer", "architecture", "required", ",", "important", "good", "understanding", "input", "(", "token", ")", "output", "(", "logits", ")", ".", "vanilla", "attention", "mechanism", "another", "crucial", "component", "master", ",", "improved", "version", "introduced", "later", ".", "*", "*", "*", "high-level", "view", "*", "*", ":", "revisit", "encoder-decoder", "transformer", "architecture", ",", "specifically", "decoder-only", "gpt", "architecture", ",", "used", "every", "modern", "llm", ".", "*", "*", "*", "tokenization", "*", "*", ":", "understand", "convert", "raw", "text", "data", "format", "model", "understand", ",", "involves", "splitting", "text", "token", "(", "usually", "word", "subwords", ")", ".", "*", "*", "*", "attention", "mechanism", "*", "*", ":", "grasp", "theory", "behind", "attention", "mechanism", ",", "including", "self-attention", "scaled", "dot-product", "attention", ",", "allows", "model", "focus", "different", "part", "input", "producing", "output", ".", "*", "*", "*", "text", "generation", "*", "*", ":", "learn", "different", "way", "model", "generate", "output", "sequence", ".", "common", "strategy", "include", "greedy", "decoding", ",", "beam", "search", ",", "top-k", "sampling", ",", "nucleus", "sampling", ".", "📚", "*", "*", "reference", "*", "*", ":", "-", "[", "illustrated", "transformer", "]", "(", "http", ":", "//jalammar.github.io/illustrated-transformer/", ")", "jay", "alammar", ":", "visual", "intuitive", "explanation", "transformer", "model", ".", "-", "[", "illustrated", "gpt-2", "]", "(", "http", ":", "//jalammar.github.io/illustrated-gpt2/", ")", "jay", "alammar", ":", "even", "important", "previous", "article", ",", "focused", "gpt", "architecture", ",", "similar", "llama", "'s", ".", "-", "[", "llm", "visualization", "]", "(", "http", ":", "//bbycroft.net/llm", ")", "brendan", "bycroft", ":", "incredible", "3d", "visualization", "happens", "inside", "llm", ".", "*", "[", "nanogpt", "]", "(", "http", ":", "//www.youtube.com/watch", "?", "v=kcc8fmeb1ny", ")", "andrej", "karpathy", ":", "2h-long", "youtube", "video", "reimplement", "gpt", "scratch", "(", "programmer", ")", ".", "*", "[", "attention", "?", "attention", "!", "]", "(", "http", ":", "//lilianweng.github.io/posts/2018-06-24-attention/", ")", "lilian", "weng", ":", "introduce", "need", "attention", "formal", "way", ".", "*", "[", "decoding", "strategy", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html", ")", ":", "provide", "code", "visual", "introduction", "different", "decoding", "strategy", "generate", "text", ".", "--", "-" ], [ "1 .", "llm architecture in-depth knowledge transformer architecture required , important good understanding input ( token ) output ( logits ) .", "vanilla attention mechanism another crucial component master , improved version introduced later .", "* * * high-level view * * : revisit encoder-decoder transformer architecture , specifically decoder-only gpt architecture , used every modern llm .", "* * * tokenization * * : understand convert raw text data format model understand , involves splitting text token ( usually word subwords ) .", "* * * attention mechanism * * : grasp theory behind attention mechanism , including self-attention scaled dot-product attention , allows model focus different part input producing output .", "* * * text generation * * : learn different way model generate output sequence .", "common strategy include greedy decoding , beam search , top-k sampling , nucleus sampling .", "📚 * * reference * * : - [ illustrated transformer ] ( http : //jalammar.github.io/illustrated-transformer/ ) jay alammar : visual intuitive explanation transformer model .", "- [ illustrated gpt-2 ] ( http : //jalammar.github.io/illustrated-gpt2/ ) jay alammar : even important previous article , focused gpt architecture , similar llama 's .", "- [ llm visualization ] ( http : //bbycroft.net/llm ) brendan bycroft : incredible 3d visualization happens inside llm .", "* [ nanogpt ] ( http : //www.youtube.com/watch ? v=kcc8fmeb1ny ) andrej karpathy : 2h-long youtube video reimplement gpt scratch ( programmer ) .", "* [ attention ?", "attention !", "] ( http : //lilianweng.github.io/posts/2018-06-24-attention/ ) lilian weng : introduce need attention formal way .", "* [ decoding strategy llm ] ( http : //mlabonne.github.io/blog/posts/2023-06-07-decoding_strategies.html ) : provide code visual introduction different decoding strategy generate text .", "-- -" ] ]
1. The LLM architecture While an in-depth knowledge about the Transformer architecture is not required, it is important to have a good understanding of its inputs (tokens) and outputs (logits). The vanilla attention mechanism is another crucial component to master, as improved versions of it are introduced later on. * **High-level view**: Revisit the encoder-decoder Transformer architecture, and more specifically the decoder-only GPT architecture, which is used in every modern LLM. * **Tokenization**: Understand how to convert raw text data into a format that the model can understand, which involves splitting the text into tokens (usually words or subwords). * **Attention mechanisms**: Grasp the theory behind attention mechanisms, including self-attention and scaled dot-product attention, which allows the model to focus on different parts of the input when producing an output. * **Text generation**: Learn about the different ways the model can generate output sequences. Common strategies include greedy decoding, beam search, top-k sampling, and nucleus sampling. 📚 **References**: - [The Illustrated Transformer](https://jalammar.github.io/illustrated-transformer/) by Jay Alammar: A visual and intuitive explanation of the Transformer model. - [The Illustrated GPT-2](https://jalammar.github.io/illustrated-gpt2/) by Jay Alammar: Even more important than the previous article, it is focused on the GPT architecture, which is very similar to Llama's. - [LLM Visualization](https://bbycroft.net/llm) by Brendan Bycroft: Incredible 3D visualization of what happens inside of an LLM. * [nanoGPT](https://www.youtube.com/watch?v=kCc8FmEb1nY) by Andrej Karpathy: A 2h-long YouTube video to reimplement GPT from scratch (for programmers). * [Attention? Attention!](https://lilianweng.github.io/posts/2018-06-24-attention/) by Lilian Weng: Introduce the need for attention in a more formal way. * [Decoding Strategies in LLMs](https://mlabonne.github.io/blog/posts/2023-06-07-Decoding_strategies.html): Provide code and a visual introduction to the different decoding strategies to generate text. ---
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "2", ".", "building", "instruction", "dataset", "'s", "easy", "find", "raw", "data", "wikipedia", "website", ",", "'s", "difficult", "collect", "pair", "instruction", "answer", "wild", ".", "like", "traditional", "machine", "learning", ",", "quality", "dataset", "directly", "influence", "quality", "model", ",", "might", "important", "component", "fine-tuning", "process", ".", "*", "*", "*", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", "-like", "dataset", "*", "*", ":", "generate", "synthetic", "data", "scratch", "openai", "api", "(", "gpt", ")", ".", "specify", "seed", "system", "prompt", "create", "diverse", "dataset", ".", "*", "*", "*", "advanced", "technique", "*", "*", ":", "learn", "improve", "existing", "datasets", "[", "evol-instruct", "]", "(", "http", ":", "//arxiv.org/abs/2304.12244", ")", ",", "generate", "high-quality", "synthetic", "data", "like", "[", "orca", "]", "(", "http", ":", "//arxiv.org/abs/2306.02707", ")", "[", "phi-1", "]", "(", "http", ":", "//arxiv.org/abs/2306.11644", ")", "paper", ".", "*", "*", "*", "filtering", "data", "*", "*", ":", "traditional", "technique", "involving", "regex", ",", "removing", "near-duplicates", ",", "focusing", "answer", "high", "number", "token", ",", "etc", ".", "*", "*", "*", "prompt", "template", "*", "*", ":", "'s", "true", "standard", "way", "formatting", "instruction", "answer", ",", "'s", "important", "know", "different", "chat", "template", ",", "[", "chatml", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt", "?", "tabs=python", "&", "pivots=programming-language-chat-ml", ")", ",", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", ",", "etc", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "preparing", "dataset", "instruction", "tuning", "]", "(", "http", ":", "//wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning", "--", "vmlldzo1ntcxnze2", ")", "thomas", "capelle", ":", "exploration", "alpaca", "alpaca-gpt4", "datasets", "format", ".", "*", "[", "generating", "clinical", "instruction", "dataset", "]", "(", "http", ":", "//medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae", ")", "solano", "todeschini", ":", "tutorial", "create", "synthetic", "instruction", "dataset", "using", "gpt-4", ".", "*", "[", "gpt", "3.5", "news", "classification", "]", "(", "http", ":", "//medium.com/", "@", "kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f", ")", "kshitiz", "sahay", ":", "use", "gpt", "3.5", "create", "instruction", "dataset", "fine-tune", "llama", "2", "news", "classification", ".", "*", "[", "dataset", "creation", "fine-tuning", "llm", "]", "(", "http", ":", "//colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag", "?", "usp=sharing", ")", ":", "notebook", "contains", "technique", "filter", "dataset", "upload", "result", ".", "*", "[", "chat", "template", "]", "(", "http", ":", "//huggingface.co/blog/chat-templates", ")", "matthew", "carrigan", ":", "hugging", "face", "'s", "page", "prompt", "template", "--", "-" ], [ "2 .", "building instruction dataset 's easy find raw data wikipedia website , 's difficult collect pair instruction answer wild .", "like traditional machine learning , quality dataset directly influence quality model , might important component fine-tuning process .", "* * * [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) -like dataset * * : generate synthetic data scratch openai api ( gpt ) .", "specify seed system prompt create diverse dataset .", "* * * advanced technique * * : learn improve existing datasets [ evol-instruct ] ( http : //arxiv.org/abs/2304.12244 ) , generate high-quality synthetic data like [ orca ] ( http : //arxiv.org/abs/2306.02707 ) [ phi-1 ] ( http : //arxiv.org/abs/2306.11644 ) paper .", "* * * filtering data * * : traditional technique involving regex , removing near-duplicates , focusing answer high number token , etc .", "* * * prompt template * * : 's true standard way formatting instruction answer , 's important know different chat template , [ chatml ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt ? tabs=python & pivots=programming-language-chat-ml ) , [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) , etc .", "📚 * * reference * * : * [ preparing dataset instruction tuning ] ( http : //wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning -- vmlldzo1ntcxnze2 ) thomas capelle : exploration alpaca alpaca-gpt4 datasets format .", "* [ generating clinical instruction dataset ] ( http : //medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae ) solano todeschini : tutorial create synthetic instruction dataset using gpt-4 .", "* [ gpt 3.5 news classification ] ( http : //medium.com/ @ kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f ) kshitiz sahay : use gpt 3.5 create instruction dataset fine-tune llama 2 news classification .", "* [ dataset creation fine-tuning llm ] ( http : //colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag ? usp=sharing ) : notebook contains technique filter dataset upload result .", "* [ chat template ] ( http : //huggingface.co/blog/chat-templates ) matthew carrigan : hugging face 's page prompt template -- -" ] ]
[ [ "2", ".", "building", "instruction", "dataset", "'s", "easy", "find", "raw", "data", "wikipedia", "website", ",", "'s", "difficult", "collect", "pair", "instruction", "answer", "wild", ".", "like", "traditional", "machine", "learning", ",", "quality", "dataset", "directly", "influence", "quality", "model", ",", "might", "important", "component", "fine-tuning", "process", ".", "*", "*", "*", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", "-like", "dataset", "*", "*", ":", "generate", "synthetic", "data", "scratch", "openai", "api", "(", "gpt", ")", ".", "specify", "seed", "system", "prompt", "create", "diverse", "dataset", ".", "*", "*", "*", "advanced", "technique", "*", "*", ":", "learn", "improve", "existing", "datasets", "[", "evol-instruct", "]", "(", "http", ":", "//arxiv.org/abs/2304.12244", ")", ",", "generate", "high-quality", "synthetic", "data", "like", "[", "orca", "]", "(", "http", ":", "//arxiv.org/abs/2306.02707", ")", "[", "phi-1", "]", "(", "http", ":", "//arxiv.org/abs/2306.11644", ")", "paper", ".", "*", "*", "*", "filtering", "data", "*", "*", ":", "traditional", "technique", "involving", "regex", ",", "removing", "near-duplicates", ",", "focusing", "answer", "high", "number", "token", ",", "etc", ".", "*", "*", "*", "prompt", "template", "*", "*", ":", "'s", "true", "standard", "way", "formatting", "instruction", "answer", ",", "'s", "important", "know", "different", "chat", "template", ",", "[", "chatml", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt", "?", "tabs=python", "&", "pivots=programming-language-chat-ml", ")", ",", "[", "alpaca", "]", "(", "http", ":", "//crfm.stanford.edu/2023/03/13/alpaca.html", ")", ",", "etc", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "preparing", "dataset", "instruction", "tuning", "]", "(", "http", ":", "//wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning", "--", "vmlldzo1ntcxnze2", ")", "thomas", "capelle", ":", "exploration", "alpaca", "alpaca-gpt4", "datasets", "format", ".", "*", "[", "generating", "clinical", "instruction", "dataset", "]", "(", "http", ":", "//medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae", ")", "solano", "todeschini", ":", "tutorial", "create", "synthetic", "instruction", "dataset", "using", "gpt-4", ".", "*", "[", "gpt", "3.5", "news", "classification", "]", "(", "http", ":", "//medium.com/", "@", "kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f", ")", "kshitiz", "sahay", ":", "use", "gpt", "3.5", "create", "instruction", "dataset", "fine-tune", "llama", "2", "news", "classification", ".", "*", "[", "dataset", "creation", "fine-tuning", "llm", "]", "(", "http", ":", "//colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag", "?", "usp=sharing", ")", ":", "notebook", "contains", "technique", "filter", "dataset", "upload", "result", ".", "*", "[", "chat", "template", "]", "(", "http", ":", "//huggingface.co/blog/chat-templates", ")", "matthew", "carrigan", ":", "hugging", "face", "'s", "page", "prompt", "template", "--", "-" ], [ "2 .", "building instruction dataset 's easy find raw data wikipedia website , 's difficult collect pair instruction answer wild .", "like traditional machine learning , quality dataset directly influence quality model , might important component fine-tuning process .", "* * * [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) -like dataset * * : generate synthetic data scratch openai api ( gpt ) .", "specify seed system prompt create diverse dataset .", "* * * advanced technique * * : learn improve existing datasets [ evol-instruct ] ( http : //arxiv.org/abs/2304.12244 ) , generate high-quality synthetic data like [ orca ] ( http : //arxiv.org/abs/2306.02707 ) [ phi-1 ] ( http : //arxiv.org/abs/2306.11644 ) paper .", "* * * filtering data * * : traditional technique involving regex , removing near-duplicates , focusing answer high number token , etc .", "* * * prompt template * * : 's true standard way formatting instruction answer , 's important know different chat template , [ chatml ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt ? tabs=python & pivots=programming-language-chat-ml ) , [ alpaca ] ( http : //crfm.stanford.edu/2023/03/13/alpaca.html ) , etc .", "📚 * * reference * * : * [ preparing dataset instruction tuning ] ( http : //wandb.ai/capecape/alpaca_ft/reports/how-to-fine-tune-an-llm-part-1-preparing-a-dataset-for-instruction-tuning -- vmlldzo1ntcxnze2 ) thomas capelle : exploration alpaca alpaca-gpt4 datasets format .", "* [ generating clinical instruction dataset ] ( http : //medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae ) solano todeschini : tutorial create synthetic instruction dataset using gpt-4 .", "* [ gpt 3.5 news classification ] ( http : //medium.com/ @ kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f ) kshitiz sahay : use gpt 3.5 create instruction dataset fine-tune llama 2 news classification .", "* [ dataset creation fine-tuning llm ] ( http : //colab.research.google.com/drive/1gh8pw9-zae4cxezyoie-t9uhxblildag ? usp=sharing ) : notebook contains technique filter dataset upload result .", "* [ chat template ] ( http : //huggingface.co/blog/chat-templates ) matthew carrigan : hugging face 's page prompt template -- -" ] ]
2. Building an instruction dataset While it's easy to find raw data from Wikipedia and other websites, it's difficult to collect pairs of instructions and answers in the wild. Like in traditional machine learning, the quality of the dataset will directly influence the quality of the model, which is why it might be the most important component in the fine-tuning process. * **[Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html)-like dataset**: Generate synthetic data from scratch with the OpenAI API (GPT). You can specify seeds and system prompts to create a diverse dataset. * **Advanced techniques**: Learn how to improve existing datasets with [Evol-Instruct](https://arxiv.org/abs/2304.12244), how to generate high-quality synthetic data like in the [Orca](https://arxiv.org/abs/2306.02707) and [phi-1](https://arxiv.org/abs/2306.11644) papers. * **Filtering data**: Traditional techniques involving regex, removing near-duplicates, focusing on answers with a high number of tokens, etc. * **Prompt templates**: There's no true standard way of formatting instructions and answers, which is why it's important to know about the different chat templates, such as [ChatML](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt?tabs=python&pivots=programming-language-chat-ml), [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html), etc. 📚 **References**: * [Preparing a Dataset for Instruction tuning](https://wandb.ai/capecape/alpaca_ft/reports/How-to-Fine-Tune-an-LLM-Part-1-Preparing-a-Dataset-for-Instruction-Tuning--Vmlldzo1NTcxNzE2) by Thomas Capelle: Exploration of the Alpaca and Alpaca-GPT4 datasets and how to format them. * [Generating a Clinical Instruction Dataset](https://medium.com/mlearning-ai/generating-a-clinical-instruction-dataset-in-portuguese-with-langchain-and-gpt-4-6ee9abfa41ae) by Solano Todeschini: Tutorial on how to create a synthetic instruction dataset using GPT-4. * [GPT 3.5 for news classification](https://medium.com/@kshitiz.sahay26/how-i-created-an-instruction-dataset-using-gpt-3-5-to-fine-tune-llama-2-for-news-classification-ed02fe41c81f) by Kshitiz Sahay: Use GPT 3.5 to create an instruction dataset to fine-tune Llama 2 for news classification. * [Dataset creation for fine-tuning LLM](https://colab.research.google.com/drive/1GH8PW9-zAe4cXEZyOIE-T9uHXblIldAg?usp=sharing): Notebook that contains a few techniques to filter a dataset and upload the result. * [Chat Template](https://huggingface.co/blog/chat-templates) by Matthew Carrigan: Hugging Face's page about prompt templates ---
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "3", ".", "pre-training", "model", "pre-training", "long", "costly", "process", ",", "focus", "course", ".", "'s", "good", "level", "understanding", "happens", "pre-training", ",", "hands-on", "experience", "required", ".", "*", "*", "*", "data", "pipeline", "*", "*", ":", "pre-training", "requires", "huge", "datasets", "(", "e.g.", ",", "[", "llama", "2", "]", "(", "http", ":", "//arxiv.org/abs/2307.09288", ")", "trained", "2", "trillion", "token", ")", "need", "filtered", ",", "tokenized", ",", "collated", "pre-defined", "vocabulary", ".", "*", "*", "*", "causal", "language", "modeling", "*", "*", ":", "learn", "difference", "causal", "masked", "language", "modeling", ",", "well", "loss", "function", "used", "case", ".", "efficient", "pre-training", ",", "learn", "[", "megatron-lm", "]", "(", "http", ":", "//github.com/nvidia/megatron-lm", ")", "[", "gpt-neox", "]", "(", "http", ":", "//github.com/eleutherai/gpt-neox", ")", ".", "*", "*", "*", "scaling", "law", "*", "*", ":", "[", "scaling", "law", "]", "(", "http", ":", "//arxiv.org/pdf/2001.08361.pdf", ")", "describe", "expected", "model", "performance", "based", "model", "size", ",", "dataset", "size", ",", "amount", "compute", "used", "training", ".", "*", "*", "*", "high-performance", "computing", "*", "*", ":", "scope", ",", "knowledge", "hpc", "fundamental", "'re", "planning", "create", "llm", "scratch", "(", "hardware", ",", "distributed", "workload", ",", "etc", ".", ")", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "llmdatahub", "]", "(", "http", ":", "//github.com/zjh-819/llmdatahub", ")", "junhao", "zhao", ":", "curated", "list", "datasets", "pre-training", ",", "fine-tuning", ",", "rlhf", ".", "*", "[", "training", "causal", "language", "model", "scratch", "]", "(", "http", ":", "//huggingface.co/learn/nlp-course/chapter7/6", "?", "fw=pt", ")", "hugging", "face", ":", "pre-train", "gpt-2", "model", "scratch", "using", "transformer", "library", ".", "*", "[", "tinyllama", "]", "(", "http", ":", "//github.com/jzhang38/tinyllama", ")", "zhang", "et", "al", ".", ":", "check", "project", "get", "good", "understanding", "llama", "model", "trained", "scratch", ".", "*", "[", "causal", "language", "modeling", "]", "(", "http", ":", "//huggingface.co/docs/transformers/tasks/language_modeling", ")", "hugging", "face", ":", "explain", "difference", "causal", "masked", "language", "modeling", "quickly", "fine-tune", "distilgpt-2", "model", ".", "*", "[", "chinchilla", "'s", "wild", "implication", "]", "(", "http", ":", "//www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications", ")", "nostalgebraist", ":", "discus", "scaling", "law", "explain", "mean", "llm", "general", ".", "*", "[", "bloom", "]", "(", "http", ":", "//bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4", ")", "bigscience", ":", "notion", "page", "describes", "bloom", "model", "built", ",", "lot", "useful", "information", "engineering", "part", "problem", "encountered", ".", "*", "[", "opt-175", "logbook", "]", "(", "http", ":", "//github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf", ")", "meta", ":", "research", "log", "showing", "went", "wrong", "went", "right", ".", "useful", "'re", "planning", "pre-train", "large", "language", "model", "(", "case", ",", "175b", "parameter", ")", ".", "*", "[", "llm", "360", "]", "(", "http", ":", "//www.llm360.ai/", ")", ":", "framework", "open-source", "llm", "training", "data", "preparation", "code", ",", "data", ",", "metric", ",", "model", ".", "--", "-" ], [ "3 .", "pre-training model pre-training long costly process , focus course .", "'s good level understanding happens pre-training , hands-on experience required .", "* * * data pipeline * * : pre-training requires huge datasets ( e.g. , [ llama 2 ] ( http : //arxiv.org/abs/2307.09288 ) trained 2 trillion token ) need filtered , tokenized , collated pre-defined vocabulary .", "* * * causal language modeling * * : learn difference causal masked language modeling , well loss function used case .", "efficient pre-training , learn [ megatron-lm ] ( http : //github.com/nvidia/megatron-lm ) [ gpt-neox ] ( http : //github.com/eleutherai/gpt-neox ) .", "* * * scaling law * * : [ scaling law ] ( http : //arxiv.org/pdf/2001.08361.pdf ) describe expected model performance based model size , dataset size , amount compute used training .", "* * * high-performance computing * * : scope , knowledge hpc fundamental 're planning create llm scratch ( hardware , distributed workload , etc . ) .", "📚 * * reference * * : * [ llmdatahub ] ( http : //github.com/zjh-819/llmdatahub ) junhao zhao : curated list datasets pre-training , fine-tuning , rlhf .", "* [ training causal language model scratch ] ( http : //huggingface.co/learn/nlp-course/chapter7/6 ? fw=pt ) hugging face : pre-train gpt-2 model scratch using transformer library .", "* [ tinyllama ] ( http : //github.com/jzhang38/tinyllama ) zhang et al .", ": check project get good understanding llama model trained scratch .", "* [ causal language modeling ] ( http : //huggingface.co/docs/transformers/tasks/language_modeling ) hugging face : explain difference causal masked language modeling quickly fine-tune distilgpt-2 model .", "* [ chinchilla 's wild implication ] ( http : //www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications ) nostalgebraist : discus scaling law explain mean llm general .", "* [ bloom ] ( http : //bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4 ) bigscience : notion page describes bloom model built , lot useful information engineering part problem encountered .", "* [ opt-175 logbook ] ( http : //github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf ) meta : research log showing went wrong went right .", "useful 're planning pre-train large language model ( case , 175b parameter ) .", "* [ llm 360 ] ( http : //www.llm360.ai/ ) : framework open-source llm training data preparation code , data , metric , model .", "-- -" ] ]
[ [ "3", ".", "pre-training", "model", "pre-training", "long", "costly", "process", ",", "focus", "course", ".", "'s", "good", "level", "understanding", "happens", "pre-training", ",", "hands-on", "experience", "required", ".", "*", "*", "*", "data", "pipeline", "*", "*", ":", "pre-training", "requires", "huge", "datasets", "(", "e.g.", ",", "[", "llama", "2", "]", "(", "http", ":", "//arxiv.org/abs/2307.09288", ")", "trained", "2", "trillion", "token", ")", "need", "filtered", ",", "tokenized", ",", "collated", "pre-defined", "vocabulary", ".", "*", "*", "*", "causal", "language", "modeling", "*", "*", ":", "learn", "difference", "causal", "masked", "language", "modeling", ",", "well", "loss", "function", "used", "case", ".", "efficient", "pre-training", ",", "learn", "[", "megatron-lm", "]", "(", "http", ":", "//github.com/nvidia/megatron-lm", ")", "[", "gpt-neox", "]", "(", "http", ":", "//github.com/eleutherai/gpt-neox", ")", ".", "*", "*", "*", "scaling", "law", "*", "*", ":", "[", "scaling", "law", "]", "(", "http", ":", "//arxiv.org/pdf/2001.08361.pdf", ")", "describe", "expected", "model", "performance", "based", "model", "size", ",", "dataset", "size", ",", "amount", "compute", "used", "training", ".", "*", "*", "*", "high-performance", "computing", "*", "*", ":", "scope", ",", "knowledge", "hpc", "fundamental", "'re", "planning", "create", "llm", "scratch", "(", "hardware", ",", "distributed", "workload", ",", "etc", ".", ")", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "llmdatahub", "]", "(", "http", ":", "//github.com/zjh-819/llmdatahub", ")", "junhao", "zhao", ":", "curated", "list", "datasets", "pre-training", ",", "fine-tuning", ",", "rlhf", ".", "*", "[", "training", "causal", "language", "model", "scratch", "]", "(", "http", ":", "//huggingface.co/learn/nlp-course/chapter7/6", "?", "fw=pt", ")", "hugging", "face", ":", "pre-train", "gpt-2", "model", "scratch", "using", "transformer", "library", ".", "*", "[", "tinyllama", "]", "(", "http", ":", "//github.com/jzhang38/tinyllama", ")", "zhang", "et", "al", ".", ":", "check", "project", "get", "good", "understanding", "llama", "model", "trained", "scratch", ".", "*", "[", "causal", "language", "modeling", "]", "(", "http", ":", "//huggingface.co/docs/transformers/tasks/language_modeling", ")", "hugging", "face", ":", "explain", "difference", "causal", "masked", "language", "modeling", "quickly", "fine-tune", "distilgpt-2", "model", ".", "*", "[", "chinchilla", "'s", "wild", "implication", "]", "(", "http", ":", "//www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications", ")", "nostalgebraist", ":", "discus", "scaling", "law", "explain", "mean", "llm", "general", ".", "*", "[", "bloom", "]", "(", "http", ":", "//bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4", ")", "bigscience", ":", "notion", "page", "describes", "bloom", "model", "built", ",", "lot", "useful", "information", "engineering", "part", "problem", "encountered", ".", "*", "[", "opt-175", "logbook", "]", "(", "http", ":", "//github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf", ")", "meta", ":", "research", "log", "showing", "went", "wrong", "went", "right", ".", "useful", "'re", "planning", "pre-train", "large", "language", "model", "(", "case", ",", "175b", "parameter", ")", ".", "*", "[", "llm", "360", "]", "(", "http", ":", "//www.llm360.ai/", ")", ":", "framework", "open-source", "llm", "training", "data", "preparation", "code", ",", "data", ",", "metric", ",", "model", ".", "--", "-" ], [ "3 .", "pre-training model pre-training long costly process , focus course .", "'s good level understanding happens pre-training , hands-on experience required .", "* * * data pipeline * * : pre-training requires huge datasets ( e.g. , [ llama 2 ] ( http : //arxiv.org/abs/2307.09288 ) trained 2 trillion token ) need filtered , tokenized , collated pre-defined vocabulary .", "* * * causal language modeling * * : learn difference causal masked language modeling , well loss function used case .", "efficient pre-training , learn [ megatron-lm ] ( http : //github.com/nvidia/megatron-lm ) [ gpt-neox ] ( http : //github.com/eleutherai/gpt-neox ) .", "* * * scaling law * * : [ scaling law ] ( http : //arxiv.org/pdf/2001.08361.pdf ) describe expected model performance based model size , dataset size , amount compute used training .", "* * * high-performance computing * * : scope , knowledge hpc fundamental 're planning create llm scratch ( hardware , distributed workload , etc . ) .", "📚 * * reference * * : * [ llmdatahub ] ( http : //github.com/zjh-819/llmdatahub ) junhao zhao : curated list datasets pre-training , fine-tuning , rlhf .", "* [ training causal language model scratch ] ( http : //huggingface.co/learn/nlp-course/chapter7/6 ? fw=pt ) hugging face : pre-train gpt-2 model scratch using transformer library .", "* [ tinyllama ] ( http : //github.com/jzhang38/tinyllama ) zhang et al .", ": check project get good understanding llama model trained scratch .", "* [ causal language modeling ] ( http : //huggingface.co/docs/transformers/tasks/language_modeling ) hugging face : explain difference causal masked language modeling quickly fine-tune distilgpt-2 model .", "* [ chinchilla 's wild implication ] ( http : //www.lesswrong.com/posts/6fpvch8rr29qlewnh/chinchilla-s-wild-implications ) nostalgebraist : discus scaling law explain mean llm general .", "* [ bloom ] ( http : //bigscience.notion.site/bloom-bigscience-176b-model-ad073ca07cdf479398d5f95d88e218c4 ) bigscience : notion page describes bloom model built , lot useful information engineering part problem encountered .", "* [ opt-175 logbook ] ( http : //github.com/facebookresearch/metaseq/blob/main/projects/opt/chronicles/opt175b_logbook.pdf ) meta : research log showing went wrong went right .", "useful 're planning pre-train large language model ( case , 175b parameter ) .", "* [ llm 360 ] ( http : //www.llm360.ai/ ) : framework open-source llm training data preparation code , data , metric , model .", "-- -" ] ]
3. Pre-training models Pre-training is a very long and costly process, which is why this is not the focus of this course. It's good to have some level of understanding of what happens during pre-training, but hands-on experience is not required. * **Data pipeline**: Pre-training requires huge datasets (e.g., [Llama 2](https://arxiv.org/abs/2307.09288) was trained on 2 trillion tokens) that need to be filtered, tokenized, and collated with a pre-defined vocabulary. * **Causal language modeling**: Learn the difference between causal and masked language modeling, as well as the loss function used in this case. For efficient pre-training, learn more about [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) or [gpt-neox](https://github.com/EleutherAI/gpt-neox). * **Scaling laws**: The [scaling laws](https://arxiv.org/pdf/2001.08361.pdf) describe the expected model performance based on the model size, dataset size, and the amount of compute used for training. * **High-Performance Computing**: Out of scope here, but more knowledge about HPC is fundamental if you're planning to create your own LLM from scratch (hardware, distributed workload, etc.). 📚 **References**: * [LLMDataHub](https://github.com/Zjh-819/LLMDataHub) by Junhao Zhao: Curated list of datasets for pre-training, fine-tuning, and RLHF. * [Training a causal language model from scratch](https://huggingface.co/learn/nlp-course/chapter7/6?fw=pt) by Hugging Face: Pre-train a GPT-2 model from scratch using the transformers library. * [TinyLlama](https://github.com/jzhang38/TinyLlama) by Zhang et al.: Check this project to get a good understanding of how a Llama model is trained from scratch. * [Causal language modeling](https://huggingface.co/docs/transformers/tasks/language_modeling) by Hugging Face: Explain the difference between causal and masked language modeling and how to quickly fine-tune a DistilGPT-2 model. * [Chinchilla's wild implications](https://www.lesswrong.com/posts/6Fpvch8RR29qLEWNH/chinchilla-s-wild-implications) by nostalgebraist: Discuss the scaling laws and explain what they mean to LLMs in general. * [BLOOM](https://bigscience.notion.site/BLOOM-BigScience-176B-Model-ad073ca07cdf479398d5f95d88e218c4) by BigScience: Notion page that describes how the BLOOM model was built, with a lot of useful information about the engineering part and the problems that were encountered. * [OPT-175 Logbook](https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/chronicles/OPT175B_Logbook.pdf) by Meta: Research logs showing what went wrong and what went right. Useful if you're planning to pre-train a very large language model (in this case, 175B parameters). * [LLM 360](https://www.llm360.ai/): A framework for open-source LLMs with training and data preparation code, data, metrics, and models. ---
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "4", ".", "supervised", "fine-tuning", "pre-trained", "model", "trained", "next-token", "prediction", "task", ",", "'re", "helpful", "assistant", ".", "sft", "allows", "tweak", "respond", "instruction", ".", "moreover", ",", "allows", "fine-tune", "model", "data", "(", "private", ",", "seen", "gpt-4", ",", "etc", ".", ")", "use", "without", "pay", "api", "like", "openai", "'s", ".", "*", "*", "*", "full", "fine-tuning", "*", "*", ":", "full", "fine-tuning", "refers", "training", "parameter", "model", ".", "efficient", "technique", ",", "produce", "slightly", "better", "result", ".", "*", "[", "*", "*", "lora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2106.09685", ")", ":", "parameter-efficient", "technique", "(", "peft", ")", "based", "low-rank", "adapter", ".", "instead", "training", "parameter", ",", "train", "adapter", ".", "*", "[", "*", "*", "qlora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2305.14314", ")", ":", "another", "peft", "based", "lora", ",", "also", "quantizes", "weight", "model", "4", "bit", "introduce", "paged", "optimizers", "manage", "memory", "spike", ".", "combine", "[", "unsloth", "]", "(", "http", ":", "//github.com/unslothai/unsloth", ")", "run", "efficiently", "free", "colab", "notebook", ".", "*", "*", "*", "[", "axolotl", "]", "(", "http", ":", "//github.com/openaccess-ai-collective/axolotl", ")", "*", "*", ":", "user-friendly", "powerful", "fine-tuning", "tool", "used", "lot", "state-of-the-art", "open-source", "model", ".", "*", "[", "*", "*", "deepspeed", "*", "*", "]", "(", "http", ":", "//www.deepspeed.ai/", ")", ":", "efficient", "pre-training", "fine-tuning", "llm", "multi-gpu", "multi-node", "setting", "(", "implemented", "axolotl", ")", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "novice", "'s", "llm", "training", "guide", "]", "(", "http", ":", "//rentry.org/llm-training", ")", "alpin", ":", "overview", "main", "concept", "parameter", "consider", "fine-tuning", "llm", ".", "*", "[", "lora", "insight", "]", "(", "http", ":", "//lightning.ai/pages/community/lora-insights/", ")", "sebastian", "raschka", ":", "practical", "insight", "lora", "select", "best", "parameter", ".", "*", "[", "fine-tune", "llama", "2", "model", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html", ")", ":", "hands-on", "tutorial", "fine-tune", "llama", "2", "model", "using", "hugging", "face", "library", ".", "*", "[", "padding", "large", "language", "model", "]", "(", "http", ":", "//towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff", ")", "benjamin", "marie", ":", "best", "practice", "pad", "training", "example", "causal", "llm", "*", "[", "beginner", "'s", "guide", "llm", "fine-tuning", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html", ")", ":", "tutorial", "fine-tune", "codellama", "model", "using", "axolotl", ".", "--", "-" ], [ "4 .", "supervised fine-tuning pre-trained model trained next-token prediction task , 're helpful assistant .", "sft allows tweak respond instruction .", "moreover , allows fine-tune model data ( private , seen gpt-4 , etc . )", "use without pay api like openai 's .", "* * * full fine-tuning * * : full fine-tuning refers training parameter model .", "efficient technique , produce slightly better result .", "* [ * * lora * * ] ( http : //arxiv.org/abs/2106.09685 ) : parameter-efficient technique ( peft ) based low-rank adapter .", "instead training parameter , train adapter .", "* [ * * qlora * * ] ( http : //arxiv.org/abs/2305.14314 ) : another peft based lora , also quantizes weight model 4 bit introduce paged optimizers manage memory spike .", "combine [ unsloth ] ( http : //github.com/unslothai/unsloth ) run efficiently free colab notebook .", "* * * [ axolotl ] ( http : //github.com/openaccess-ai-collective/axolotl ) * * : user-friendly powerful fine-tuning tool used lot state-of-the-art open-source model .", "* [ * * deepspeed * * ] ( http : //www.deepspeed.ai/ ) : efficient pre-training fine-tuning llm multi-gpu multi-node setting ( implemented axolotl ) .", "📚 * * reference * * : * [ novice 's llm training guide ] ( http : //rentry.org/llm-training ) alpin : overview main concept parameter consider fine-tuning llm .", "* [ lora insight ] ( http : //lightning.ai/pages/community/lora-insights/ ) sebastian raschka : practical insight lora select best parameter .", "* [ fine-tune llama 2 model ] ( http : //mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html ) : hands-on tutorial fine-tune llama 2 model using hugging face library .", "* [ padding large language model ] ( http : //towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff ) benjamin marie : best practice pad training example causal llm * [ beginner 's guide llm fine-tuning ] ( http : //mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html ) : tutorial fine-tune codellama model using axolotl .", "-- -" ] ]
[ [ "4", ".", "supervised", "fine-tuning", "pre-trained", "model", "trained", "next-token", "prediction", "task", ",", "'re", "helpful", "assistant", ".", "sft", "allows", "tweak", "respond", "instruction", ".", "moreover", ",", "allows", "fine-tune", "model", "data", "(", "private", ",", "seen", "gpt-4", ",", "etc", ".", ")", "use", "without", "pay", "api", "like", "openai", "'s", ".", "*", "*", "*", "full", "fine-tuning", "*", "*", ":", "full", "fine-tuning", "refers", "training", "parameter", "model", ".", "efficient", "technique", ",", "produce", "slightly", "better", "result", ".", "*", "[", "*", "*", "lora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2106.09685", ")", ":", "parameter-efficient", "technique", "(", "peft", ")", "based", "low-rank", "adapter", ".", "instead", "training", "parameter", ",", "train", "adapter", ".", "*", "[", "*", "*", "qlora", "*", "*", "]", "(", "http", ":", "//arxiv.org/abs/2305.14314", ")", ":", "another", "peft", "based", "lora", ",", "also", "quantizes", "weight", "model", "4", "bit", "introduce", "paged", "optimizers", "manage", "memory", "spike", ".", "combine", "[", "unsloth", "]", "(", "http", ":", "//github.com/unslothai/unsloth", ")", "run", "efficiently", "free", "colab", "notebook", ".", "*", "*", "*", "[", "axolotl", "]", "(", "http", ":", "//github.com/openaccess-ai-collective/axolotl", ")", "*", "*", ":", "user-friendly", "powerful", "fine-tuning", "tool", "used", "lot", "state-of-the-art", "open-source", "model", ".", "*", "[", "*", "*", "deepspeed", "*", "*", "]", "(", "http", ":", "//www.deepspeed.ai/", ")", ":", "efficient", "pre-training", "fine-tuning", "llm", "multi-gpu", "multi-node", "setting", "(", "implemented", "axolotl", ")", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "novice", "'s", "llm", "training", "guide", "]", "(", "http", ":", "//rentry.org/llm-training", ")", "alpin", ":", "overview", "main", "concept", "parameter", "consider", "fine-tuning", "llm", ".", "*", "[", "lora", "insight", "]", "(", "http", ":", "//lightning.ai/pages/community/lora-insights/", ")", "sebastian", "raschka", ":", "practical", "insight", "lora", "select", "best", "parameter", ".", "*", "[", "fine-tune", "llama", "2", "model", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html", ")", ":", "hands-on", "tutorial", "fine-tune", "llama", "2", "model", "using", "hugging", "face", "library", ".", "*", "[", "padding", "large", "language", "model", "]", "(", "http", ":", "//towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff", ")", "benjamin", "marie", ":", "best", "practice", "pad", "training", "example", "causal", "llm", "*", "[", "beginner", "'s", "guide", "llm", "fine-tuning", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html", ")", ":", "tutorial", "fine-tune", "codellama", "model", "using", "axolotl", ".", "--", "-" ], [ "4 .", "supervised fine-tuning pre-trained model trained next-token prediction task , 're helpful assistant .", "sft allows tweak respond instruction .", "moreover , allows fine-tune model data ( private , seen gpt-4 , etc . )", "use without pay api like openai 's .", "* * * full fine-tuning * * : full fine-tuning refers training parameter model .", "efficient technique , produce slightly better result .", "* [ * * lora * * ] ( http : //arxiv.org/abs/2106.09685 ) : parameter-efficient technique ( peft ) based low-rank adapter .", "instead training parameter , train adapter .", "* [ * * qlora * * ] ( http : //arxiv.org/abs/2305.14314 ) : another peft based lora , also quantizes weight model 4 bit introduce paged optimizers manage memory spike .", "combine [ unsloth ] ( http : //github.com/unslothai/unsloth ) run efficiently free colab notebook .", "* * * [ axolotl ] ( http : //github.com/openaccess-ai-collective/axolotl ) * * : user-friendly powerful fine-tuning tool used lot state-of-the-art open-source model .", "* [ * * deepspeed * * ] ( http : //www.deepspeed.ai/ ) : efficient pre-training fine-tuning llm multi-gpu multi-node setting ( implemented axolotl ) .", "📚 * * reference * * : * [ novice 's llm training guide ] ( http : //rentry.org/llm-training ) alpin : overview main concept parameter consider fine-tuning llm .", "* [ lora insight ] ( http : //lightning.ai/pages/community/lora-insights/ ) sebastian raschka : practical insight lora select best parameter .", "* [ fine-tune llama 2 model ] ( http : //mlabonne.github.io/blog/posts/fine_tune_your_own_llama_2_model_in_a_colab_notebook.html ) : hands-on tutorial fine-tune llama 2 model using hugging face library .", "* [ padding large language model ] ( http : //towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff ) benjamin marie : best practice pad training example causal llm * [ beginner 's guide llm fine-tuning ] ( http : //mlabonne.github.io/blog/posts/a_beginners_guide_to_llm_finetuning.html ) : tutorial fine-tune codellama model using axolotl .", "-- -" ] ]
4. Supervised Fine-Tuning Pre-trained models are only trained on a next-token prediction task, which is why they're not helpful assistants. SFT allows you to tweak them to respond to instructions. Moreover, it allows you to fine-tune your model on any data (private, not seen by GPT-4, etc.) and use it without having to pay for an API like OpenAI's. * **Full fine-tuning**: Full fine-tuning refers to training all the parameters in the model. It is not an efficient technique, but it produces slightly better results. * [**LoRA**](https://arxiv.org/abs/2106.09685): A parameter-efficient technique (PEFT) based on low-rank adapters. Instead of training all the parameters, we only train these adapters. * [**QLoRA**](https://arxiv.org/abs/2305.14314): Another PEFT based on LoRA, which also quantizes the weights of the model in 4 bits and introduce paged optimizers to manage memory spikes. Combine it with [Unsloth](https://github.com/unslothai/unsloth) to run it efficiently on a free Colab notebook. * **[Axolotl](https://github.com/OpenAccess-AI-Collective/axolotl)**: A user-friendly and powerful fine-tuning tool that is used in a lot of state-of-the-art open-source models. * [**DeepSpeed**](https://www.deepspeed.ai/): Efficient pre-training and fine-tuning of LLMs for multi-GPU and multi-node settings (implemented in Axolotl). 📚 **References**: * [The Novice's LLM Training Guide](https://rentry.org/llm-training) by Alpin: Overview of the main concepts and parameters to consider when fine-tuning LLMs. * [LoRA insights](https://lightning.ai/pages/community/lora-insights/) by Sebastian Raschka: Practical insights about LoRA and how to select the best parameters. * [Fine-Tune Your Own Llama 2 Model](https://mlabonne.github.io/blog/posts/Fine_Tune_Your_Own_Llama_2_Model_in_a_Colab_Notebook.html): Hands-on tutorial on how to fine-tune a Llama 2 model using Hugging Face libraries. * [Padding Large Language Models](https://towardsdatascience.com/padding-large-language-models-examples-with-llama-2-199fb10df8ff) by Benjamin Marie: Best practices to pad training examples for causal LLMs * [A Beginner's Guide to LLM Fine-Tuning](https://mlabonne.github.io/blog/posts/A_Beginners_Guide_to_LLM_Finetuning.html): Tutorial on how to fine-tune a CodeLlama model using Axolotl. ---
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "6", ".", "evaluation", "evaluating", "llm", "undervalued", "part", "pipeline", ",", "time-consuming", "moderately", "reliable", ".", "downstream", "task", "dictate", "want", "evaluate", ",", "always", "remember", "goodhart", "'s", "law", ":", "``", "measure", "becomes", "target", ",", "cease", "good", "measure", ".", "''", "*", "*", "*", "traditional", "metric", "*", "*", ":", "metric", "like", "perplexity", "bleu", "score", "popular", "'re", "flawed", "context", ".", "still", "important", "understand", "applied", ".", "*", "*", "*", "general", "benchmark", "*", "*", ":", "based", "[", "language", "model", "evaluation", "harness", "]", "(", "http", ":", "//github.com/eleutherai/lm-evaluation-harness", ")", ",", "[", "open", "llm", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard", ")", "main", "benchmark", "general-purpose", "llm", "(", "like", "chatgpt", ")", ".", "popular", "benchmark", "like", "[", "bigbench", "]", "(", "http", ":", "//github.com/google/big-bench", ")", ",", "[", "mt-bench", "]", "(", "http", ":", "//arxiv.org/abs/2306.05685", ")", ",", "etc", ".", "*", "*", "*", "task-specific", "benchmark", "*", "*", ":", "task", "like", "summarization", ",", "translation", ",", "question", "answering", "dedicated", "benchmark", ",", "metric", ",", "even", "subdomains", "(", "medical", ",", "financial", ",", "etc", ".", ")", ",", "[", "pubmedqa", "]", "(", "http", ":", "//pubmedqa.github.io/", ")", "biomedical", "question", "answering", ".", "*", "*", "*", "human", "evaluation", "*", "*", ":", "reliable", "evaluation", "acceptance", "rate", "user", "comparison", "made", "human", ".", "want", "know", "model", "performs", "well", ",", "simplest", "surest", "way", "use", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "perplexity", "fixed-length", "model", "]", "(", "http", ":", "//huggingface.co/docs/transformers/perplexity", ")", "hugging", "face", ":", "overview", "perplexity", "code", "implement", "transformer", "library", ".", "*", "[", "bleu", "risk", "]", "(", "http", ":", "//towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ")", "rachael", "tatman", ":", "overview", "bleu", "score", "many", "issue", "example", ".", "*", "[", "survey", "evaluation", "llm", "]", "(", "http", ":", "//arxiv.org/abs/2307.03109", ")", "chang", "et", "al", ".", ":", "comprehensive", "paper", "evaluate", ",", "evaluate", ",", "evaluate", ".", "*", "[", "chatbot", "arena", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/lmsys/chatbot-arena-leaderboard", ")", "lmsys", ":", "elo", "rating", "general-purpose", "llm", ",", "based", "comparison", "made", "human", ".", "--", "-" ], [ "6 .", "evaluation evaluating llm undervalued part pipeline , time-consuming moderately reliable .", "downstream task dictate want evaluate , always remember goodhart 's law : `` measure becomes target , cease good measure . ''", "* * * traditional metric * * : metric like perplexity bleu score popular 're flawed context .", "still important understand applied .", "* * * general benchmark * * : based [ language model evaluation harness ] ( http : //github.com/eleutherai/lm-evaluation-harness ) , [ open llm leaderboard ] ( http : //huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard ) main benchmark general-purpose llm ( like chatgpt ) .", "popular benchmark like [ bigbench ] ( http : //github.com/google/big-bench ) , [ mt-bench ] ( http : //arxiv.org/abs/2306.05685 ) , etc .", "* * * task-specific benchmark * * : task like summarization , translation , question answering dedicated benchmark , metric , even subdomains ( medical , financial , etc .", ") , [ pubmedqa ] ( http : //pubmedqa.github.io/ ) biomedical question answering .", "* * * human evaluation * * : reliable evaluation acceptance rate user comparison made human .", "want know model performs well , simplest surest way use .", "📚 * * reference * * : * [ perplexity fixed-length model ] ( http : //huggingface.co/docs/transformers/perplexity ) hugging face : overview perplexity code implement transformer library .", "* [ bleu risk ] ( http : //towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213 ) rachael tatman : overview bleu score many issue example .", "* [ survey evaluation llm ] ( http : //arxiv.org/abs/2307.03109 ) chang et al .", ": comprehensive paper evaluate , evaluate , evaluate .", "* [ chatbot arena leaderboard ] ( http : //huggingface.co/spaces/lmsys/chatbot-arena-leaderboard ) lmsys : elo rating general-purpose llm , based comparison made human .", "-- -" ] ]
[ [ "6", ".", "evaluation", "evaluating", "llm", "undervalued", "part", "pipeline", ",", "time-consuming", "moderately", "reliable", ".", "downstream", "task", "dictate", "want", "evaluate", ",", "always", "remember", "goodhart", "'s", "law", ":", "``", "measure", "becomes", "target", ",", "cease", "good", "measure", ".", "''", "*", "*", "*", "traditional", "metric", "*", "*", ":", "metric", "like", "perplexity", "bleu", "score", "popular", "'re", "flawed", "context", ".", "still", "important", "understand", "applied", ".", "*", "*", "*", "general", "benchmark", "*", "*", ":", "based", "[", "language", "model", "evaluation", "harness", "]", "(", "http", ":", "//github.com/eleutherai/lm-evaluation-harness", ")", ",", "[", "open", "llm", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard", ")", "main", "benchmark", "general-purpose", "llm", "(", "like", "chatgpt", ")", ".", "popular", "benchmark", "like", "[", "bigbench", "]", "(", "http", ":", "//github.com/google/big-bench", ")", ",", "[", "mt-bench", "]", "(", "http", ":", "//arxiv.org/abs/2306.05685", ")", ",", "etc", ".", "*", "*", "*", "task-specific", "benchmark", "*", "*", ":", "task", "like", "summarization", ",", "translation", ",", "question", "answering", "dedicated", "benchmark", ",", "metric", ",", "even", "subdomains", "(", "medical", ",", "financial", ",", "etc", ".", ")", ",", "[", "pubmedqa", "]", "(", "http", ":", "//pubmedqa.github.io/", ")", "biomedical", "question", "answering", ".", "*", "*", "*", "human", "evaluation", "*", "*", ":", "reliable", "evaluation", "acceptance", "rate", "user", "comparison", "made", "human", ".", "want", "know", "model", "performs", "well", ",", "simplest", "surest", "way", "use", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "perplexity", "fixed-length", "model", "]", "(", "http", ":", "//huggingface.co/docs/transformers/perplexity", ")", "hugging", "face", ":", "overview", "perplexity", "code", "implement", "transformer", "library", ".", "*", "[", "bleu", "risk", "]", "(", "http", ":", "//towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ")", "rachael", "tatman", ":", "overview", "bleu", "score", "many", "issue", "example", ".", "*", "[", "survey", "evaluation", "llm", "]", "(", "http", ":", "//arxiv.org/abs/2307.03109", ")", "chang", "et", "al", ".", ":", "comprehensive", "paper", "evaluate", ",", "evaluate", ",", "evaluate", ".", "*", "[", "chatbot", "arena", "leaderboard", "]", "(", "http", ":", "//huggingface.co/spaces/lmsys/chatbot-arena-leaderboard", ")", "lmsys", ":", "elo", "rating", "general-purpose", "llm", ",", "based", "comparison", "made", "human", ".", "--", "-" ], [ "6 .", "evaluation evaluating llm undervalued part pipeline , time-consuming moderately reliable .", "downstream task dictate want evaluate , always remember goodhart 's law : `` measure becomes target , cease good measure . ''", "* * * traditional metric * * : metric like perplexity bleu score popular 're flawed context .", "still important understand applied .", "* * * general benchmark * * : based [ language model evaluation harness ] ( http : //github.com/eleutherai/lm-evaluation-harness ) , [ open llm leaderboard ] ( http : //huggingface.co/spaces/huggingfaceh4/open_llm_leaderboard ) main benchmark general-purpose llm ( like chatgpt ) .", "popular benchmark like [ bigbench ] ( http : //github.com/google/big-bench ) , [ mt-bench ] ( http : //arxiv.org/abs/2306.05685 ) , etc .", "* * * task-specific benchmark * * : task like summarization , translation , question answering dedicated benchmark , metric , even subdomains ( medical , financial , etc .", ") , [ pubmedqa ] ( http : //pubmedqa.github.io/ ) biomedical question answering .", "* * * human evaluation * * : reliable evaluation acceptance rate user comparison made human .", "want know model performs well , simplest surest way use .", "📚 * * reference * * : * [ perplexity fixed-length model ] ( http : //huggingface.co/docs/transformers/perplexity ) hugging face : overview perplexity code implement transformer library .", "* [ bleu risk ] ( http : //towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213 ) rachael tatman : overview bleu score many issue example .", "* [ survey evaluation llm ] ( http : //arxiv.org/abs/2307.03109 ) chang et al .", ": comprehensive paper evaluate , evaluate , evaluate .", "* [ chatbot arena leaderboard ] ( http : //huggingface.co/spaces/lmsys/chatbot-arena-leaderboard ) lmsys : elo rating general-purpose llm , based comparison made human .", "-- -" ] ]
6. Evaluation Evaluating LLMs is an undervalued part of the pipeline, which is time-consuming and moderately reliable. Your downstream task should dictate what you want to evaluate, but always remember Goodhart's law: "When a measure becomes a target, it ceases to be a good measure." * **Traditional metrics**: Metrics like perplexity and BLEU score are not as popular as they were because they're flawed in most contexts. It is still important to understand them and when they can be applied. * **General benchmarks**: Based on the [Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness), the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) is the main benchmark for general-purpose LLMs (like ChatGPT). There are other popular benchmarks like [BigBench](https://github.com/google/BIG-bench), [MT-Bench](https://arxiv.org/abs/2306.05685), etc. * **Task-specific benchmarks**: Tasks like summarization, translation, and question answering have dedicated benchmarks, metrics, and even subdomains (medical, financial, etc.), such as [PubMedQA](https://pubmedqa.github.io/) for biomedical question answering. * **Human evaluation**: The most reliable evaluation is the acceptance rate by users or comparisons made by humans. If you want to know if a model performs well, the simplest but surest way is to use it yourself. 📚 **References**: * [Perplexity of fixed-length models](https://huggingface.co/docs/transformers/perplexity) by Hugging Face: Overview of perplexity with code to implement it with the transformers library. * [BLEU at your own risk](https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213) by Rachael Tatman: Overview of the BLEU score and its many issues with examples. * [A Survey on Evaluation of LLMs](https://arxiv.org/abs/2307.03109) by Chang et al.: Comprehensive paper about what to evaluate, where to evaluate, and how to evaluate. * [Chatbot Arena Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) by lmsys: Elo rating of general-purpose LLMs, based on comparisons made by humans. ---
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "7", ".", "quantization", "quantization", "process", "converting", "weight", "(", "activation", ")", "model", "using", "lower", "precision", ".", "example", ",", "weight", "stored", "using", "16", "bit", "converted", "4-bit", "representation", ".", "technique", "become", "increasingly", "important", "reduce", "computational", "memory", "cost", "associated", "llm", ".", "*", "*", "*", "base", "technique", "*", "*", ":", "learn", "different", "level", "precision", "(", "fp32", ",", "fp16", ",", "int8", ",", "etc", ".", ")", "perform", "naïve", "quantization", "absmax", "zero-point", "technique", ".", "*", "*", "*", "gguf", "llama.cpp", "*", "*", ":", "originally", "designed", "run", "cpu", ",", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "gguf", "format", "become", "popular", "tool", "run", "llm", "consumer-grade", "hardware", ".", "*", "*", "*", "gptq", "exl2", "*", "*", ":", "[", "gptq", "]", "(", "http", ":", "//arxiv.org/abs/2210.17323", ")", ",", "specifically", ",", "[", "exl2", "]", "(", "http", ":", "//github.com/turboderp/exllamav2", ")", "format", "offer", "incredible", "speed", "run", "gpus", ".", "model", "also", "take", "long", "time", "quantized", ".", "*", "*", "*", "awq", "*", "*", ":", "new", "format", "accurate", "gptq", "(", "lower", "perplexity", ")", "us", "lot", "vram", "necessarily", "faster", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "introduction", "quantization", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "overview", "quantization", ",", "absmax", "zero-point", "quantization", ",", "llm.int8", "(", ")", "code", ".", "*", "[", "quantize", "llama", "model", "llama.cpp", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html", ")", ":", "tutorial", "quantize", "llama", "2", "model", "using", "llama.cpp", "gguf", "format", ".", "*", "[", "4-bit", "llm", "quantization", "gptq", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "tutorial", "quantize", "llm", "using", "gptq", "algorithm", "autogptq", ".", "*", "[", "exllamav2", ":", "fastest", "library", "run", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run", "%", "c2", "%", "a0llms.html", ")", ":", "guide", "quantize", "mistral", "model", "using", "exl2", "format", "run", "exllamav2", "library", ".", "*", "[", "understanding", "activation-aware", "weight", "quantization", "]", "(", "http", ":", "//medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8", ")", "friendliai", ":", "overview", "awq", "technique", "benefit", ".", "--", "-" ], [ "7 .", "quantization quantization process converting weight ( activation ) model using lower precision .", "example , weight stored using 16 bit converted 4-bit representation .", "technique become increasingly important reduce computational memory cost associated llm .", "* * * base technique * * : learn different level precision ( fp32 , fp16 , int8 , etc . )", "perform naïve quantization absmax zero-point technique .", "* * * gguf llama.cpp * * : originally designed run cpu , [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) gguf format become popular tool run llm consumer-grade hardware .", "* * * gptq exl2 * * : [ gptq ] ( http : //arxiv.org/abs/2210.17323 ) , specifically , [ exl2 ] ( http : //github.com/turboderp/exllamav2 ) format offer incredible speed run gpus .", "model also take long time quantized .", "* * * awq * * : new format accurate gptq ( lower perplexity ) us lot vram necessarily faster .", "📚 * * reference * * : * [ introduction quantization ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : overview quantization , absmax zero-point quantization , llm.int8 ( ) code .", "* [ quantize llama model llama.cpp ] ( http : //mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html ) : tutorial quantize llama 2 model using llama.cpp gguf format .", "* [ 4-bit llm quantization gptq ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : tutorial quantize llm using gptq algorithm autogptq .", "* [ exllamav2 : fastest library run llm ] ( http : //mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run % c2 % a0llms.html ) : guide quantize mistral model using exl2 format run exllamav2 library .", "* [ understanding activation-aware weight quantization ] ( http : //medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8 ) friendliai : overview awq technique benefit .", "-- -" ] ]
[ [ "7", ".", "quantization", "quantization", "process", "converting", "weight", "(", "activation", ")", "model", "using", "lower", "precision", ".", "example", ",", "weight", "stored", "using", "16", "bit", "converted", "4-bit", "representation", ".", "technique", "become", "increasingly", "important", "reduce", "computational", "memory", "cost", "associated", "llm", ".", "*", "*", "*", "base", "technique", "*", "*", ":", "learn", "different", "level", "precision", "(", "fp32", ",", "fp16", ",", "int8", ",", "etc", ".", ")", "perform", "naïve", "quantization", "absmax", "zero-point", "technique", ".", "*", "*", "*", "gguf", "llama.cpp", "*", "*", ":", "originally", "designed", "run", "cpu", ",", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "gguf", "format", "become", "popular", "tool", "run", "llm", "consumer-grade", "hardware", ".", "*", "*", "*", "gptq", "exl2", "*", "*", ":", "[", "gptq", "]", "(", "http", ":", "//arxiv.org/abs/2210.17323", ")", ",", "specifically", ",", "[", "exl2", "]", "(", "http", ":", "//github.com/turboderp/exllamav2", ")", "format", "offer", "incredible", "speed", "run", "gpus", ".", "model", "also", "take", "long", "time", "quantized", ".", "*", "*", "*", "awq", "*", "*", ":", "new", "format", "accurate", "gptq", "(", "lower", "perplexity", ")", "us", "lot", "vram", "necessarily", "faster", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "introduction", "quantization", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "overview", "quantization", ",", "absmax", "zero-point", "quantization", ",", "llm.int8", "(", ")", "code", ".", "*", "[", "quantize", "llama", "model", "llama.cpp", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html", ")", ":", "tutorial", "quantize", "llama", "2", "model", "using", "llama.cpp", "gguf", "format", ".", "*", "[", "4-bit", "llm", "quantization", "gptq", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html", ")", ":", "tutorial", "quantize", "llm", "using", "gptq", "algorithm", "autogptq", ".", "*", "[", "exllamav2", ":", "fastest", "library", "run", "llm", "]", "(", "http", ":", "//mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run", "%", "c2", "%", "a0llms.html", ")", ":", "guide", "quantize", "mistral", "model", "using", "exl2", "format", "run", "exllamav2", "library", ".", "*", "[", "understanding", "activation-aware", "weight", "quantization", "]", "(", "http", ":", "//medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8", ")", "friendliai", ":", "overview", "awq", "technique", "benefit", ".", "--", "-" ], [ "7 .", "quantization quantization process converting weight ( activation ) model using lower precision .", "example , weight stored using 16 bit converted 4-bit representation .", "technique become increasingly important reduce computational memory cost associated llm .", "* * * base technique * * : learn different level precision ( fp32 , fp16 , int8 , etc . )", "perform naïve quantization absmax zero-point technique .", "* * * gguf llama.cpp * * : originally designed run cpu , [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) gguf format become popular tool run llm consumer-grade hardware .", "* * * gptq exl2 * * : [ gptq ] ( http : //arxiv.org/abs/2210.17323 ) , specifically , [ exl2 ] ( http : //github.com/turboderp/exllamav2 ) format offer incredible speed run gpus .", "model also take long time quantized .", "* * * awq * * : new format accurate gptq ( lower perplexity ) us lot vram necessarily faster .", "📚 * * reference * * : * [ introduction quantization ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : overview quantization , absmax zero-point quantization , llm.int8 ( ) code .", "* [ quantize llama model llama.cpp ] ( http : //mlabonne.github.io/blog/posts/quantize_llama_2_models_using_ggml.html ) : tutorial quantize llama 2 model using llama.cpp gguf format .", "* [ 4-bit llm quantization gptq ] ( http : //mlabonne.github.io/blog/posts/introduction_to_weight_quantization.html ) : tutorial quantize llm using gptq algorithm autogptq .", "* [ exllamav2 : fastest library run llm ] ( http : //mlabonne.github.io/blog/posts/exllamav2_the_fastest_library_to_run % c2 % a0llms.html ) : guide quantize mistral model using exl2 format run exllamav2 library .", "* [ understanding activation-aware weight quantization ] ( http : //medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8 ) friendliai : overview awq technique benefit .", "-- -" ] ]
7. Quantization Quantization is the process of converting the weights (and activations) of a model using a lower precision. For example, weights stored using 16 bits can be converted into a 4-bit representation. This technique has become increasingly important to reduce the computational and memory costs associated with LLMs. * **Base techniques**: Learn the different levels of precision (FP32, FP16, INT8, etc.) and how to perform naïve quantization with absmax and zero-point techniques. * **GGUF and llama.cpp**: Originally designed to run on CPUs, [llama.cpp](https://github.com/ggerganov/llama.cpp) and the GGUF format have become the most popular tools to run LLMs on consumer-grade hardware. * **GPTQ and EXL2**: [GPTQ](https://arxiv.org/abs/2210.17323) and, more specifically, the [EXL2](https://github.com/turboderp/exllamav2) format offer an incredible speed but can only run on GPUs. Models also take a long time to be quantized. * **AWQ**: This new format is more accurate than GPTQ (lower perplexity) but uses a lot more VRAM and is not necessarily faster. 📚 **References**: * [Introduction to quantization](https://mlabonne.github.io/blog/posts/Introduction_to_Weight_Quantization.html): Overview of quantization, absmax and zero-point quantization, and LLM.int8() with code. * [Quantize Llama models with llama.cpp](https://mlabonne.github.io/blog/posts/Quantize_Llama_2_models_using_ggml.html): Tutorial on how to quantize a Llama 2 model using llama.cpp and the GGUF format. * [4-bit LLM Quantization with GPTQ](https://mlabonne.github.io/blog/posts/Introduction_to_Weight_Quantization.html): Tutorial on how to quantize an LLM using the GPTQ algorithm with AutoGPTQ. * [ExLlamaV2: The Fastest Library to Run LLMs](https://mlabonne.github.io/blog/posts/ExLlamaV2_The_Fastest_Library_to_Run%C2%A0LLMs.html): Guide on how to quantize a Mistral model using the EXL2 format and run it with the ExLlamaV2 library. * [Understanding Activation-Aware Weight Quantization](https://medium.com/friendliai/understanding-activation-aware-weight-quantization-awq-boosting-inference-serving-efficiency-in-10bb0faf63a8) by FriendliAI: Overview of the AWQ technique and its benefits. ---
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "👷", "llm", "engineer", "section", "course", "focus", "learning", "build", "llm-powered", "application", "used", "production", ",", "focus", "augmenting", "model", "deploying", ".", "!", "[", "]", "(", "img/roadmap_engineer.png", ")" ], [ "👷 llm engineer section course focus learning build llm-powered application used production , focus augmenting model deploying .", "!", "[ ] ( img/roadmap_engineer.png )" ] ]
[ [ "👷", "llm", "engineer", "section", "course", "focus", "learning", "build", "llm-powered", "application", "used", "production", ",", "focus", "augmenting", "model", "deploying", ".", "!", "[", "]", "(", "img/roadmap_engineer.png", ")" ], [ "👷 llm engineer section course focus learning build llm-powered application used production , focus augmenting model deploying .", "!", "[ ] ( img/roadmap_engineer.png )" ] ]
👷 The LLM Engineer This section of the course focuses on learning how to build LLM-powered applications that can be used in production, with a focus on augmenting models and deploying them. ![](img/roadmap_engineer.png)
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "1", ".", "running", "llm", "running", "llm", "difficult", "due", "high", "hardware", "requirement", ".", "depending", "use", "case", ",", "might", "want", "simply", "consume", "model", "api", "(", "like", "gpt-4", ")", "run", "locally", ".", "case", ",", "additional", "prompting", "guidance", "technique", "improve", "constrain", "output", "application", ".", "*", "*", "*", "llm", "apis", "*", "*", ":", "apis", "convenient", "way", "deploy", "llm", ".", "space", "divided", "private", "llm", "(", "[", "openai", "]", "(", "http", ":", "//platform.openai.com/", ")", ",", "[", "google", "]", "(", "http", ":", "//cloud.google.com/vertex-ai/docs/generative-ai/learn/overview", ")", ",", "[", "anthropic", "]", "(", "http", ":", "//docs.anthropic.com/claude/reference/getting-started-with-the-api", ")", ",", "[", "cohere", "]", "(", "http", ":", "//docs.cohere.com/docs", ")", ",", "etc", ".", ")", "open-source", "llm", "(", "[", "openrouter", "]", "(", "http", ":", "//openrouter.ai/", ")", ",", "[", "hugging", "face", "]", "(", "http", ":", "//huggingface.co/inference-api", ")", ",", "[", "together", "ai", "]", "(", "http", ":", "//www.together.ai/", ")", ",", "etc", ".", ")", ".", "*", "*", "*", "open-source", "llm", "*", "*", ":", "[", "hugging", "face", "hub", "]", "(", "http", ":", "//huggingface.co/models", ")", "great", "place", "find", "llm", ".", "directly", "run", "[", "hugging", "face", "space", "]", "(", "http", ":", "//huggingface.co/spaces", ")", ",", "download", "run", "locally", "apps", "like", "[", "lm", "studio", "]", "(", "http", ":", "//lmstudio.ai/", ")", "cli", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "[", "ollama", "]", "(", "http", ":", "//ollama.ai/", ")", ".", "*", "*", "*", "prompt", "engineering", "*", "*", ":", "common", "technique", "include", "zero-shot", "prompting", ",", "few-shot", "prompting", ",", "chain", "thought", ",", "react", ".", "work", "better", "bigger", "model", ",", "adapted", "smaller", "one", ".", "*", "*", "*", "structuring", "output", "*", "*", ":", "many", "task", "require", "structured", "output", ",", "like", "strict", "template", "json", "format", ".", "library", "like", "[", "lmql", "]", "(", "http", ":", "//lmql.ai/", ")", ",", "[", "outline", "]", "(", "http", ":", "//github.com/outlines-dev/outlines", ")", ",", "[", "guidance", "]", "(", "http", ":", "//github.com/guidance-ai/guidance", ")", ",", "etc", ".", "used", "guide", "generation", "respect", "given", "structure", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "run", "llm", "locally", "lm", "studio", "]", "(", "http", ":", "//www.kdnuggets.com/run-an-llm-locally-with-lm-studio", ")", "nisha", "arya", ":", "short", "guide", "use", "lm", "studio", ".", "*", "[", "prompt", "engineering", "guide", "]", "(", "http", ":", "//www.promptingguide.ai/", ")", "dair.ai", ":", "exhaustive", "list", "prompt", "technique", "example", "*", "[", "outline", "-", "quickstart", "]", "(", "http", ":", "//outlines-dev.github.io/outlines/quickstart/", ")", ":", "list", "guided", "generation", "technique", "enabled", "outline", ".", "*", "[", "lmql", "-", "overview", "]", "(", "http", ":", "//lmql.ai/docs/language/overview.html", ")", ":", "introduction", "lmql", "language", ".", "--", "-" ], [ "1 .", "running llm running llm difficult due high hardware requirement .", "depending use case , might want simply consume model api ( like gpt-4 ) run locally .", "case , additional prompting guidance technique improve constrain output application .", "* * * llm apis * * : apis convenient way deploy llm .", "space divided private llm ( [ openai ] ( http : //platform.openai.com/ ) , [ google ] ( http : //cloud.google.com/vertex-ai/docs/generative-ai/learn/overview ) , [ anthropic ] ( http : //docs.anthropic.com/claude/reference/getting-started-with-the-api ) , [ cohere ] ( http : //docs.cohere.com/docs ) , etc . )", "open-source llm ( [ openrouter ] ( http : //openrouter.ai/ ) , [ hugging face ] ( http : //huggingface.co/inference-api ) , [ together ai ] ( http : //www.together.ai/ ) , etc . ) .", "* * * open-source llm * * : [ hugging face hub ] ( http : //huggingface.co/models ) great place find llm .", "directly run [ hugging face space ] ( http : //huggingface.co/spaces ) , download run locally apps like [ lm studio ] ( http : //lmstudio.ai/ ) cli [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) [ ollama ] ( http : //ollama.ai/ ) .", "* * * prompt engineering * * : common technique include zero-shot prompting , few-shot prompting , chain thought , react .", "work better bigger model , adapted smaller one .", "* * * structuring output * * : many task require structured output , like strict template json format .", "library like [ lmql ] ( http : //lmql.ai/ ) , [ outline ] ( http : //github.com/outlines-dev/outlines ) , [ guidance ] ( http : //github.com/guidance-ai/guidance ) , etc .", "used guide generation respect given structure .", "📚 * * reference * * : * [ run llm locally lm studio ] ( http : //www.kdnuggets.com/run-an-llm-locally-with-lm-studio ) nisha arya : short guide use lm studio .", "* [ prompt engineering guide ] ( http : //www.promptingguide.ai/ ) dair.ai : exhaustive list prompt technique example * [ outline - quickstart ] ( http : //outlines-dev.github.io/outlines/quickstart/ ) : list guided generation technique enabled outline .", "* [ lmql - overview ] ( http : //lmql.ai/docs/language/overview.html ) : introduction lmql language .", "-- -" ] ]
[ [ "1", ".", "running", "llm", "running", "llm", "difficult", "due", "high", "hardware", "requirement", ".", "depending", "use", "case", ",", "might", "want", "simply", "consume", "model", "api", "(", "like", "gpt-4", ")", "run", "locally", ".", "case", ",", "additional", "prompting", "guidance", "technique", "improve", "constrain", "output", "application", ".", "*", "*", "*", "llm", "apis", "*", "*", ":", "apis", "convenient", "way", "deploy", "llm", ".", "space", "divided", "private", "llm", "(", "[", "openai", "]", "(", "http", ":", "//platform.openai.com/", ")", ",", "[", "google", "]", "(", "http", ":", "//cloud.google.com/vertex-ai/docs/generative-ai/learn/overview", ")", ",", "[", "anthropic", "]", "(", "http", ":", "//docs.anthropic.com/claude/reference/getting-started-with-the-api", ")", ",", "[", "cohere", "]", "(", "http", ":", "//docs.cohere.com/docs", ")", ",", "etc", ".", ")", "open-source", "llm", "(", "[", "openrouter", "]", "(", "http", ":", "//openrouter.ai/", ")", ",", "[", "hugging", "face", "]", "(", "http", ":", "//huggingface.co/inference-api", ")", ",", "[", "together", "ai", "]", "(", "http", ":", "//www.together.ai/", ")", ",", "etc", ".", ")", ".", "*", "*", "*", "open-source", "llm", "*", "*", ":", "[", "hugging", "face", "hub", "]", "(", "http", ":", "//huggingface.co/models", ")", "great", "place", "find", "llm", ".", "directly", "run", "[", "hugging", "face", "space", "]", "(", "http", ":", "//huggingface.co/spaces", ")", ",", "download", "run", "locally", "apps", "like", "[", "lm", "studio", "]", "(", "http", ":", "//lmstudio.ai/", ")", "cli", "[", "llama.cpp", "]", "(", "http", ":", "//github.com/ggerganov/llama.cpp", ")", "[", "ollama", "]", "(", "http", ":", "//ollama.ai/", ")", ".", "*", "*", "*", "prompt", "engineering", "*", "*", ":", "common", "technique", "include", "zero-shot", "prompting", ",", "few-shot", "prompting", ",", "chain", "thought", ",", "react", ".", "work", "better", "bigger", "model", ",", "adapted", "smaller", "one", ".", "*", "*", "*", "structuring", "output", "*", "*", ":", "many", "task", "require", "structured", "output", ",", "like", "strict", "template", "json", "format", ".", "library", "like", "[", "lmql", "]", "(", "http", ":", "//lmql.ai/", ")", ",", "[", "outline", "]", "(", "http", ":", "//github.com/outlines-dev/outlines", ")", ",", "[", "guidance", "]", "(", "http", ":", "//github.com/guidance-ai/guidance", ")", ",", "etc", ".", "used", "guide", "generation", "respect", "given", "structure", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "run", "llm", "locally", "lm", "studio", "]", "(", "http", ":", "//www.kdnuggets.com/run-an-llm-locally-with-lm-studio", ")", "nisha", "arya", ":", "short", "guide", "use", "lm", "studio", ".", "*", "[", "prompt", "engineering", "guide", "]", "(", "http", ":", "//www.promptingguide.ai/", ")", "dair.ai", ":", "exhaustive", "list", "prompt", "technique", "example", "*", "[", "outline", "-", "quickstart", "]", "(", "http", ":", "//outlines-dev.github.io/outlines/quickstart/", ")", ":", "list", "guided", "generation", "technique", "enabled", "outline", ".", "*", "[", "lmql", "-", "overview", "]", "(", "http", ":", "//lmql.ai/docs/language/overview.html", ")", ":", "introduction", "lmql", "language", ".", "--", "-" ], [ "1 .", "running llm running llm difficult due high hardware requirement .", "depending use case , might want simply consume model api ( like gpt-4 ) run locally .", "case , additional prompting guidance technique improve constrain output application .", "* * * llm apis * * : apis convenient way deploy llm .", "space divided private llm ( [ openai ] ( http : //platform.openai.com/ ) , [ google ] ( http : //cloud.google.com/vertex-ai/docs/generative-ai/learn/overview ) , [ anthropic ] ( http : //docs.anthropic.com/claude/reference/getting-started-with-the-api ) , [ cohere ] ( http : //docs.cohere.com/docs ) , etc . )", "open-source llm ( [ openrouter ] ( http : //openrouter.ai/ ) , [ hugging face ] ( http : //huggingface.co/inference-api ) , [ together ai ] ( http : //www.together.ai/ ) , etc . ) .", "* * * open-source llm * * : [ hugging face hub ] ( http : //huggingface.co/models ) great place find llm .", "directly run [ hugging face space ] ( http : //huggingface.co/spaces ) , download run locally apps like [ lm studio ] ( http : //lmstudio.ai/ ) cli [ llama.cpp ] ( http : //github.com/ggerganov/llama.cpp ) [ ollama ] ( http : //ollama.ai/ ) .", "* * * prompt engineering * * : common technique include zero-shot prompting , few-shot prompting , chain thought , react .", "work better bigger model , adapted smaller one .", "* * * structuring output * * : many task require structured output , like strict template json format .", "library like [ lmql ] ( http : //lmql.ai/ ) , [ outline ] ( http : //github.com/outlines-dev/outlines ) , [ guidance ] ( http : //github.com/guidance-ai/guidance ) , etc .", "used guide generation respect given structure .", "📚 * * reference * * : * [ run llm locally lm studio ] ( http : //www.kdnuggets.com/run-an-llm-locally-with-lm-studio ) nisha arya : short guide use lm studio .", "* [ prompt engineering guide ] ( http : //www.promptingguide.ai/ ) dair.ai : exhaustive list prompt technique example * [ outline - quickstart ] ( http : //outlines-dev.github.io/outlines/quickstart/ ) : list guided generation technique enabled outline .", "* [ lmql - overview ] ( http : //lmql.ai/docs/language/overview.html ) : introduction lmql language .", "-- -" ] ]
1. Running LLMs Running LLMs can be difficult due to high hardware requirements. Depending on your use case, you might want to simply consume a model through an API (like GPT-4) or run it locally. In any case, additional prompting and guidance techniques can improve and constrain the output for your applications. * **LLM APIs**: APIs are a convenient way to deploy LLMs. This space is divided between private LLMs ([OpenAI](https://platform.openai.com/), [Google](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview), [Anthropic](https://docs.anthropic.com/claude/reference/getting-started-with-the-api), [Cohere](https://docs.cohere.com/docs), etc.) and open-source LLMs ([OpenRouter](https://openrouter.ai/), [Hugging Face](https://huggingface.co/inference-api), [Together AI](https://www.together.ai/), etc.). * **Open-source LLMs**: The [Hugging Face Hub](https://huggingface.co/models) is a great place to find LLMs. You can directly run some of them in [Hugging Face Spaces](https://huggingface.co/spaces), or download and run them locally in apps like [LM Studio](https://lmstudio.ai/) or through the CLI with [llama.cpp](https://github.com/ggerganov/llama.cpp) or [Ollama](https://ollama.ai/). * **Prompt engineering**: Common techniques include zero-shot prompting, few-shot prompting, chain of thought, and ReAct. They work better with bigger models, but can be adapted to smaller ones. * **Structuring outputs**: Many tasks require a structured output, like a strict template or a JSON format. Libraries like [LMQL](https://lmql.ai/), [Outlines](https://github.com/outlines-dev/outlines), [Guidance](https://github.com/guidance-ai/guidance), etc. can be used to guide the generation and respect a given structure. 📚 **References**: * [Run an LLM locally with LM Studio](https://www.kdnuggets.com/run-an-llm-locally-with-lm-studio) by Nisha Arya: Short guide on how to use LM Studio. * [Prompt engineering guide](https://www.promptingguide.ai/) by DAIR.AI: Exhaustive list of prompt techniques with examples * [Outlines - Quickstart](https://outlines-dev.github.io/outlines/quickstart/): List of guided generation techniques enabled by Outlines. * [LMQL - Overview](https://lmql.ai/docs/language/overview.html): Introduction to the LMQL language. ---
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "4", ".", "advanced", "rag", "real-life", "application", "require", "complex", "pipeline", ",", "including", "sql", "graph", "database", ",", "well", "automatically", "selecting", "relevant", "tool", "apis", ".", "advanced", "technique", "improve", "baseline", "solution", "provide", "additional", "feature", ".", "*", "*", "*", "query", "construction", "*", "*", ":", "structured", "data", "stored", "traditional", "database", "requires", "specific", "query", "language", "like", "sql", ",", "cypher", ",", "metadata", ",", "etc", ".", "directly", "translate", "user", "instruction", "query", "access", "data", "query", "construction", ".", "*", "*", "*", "agent", "tool", "*", "*", ":", "agent", "augment", "llm", "automatically", "selecting", "relevant", "tool", "provide", "answer", ".", "tool", "simple", "using", "google", "wikipedia", ",", "complex", "like", "python", "interpreter", "jira", ".", "*", "*", "*", "post-processing", "*", "*", ":", "final", "step", "process", "input", "fed", "llm", ".", "enhances", "relevance", "diversity", "document", "retrieved", "re-ranking", ",", "[", "rag-fusion", "]", "(", "http", ":", "//github.com/raudaschl/rag-fusion", ")", ",", "classification", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "langchain", "-", "query", "construction", "]", "(", "http", ":", "//blog.langchain.dev/query-construction/", ")", ":", "blog", "post", "different", "type", "query", "construction", ".", "*", "[", "langchain", "-", "sql", "]", "(", "http", ":", "//python.langchain.com/docs/use_cases/qa_structured/sql", ")", ":", "tutorial", "interact", "sql", "database", "llm", ",", "involving", "text-to-sql", "optional", "sql", "agent", ".", "*", "[", "pinecone", "-", "llm", "agent", "]", "(", "http", ":", "//www.pinecone.io/learn/series/langchain/langchain-agents/", ")", ":", "introduction", "agent", "tool", "different", "type", ".", "*", "[", "llm", "powered", "autonomous", "agent", "]", "(", "http", ":", "//lilianweng.github.io/posts/2023-06-23-agent/", ")", "lilian", "weng", ":", "theoretical", "article", "llm", "agent", ".", "*", "[", "langchain", "-", "openai", "'s", "rag", "]", "(", "http", ":", "//blog.langchain.dev/applying-openai-rag/", ")", ":", "overview", "rag", "strategy", "employed", "openai", ",", "including", "post-processing", ".", "--", "-" ], [ "4 .", "advanced rag real-life application require complex pipeline , including sql graph database , well automatically selecting relevant tool apis .", "advanced technique improve baseline solution provide additional feature .", "* * * query construction * * : structured data stored traditional database requires specific query language like sql , cypher , metadata , etc .", "directly translate user instruction query access data query construction .", "* * * agent tool * * : agent augment llm automatically selecting relevant tool provide answer .", "tool simple using google wikipedia , complex like python interpreter jira .", "* * * post-processing * * : final step process input fed llm .", "enhances relevance diversity document retrieved re-ranking , [ rag-fusion ] ( http : //github.com/raudaschl/rag-fusion ) , classification .", "📚 * * reference * * : * [ langchain - query construction ] ( http : //blog.langchain.dev/query-construction/ ) : blog post different type query construction .", "* [ langchain - sql ] ( http : //python.langchain.com/docs/use_cases/qa_structured/sql ) : tutorial interact sql database llm , involving text-to-sql optional sql agent .", "* [ pinecone - llm agent ] ( http : //www.pinecone.io/learn/series/langchain/langchain-agents/ ) : introduction agent tool different type .", "* [ llm powered autonomous agent ] ( http : //lilianweng.github.io/posts/2023-06-23-agent/ ) lilian weng : theoretical article llm agent .", "* [ langchain - openai 's rag ] ( http : //blog.langchain.dev/applying-openai-rag/ ) : overview rag strategy employed openai , including post-processing .", "-- -" ] ]
[ [ "4", ".", "advanced", "rag", "real-life", "application", "require", "complex", "pipeline", ",", "including", "sql", "graph", "database", ",", "well", "automatically", "selecting", "relevant", "tool", "apis", ".", "advanced", "technique", "improve", "baseline", "solution", "provide", "additional", "feature", ".", "*", "*", "*", "query", "construction", "*", "*", ":", "structured", "data", "stored", "traditional", "database", "requires", "specific", "query", "language", "like", "sql", ",", "cypher", ",", "metadata", ",", "etc", ".", "directly", "translate", "user", "instruction", "query", "access", "data", "query", "construction", ".", "*", "*", "*", "agent", "tool", "*", "*", ":", "agent", "augment", "llm", "automatically", "selecting", "relevant", "tool", "provide", "answer", ".", "tool", "simple", "using", "google", "wikipedia", ",", "complex", "like", "python", "interpreter", "jira", ".", "*", "*", "*", "post-processing", "*", "*", ":", "final", "step", "process", "input", "fed", "llm", ".", "enhances", "relevance", "diversity", "document", "retrieved", "re-ranking", ",", "[", "rag-fusion", "]", "(", "http", ":", "//github.com/raudaschl/rag-fusion", ")", ",", "classification", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "langchain", "-", "query", "construction", "]", "(", "http", ":", "//blog.langchain.dev/query-construction/", ")", ":", "blog", "post", "different", "type", "query", "construction", ".", "*", "[", "langchain", "-", "sql", "]", "(", "http", ":", "//python.langchain.com/docs/use_cases/qa_structured/sql", ")", ":", "tutorial", "interact", "sql", "database", "llm", ",", "involving", "text-to-sql", "optional", "sql", "agent", ".", "*", "[", "pinecone", "-", "llm", "agent", "]", "(", "http", ":", "//www.pinecone.io/learn/series/langchain/langchain-agents/", ")", ":", "introduction", "agent", "tool", "different", "type", ".", "*", "[", "llm", "powered", "autonomous", "agent", "]", "(", "http", ":", "//lilianweng.github.io/posts/2023-06-23-agent/", ")", "lilian", "weng", ":", "theoretical", "article", "llm", "agent", ".", "*", "[", "langchain", "-", "openai", "'s", "rag", "]", "(", "http", ":", "//blog.langchain.dev/applying-openai-rag/", ")", ":", "overview", "rag", "strategy", "employed", "openai", ",", "including", "post-processing", ".", "--", "-" ], [ "4 .", "advanced rag real-life application require complex pipeline , including sql graph database , well automatically selecting relevant tool apis .", "advanced technique improve baseline solution provide additional feature .", "* * * query construction * * : structured data stored traditional database requires specific query language like sql , cypher , metadata , etc .", "directly translate user instruction query access data query construction .", "* * * agent tool * * : agent augment llm automatically selecting relevant tool provide answer .", "tool simple using google wikipedia , complex like python interpreter jira .", "* * * post-processing * * : final step process input fed llm .", "enhances relevance diversity document retrieved re-ranking , [ rag-fusion ] ( http : //github.com/raudaschl/rag-fusion ) , classification .", "📚 * * reference * * : * [ langchain - query construction ] ( http : //blog.langchain.dev/query-construction/ ) : blog post different type query construction .", "* [ langchain - sql ] ( http : //python.langchain.com/docs/use_cases/qa_structured/sql ) : tutorial interact sql database llm , involving text-to-sql optional sql agent .", "* [ pinecone - llm agent ] ( http : //www.pinecone.io/learn/series/langchain/langchain-agents/ ) : introduction agent tool different type .", "* [ llm powered autonomous agent ] ( http : //lilianweng.github.io/posts/2023-06-23-agent/ ) lilian weng : theoretical article llm agent .", "* [ langchain - openai 's rag ] ( http : //blog.langchain.dev/applying-openai-rag/ ) : overview rag strategy employed openai , including post-processing .", "-- -" ] ]
4. Advanced RAG Real-life applications can require complex pipelines, including SQL or graph databases, as well as automatically selecting relevant tools and APIs. These advanced techniques can improve a baseline solution and provide additional features. * **Query construction**: Structured data stored in traditional databases requires a specific query language like SQL, Cypher, metadata, etc. We can directly translate the user instruction into a query to access the data with query construction. * **Agents and tools**: Agents augment LLMs by automatically selecting the most relevant tools to provide an answer. These tools can be as simple as using Google or Wikipedia, or more complex like a Python interpreter or Jira. * **Post-processing**: Final step that processes the inputs that are fed to the LLM. It enhances the relevance and diversity of documents retrieved with re-ranking, [RAG-fusion](https://github.com/Raudaschl/rag-fusion), and classification. 📚 **References**: * [LangChain - Query Construction](https://blog.langchain.dev/query-construction/): Blog post about different types of query construction. * [LangChain - SQL](https://python.langchain.com/docs/use_cases/qa_structured/sql): Tutorial on how to interact with SQL databases with LLMs, involving Text-to-SQL and an optional SQL agent. * [Pinecone - LLM agents](https://www.pinecone.io/learn/series/langchain/langchain-agents/): Introduction to agents and tools with different types. * [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) by Lilian Weng: More theoretical article about LLM agents. * [LangChain - OpenAI's RAG](https://blog.langchain.dev/applying-openai-rag/): Overview of the RAG strategies employed by OpenAI, including post-processing. ---
https://github.com/mlabonne/llm-course
2
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "5", ".", "inference", "optimization", "text", "generation", "costly", "process", "requires", "expensive", "hardware", ".", "addition", "quantization", ",", "various", "technique", "proposed", "maximize", "throughput", "reduce", "inference", "cost", ".", "*", "*", "*", "flash", "attention", "*", "*", ":", "optimization", "attention", "mechanism", "transform", "complexity", "quadratic", "linear", ",", "speeding", "training", "inference", ".", "*", "*", "*", "key-value", "cache", "*", "*", ":", "understand", "key-value", "cache", "improvement", "introduced", "[", "multi-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/1911.02150", ")", "(", "mqa", ")", "[", "grouped-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/2305.13245", ")", "(", "gqa", ")", ".", "*", "*", "*", "speculative", "decoding", "*", "*", ":", "use", "small", "model", "produce", "draft", "reviewed", "larger", "model", "speed", "text", "generation", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "gpu", "inference", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/perf_infer_gpu_one", ")", "hugging", "face", ":", "explain", "optimize", "inference", "gpus", ".", "*", "[", "llm", "inference", "]", "(", "http", ":", "//www.databricks.com/blog/llm-inference-performance-engineering-best-practices", ")", "databricks", ":", "best", "practice", "optimize", "llm", "inference", "production", ".", "*", "[", "optimizing", "llm", "speed", "memory", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/llm_tutorial_optimization", ")", "hugging", "face", ":", "explain", "three", "main", "technique", "optimize", "speed", "memory", ",", "namely", "quantization", ",", "flash", "attention", ",", "architectural", "innovation", ".", "*", "[", "assisted", "generation", "]", "(", "http", ":", "//huggingface.co/blog/assisted-generation", ")", "hugging", "face", ":", "hf", "'s", "version", "speculative", "decoding", ",", "'s", "interesting", "blog", "post", "work", "code", "implement", ".", "--", "-" ], [ "5 .", "inference optimization text generation costly process requires expensive hardware .", "addition quantization , various technique proposed maximize throughput reduce inference cost .", "* * * flash attention * * : optimization attention mechanism transform complexity quadratic linear , speeding training inference .", "* * * key-value cache * * : understand key-value cache improvement introduced [ multi-query attention ] ( http : //arxiv.org/abs/1911.02150 ) ( mqa ) [ grouped-query attention ] ( http : //arxiv.org/abs/2305.13245 ) ( gqa ) .", "* * * speculative decoding * * : use small model produce draft reviewed larger model speed text generation .", "📚 * * reference * * : * [ gpu inference ] ( http : //huggingface.co/docs/transformers/main/en/perf_infer_gpu_one ) hugging face : explain optimize inference gpus .", "* [ llm inference ] ( http : //www.databricks.com/blog/llm-inference-performance-engineering-best-practices ) databricks : best practice optimize llm inference production .", "* [ optimizing llm speed memory ] ( http : //huggingface.co/docs/transformers/main/en/llm_tutorial_optimization ) hugging face : explain three main technique optimize speed memory , namely quantization , flash attention , architectural innovation .", "* [ assisted generation ] ( http : //huggingface.co/blog/assisted-generation ) hugging face : hf 's version speculative decoding , 's interesting blog post work code implement .", "-- -" ] ]
[ [ "5", ".", "inference", "optimization", "text", "generation", "costly", "process", "requires", "expensive", "hardware", ".", "addition", "quantization", ",", "various", "technique", "proposed", "maximize", "throughput", "reduce", "inference", "cost", ".", "*", "*", "*", "flash", "attention", "*", "*", ":", "optimization", "attention", "mechanism", "transform", "complexity", "quadratic", "linear", ",", "speeding", "training", "inference", ".", "*", "*", "*", "key-value", "cache", "*", "*", ":", "understand", "key-value", "cache", "improvement", "introduced", "[", "multi-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/1911.02150", ")", "(", "mqa", ")", "[", "grouped-query", "attention", "]", "(", "http", ":", "//arxiv.org/abs/2305.13245", ")", "(", "gqa", ")", ".", "*", "*", "*", "speculative", "decoding", "*", "*", ":", "use", "small", "model", "produce", "draft", "reviewed", "larger", "model", "speed", "text", "generation", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "gpu", "inference", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/perf_infer_gpu_one", ")", "hugging", "face", ":", "explain", "optimize", "inference", "gpus", ".", "*", "[", "llm", "inference", "]", "(", "http", ":", "//www.databricks.com/blog/llm-inference-performance-engineering-best-practices", ")", "databricks", ":", "best", "practice", "optimize", "llm", "inference", "production", ".", "*", "[", "optimizing", "llm", "speed", "memory", "]", "(", "http", ":", "//huggingface.co/docs/transformers/main/en/llm_tutorial_optimization", ")", "hugging", "face", ":", "explain", "three", "main", "technique", "optimize", "speed", "memory", ",", "namely", "quantization", ",", "flash", "attention", ",", "architectural", "innovation", ".", "*", "[", "assisted", "generation", "]", "(", "http", ":", "//huggingface.co/blog/assisted-generation", ")", "hugging", "face", ":", "hf", "'s", "version", "speculative", "decoding", ",", "'s", "interesting", "blog", "post", "work", "code", "implement", ".", "--", "-" ], [ "5 .", "inference optimization text generation costly process requires expensive hardware .", "addition quantization , various technique proposed maximize throughput reduce inference cost .", "* * * flash attention * * : optimization attention mechanism transform complexity quadratic linear , speeding training inference .", "* * * key-value cache * * : understand key-value cache improvement introduced [ multi-query attention ] ( http : //arxiv.org/abs/1911.02150 ) ( mqa ) [ grouped-query attention ] ( http : //arxiv.org/abs/2305.13245 ) ( gqa ) .", "* * * speculative decoding * * : use small model produce draft reviewed larger model speed text generation .", "📚 * * reference * * : * [ gpu inference ] ( http : //huggingface.co/docs/transformers/main/en/perf_infer_gpu_one ) hugging face : explain optimize inference gpus .", "* [ llm inference ] ( http : //www.databricks.com/blog/llm-inference-performance-engineering-best-practices ) databricks : best practice optimize llm inference production .", "* [ optimizing llm speed memory ] ( http : //huggingface.co/docs/transformers/main/en/llm_tutorial_optimization ) hugging face : explain three main technique optimize speed memory , namely quantization , flash attention , architectural innovation .", "* [ assisted generation ] ( http : //huggingface.co/blog/assisted-generation ) hugging face : hf 's version speculative decoding , 's interesting blog post work code implement .", "-- -" ] ]
5. Inference optimization Text generation is a costly process that requires expensive hardware. In addition to quantization, various techniques have been proposed to maximize throughput and reduce inference costs. * **Flash Attention**: Optimization of the attention mechanism to transform its complexity from quadratic to linear, speeding up both training and inference. * **Key-value cache**: Understand the key-value cache and the improvements introduced in [Multi-Query Attention](https://arxiv.org/abs/1911.02150) (MQA) and [Grouped-Query Attention](https://arxiv.org/abs/2305.13245) (GQA). * **Speculative decoding**: Use a small model to produce drafts that are then reviewed by a larger model to speed up text generation. 📚 **References**: * [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one) by Hugging Face: Explain how to optimize inference on GPUs. * [LLM Inference](https://www.databricks.com/blog/llm-inference-performance-engineering-best-practices) by Databricks: Best practices for how to optimize LLM inference in production. * [Optimizing LLMs for Speed and Memory](https://huggingface.co/docs/transformers/main/en/llm_tutorial_optimization) by Hugging Face: Explain three main techniques to optimize speed and memory, namely quantization, Flash Attention, and architectural innovations. * [Assisted Generation](https://huggingface.co/blog/assisted-generation) by Hugging Face: HF's version of speculative decoding, it's an interesting blog post about how it works with code to implement it. ---
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/mlabonne/llm-course/main/README.md
[ [ "7", ".", "securing", "llm", "addition", "traditional", "security", "problem", "associated", "software", ",", "llm", "unique", "weakness", "due", "way", "trained", "prompted", ".", "*", "*", "*", "prompt", "hacking", "*", "*", ":", "different", "technique", "related", "prompt", "engineering", ",", "including", "prompt", "injection", "(", "additional", "instruction", "hijack", "model", "'s", "answer", ")", ",", "data/prompt", "leaking", "(", "retrieve", "original", "data/prompt", ")", ",", "jailbreaking", "(", "craft", "prompt", "bypass", "safety", "feature", ")", ".", "*", "*", "*", "backdoor", "*", "*", ":", "attack", "vector", "target", "training", "data", ",", "poisoning", "training", "data", "(", "e.g.", ",", "false", "information", ")", "creating", "backdoor", "(", "secret", "trigger", "change", "model", "'s", "behavior", "inference", ")", ".", "*", "*", "*", "defensive", "measure", "*", "*", ":", "best", "way", "protect", "llm", "application", "test", "vulnerability", "(", "e.g.", ",", "using", "red", "teaming", "check", "like", "[", "garak", "]", "(", "http", ":", "//github.com/leondz/garak/", ")", ")", "observe", "production", "(", "framework", "like", "[", "langfuse", "]", "(", "http", ":", "//github.com/langfuse/langfuse", ")", ")", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "owasp", "llm", "top", "10", "]", "(", "http", ":", "//owasp.org/www-project-top-10-for-large-language-model-applications/", ")", "hego", "wiki", ":", "list", "10", "critic", "vulnerability", "seen", "llm", "application", ".", "*", "[", "prompt", "injection", "primer", "]", "(", "http", ":", "//github.com/jthack/pipe", ")", "joseph", "thacker", ":", "short", "guide", "dedicated", "prompt", "injection", "engineer", ".", "*", "[", "llm", "security", "]", "(", "http", ":", "//llmsecurity.net/", ")", "[", "@", "llm_sec", "]", "(", "http", ":", "//twitter.com/llm_sec", ")", ":", "extensive", "list", "resource", "related", "llm", "security", ".", "*", "[", "red", "teaming", "llm", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming", ")", "microsoft", ":", "guide", "perform", "red", "teaming", "llm", ".", "--", "-" ], [ "7 .", "securing llm addition traditional security problem associated software , llm unique weakness due way trained prompted .", "* * * prompt hacking * * : different technique related prompt engineering , including prompt injection ( additional instruction hijack model 's answer ) , data/prompt leaking ( retrieve original data/prompt ) , jailbreaking ( craft prompt bypass safety feature ) .", "* * * backdoor * * : attack vector target training data , poisoning training data ( e.g. , false information ) creating backdoor ( secret trigger change model 's behavior inference ) .", "* * * defensive measure * * : best way protect llm application test vulnerability ( e.g. , using red teaming check like [ garak ] ( http : //github.com/leondz/garak/ ) ) observe production ( framework like [ langfuse ] ( http : //github.com/langfuse/langfuse ) ) .", "📚 * * reference * * : * [ owasp llm top 10 ] ( http : //owasp.org/www-project-top-10-for-large-language-model-applications/ ) hego wiki : list 10 critic vulnerability seen llm application .", "* [ prompt injection primer ] ( http : //github.com/jthack/pipe ) joseph thacker : short guide dedicated prompt injection engineer .", "* [ llm security ] ( http : //llmsecurity.net/ ) [ @ llm_sec ] ( http : //twitter.com/llm_sec ) : extensive list resource related llm security .", "* [ red teaming llm ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming ) microsoft : guide perform red teaming llm .", "-- -" ] ]
[ [ "7", ".", "securing", "llm", "addition", "traditional", "security", "problem", "associated", "software", ",", "llm", "unique", "weakness", "due", "way", "trained", "prompted", ".", "*", "*", "*", "prompt", "hacking", "*", "*", ":", "different", "technique", "related", "prompt", "engineering", ",", "including", "prompt", "injection", "(", "additional", "instruction", "hijack", "model", "'s", "answer", ")", ",", "data/prompt", "leaking", "(", "retrieve", "original", "data/prompt", ")", ",", "jailbreaking", "(", "craft", "prompt", "bypass", "safety", "feature", ")", ".", "*", "*", "*", "backdoor", "*", "*", ":", "attack", "vector", "target", "training", "data", ",", "poisoning", "training", "data", "(", "e.g.", ",", "false", "information", ")", "creating", "backdoor", "(", "secret", "trigger", "change", "model", "'s", "behavior", "inference", ")", ".", "*", "*", "*", "defensive", "measure", "*", "*", ":", "best", "way", "protect", "llm", "application", "test", "vulnerability", "(", "e.g.", ",", "using", "red", "teaming", "check", "like", "[", "garak", "]", "(", "http", ":", "//github.com/leondz/garak/", ")", ")", "observe", "production", "(", "framework", "like", "[", "langfuse", "]", "(", "http", ":", "//github.com/langfuse/langfuse", ")", ")", ".", "📚", "*", "*", "reference", "*", "*", ":", "*", "[", "owasp", "llm", "top", "10", "]", "(", "http", ":", "//owasp.org/www-project-top-10-for-large-language-model-applications/", ")", "hego", "wiki", ":", "list", "10", "critic", "vulnerability", "seen", "llm", "application", ".", "*", "[", "prompt", "injection", "primer", "]", "(", "http", ":", "//github.com/jthack/pipe", ")", "joseph", "thacker", ":", "short", "guide", "dedicated", "prompt", "injection", "engineer", ".", "*", "[", "llm", "security", "]", "(", "http", ":", "//llmsecurity.net/", ")", "[", "@", "llm_sec", "]", "(", "http", ":", "//twitter.com/llm_sec", ")", ":", "extensive", "list", "resource", "related", "llm", "security", ".", "*", "[", "red", "teaming", "llm", "]", "(", "http", ":", "//learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming", ")", "microsoft", ":", "guide", "perform", "red", "teaming", "llm", ".", "--", "-" ], [ "7 .", "securing llm addition traditional security problem associated software , llm unique weakness due way trained prompted .", "* * * prompt hacking * * : different technique related prompt engineering , including prompt injection ( additional instruction hijack model 's answer ) , data/prompt leaking ( retrieve original data/prompt ) , jailbreaking ( craft prompt bypass safety feature ) .", "* * * backdoor * * : attack vector target training data , poisoning training data ( e.g. , false information ) creating backdoor ( secret trigger change model 's behavior inference ) .", "* * * defensive measure * * : best way protect llm application test vulnerability ( e.g. , using red teaming check like [ garak ] ( http : //github.com/leondz/garak/ ) ) observe production ( framework like [ langfuse ] ( http : //github.com/langfuse/langfuse ) ) .", "📚 * * reference * * : * [ owasp llm top 10 ] ( http : //owasp.org/www-project-top-10-for-large-language-model-applications/ ) hego wiki : list 10 critic vulnerability seen llm application .", "* [ prompt injection primer ] ( http : //github.com/jthack/pipe ) joseph thacker : short guide dedicated prompt injection engineer .", "* [ llm security ] ( http : //llmsecurity.net/ ) [ @ llm_sec ] ( http : //twitter.com/llm_sec ) : extensive list resource related llm security .", "* [ red teaming llm ] ( http : //learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming ) microsoft : guide perform red teaming llm .", "-- -" ] ]
7. Securing LLMs In addition to traditional security problems associated with software, LLMs have unique weaknesses due to the way they are trained and prompted. * **Prompt hacking**: Different techniques related to prompt engineering, including prompt injection (additional instruction to hijack the model's answer), data/prompt leaking (retrieve its original data/prompt), and jailbreaking (craft prompts to bypass safety features). * **Backdoors**: Attack vectors can target the training data itself, by poisoning the training data (e.g., with false information) or creating backdoors (secret triggers to change the model's behavior during inference). * **Defensive measures**: The best way to protect your LLM applications is to test them against these vulnerabilities (e.g., using red teaming and checks like [garak](https://github.com/leondz/garak/)) and observe them in production (with a framework like [langfuse](https://github.com/langfuse/langfuse)). 📚 **References**: * [OWASP LLM Top 10](https://owasp.org/www-project-top-10-for-large-language-model-applications/) by HEGO Wiki: List of the 10 most critic vulnerabilities seen in LLM applications. * [Prompt Injection Primer](https://github.com/jthack/PIPE) by Joseph Thacker: Short guide dedicated to prompt injection for engineers. * [LLM Security](https://llmsecurity.net/) by [@llm_sec](https://twitter.com/llm_sec): Extensive list of resources related to LLM security. * [Red teaming LLMs](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/red-teaming) by Microsoft: Guide on how to perform red teaming with LLMs. ---
https://github.com/mlabonne/llm-course
-1
[ "course", "large-language-models", "llm", "machine-learning", "roadmap" ]
https://raw.githubusercontent.com/FlowiseAI/Flowise/main/README.md
[ [ "⚡quick", "start", "download", "install", "[", "nodejs", "]", "(", "http", ":", "//nodejs.org/en/download", ")", ">", "=", "18.15.0", "1", ".", "install", "flowise", "``", "`", "bash", "npm", "install", "-g", "flowise", "``", "`", "2", ".", "start", "flowise", "``", "`", "bash", "npx", "flowise", "start", "``", "`", "username", "&", "password", "``", "`", "bash", "npx", "flowise", "start", "--", "flowise_username=user", "--", "flowise_password=1234", "``", "`", "3", ".", "open", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")" ], [ "⚡quick start download install [ nodejs ] ( http : //nodejs.org/en/download ) > = 18.15.0 1 .", "install flowise `` ` bash npm install -g flowise `` ` 2 .", "start flowise `` ` bash npx flowise start `` ` username & password `` ` bash npx flowise start -- flowise_username=user -- flowise_password=1234 `` ` 3 .", "open [ http : //localhost:3000 ] ( http : //localhost:3000 )" ] ]
[ [ "⚡quick", "start", "download", "install", "[", "nodejs", "]", "(", "http", ":", "//nodejs.org/en/download", ")", ">", "=", "18.15.0", "1", ".", "install", "flowise", "``", "`", "bash", "npm", "install", "-g", "flowise", "``", "`", "2", ".", "start", "flowise", "``", "`", "bash", "npx", "flowise", "start", "``", "`", "username", "&", "password", "``", "`", "bash", "npx", "flowise", "start", "--", "flowise_username=user", "--", "flowise_password=1234", "``", "`", "3", ".", "open", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")" ], [ "⚡quick start download install [ nodejs ] ( http : //nodejs.org/en/download ) > = 18.15.0 1 .", "install flowise `` ` bash npm install -g flowise `` ` 2 .", "start flowise `` ` bash npx flowise start `` ` username & password `` ` bash npx flowise start -- flowise_username=user -- flowise_password=1234 `` ` 3 .", "open [ http : //localhost:3000 ] ( http : //localhost:3000 )" ] ]
⚡Quick Start Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0 1. Install Flowise ```bash npm install -g flowise ``` 2. Start Flowise ```bash npx flowise start ``` With username & password ```bash npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234 ``` 3. Open [http://localhost:3000](http://localhost:3000)
https://github.com/FlowiseAI/Flowise
-1
[ "artificial-intelligence", "chatgpt", "javascript", "large-language-models", "llm", "low-code", "no-code", "react", "typescript" ]
https://raw.githubusercontent.com/FlowiseAI/Flowise/main/README.md
[ [ "prerequisite", "-", "install", "[", "yarn", "v1", "]", "(", "http", ":", "//classic.yarnpkg.com/en/docs/install", ")", "``", "`", "bash", "npm", "-g", "yarn", "``", "`" ], [ "prerequisite - install [ yarn v1 ] ( http : //classic.yarnpkg.com/en/docs/install ) `` ` bash npm -g yarn `` `" ] ]
[ [ "prerequisite", "-", "install", "[", "yarn", "v1", "]", "(", "http", ":", "//classic.yarnpkg.com/en/docs/install", ")", "``", "`", "bash", "npm", "-g", "yarn", "``", "`" ], [ "prerequisite - install [ yarn v1 ] ( http : //classic.yarnpkg.com/en/docs/install ) `` ` bash npm -g yarn `` `" ] ]
Prerequisite - Install [Yarn v1](https://classic.yarnpkg.com/en/docs/install) ```bash npm i -g yarn ```
https://github.com/FlowiseAI/Flowise
-1
[ "artificial-intelligence", "chatgpt", "javascript", "large-language-models", "llm", "low-code", "no-code", "react", "typescript" ]
https://raw.githubusercontent.com/FlowiseAI/Flowise/main/README.md
[ [ "setup", "1", ".", "clone", "repository", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/flowiseai/flowise.git", "``", "`", "2", ".", "go", "repository", "folder", "``", "`", "bash", "cd", "flowise", "``", "`", "3", ".", "install", "dependency", "module", ":", "``", "`", "bash", "yarn", "install", "``", "`", "4", ".", "build", "code", ":", "``", "`", "bash", "yarn", "build", "``", "`", "5", ".", "start", "app", ":", "``", "`", "bash", "yarn", "start", "``", "`", "access", "app", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")", "6", ".", "development", "build", ":", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/ui", "`", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/server", "`", "-", "run", "``", "`", "bash", "yarn", "dev", "``", "`", "code", "change", "reload", "app", "automatically", "[", "http", ":", "//localhost:8080", "]", "(", "http", ":", "//localhost:8080", ")" ], [ "setup 1 .", "clone repository `` ` bash git clone http : //github.com/flowiseai/flowise.git `` ` 2 .", "go repository folder `` ` bash cd flowise `` ` 3 .", "install dependency module : `` ` bash yarn install `` ` 4 .", "build code : `` ` bash yarn build `` ` 5 .", "start app : `` ` bash yarn start `` ` access app [ http : //localhost:3000 ] ( http : //localhost:3000 ) 6 .", "development build : - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/ui ` - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/server ` - run `` ` bash yarn dev `` ` code change reload app automatically [ http : //localhost:8080 ] ( http : //localhost:8080 )" ] ]
[ [ "setup", "1", ".", "clone", "repository", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/flowiseai/flowise.git", "``", "`", "2", ".", "go", "repository", "folder", "``", "`", "bash", "cd", "flowise", "``", "`", "3", ".", "install", "dependency", "module", ":", "``", "`", "bash", "yarn", "install", "``", "`", "4", ".", "build", "code", ":", "``", "`", "bash", "yarn", "build", "``", "`", "5", ".", "start", "app", ":", "``", "`", "bash", "yarn", "start", "``", "`", "access", "app", "[", "http", ":", "//localhost:3000", "]", "(", "http", ":", "//localhost:3000", ")", "6", ".", "development", "build", ":", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/ui", "`", "-", "create", "`", ".env", "`", "file", "specify", "`", "port", "`", "(", "refer", "`", ".env.example", "`", ")", "`", "packages/server", "`", "-", "run", "``", "`", "bash", "yarn", "dev", "``", "`", "code", "change", "reload", "app", "automatically", "[", "http", ":", "//localhost:8080", "]", "(", "http", ":", "//localhost:8080", ")" ], [ "setup 1 .", "clone repository `` ` bash git clone http : //github.com/flowiseai/flowise.git `` ` 2 .", "go repository folder `` ` bash cd flowise `` ` 3 .", "install dependency module : `` ` bash yarn install `` ` 4 .", "build code : `` ` bash yarn build `` ` 5 .", "start app : `` ` bash yarn start `` ` access app [ http : //localhost:3000 ] ( http : //localhost:3000 ) 6 .", "development build : - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/ui ` - create ` .env ` file specify ` port ` ( refer ` .env.example ` ) ` packages/server ` - run `` ` bash yarn dev `` ` code change reload app automatically [ http : //localhost:8080 ] ( http : //localhost:8080 )" ] ]
Setup 1. Clone the repository ```bash git clone https://github.com/FlowiseAI/Flowise.git ``` 2. Go into repository folder ```bash cd Flowise ``` 3. Install all dependencies of all modules: ```bash yarn install ``` 4. Build all the code: ```bash yarn build ``` 5. Start the app: ```bash yarn start ``` You can now access the app on [http://localhost:3000](http://localhost:3000) 6. For development build: - Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/ui` - Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/server` - Run ```bash yarn dev ``` Any code changes will reload the app automatically on [http://localhost:8080](http://localhost:8080)
https://github.com/FlowiseAI/Flowise
2
[ "artificial-intelligence", "chatgpt", "javascript", "large-language-models", "llm", "low-code", "no-code", "react", "typescript" ]
https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md
[ [ "getting", "started", "semantic", "kernel", "semantic", "kernel", "sdk", "available", "c", "#", ",", "python", ",", "java", ".", "get", "started", ",", "choose", "preferred", "language", ".", "see", "[", "feature", "matrix", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages", ")", "see", "breakdown", "feature", "parity", "currently", "supported", "language", ".", "<", "table", "width=100", "%", ">", "<", "tbody", ">", "<", "tr", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "src=", "''", "http", ":", "//user-images.githubusercontent.com/371009/230673036-fad1e8e6-5d48-49b1-a9c1-6f9834e0d165.png", "''", ">", "<", "div", ">", "<", "href=", "''", "dotnet/readme.md", "''", ">", "using", "semantic", "kernel", "c", "#", "<", "/a", ">", "&", "nbsp", "<", "br/", ">", "<", "/div", ">", "<", "/td", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "src=", "''", "http", ":", "//raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg", "''", ">", "<", "div", ">", "<", "href=", "''", "python/readme.md", "''", ">", "using", "semantic", "kernel", "python", "<", "/a", ">", "<", "/div", ">", "<", "/td", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "height=52px", "src=", "''", "http", ":", "//upload.wikimedia.org/wikipedia/en/3/30/java_programming_language_logo.svg", "''", "alt=", "''", "java", "logo", "''", ">", "<", "div", ">", "<", "href=", "''", "http", ":", "//github.com/microsoft/semantic-kernel/blob/main/java/readme.md", "''", ">", "using", "semantic", "kernel", "java", "<", "/a", ">", "<", "/div", ">", "<", "/td", ">", "<", "/tr", ">", "<", "/tbody", ">", "<", "/table", ">", "quickest", "way", "get", "started", "basic", "get", "api", "key", "either", "openai", "azure", "openai", "run", "one", "c", "#", ",", "python", ",", "java", "console", "applications/scripts", "." ], [ "getting started semantic kernel semantic kernel sdk available c # , python , java .", "get started , choose preferred language .", "see [ feature matrix ] ( http : //learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages ) see breakdown feature parity currently supported language .", "< table width=100 % > < tbody > < tr > < td > < img align= '' left '' width=52px src= '' http : //user-images.githubusercontent.com/371009/230673036-fad1e8e6-5d48-49b1-a9c1-6f9834e0d165.png '' > < div > < href= '' dotnet/readme.md '' > using semantic kernel c # < /a > & nbsp < br/ > < /div > < /td > < td > < img align= '' left '' width=52px src= '' http : //raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg '' > < div > < href= '' python/readme.md '' > using semantic kernel python < /a > < /div > < /td > < td > < img align= '' left '' width=52px height=52px src= '' http : //upload.wikimedia.org/wikipedia/en/3/30/java_programming_language_logo.svg '' alt= '' java logo '' > < div > < href= '' http : //github.com/microsoft/semantic-kernel/blob/main/java/readme.md '' > using semantic kernel java < /a > < /div > < /td > < /tr > < /tbody > < /table > quickest way get started basic get api key either openai azure openai run one c # , python , java console applications/scripts ." ] ]
[ [ "getting", "started", "semantic", "kernel", "semantic", "kernel", "sdk", "available", "c", "#", ",", "python", ",", "java", ".", "get", "started", ",", "choose", "preferred", "language", ".", "see", "[", "feature", "matrix", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages", ")", "see", "breakdown", "feature", "parity", "currently", "supported", "language", ".", "<", "table", "width=100", "%", ">", "<", "tbody", ">", "<", "tr", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "src=", "''", "http", ":", "//user-images.githubusercontent.com/371009/230673036-fad1e8e6-5d48-49b1-a9c1-6f9834e0d165.png", "''", ">", "<", "div", ">", "<", "href=", "''", "dotnet/readme.md", "''", ">", "using", "semantic", "kernel", "c", "#", "<", "/a", ">", "&", "nbsp", "<", "br/", ">", "<", "/div", ">", "<", "/td", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "src=", "''", "http", ":", "//raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg", "''", ">", "<", "div", ">", "<", "href=", "''", "python/readme.md", "''", ">", "using", "semantic", "kernel", "python", "<", "/a", ">", "<", "/div", ">", "<", "/td", ">", "<", "td", ">", "<", "img", "align=", "''", "left", "''", "width=52px", "height=52px", "src=", "''", "http", ":", "//upload.wikimedia.org/wikipedia/en/3/30/java_programming_language_logo.svg", "''", "alt=", "''", "java", "logo", "''", ">", "<", "div", ">", "<", "href=", "''", "http", ":", "//github.com/microsoft/semantic-kernel/blob/main/java/readme.md", "''", ">", "using", "semantic", "kernel", "java", "<", "/a", ">", "<", "/div", ">", "<", "/td", ">", "<", "/tr", ">", "<", "/tbody", ">", "<", "/table", ">", "quickest", "way", "get", "started", "basic", "get", "api", "key", "either", "openai", "azure", "openai", "run", "one", "c", "#", ",", "python", ",", "java", "console", "applications/scripts", "." ], [ "getting started semantic kernel semantic kernel sdk available c # , python , java .", "get started , choose preferred language .", "see [ feature matrix ] ( http : //learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages ) see breakdown feature parity currently supported language .", "< table width=100 % > < tbody > < tr > < td > < img align= '' left '' width=52px src= '' http : //user-images.githubusercontent.com/371009/230673036-fad1e8e6-5d48-49b1-a9c1-6f9834e0d165.png '' > < div > < href= '' dotnet/readme.md '' > using semantic kernel c # < /a > & nbsp < br/ > < /div > < /td > < td > < img align= '' left '' width=52px src= '' http : //raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg '' > < div > < href= '' python/readme.md '' > using semantic kernel python < /a > < /div > < /td > < td > < img align= '' left '' width=52px height=52px src= '' http : //upload.wikimedia.org/wikipedia/en/3/30/java_programming_language_logo.svg '' alt= '' java logo '' > < div > < href= '' http : //github.com/microsoft/semantic-kernel/blob/main/java/readme.md '' > using semantic kernel java < /a > < /div > < /td > < /tr > < /tbody > < /table > quickest way get started basic get api key either openai azure openai run one c # , python , java console applications/scripts ." ] ]
Getting started with Semantic Kernel The Semantic Kernel SDK is available in C#, Python, and Java. To get started, choose your preferred language below. See the [Feature Matrix](https://learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages) to see a breakdown of feature parity between our currently supported languages. <table width=100%> <tbody> <tr> <td> <img align="left" width=52px src="https://user-images.githubusercontent.com/371009/230673036-fad1e8e6-5d48-49b1-a9c1-6f9834e0d165.png"> <div> <a href="dotnet/README.md">Using Semantic Kernel in C#</a> &nbsp<br/> </div> </td> <td> <img align="left" width=52px src="https://raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg"> <div> <a href="python/README.md">Using Semantic Kernel in Python</a> </div> </td> <td> <img align="left" width=52px height=52px src="https://upload.wikimedia.org/wikipedia/en/3/30/Java_programming_language_logo.svg" alt="Java logo"> <div> <a href="https://github.com/microsoft/semantic-kernel/blob/main/java/README.md">Using Semantic Kernel in Java</a> </div> </td> </tr> </tbody> </table> The quickest way to get started with the basics is to get an API key from either OpenAI or Azure OpenAI and to run one of the C#, Python, and Java console applications/scripts below.
https://github.com/microsoft/semantic-kernel
2
[ "ai", "artificial-intelligence", "llm", "openai", "sdk" ]
https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md
[ [ "python", ":", "1", ".", "install", "pip", "package", ":", "`", "python", "-m", "pip", "install", "semantic-kernel", "`", ".", "2", ".", "create", "new", "script", "e.g", ".", "`", "hello-world.py", "`", ".", "3", ".", "store", "api", "key", "setting", "`", ".env", "`", "file", "described", "[", "]", "(", "python/readme.md", ")", ".", "4", ".", "copy", "code", "[", "]", "(", "python/readme.md", ")", "`", "hello-world.py", "`", "script", ".", "5", ".", "run", "python", "script", "." ], [ "python : 1 .", "install pip package : ` python -m pip install semantic-kernel ` .", "2 .", "create new script e.g .", "` hello-world.py ` .", "3 .", "store api key setting ` .env ` file described [ ] ( python/readme.md ) .", "4 .", "copy code [ ] ( python/readme.md ) ` hello-world.py ` script .", "5 .", "run python script ." ] ]
[ [ "python", ":", "1", ".", "install", "pip", "package", ":", "`", "python", "-m", "pip", "install", "semantic-kernel", "`", ".", "2", ".", "create", "new", "script", "e.g", ".", "`", "hello-world.py", "`", ".", "3", ".", "store", "api", "key", "setting", "`", ".env", "`", "file", "described", "[", "]", "(", "python/readme.md", ")", ".", "4", ".", "copy", "code", "[", "]", "(", "python/readme.md", ")", "`", "hello-world.py", "`", "script", ".", "5", ".", "run", "python", "script", "." ], [ "python : 1 .", "install pip package : ` python -m pip install semantic-kernel ` .", "2 .", "create new script e.g .", "` hello-world.py ` .", "3 .", "store api key setting ` .env ` file described [ ] ( python/readme.md ) .", "4 .", "copy code [ ] ( python/readme.md ) ` hello-world.py ` script .", "5 .", "run python script ." ] ]
For Python: 1. Install the pip package: `python -m pip install semantic-kernel`. 2. Create a new script e.g. `hello-world.py`. 3. Store your API key and settings in an `.env` file as described [here](python/README.md). 4. Copy the code from [here](python/README.md) into the `hello-world.py` script. 5. Run the python script.
https://github.com/microsoft/semantic-kernel
0
[ "ai", "artificial-intelligence", "llm", "openai", "sdk" ]
https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md
[ [ "learning", "use", "semantic", "kernel", "fastest", "way", "learn", "use", "semantic", "kernel", "c" ], [ "learning use semantic kernel fastest way learn use semantic kernel c" ] ]
[ [ "learning", "use", "semantic", "kernel", "fastest", "way", "learn", "use", "semantic", "kernel", "c" ], [ "learning use semantic kernel fastest way learn use semantic kernel c" ] ]
Learning how to use Semantic Kernel The fastest way to learn how to use Semantic Kernel is with our C
https://github.com/microsoft/semantic-kernel
-1
[ "ai", "artificial-intelligence", "llm", "openai", "sdk" ]
https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md
[ [ "python", "jupyter", "notebook", ".", "notebook", "demonstrate", "use", "semantic", "kernel", "code", "snippet", "run", "push", "button", ".", "-", "[", "getting", "started", "c" ], [ "python jupyter notebook .", "notebook demonstrate use semantic kernel code snippet run push button .", "- [ getting started c" ] ]
[ [ "python", "jupyter", "notebook", ".", "notebook", "demonstrate", "use", "semantic", "kernel", "code", "snippet", "run", "push", "button", ".", "-", "[", "getting", "started", "c" ], [ "python jupyter notebook .", "notebook demonstrate use semantic kernel code snippet run push button .", "- [ getting started c" ] ]
and Python Jupyter notebooks. These notebooks demonstrate how to use Semantic Kernel with code snippets that you can run with a push of a button. - [Getting Started with C
https://github.com/microsoft/semantic-kernel
-1
[ "ai", "artificial-intelligence", "llm", "openai", "sdk" ]
https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md
[ [ "notebook", "]", "(", "dotnet/notebooks/00-getting-started.ipynb", ")", "-", "[", "getting", "started", "python", "notebook", "]", "(", "python/notebooks/00-getting-started.ipynb", ")", "'ve", "finished", "getting", "started", "notebook", ",", "check", "main", "walkthroughs", "learn", "site", ".", "sample", "come", "completed", "c" ], [ "notebook ] ( dotnet/notebooks/00-getting-started.ipynb ) - [ getting started python notebook ] ( python/notebooks/00-getting-started.ipynb ) 've finished getting started notebook , check main walkthroughs learn site .", "sample come completed c" ] ]
[ [ "notebook", "]", "(", "dotnet/notebooks/00-getting-started.ipynb", ")", "-", "[", "getting", "started", "python", "notebook", "]", "(", "python/notebooks/00-getting-started.ipynb", ")", "'ve", "finished", "getting", "started", "notebook", ",", "check", "main", "walkthroughs", "learn", "site", ".", "sample", "come", "completed", "c" ], [ "notebook ] ( dotnet/notebooks/00-getting-started.ipynb ) - [ getting started python notebook ] ( python/notebooks/00-getting-started.ipynb ) 've finished getting started notebook , check main walkthroughs learn site .", "sample come completed c" ] ]
notebook](dotnet/notebooks/00-getting-started.ipynb) - [Getting Started with Python notebook](python/notebooks/00-getting-started.ipynb) Once you've finished the getting started notebooks, you can then check out the main walkthroughs on our Learn site. Each sample comes with a completed C
https://github.com/microsoft/semantic-kernel
-1
[ "ai", "artificial-intelligence", "llm", "openai", "sdk" ]
https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md
[ [ "chat", "copilot", ":", "see", "'s", "possible", "semantic", "kernel", "'re", "interested", "seeing", "full", "end-to-end", "example", "use", "semantic", "kernel", ",", "check", "[", "chat", "copilot", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "reference", "application", ".", "chat", "copilot", "chatbot", "demonstrates", "power", "semantic", "kernel", ".", "combining", "plugins", ",", "planner", ",", "persona", ",", "demonstrate", "build", "chatbot", "maintain", "long-running", "conversation", "user", "also", "leveraging", "plugins", "integrate", "service", ".", "!", "[", "chat", "copilot", "answering", "question", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/media/chat-copilot-in-action.gif", ")", "run", "app", "downloading", "[", "github", "repo", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "." ], [ "chat copilot : see 's possible semantic kernel 're interested seeing full end-to-end example use semantic kernel , check [ chat copilot ] ( http : //github.com/microsoft/chat-copilot ) reference application .", "chat copilot chatbot demonstrates power semantic kernel .", "combining plugins , planner , persona , demonstrate build chatbot maintain long-running conversation user also leveraging plugins integrate service .", "!", "[ chat copilot answering question ] ( http : //learn.microsoft.com/en-us/semantic-kernel/media/chat-copilot-in-action.gif ) run app downloading [ github repo ] ( http : //github.com/microsoft/chat-copilot ) ." ] ]
[ [ "chat", "copilot", ":", "see", "'s", "possible", "semantic", "kernel", "'re", "interested", "seeing", "full", "end-to-end", "example", "use", "semantic", "kernel", ",", "check", "[", "chat", "copilot", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "reference", "application", ".", "chat", "copilot", "chatbot", "demonstrates", "power", "semantic", "kernel", ".", "combining", "plugins", ",", "planner", ",", "persona", ",", "demonstrate", "build", "chatbot", "maintain", "long-running", "conversation", "user", "also", "leveraging", "plugins", "integrate", "service", ".", "!", "[", "chat", "copilot", "answering", "question", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/media/chat-copilot-in-action.gif", ")", "run", "app", "downloading", "[", "github", "repo", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "." ], [ "chat copilot : see 's possible semantic kernel 're interested seeing full end-to-end example use semantic kernel , check [ chat copilot ] ( http : //github.com/microsoft/chat-copilot ) reference application .", "chat copilot chatbot demonstrates power semantic kernel .", "combining plugins , planner , persona , demonstrate build chatbot maintain long-running conversation user also leveraging plugins integrate service .", "!", "[ chat copilot answering question ] ( http : //learn.microsoft.com/en-us/semantic-kernel/media/chat-copilot-in-action.gif ) run app downloading [ github repo ] ( http : //github.com/microsoft/chat-copilot ) ." ] ]
Chat Copilot: see what's possible with Semantic Kernel If you're interested in seeing a full end-to-end example of how to use Semantic Kernel, check out our [Chat Copilot](https://github.com/microsoft/chat-copilot) reference application. Chat Copilot is a chatbot that demonstrates the power of Semantic Kernel. By combining plugins, planners, and personas, we demonstrate how you can build a chatbot that can maintain long-running conversations with users while also leveraging plugins to integrate with other services. ![Chat Copilot answering a question](https://learn.microsoft.com/en-us/semantic-kernel/media/chat-copilot-in-action.gif) You can run the app yourself by downloading it from its [GitHub repo](https://github.com/microsoft/chat-copilot).
https://github.com/microsoft/semantic-kernel
-1
[ "ai", "artificial-intelligence", "llm", "openai", "sdk" ]
https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md
[ [ "check", "repos", "!", "like", "semantic", "kernel", ",", "may", "also", "interested", "repos", "semantic", "kernel", "team", "support", ":", "|", "repo", "|", "description", "|", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-", "|", "|", "[", "chat", "copilot", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "|", "reference", "application", "demonstrates", "build", "chatbot", "semantic", "kernel", ".", "|", "|", "[", "semantic", "kernel", "doc", "]", "(", "http", ":", "//github.com/microsoftdocs/semantic-kernel-docs", ")", "|", "home", "semantic", "kernel", "documentation", "appears", "microsoft", "learn", "site", ".", "|", "|", "[", "semantic", "kernel", "starter", "]", "(", "http", ":", "//github.com/microsoft/semantic-kernel-starters", ")", "|", "starter", "project", "semantic", "kernel", "make", "easier", "get", "started", ".", "|", "|", "[", "kernel", "memory", "]", "(", "http", ":", "//github.com/microsoft/kernel-memory", ")", "|", "scalable", "memory", "service", "store", "information", "ask", "question", "using", "rag", "pattern", ".", "|" ], [ "check repos !", "like semantic kernel , may also interested repos semantic kernel team support : | repo | description | | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - | | [ chat copilot ] ( http : //github.com/microsoft/chat-copilot ) | reference application demonstrates build chatbot semantic kernel .", "| | [ semantic kernel doc ] ( http : //github.com/microsoftdocs/semantic-kernel-docs ) | home semantic kernel documentation appears microsoft learn site .", "| | [ semantic kernel starter ] ( http : //github.com/microsoft/semantic-kernel-starters ) | starter project semantic kernel make easier get started .", "| | [ kernel memory ] ( http : //github.com/microsoft/kernel-memory ) | scalable memory service store information ask question using rag pattern .", "|" ] ]
[ [ "check", "repos", "!", "like", "semantic", "kernel", ",", "may", "also", "interested", "repos", "semantic", "kernel", "team", "support", ":", "|", "repo", "|", "description", "|", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-", "|", "|", "[", "chat", "copilot", "]", "(", "http", ":", "//github.com/microsoft/chat-copilot", ")", "|", "reference", "application", "demonstrates", "build", "chatbot", "semantic", "kernel", ".", "|", "|", "[", "semantic", "kernel", "doc", "]", "(", "http", ":", "//github.com/microsoftdocs/semantic-kernel-docs", ")", "|", "home", "semantic", "kernel", "documentation", "appears", "microsoft", "learn", "site", ".", "|", "|", "[", "semantic", "kernel", "starter", "]", "(", "http", ":", "//github.com/microsoft/semantic-kernel-starters", ")", "|", "starter", "project", "semantic", "kernel", "make", "easier", "get", "started", ".", "|", "|", "[", "kernel", "memory", "]", "(", "http", ":", "//github.com/microsoft/kernel-memory", ")", "|", "scalable", "memory", "service", "store", "information", "ask", "question", "using", "rag", "pattern", ".", "|" ], [ "check repos !", "like semantic kernel , may also interested repos semantic kernel team support : | repo | description | | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - | | [ chat copilot ] ( http : //github.com/microsoft/chat-copilot ) | reference application demonstrates build chatbot semantic kernel .", "| | [ semantic kernel doc ] ( http : //github.com/microsoftdocs/semantic-kernel-docs ) | home semantic kernel documentation appears microsoft learn site .", "| | [ semantic kernel starter ] ( http : //github.com/microsoft/semantic-kernel-starters ) | starter project semantic kernel make easier get started .", "| | [ kernel memory ] ( http : //github.com/microsoft/kernel-memory ) | scalable memory service store information ask question using rag pattern .", "|" ] ]
Check out our other repos! If you like Semantic Kernel, you may also be interested in other repos the Semantic Kernel team supports: | Repo | Description | | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | | [Chat Copilot](https://github.com/microsoft/chat-copilot) | A reference application that demonstrates how to build a chatbot with Semantic Kernel. | | [Semantic Kernel Docs](https://github.com/MicrosoftDocs/semantic-kernel-docs) | The home for Semantic Kernel documentation that appears on the Microsoft learn site. | | [Semantic Kernel Starters](https://github.com/microsoft/semantic-kernel-starters) | Starter projects for Semantic Kernel to make it easier to get started. | | [Kernel Memory](https://github.com/microsoft/kernel-memory) | A scalable Memory service to store information and ask questions using the RAG pattern. |
https://github.com/microsoft/semantic-kernel
-1
[ "ai", "artificial-intelligence", "llm", "openai", "sdk" ]
https://raw.githubusercontent.com/microsoft/semantic-kernel/main/README.md
[ [ "join", "community", "welcome", "contribution", "suggestion", "sk", "community", "!", "one", "easiest", "way", "participate", "engage", "discussion", "github", "repository", ".", "bug", "report", "fix", "welcome", "!", "new", "feature", ",", "component", ",", "extension", ",", "please", "open", "issue", "discus", "u", "sending", "pr", ".", "avoid", "rejection", "might", "taking", "core", "different", "direction", ",", "also", "consider", "impact", "larger", "ecosystem", ".", "learn", "get", "started", ":", "-", "read", "[", "documentation", "]", "(", "http", ":", "//aka.ms/sk/learn", ")", "-", "learn", "[", "contribute", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/get-started/contributing", ")", "project", "-", "join", "[", "discord", "community", "]", "(", "http", ":", "//aka.ms/skdiscord", ")", "-", "attend", "[", "regular", "office", "hour", "sk", "community", "event", "]", "(", "community.md", ")", "-", "follow", "team", "[", "blog", "]", "(", "http", ":", "//aka.ms/sk/blog", ")" ], [ "join community welcome contribution suggestion sk community !", "one easiest way participate engage discussion github repository .", "bug report fix welcome !", "new feature , component , extension , please open issue discus u sending pr .", "avoid rejection might taking core different direction , also consider impact larger ecosystem .", "learn get started : - read [ documentation ] ( http : //aka.ms/sk/learn ) - learn [ contribute ] ( http : //learn.microsoft.com/en-us/semantic-kernel/get-started/contributing ) project - join [ discord community ] ( http : //aka.ms/skdiscord ) - attend [ regular office hour sk community event ] ( community.md ) - follow team [ blog ] ( http : //aka.ms/sk/blog )" ] ]
[ [ "join", "community", "welcome", "contribution", "suggestion", "sk", "community", "!", "one", "easiest", "way", "participate", "engage", "discussion", "github", "repository", ".", "bug", "report", "fix", "welcome", "!", "new", "feature", ",", "component", ",", "extension", ",", "please", "open", "issue", "discus", "u", "sending", "pr", ".", "avoid", "rejection", "might", "taking", "core", "different", "direction", ",", "also", "consider", "impact", "larger", "ecosystem", ".", "learn", "get", "started", ":", "-", "read", "[", "documentation", "]", "(", "http", ":", "//aka.ms/sk/learn", ")", "-", "learn", "[", "contribute", "]", "(", "http", ":", "//learn.microsoft.com/en-us/semantic-kernel/get-started/contributing", ")", "project", "-", "join", "[", "discord", "community", "]", "(", "http", ":", "//aka.ms/skdiscord", ")", "-", "attend", "[", "regular", "office", "hour", "sk", "community", "event", "]", "(", "community.md", ")", "-", "follow", "team", "[", "blog", "]", "(", "http", ":", "//aka.ms/sk/blog", ")" ], [ "join community welcome contribution suggestion sk community !", "one easiest way participate engage discussion github repository .", "bug report fix welcome !", "new feature , component , extension , please open issue discus u sending pr .", "avoid rejection might taking core different direction , also consider impact larger ecosystem .", "learn get started : - read [ documentation ] ( http : //aka.ms/sk/learn ) - learn [ contribute ] ( http : //learn.microsoft.com/en-us/semantic-kernel/get-started/contributing ) project - join [ discord community ] ( http : //aka.ms/skdiscord ) - attend [ regular office hour sk community event ] ( community.md ) - follow team [ blog ] ( http : //aka.ms/sk/blog )" ] ]
Join the community We welcome your contributions and suggestions to SK community! One of the easiest ways to participate is to engage in discussions in the GitHub repository. Bug reports and fixes are welcome! For new features, components, or extensions, please open an issue and discuss with us before sending a PR. This is to avoid rejection as we might be taking the core in a different direction, but also to consider the impact on the larger ecosystem. To learn more and get started: - Read the [documentation](https://aka.ms/sk/learn) - Learn how to [contribute](https://learn.microsoft.com/en-us/semantic-kernel/get-started/contributing) to the project - Join the [Discord community](https://aka.ms/SKDiscord) - Attend [regular office hours and SK community events](COMMUNITY.md) - Follow the team on our [blog](https://aka.ms/sk/blog)
https://github.com/microsoft/semantic-kernel
-1
[ "ai", "artificial-intelligence", "llm", "openai", "sdk" ]
https://raw.githubusercontent.com/mlc-ai/mlc-llm/main/README.md
[ [ "news", "*", "[", "10/18/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/10/19/scalable-language-model-inference-on-multiple-nvdia-amd-gpus", ")", "scalable", "multi-gpu", "support", "cuda", "rocm", "official", ".", "*", "[", "09/02/2023", "]", "prebuilt", "rocm", "5.7", "cuda", "12.2", "package", "[", "available", "]", "(", "http", ":", "//llm.mlc.ai/docs/install/tvm.html", "#", "option-1-prebuilt-package", ")", ".", "*", "[", "08/25/2023", "]", "codellama", "support", ".", "*", "[", "08/14/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/08/09/gpu-accelerated-llm-on-orange-pi", ")", "mali", "gpu", "support", "orange", "pi", ".", "*", "[", "08/09/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/08/09/making-amd-gpus-competitive-for-llm-inference", ")", "rocm", "backend", "mature", "use", ".", "*", "[", "08/02/2023", "]", "[", "dockerfile", "]", "(", "http", ":", "//github.com/mlc-ai/llm-perf-bench/", ")", "released", "cuda", "performance", "benchmarking", ".", "*", "[", "07/19/2023", "]", "support", "llama2-7b/13b/70b", ".", "*", "[", "05/22/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/22/bringing-open-large-language-models-to-consumer-devices", ")", "redpajama", "support", ".", "*", "[", "05/08/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/08/bringing-hardware-accelerated-language-models-to-android-devices", ")", "mlc", "llm", "available", "android", ".", "*", "[", "05/01/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/01/bringing-accelerated-llm-to-consumer-hardware", ")", "mlc", "llm", "released", "metal", ",", "vulkan", "cuda", "backends", ".", "*", "[", "04/14/2023", "]", "[", "webllm", "]", "(", "http", ":", "//github.com/mlc-ai/web-llm", ")", "released", "prior", "mlc", "llm", "webgpu", "webassembly", "backend", "." ], [ "news * [ 10/18/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/10/19/scalable-language-model-inference-on-multiple-nvdia-amd-gpus ) scalable multi-gpu support cuda rocm official .", "* [ 09/02/2023 ] prebuilt rocm 5.7 cuda 12.2 package [ available ] ( http : //llm.mlc.ai/docs/install/tvm.html # option-1-prebuilt-package ) .", "* [ 08/25/2023 ] codellama support .", "* [ 08/14/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/08/09/gpu-accelerated-llm-on-orange-pi ) mali gpu support orange pi .", "* [ 08/09/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/08/09/making-amd-gpus-competitive-for-llm-inference ) rocm backend mature use .", "* [ 08/02/2023 ] [ dockerfile ] ( http : //github.com/mlc-ai/llm-perf-bench/ ) released cuda performance benchmarking .", "* [ 07/19/2023 ] support llama2-7b/13b/70b .", "* [ 05/22/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/22/bringing-open-large-language-models-to-consumer-devices ) redpajama support .", "* [ 05/08/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/08/bringing-hardware-accelerated-language-models-to-android-devices ) mlc llm available android .", "* [ 05/01/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/01/bringing-accelerated-llm-to-consumer-hardware ) mlc llm released metal , vulkan cuda backends .", "* [ 04/14/2023 ] [ webllm ] ( http : //github.com/mlc-ai/web-llm ) released prior mlc llm webgpu webassembly backend ." ] ]
[ [ "news", "*", "[", "10/18/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/10/19/scalable-language-model-inference-on-multiple-nvdia-amd-gpus", ")", "scalable", "multi-gpu", "support", "cuda", "rocm", "official", ".", "*", "[", "09/02/2023", "]", "prebuilt", "rocm", "5.7", "cuda", "12.2", "package", "[", "available", "]", "(", "http", ":", "//llm.mlc.ai/docs/install/tvm.html", "#", "option-1-prebuilt-package", ")", ".", "*", "[", "08/25/2023", "]", "codellama", "support", ".", "*", "[", "08/14/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/08/09/gpu-accelerated-llm-on-orange-pi", ")", "mali", "gpu", "support", "orange", "pi", ".", "*", "[", "08/09/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/08/09/making-amd-gpus-competitive-for-llm-inference", ")", "rocm", "backend", "mature", "use", ".", "*", "[", "08/02/2023", "]", "[", "dockerfile", "]", "(", "http", ":", "//github.com/mlc-ai/llm-perf-bench/", ")", "released", "cuda", "performance", "benchmarking", ".", "*", "[", "07/19/2023", "]", "support", "llama2-7b/13b/70b", ".", "*", "[", "05/22/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/22/bringing-open-large-language-models-to-consumer-devices", ")", "redpajama", "support", ".", "*", "[", "05/08/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/08/bringing-hardware-accelerated-language-models-to-android-devices", ")", "mlc", "llm", "available", "android", ".", "*", "[", "05/01/2023", "]", "[", "[", "post", "]", "]", "(", "http", ":", "//blog.mlc.ai/2023/05/01/bringing-accelerated-llm-to-consumer-hardware", ")", "mlc", "llm", "released", "metal", ",", "vulkan", "cuda", "backends", ".", "*", "[", "04/14/2023", "]", "[", "webllm", "]", "(", "http", ":", "//github.com/mlc-ai/web-llm", ")", "released", "prior", "mlc", "llm", "webgpu", "webassembly", "backend", "." ], [ "news * [ 10/18/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/10/19/scalable-language-model-inference-on-multiple-nvdia-amd-gpus ) scalable multi-gpu support cuda rocm official .", "* [ 09/02/2023 ] prebuilt rocm 5.7 cuda 12.2 package [ available ] ( http : //llm.mlc.ai/docs/install/tvm.html # option-1-prebuilt-package ) .", "* [ 08/25/2023 ] codellama support .", "* [ 08/14/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/08/09/gpu-accelerated-llm-on-orange-pi ) mali gpu support orange pi .", "* [ 08/09/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/08/09/making-amd-gpus-competitive-for-llm-inference ) rocm backend mature use .", "* [ 08/02/2023 ] [ dockerfile ] ( http : //github.com/mlc-ai/llm-perf-bench/ ) released cuda performance benchmarking .", "* [ 07/19/2023 ] support llama2-7b/13b/70b .", "* [ 05/22/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/22/bringing-open-large-language-models-to-consumer-devices ) redpajama support .", "* [ 05/08/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/08/bringing-hardware-accelerated-language-models-to-android-devices ) mlc llm available android .", "* [ 05/01/2023 ] [ [ post ] ] ( http : //blog.mlc.ai/2023/05/01/bringing-accelerated-llm-to-consumer-hardware ) mlc llm released metal , vulkan cuda backends .", "* [ 04/14/2023 ] [ webllm ] ( http : //github.com/mlc-ai/web-llm ) released prior mlc llm webgpu webassembly backend ." ] ]
News * [10/18/2023] [[Post]](https://blog.mlc.ai/2023/10/19/Scalable-Language-Model-Inference-on-Multiple-NVDIA-AMD-GPUs) Scalable multi-GPU support for CUDA and ROCm are official. * [09/02/2023] Prebuilt ROCm 5.7 and CUDA 12.2 package is [available](https://llm.mlc.ai/docs/install/tvm.html#option-1-prebuilt-package). * [08/25/2023] CodeLlama support is up. * [08/14/2023] [[Post]](https://blog.mlc.ai/2023/08/09/GPU-Accelerated-LLM-on-Orange-Pi) Mali GPU support is up on Orange Pi. * [08/09/2023] [[Post]](https://blog.mlc.ai/2023/08/09/Making-AMD-GPUs-competitive-for-LLM-inference) ROCm backend is mature to use. * [08/02/2023] [Dockerfile](https://github.com/mlc-ai/llm-perf-bench/) is released for CUDA performance benchmarking. * [07/19/2023] Support for Llama2-7B/13B/70B is up. * [05/22/2023] [[Post]](https://blog.mlc.ai/2023/05/22/bringing-open-large-language-models-to-consumer-devices) RedPajama support is up. * [05/08/2023] [[Post]](https://blog.mlc.ai/2023/05/08/bringing-hardware-accelerated-language-models-to-android-devices) MLC LLM is now available on Android. * [05/01/2023] [[Post]](https://blog.mlc.ai/2023/05/01/bringing-accelerated-llm-to-consumer-hardware) MLC LLM is released with Metal, Vulkan and CUDA backends. * [04/14/2023] [WebLLM](https://github.com/mlc-ai/web-llm) is released prior to MLC LLM with WebGPU and WebAssembly backend.
https://github.com/mlc-ai/mlc-llm
-1
[ "language-model", "llm", "machine-learning-compilation", "tvm" ]
https://raw.githubusercontent.com/mlc-ai/mlc-llm/main/README.md
[ [ "getting", "started", "please", "visit", "[", "documentation", "]", "(", "http", ":", "//llm.mlc.ai/docs/index.html", "#", "getting-started", ")", "detailed", "instruction", "." ], [ "getting started please visit [ documentation ] ( http : //llm.mlc.ai/docs/index.html # getting-started ) detailed instruction ." ] ]
[ [ "getting", "started", "please", "visit", "[", "documentation", "]", "(", "http", ":", "//llm.mlc.ai/docs/index.html", "#", "getting-started", ")", "detailed", "instruction", "." ], [ "getting started please visit [ documentation ] ( http : //llm.mlc.ai/docs/index.html # getting-started ) detailed instruction ." ] ]
Getting Started Please visit our [documentation](https://llm.mlc.ai/docs/index.html#getting-started) for detailed instructions.
https://github.com/mlc-ai/mlc-llm
-1
[ "language-model", "llm", "machine-learning-compilation", "tvm" ]
https://raw.githubusercontent.com/mlc-ai/mlc-llm/main/README.md
[ [ "model", "support", "mlc", "llm", "support", "wide", "range", "model", "architecture", "variant", ".", "following", "prebuilts", "use", "off-the-shelf", ".", "visit", "[", "prebuilt", "model", "]", "(", "http", ":", "//llm.mlc.ai/docs/prebuilt_models.html", ")", "see", "full", "list", ",", "[", "compile", "model", "via", "mlc", "]", "(", "http", ":", "//llm.mlc.ai/docs/compilation/compile_models.html", ")", "see", "use", "model", "list", ".", "<", "table", "style=", "''", "width:100", "%", "''", ">", "<", "thead", ">", "<", "tr", ">", "<", "th", "style=", "''", "width:40", "%", "''", ">", "architecture", "<", "/th", ">", "<", "th", "style=", "''", "width:60", "%", "''", ">", "prebuilt", "model", "variant", "<", "/th", ">", "<", "/tr", ">", "<", "/thead", ">", "<", "tbody", ">", "<", "tr", ">", "<", "td", ">", "llama", "<", "/td", ">", "<", "td", ">", "llama-2", ",", "code", "llama", ",", "vicuna", ",", "wizardlm", ",", "wizardmath", ",", "openorca", "platypus2", ",", "flagalpha", "llama-2", "chinese", ",", "georgesung", "llama-2", "uncensored", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-neox", "<", "/td", ">", "<", "td", ">", "redpajama", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-j", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "rwkv", "<", "/td", ">", "<", "td", ">", "rwkv-raven", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "minigpt", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gptbigcode", "<", "/td", ">", "<", "td", ">", "wizardcoder", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "chatglm", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "stablelm", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "mistral", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "phi", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "/tbody", ">", "<", "/table", ">" ], [ "model support mlc llm support wide range model architecture variant .", "following prebuilts use off-the-shelf .", "visit [ prebuilt model ] ( http : //llm.mlc.ai/docs/prebuilt_models.html ) see full list , [ compile model via mlc ] ( http : //llm.mlc.ai/docs/compilation/compile_models.html ) see use model list .", "< table style= '' width:100 % '' > < thead > < tr > < th style= '' width:40 % '' > architecture < /th > < th style= '' width:60 % '' > prebuilt model variant < /th > < /tr > < /thead > < tbody > < tr > < td > llama < /td > < td > llama-2 , code llama , vicuna , wizardlm , wizardmath , openorca platypus2 , flagalpha llama-2 chinese , georgesung llama-2 uncensored < /td > < /tr > < tr > < td > gpt-neox < /td > < td > redpajama < /td > < /tr > < tr > < td > gpt-j < /td > < td > < /td > < /tr > < tr > < td > rwkv < /td > < td > rwkv-raven < /td > < /tr > < tr > < td > minigpt < /td > < td > < /td > < /tr > < tr > < td > gptbigcode < /td > < td > wizardcoder < /td > < /tr > < tr > < td > chatglm < /td > < td > < /td > < /tr > < tr > < td > stablelm < /td > < td > < /td > < /tr > < tr > < td > mistral < /td > < td > < /td > < /tr > < tr > < td > phi < /td > < td > < /td > < /tr > < /tbody > < /table >" ] ]
[ [ "model", "support", "mlc", "llm", "support", "wide", "range", "model", "architecture", "variant", ".", "following", "prebuilts", "use", "off-the-shelf", ".", "visit", "[", "prebuilt", "model", "]", "(", "http", ":", "//llm.mlc.ai/docs/prebuilt_models.html", ")", "see", "full", "list", ",", "[", "compile", "model", "via", "mlc", "]", "(", "http", ":", "//llm.mlc.ai/docs/compilation/compile_models.html", ")", "see", "use", "model", "list", ".", "<", "table", "style=", "''", "width:100", "%", "''", ">", "<", "thead", ">", "<", "tr", ">", "<", "th", "style=", "''", "width:40", "%", "''", ">", "architecture", "<", "/th", ">", "<", "th", "style=", "''", "width:60", "%", "''", ">", "prebuilt", "model", "variant", "<", "/th", ">", "<", "/tr", ">", "<", "/thead", ">", "<", "tbody", ">", "<", "tr", ">", "<", "td", ">", "llama", "<", "/td", ">", "<", "td", ">", "llama-2", ",", "code", "llama", ",", "vicuna", ",", "wizardlm", ",", "wizardmath", ",", "openorca", "platypus2", ",", "flagalpha", "llama-2", "chinese", ",", "georgesung", "llama-2", "uncensored", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-neox", "<", "/td", ">", "<", "td", ">", "redpajama", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-j", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "rwkv", "<", "/td", ">", "<", "td", ">", "rwkv-raven", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "minigpt", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gptbigcode", "<", "/td", ">", "<", "td", ">", "wizardcoder", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "chatglm", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "stablelm", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "mistral", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "phi", "<", "/td", ">", "<", "td", ">", "<", "/td", ">", "<", "/tr", ">", "<", "/tbody", ">", "<", "/table", ">" ], [ "model support mlc llm support wide range model architecture variant .", "following prebuilts use off-the-shelf .", "visit [ prebuilt model ] ( http : //llm.mlc.ai/docs/prebuilt_models.html ) see full list , [ compile model via mlc ] ( http : //llm.mlc.ai/docs/compilation/compile_models.html ) see use model list .", "< table style= '' width:100 % '' > < thead > < tr > < th style= '' width:40 % '' > architecture < /th > < th style= '' width:60 % '' > prebuilt model variant < /th > < /tr > < /thead > < tbody > < tr > < td > llama < /td > < td > llama-2 , code llama , vicuna , wizardlm , wizardmath , openorca platypus2 , flagalpha llama-2 chinese , georgesung llama-2 uncensored < /td > < /tr > < tr > < td > gpt-neox < /td > < td > redpajama < /td > < /tr > < tr > < td > gpt-j < /td > < td > < /td > < /tr > < tr > < td > rwkv < /td > < td > rwkv-raven < /td > < /tr > < tr > < td > minigpt < /td > < td > < /td > < /tr > < tr > < td > gptbigcode < /td > < td > wizardcoder < /td > < /tr > < tr > < td > chatglm < /td > < td > < /td > < /tr > < tr > < td > stablelm < /td > < td > < /td > < /tr > < tr > < td > mistral < /td > < td > < /td > < /tr > < tr > < td > phi < /td > < td > < /td > < /tr > < /tbody > < /table >" ] ]
Model Support MLC LLM supports a wide range of model architectures and variants. We have the following prebuilts which you can use off-the-shelf. Visit [Prebuilt Models](https://llm.mlc.ai/docs/prebuilt_models.html) to see the full list, and [Compile Models via MLC](https://llm.mlc.ai/docs/compilation/compile_models.html) to see how to use models not on this list. <table style="width:100%"> <thead> <tr> <th style="width:40%">Architecture</th> <th style="width:60%">Prebuilt Model Variants</th> </tr> </thead> <tbody> <tr> <td>Llama</td> <td>Llama-2, Code Llama, Vicuna, WizardLM, WizardMath, OpenOrca Platypus2, FlagAlpha Llama-2 Chinese, georgesung Llama-2 Uncensored</td> </tr> <tr> <td>GPT-NeoX</td> <td>RedPajama</td> </tr> <tr> <td>GPT-J</td> <td></td> </tr> <tr> <td>RWKV</td> <td>RWKV-raven</td> </tr> <tr> <td>MiniGPT</td> <td></td> </tr> <tr> <td>GPTBigCode</td> <td>WizardCoder</td> </tr> <tr> <td>ChatGLM</td> <td></td> </tr> <tr> <td>StableLM</td> <td></td> </tr> <tr> <td>Mistral</td> <td></td> </tr> <tr> <td>Phi</td> <td></td> </tr> </tbody> </table>
https://github.com/mlc-ai/mlc-llm
-1
[ "language-model", "llm", "machine-learning-compilation", "tvm" ]
https://raw.githubusercontent.com/langgenius/dify/main/README.md
[ [ "start", "*", "*", "star", "u", "github", ",", "instantly", "notified", "new", "release", "!", "*", "*", "!", "[", "star-us", "]", "(", "http", ":", "//github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f", ")", "-", "[", "website", "]", "(", "http", ":", "//dify.ai", ")", "-", "[", "doc", "]", "(", "http", ":", "//docs.dify.ai", ")", "-", "[", "deployment", "doc", "]", "(", "http", ":", "//docs.dify.ai/getting-started/install-self-hosted", ")", "-", "[", "faq", "]", "(", "http", ":", "//docs.dify.ai/getting-started/faq", ")" ], [ "start * * star u github , instantly notified new release !", "* * !", "[ star-us ] ( http : //github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f ) - [ website ] ( http : //dify.ai ) - [ doc ] ( http : //docs.dify.ai ) - [ deployment doc ] ( http : //docs.dify.ai/getting-started/install-self-hosted ) - [ faq ] ( http : //docs.dify.ai/getting-started/faq )" ] ]
[ [ "start", "*", "*", "star", "u", "github", ",", "instantly", "notified", "new", "release", "!", "*", "*", "!", "[", "star-us", "]", "(", "http", ":", "//github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f", ")", "-", "[", "website", "]", "(", "http", ":", "//dify.ai", ")", "-", "[", "doc", "]", "(", "http", ":", "//docs.dify.ai", ")", "-", "[", "deployment", "doc", "]", "(", "http", ":", "//docs.dify.ai/getting-started/install-self-hosted", ")", "-", "[", "faq", "]", "(", "http", ":", "//docs.dify.ai/getting-started/faq", ")" ], [ "start * * star u github , instantly notified new release !", "* * !", "[ star-us ] ( http : //github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f ) - [ website ] ( http : //dify.ai ) - [ doc ] ( http : //docs.dify.ai ) - [ deployment doc ] ( http : //docs.dify.ai/getting-started/install-self-hosted ) - [ faq ] ( http : //docs.dify.ai/getting-started/faq )" ] ]
Before You Start **Star us on GitHub, and be instantly notified for new releases!** ![star-us](https://github.com/langgenius/dify/assets/100913391/95f37259-7370-4456-a9f0-0bc01ef8642f) - [Website](https://dify.ai) - [Docs](https://docs.dify.ai) - [Deployment Docs](https://docs.dify.ai/getting-started/install-self-hosted) - [FAQ](https://docs.dify.ai/getting-started/faq)
https://github.com/langgenius/dify
-1
[ "ai", "backend-as-a-service", "gpt", "gpt-4", "langchain", "llama2", "llm", "openai", "orchestration", "python", "rag" ]
https://raw.githubusercontent.com/langgenius/dify/main/README.md
[ [ "install", "community", "edition" ], [ "install community edition" ] ]
[ [ "install", "community", "edition" ], [ "install community edition" ] ]
Install the Community Edition
https://github.com/langgenius/dify
-1
[ "ai", "backend-as-a-service", "gpt", "gpt-4", "langchain", "llama2", "llm", "openai", "orchestration", "python", "rag" ]
https://raw.githubusercontent.com/langgenius/dify/main/README.md
[ [ "system", "requirement", "installing", "dify", ",", "make", "sure", "machine", "meet", "following", "minimum", "system", "requirement", ":", "-", "cpu", ">", "=", "2", "core", "-", "ram", ">", "=", "4gb" ], [ "system requirement installing dify , make sure machine meet following minimum system requirement : - cpu > = 2 core - ram > = 4gb" ] ]
[ [ "system", "requirement", "installing", "dify", ",", "make", "sure", "machine", "meet", "following", "minimum", "system", "requirement", ":", "-", "cpu", ">", "=", "2", "core", "-", "ram", ">", "=", "4gb" ], [ "system requirement installing dify , make sure machine meet following minimum system requirement : - cpu > = 2 core - ram > = 4gb" ] ]
System Requirements Before installing Dify, make sure your machine meets the following minimum system requirements: - CPU >= 2 Core - RAM >= 4GB
https://github.com/langgenius/dify
-1
[ "ai", "backend-as-a-service", "gpt", "gpt-4", "langchain", "llama2", "llm", "openai", "orchestration", "python", "rag" ]
https://raw.githubusercontent.com/langgenius/dify/main/README.md
[ [ "quick", "start", "easiest", "way", "start", "dify", "server", "run", "[", "docker-compose.yml", "]", "(", "docker/docker-compose.yaml", ")", "file", ".", "running", "installation", "command", ",", "make", "sure", "[", "docker", "]", "(", "http", ":", "//docs.docker.com/get-docker/", ")", "[", "docker", "compose", "]", "(", "http", ":", "//docs.docker.com/compose/install/", ")", "installed", "machine", ":", "``", "`", "bash", "cd", "docker", "docker", "compose", "-d", "``", "`", "running", ",", "access", "dify", "dashboard", "browser", "[", "http", ":", "//localhost/install", "]", "(", "http", ":", "//localhost/install", ")", "start", "initialization", "installation", "process", "." ], [ "quick start easiest way start dify server run [ docker-compose.yml ] ( docker/docker-compose.yaml ) file .", "running installation command , make sure [ docker ] ( http : //docs.docker.com/get-docker/ ) [ docker compose ] ( http : //docs.docker.com/compose/install/ ) installed machine : `` ` bash cd docker docker compose -d `` ` running , access dify dashboard browser [ http : //localhost/install ] ( http : //localhost/install ) start initialization installation process ." ] ]
[ [ "quick", "start", "easiest", "way", "start", "dify", "server", "run", "[", "docker-compose.yml", "]", "(", "docker/docker-compose.yaml", ")", "file", ".", "running", "installation", "command", ",", "make", "sure", "[", "docker", "]", "(", "http", ":", "//docs.docker.com/get-docker/", ")", "[", "docker", "compose", "]", "(", "http", ":", "//docs.docker.com/compose/install/", ")", "installed", "machine", ":", "``", "`", "bash", "cd", "docker", "docker", "compose", "-d", "``", "`", "running", ",", "access", "dify", "dashboard", "browser", "[", "http", ":", "//localhost/install", "]", "(", "http", ":", "//localhost/install", ")", "start", "initialization", "installation", "process", "." ], [ "quick start easiest way start dify server run [ docker-compose.yml ] ( docker/docker-compose.yaml ) file .", "running installation command , make sure [ docker ] ( http : //docs.docker.com/get-docker/ ) [ docker compose ] ( http : //docs.docker.com/compose/install/ ) installed machine : `` ` bash cd docker docker compose -d `` ` running , access dify dashboard browser [ http : //localhost/install ] ( http : //localhost/install ) start initialization installation process ." ] ]
Quick Start The easiest way to start the Dify server is to run our [docker-compose.yml](docker/docker-compose.yaml) file. Before running the installation command, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine: ```bash cd docker docker compose up -d ``` After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization installation process.
https://github.com/langgenius/dify
1
[ "ai", "backend-as-a-service", "gpt", "gpt-4", "langchain", "llama2", "llm", "openai", "orchestration", "python", "rag" ]
https://raw.githubusercontent.com/langgenius/dify/main/README.md
[ [ "configuration", "need", "customize", "configuration", ",", "please", "refer", "comment", "[", "docker-compose.yml", "]", "(", "docker/docker-compose.yaml", ")", "file", "manually", "set", "environment", "configuration", ".", "making", "change", ",", "please", "run", "`", "docker-compose", "-d", "`", ".", "see", "full", "list", "environment", "variable", "[", "doc", "]", "(", "http", ":", "//docs.dify.ai/getting-started/install-self-hosted/environments", ")", "." ], [ "configuration need customize configuration , please refer comment [ docker-compose.yml ] ( docker/docker-compose.yaml ) file manually set environment configuration .", "making change , please run ` docker-compose -d ` .", "see full list environment variable [ doc ] ( http : //docs.dify.ai/getting-started/install-self-hosted/environments ) ." ] ]
[ [ "configuration", "need", "customize", "configuration", ",", "please", "refer", "comment", "[", "docker-compose.yml", "]", "(", "docker/docker-compose.yaml", ")", "file", "manually", "set", "environment", "configuration", ".", "making", "change", ",", "please", "run", "`", "docker-compose", "-d", "`", ".", "see", "full", "list", "environment", "variable", "[", "doc", "]", "(", "http", ":", "//docs.dify.ai/getting-started/install-self-hosted/environments", ")", "." ], [ "configuration need customize configuration , please refer comment [ docker-compose.yml ] ( docker/docker-compose.yaml ) file manually set environment configuration .", "making change , please run ` docker-compose -d ` .", "see full list environment variable [ doc ] ( http : //docs.dify.ai/getting-started/install-self-hosted/environments ) ." ] ]
Configuration If you need to customize the configuration, please refer to the comments in our [docker-compose.yml](docker/docker-compose.yaml) file and manually set the environment configuration. After making the changes, please run `docker-compose up -d` again. You can see the full list of environment variables in our [docs](https://docs.dify.ai/getting-started/install-self-hosted/environments).
https://github.com/langgenius/dify
-1
[ "ai", "backend-as-a-service", "gpt", "gpt-4", "langchain", "llama2", "llm", "openai", "orchestration", "python", "rag" ]
https://raw.githubusercontent.com/THUDM/ChatGLM2-6B/main/README.md
[ [ "环境安装", "首先需要下载本仓库:", "``", "`", "shell", "git", "clone", "http", ":", "//github.com/thudm/chatglm2-6b", "cd", "chatglm2-6b", "``", "`", "然后使用", "pip", "安装依赖:", "``", "`", "pip", "install", "-r", "requirements.txt", "``", "`", "其中", "`", "transformer", "`", "库版本推荐为", "`", "4.30.2", "`", ",", "`", "torch", "`", "推荐使用", "2.0", "及以上的版本,以获得最佳的推理性能。" ], [ "环境安装 首先需要下载本仓库: `` ` shell git clone http : //github.com/thudm/chatglm2-6b cd chatglm2-6b `` ` 然后使用 pip 安装依赖: `` ` pip install -r requirements.txt `` ` 其中 ` transformer ` 库版本推荐为 ` 4.30.2 ` , ` torch ` 推荐使用 2.0 及以上的版本,以获得最佳的推理性能。" ] ]
[ [ "环境安装", "首先需要下载本仓库:", "``", "`", "shell", "git", "clone", "http", ":", "//github.com/thudm/chatglm2-6b", "cd", "chatglm2-6b", "``", "`", "然后使用", "pip", "安装依赖:", "``", "`", "pip", "install", "-r", "requirements.txt", "``", "`", "其中", "`", "transformer", "`", "库版本推荐为", "`", "4.30.2", "`", ",", "`", "torch", "`", "推荐使用", "2.0", "及以上的版本,以获得最佳的推理性能。" ], [ "环境安装 首先需要下载本仓库: `` ` shell git clone http : //github.com/thudm/chatglm2-6b cd chatglm2-6b `` ` 然后使用 pip 安装依赖: `` ` pip install -r requirements.txt `` ` 其中 ` transformer ` 库版本推荐为 ` 4.30.2 ` , ` torch ` 推荐使用 2.0 及以上的版本,以获得最佳的推理性能。" ] ]
环境安装 首先需要下载本仓库: ```shell git clone https://github.com/THUDM/ChatGLM2-6B cd ChatGLM2-6B ``` 然后使用 pip 安装依赖: ``` pip install -r requirements.txt ``` 其中 `transformers` 库版本推荐为 `4.30.2`,`torch` 推荐使用 2.0 及以上的版本,以获得最佳的推理性能。
https://github.com/THUDM/ChatGLM2-6B
0
[ "chatglm", "chatglm-6b", "large-language-models", "llm" ]
https://raw.githubusercontent.com/THUDM/ChatGLM2-6B/main/README.md
[ [ "从本地加载模型", "以上代码会由", "`", "transformer", "`", "自动下载模型实现和参数。完整的模型实现在", "[", "hugging", "face", "hub", "]", "(", "http", ":", "//huggingface.co/thudm/chatglm2-6b", ")", "。如果你的网络环境较差,下载模型参数可能会花费较长时间甚至失败。此时可以先将模型下载到本地,然后从本地加载。", "从", "hugging", "face", "hub", "下载模型需要先", "[", "安装git", "lf", "]", "(", "http", ":", "//docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage", ")", ",然后运行", "``", "`", "shell", "git", "clone", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "``", "`", "如果你从", "hugging", "face", "hub", "上下载", "checkpoint", "的速度较慢,可以只下载模型实现", "``", "`", "shell", "git_lfs_skip_smudge=1", "git", "clone", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "``", "`", "然后从", "[", "这里", "]", "(", "http", ":", "//cloud.tsinghua.edu.cn/d/674208019e314311ab5c/", ")", "手动下载模型参数文件,并将下载的文件替换到本地的", "`", "chatglm2-6b", "`", "目录下。", "将模型下载到本地之后,将以上代码中的", "`", "thudm/chatglm2-6b", "`", "替换为你本地的", "`", "chatglm2-6b", "`", "文件夹的路径,即可从本地加载模型。", "模型的实现仍然处在变动中。如果希望固定使用的模型实现以保证兼容性,可以在", "`", "from_pretrained", "`", "的调用中增加", "`", "revision=", "''", "v1.0", "''", "`", "参数。", "`", "v1.0", "`", "是当前最新的版本号,完整的版本列表参见", "[", "change", "log", "]", "(", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "#", "change-log", ")", "。" ], [ "从本地加载模型 以上代码会由 ` transformer ` 自动下载模型实现和参数。完整的模型实现在 [ hugging face hub ] ( http : //huggingface.co/thudm/chatglm2-6b ) 。如果你的网络环境较差,下载模型参数可能会花费较长时间甚至失败。此时可以先将模型下载到本地,然后从本地加载。 从 hugging face hub 下载模型需要先 [ 安装git lf ] ( http : //docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage ) ,然后运行 `` ` shell git clone http : //huggingface.co/thudm/chatglm2-6b `` ` 如果你从 hugging face hub 上下载 checkpoint 的速度较慢,可以只下载模型实现 `` ` shell git_lfs_skip_smudge=1 git clone http : //huggingface.co/thudm/chatglm2-6b `` ` 然后从 [ 这里 ] ( http : //cloud.tsinghua.edu.cn/d/674208019e314311ab5c/ ) 手动下载模型参数文件,并将下载的文件替换到本地的 ` chatglm2-6b ` 目录下。 将模型下载到本地之后,将以上代码中的 ` thudm/chatglm2-6b ` 替换为你本地的 ` chatglm2-6b ` 文件夹的路径,即可从本地加载模型。 模型的实现仍然处在变动中。如果希望固定使用的模型实现以保证兼容性,可以在 ` from_pretrained ` 的调用中增加 ` revision= '' v1.0 '' ` 参数。 ` v1.0 ` 是当前最新的版本号,完整的版本列表参见 [ change log ] ( http : //huggingface.co/thudm/chatglm2-6b # change-log ) 。" ] ]
[ [ "从本地加载模型", "以上代码会由", "`", "transformer", "`", "自动下载模型实现和参数。完整的模型实现在", "[", "hugging", "face", "hub", "]", "(", "http", ":", "//huggingface.co/thudm/chatglm2-6b", ")", "。如果你的网络环境较差,下载模型参数可能会花费较长时间甚至失败。此时可以先将模型下载到本地,然后从本地加载。", "从", "hugging", "face", "hub", "下载模型需要先", "[", "安装git", "lf", "]", "(", "http", ":", "//docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage", ")", ",然后运行", "``", "`", "shell", "git", "clone", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "``", "`", "如果你从", "hugging", "face", "hub", "上下载", "checkpoint", "的速度较慢,可以只下载模型实现", "``", "`", "shell", "git_lfs_skip_smudge=1", "git", "clone", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "``", "`", "然后从", "[", "这里", "]", "(", "http", ":", "//cloud.tsinghua.edu.cn/d/674208019e314311ab5c/", ")", "手动下载模型参数文件,并将下载的文件替换到本地的", "`", "chatglm2-6b", "`", "目录下。", "将模型下载到本地之后,将以上代码中的", "`", "thudm/chatglm2-6b", "`", "替换为你本地的", "`", "chatglm2-6b", "`", "文件夹的路径,即可从本地加载模型。", "模型的实现仍然处在变动中。如果希望固定使用的模型实现以保证兼容性,可以在", "`", "from_pretrained", "`", "的调用中增加", "`", "revision=", "''", "v1.0", "''", "`", "参数。", "`", "v1.0", "`", "是当前最新的版本号,完整的版本列表参见", "[", "change", "log", "]", "(", "http", ":", "//huggingface.co/thudm/chatglm2-6b", "#", "change-log", ")", "。" ], [ "从本地加载模型 以上代码会由 ` transformer ` 自动下载模型实现和参数。完整的模型实现在 [ hugging face hub ] ( http : //huggingface.co/thudm/chatglm2-6b ) 。如果你的网络环境较差,下载模型参数可能会花费较长时间甚至失败。此时可以先将模型下载到本地,然后从本地加载。 从 hugging face hub 下载模型需要先 [ 安装git lf ] ( http : //docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage ) ,然后运行 `` ` shell git clone http : //huggingface.co/thudm/chatglm2-6b `` ` 如果你从 hugging face hub 上下载 checkpoint 的速度较慢,可以只下载模型实现 `` ` shell git_lfs_skip_smudge=1 git clone http : //huggingface.co/thudm/chatglm2-6b `` ` 然后从 [ 这里 ] ( http : //cloud.tsinghua.edu.cn/d/674208019e314311ab5c/ ) 手动下载模型参数文件,并将下载的文件替换到本地的 ` chatglm2-6b ` 目录下。 将模型下载到本地之后,将以上代码中的 ` thudm/chatglm2-6b ` 替换为你本地的 ` chatglm2-6b ` 文件夹的路径,即可从本地加载模型。 模型的实现仍然处在变动中。如果希望固定使用的模型实现以保证兼容性,可以在 ` from_pretrained ` 的调用中增加 ` revision= '' v1.0 '' ` 参数。 ` v1.0 ` 是当前最新的版本号,完整的版本列表参见 [ change log ] ( http : //huggingface.co/thudm/chatglm2-6b # change-log ) 。" ] ]
从本地加载模型 以上代码会由 `transformers` 自动下载模型实现和参数。完整的模型实现在 [Hugging Face Hub](https://huggingface.co/THUDM/chatglm2-6b)。如果你的网络环境较差,下载模型参数可能会花费较长时间甚至失败。此时可以先将模型下载到本地,然后从本地加载。 从 Hugging Face Hub 下载模型需要先[安装Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage),然后运行 ```Shell git clone https://huggingface.co/THUDM/chatglm2-6b ``` 如果你从 Hugging Face Hub 上下载 checkpoint 的速度较慢,可以只下载模型实现 ```Shell GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/THUDM/chatglm2-6b ``` 然后从[这里](https://cloud.tsinghua.edu.cn/d/674208019e314311ab5c/)手动下载模型参数文件,并将下载的文件替换到本地的 `chatglm2-6b` 目录下。 将模型下载到本地之后,将以上代码中的 `THUDM/chatglm2-6b` 替换为你本地的 `chatglm2-6b` 文件夹的路径,即可从本地加载模型。 模型的实现仍然处在变动中。如果希望固定使用的模型实现以保证兼容性,可以在 `from_pretrained` 的调用中增加 `revision="v1.0"` 参数。`v1.0` 是当前最新的版本号,完整的版本列表参见 [Change Log](https://huggingface.co/THUDM/chatglm2-6b#change-log)。
https://github.com/THUDM/ChatGLM2-6B
2
[ "chatglm", "chatglm-6b", "large-language-models", "llm" ]
https://raw.githubusercontent.com/THUDM/ChatGLM2-6B/main/README.md
[ [ "api", "部署", "首先需要安装额外的依赖", "`", "pip", "install", "fastapi", "uvicorn", "`", ",然后运行仓库中的", "[", "api.py", "]", "(", "api.py", ")", ":", "``", "`", "shell", "python", "api.py", "``", "`", "默认部署在本地的", "8000", "端口,通过", "post", "方法进行调用", "``", "`", "shell", "curl", "-x", "post", "``", "http", ":", "//127.0.0.1:8000", "''", "\\", "-h", "'content-type", ":", "application/json", "'", "\\", "-d", "'", "{", "``", "prompt", "''", ":", "``", "你好", "''", ",", "``", "history", "''", ":", "[", "]", "}", "'", "``", "`", "得到的返回值为", "``", "`", "shell", "{", "``", "response", "''", ":", "''", "你好👋!我是人工智能助手", "chatglm2-6b,很高兴见到你,欢迎问我任何问题。", "''", ",", "``", "history", "''", ":", "[", "[", "``", "你好", "''", ",", "''", "你好👋!我是人工智能助手", "chatglm2-6b,很高兴见到你,欢迎问我任何问题。", "''", "]", "]", ",", "``", "status", "''", ":200", ",", "``", "time", "''", ":", "''", "2023-03-23", "21:38:40", "''", "}", "``", "`", "感谢", "[", "@", "hiyouga", "]", "(", ")", "实现了", "openai", "格式的流式", "api", "部署,可以作为任意基于", "chatgpt", "的应用的后端,比如", "[", "chatgpt-next-web", "]", "(", "http", ":", "//github.com/yidadaa/chatgpt-next-web", ")", "。可以通过运行仓库中的", "[", "openai_api.py", "]", "(", "openai_api.py", ")", "进行部署:", "``", "`", "shell", "python", "openai_api.py", "``", "`", "进行", "api", "调用的示例代码为", "``", "`", "python", "import", "openai", "__name__", "==", "``", "__main__", "''", ":", "openai.api_base", "=", "``", "http", ":", "//localhost:8000/v1", "''", "openai.api_key", "=", "``", "none", "''", "chunk", "openai.chatcompletion.create", "(", "model=", "''", "chatglm2-6b", "''", ",", "messages=", "[", "{", "``", "role", "''", ":", "``", "user", "''", ",", "``", "content", "''", ":", "``", "你好", "''", "}", "]", ",", "stream=true", ")", ":", "hasattr", "(", "chunk.choices", "[", "0", "]", ".delta", ",", "``", "content", "''", ")", ":", "print", "(", "chunk.choices", "[", "0", "]", ".delta.content", ",", "end=", "''", "''", ",", "flush=true", ")", "``", "`" ], [ "api 部署 首先需要安装额外的依赖 ` pip install fastapi uvicorn ` ,然后运行仓库中的 [ api.py ] ( api.py ) : `` ` shell python api.py `` ` 默认部署在本地的 8000 端口,通过 post 方法进行调用 `` ` shell curl -x post `` http : //127.0.0.1:8000 '' \\ -h 'content-type : application/json ' \\ -d ' { `` prompt '' : `` 你好 '' , `` history '' : [ ] } ' `` ` 得到的返回值为 `` ` shell { `` response '' : '' 你好👋!我是人工智能助手 chatglm2-6b,很高兴见到你,欢迎问我任何问题。 '' , `` history '' : [ [ `` 你好 '' , '' 你好👋!我是人工智能助手 chatglm2-6b,很高兴见到你,欢迎问我任何问题。 '' ] ] , `` status '' :200 , `` time '' : '' 2023-03-23 21:38:40 '' } `` ` 感谢 [ @ hiyouga ] ( ) 实现了 openai 格式的流式 api 部署,可以作为任意基于 chatgpt 的应用的后端,比如 [ chatgpt-next-web ] ( http : //github.com/yidadaa/chatgpt-next-web ) 。可以通过运行仓库中的 [ openai_api.py ] ( openai_api.py ) 进行部署: `` ` shell python openai_api.py `` ` 进行 api 调用的示例代码为 `` ` python import openai __name__ == `` __main__ '' : openai.api_base = `` http : //localhost:8000/v1 '' openai.api_key = `` none '' chunk openai.chatcompletion.create ( model= '' chatglm2-6b '' , messages= [ { `` role '' : `` user '' , `` content '' : `` 你好 '' } ] , stream=true ) : hasattr ( chunk.choices [ 0 ] .delta , `` content '' ) : print ( chunk.choices [ 0 ] .delta.content , end= '' '' , flush=true ) `` `" ] ]
[ [ "api", "部署", "首先需要安装额外的依赖", "`", "pip", "install", "fastapi", "uvicorn", "`", ",然后运行仓库中的", "[", "api.py", "]", "(", "api.py", ")", ":", "``", "`", "shell", "python", "api.py", "``", "`", "默认部署在本地的", "8000", "端口,通过", "post", "方法进行调用", "``", "`", "shell", "curl", "-x", "post", "``", "http", ":", "//127.0.0.1:8000", "''", "\\", "-h", "'content-type", ":", "application/json", "'", "\\", "-d", "'", "{", "``", "prompt", "''", ":", "``", "你好", "''", ",", "``", "history", "''", ":", "[", "]", "}", "'", "``", "`", "得到的返回值为", "``", "`", "shell", "{", "``", "response", "''", ":", "''", "你好👋!我是人工智能助手", "chatglm2-6b,很高兴见到你,欢迎问我任何问题。", "''", ",", "``", "history", "''", ":", "[", "[", "``", "你好", "''", ",", "''", "你好👋!我是人工智能助手", "chatglm2-6b,很高兴见到你,欢迎问我任何问题。", "''", "]", "]", ",", "``", "status", "''", ":200", ",", "``", "time", "''", ":", "''", "2023-03-23", "21:38:40", "''", "}", "``", "`", "感谢", "[", "@", "hiyouga", "]", "(", ")", "实现了", "openai", "格式的流式", "api", "部署,可以作为任意基于", "chatgpt", "的应用的后端,比如", "[", "chatgpt-next-web", "]", "(", "http", ":", "//github.com/yidadaa/chatgpt-next-web", ")", "。可以通过运行仓库中的", "[", "openai_api.py", "]", "(", "openai_api.py", ")", "进行部署:", "``", "`", "shell", "python", "openai_api.py", "``", "`", "进行", "api", "调用的示例代码为", "``", "`", "python", "import", "openai", "__name__", "==", "``", "__main__", "''", ":", "openai.api_base", "=", "``", "http", ":", "//localhost:8000/v1", "''", "openai.api_key", "=", "``", "none", "''", "chunk", "openai.chatcompletion.create", "(", "model=", "''", "chatglm2-6b", "''", ",", "messages=", "[", "{", "``", "role", "''", ":", "``", "user", "''", ",", "``", "content", "''", ":", "``", "你好", "''", "}", "]", ",", "stream=true", ")", ":", "hasattr", "(", "chunk.choices", "[", "0", "]", ".delta", ",", "``", "content", "''", ")", ":", "print", "(", "chunk.choices", "[", "0", "]", ".delta.content", ",", "end=", "''", "''", ",", "flush=true", ")", "``", "`" ], [ "api 部署 首先需要安装额外的依赖 ` pip install fastapi uvicorn ` ,然后运行仓库中的 [ api.py ] ( api.py ) : `` ` shell python api.py `` ` 默认部署在本地的 8000 端口,通过 post 方法进行调用 `` ` shell curl -x post `` http : //127.0.0.1:8000 '' \\ -h 'content-type : application/json ' \\ -d ' { `` prompt '' : `` 你好 '' , `` history '' : [ ] } ' `` ` 得到的返回值为 `` ` shell { `` response '' : '' 你好👋!我是人工智能助手 chatglm2-6b,很高兴见到你,欢迎问我任何问题。 '' , `` history '' : [ [ `` 你好 '' , '' 你好👋!我是人工智能助手 chatglm2-6b,很高兴见到你,欢迎问我任何问题。 '' ] ] , `` status '' :200 , `` time '' : '' 2023-03-23 21:38:40 '' } `` ` 感谢 [ @ hiyouga ] ( ) 实现了 openai 格式的流式 api 部署,可以作为任意基于 chatgpt 的应用的后端,比如 [ chatgpt-next-web ] ( http : //github.com/yidadaa/chatgpt-next-web ) 。可以通过运行仓库中的 [ openai_api.py ] ( openai_api.py ) 进行部署: `` ` shell python openai_api.py `` ` 进行 api 调用的示例代码为 `` ` python import openai __name__ == `` __main__ '' : openai.api_base = `` http : //localhost:8000/v1 '' openai.api_key = `` none '' chunk openai.chatcompletion.create ( model= '' chatglm2-6b '' , messages= [ { `` role '' : `` user '' , `` content '' : `` 你好 '' } ] , stream=true ) : hasattr ( chunk.choices [ 0 ] .delta , `` content '' ) : print ( chunk.choices [ 0 ] .delta.content , end= '' '' , flush=true ) `` `" ] ]
API 部署 首先需要安装额外的依赖 `pip install fastapi uvicorn`,然后运行仓库中的 [api.py](api.py): ```shell python api.py ``` 默认部署在本地的 8000 端口,通过 POST 方法进行调用 ```shell curl -X POST "http://127.0.0.1:8000" \ -H 'Content-Type: application/json' \ -d '{"prompt": "你好", "history": []}' ``` 得到的返回值为 ```shell { "response":"你好👋!我是人工智能助手 ChatGLM2-6B,很高兴见到你,欢迎问我任何问题。", "history":[["你好","你好👋!我是人工智能助手 ChatGLM2-6B,很高兴见到你,欢迎问我任何问题。"]], "status":200, "time":"2023-03-23 21:38:40" } ``` 感谢 [@hiyouga]() 实现了 OpenAI 格式的流式 API 部署,可以作为任意基于 ChatGPT 的应用的后端,比如 [ChatGPT-Next-Web](https://github.com/Yidadaa/ChatGPT-Next-Web)。可以通过运行仓库中的[openai_api.py](openai_api.py) 进行部署: ```shell python openai_api.py ``` 进行 API 调用的示例代码为 ```python import openai if __name__ == "__main__": openai.api_base = "http://localhost:8000/v1" openai.api_key = "none" for chunk in openai.ChatCompletion.create( model="chatglm2-6b", messages=[ {"role": "user", "content": "你好"} ], stream=True ): if hasattr(chunk.choices[0].delta, "content"): print(chunk.choices[0].delta.content, end="", flush=True) ```
https://github.com/THUDM/ChatGLM2-6B
0
[ "chatglm", "chatglm-6b", "large-language-models", "llm" ]
https://raw.githubusercontent.com/THUDM/ChatGLM2-6B/main/README.md
[ [ "多卡部署", "如果你有多张", "gpu,但是每张", "gpu", "的显存大小都不足以容纳完整的模型,那么可以将模型切分在多张gpu上。首先安装", "accelerate", ":", "`", "pip", "install", "accelerate", "`", ",然后通过如下方法加载模型:", "``", "`", "python", "utils", "import", "load_model_on_gpus", "model", "=", "load_model_on_gpus", "(", "``", "thudm/chatglm2-6b", "''", ",", "num_gpus=2", ")", "``", "`", "即可将模型部署到两张", "gpu", "上进行推理。你可以将", "`", "num_gpus", "`", "改为你希望使用的", "gpu", "数。默认是均匀切分的,你也可以传入", "`", "device_map", "`", "参数来自己指定。" ], [ "多卡部署 如果你有多张 gpu,但是每张 gpu 的显存大小都不足以容纳完整的模型,那么可以将模型切分在多张gpu上。首先安装 accelerate : ` pip install accelerate ` ,然后通过如下方法加载模型: `` ` python utils import load_model_on_gpus model = load_model_on_gpus ( `` thudm/chatglm2-6b '' , num_gpus=2 ) `` ` 即可将模型部署到两张 gpu 上进行推理。你可以将 ` num_gpus ` 改为你希望使用的 gpu 数。默认是均匀切分的,你也可以传入 ` device_map ` 参数来自己指定。" ] ]
[ [ "多卡部署", "如果你有多张", "gpu,但是每张", "gpu", "的显存大小都不足以容纳完整的模型,那么可以将模型切分在多张gpu上。首先安装", "accelerate", ":", "`", "pip", "install", "accelerate", "`", ",然后通过如下方法加载模型:", "``", "`", "python", "utils", "import", "load_model_on_gpus", "model", "=", "load_model_on_gpus", "(", "``", "thudm/chatglm2-6b", "''", ",", "num_gpus=2", ")", "``", "`", "即可将模型部署到两张", "gpu", "上进行推理。你可以将", "`", "num_gpus", "`", "改为你希望使用的", "gpu", "数。默认是均匀切分的,你也可以传入", "`", "device_map", "`", "参数来自己指定。" ], [ "多卡部署 如果你有多张 gpu,但是每张 gpu 的显存大小都不足以容纳完整的模型,那么可以将模型切分在多张gpu上。首先安装 accelerate : ` pip install accelerate ` ,然后通过如下方法加载模型: `` ` python utils import load_model_on_gpus model = load_model_on_gpus ( `` thudm/chatglm2-6b '' , num_gpus=2 ) `` ` 即可将模型部署到两张 gpu 上进行推理。你可以将 ` num_gpus ` 改为你希望使用的 gpu 数。默认是均匀切分的,你也可以传入 ` device_map ` 参数来自己指定。" ] ]
多卡部署 如果你有多张 GPU,但是每张 GPU 的显存大小都不足以容纳完整的模型,那么可以将模型切分在多张GPU上。首先安装 accelerate: `pip install accelerate`,然后通过如下方法加载模型: ```python from utils import load_model_on_gpus model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2) ``` 即可将模型部署到两张 GPU 上进行推理。你可以将 `num_gpus` 改为你希望使用的 GPU 数。默认是均匀切分的,你也可以传入 `device_map` 参数来自己指定。
https://github.com/THUDM/ChatGLM2-6B
0
[ "chatglm", "chatglm-6b", "large-language-models", "llm" ]
https://raw.githubusercontent.com/vllm-project/vllm/main/README.md
[ [ "vllm", "fast", "easy-to-use", "library", "llm", "inference", "serving", ".", "vllm", "fast", ":", "-", "state-of-the-art", "serving", "throughput", "-", "efficient", "management", "attention", "key", "value", "memory", "*", "*", "pagedattention", "*", "*", "-", "continuous", "batching", "incoming", "request", "-", "fast", "model", "execution", "cuda/hip", "graph", "-", "quantization", ":", "[", "gptq", "]", "(", "http", ":", "//arxiv.org/abs/2210.17323", ")", ",", "[", "awq", "]", "(", "http", ":", "//arxiv.org/abs/2306.00978", ")", ",", "[", "squeezellm", "]", "(", "http", ":", "//arxiv.org/abs/2306.07629", ")", ",", "fp8", "kv", "cache", "-", "optimized", "cuda", "kernel", "vllm", "flexible", "easy", "use", ":", "-", "seamless", "integration", "popular", "hugging", "face", "model", "-", "high-throughput", "serving", "various", "decoding", "algorithm", ",", "including", "*", "parallel", "sampling", "*", ",", "*", "beam", "search", "*", ",", "-", "tensor", "parallelism", "support", "distributed", "inference", "-", "streaming", "output", "-", "openai-compatible", "api", "server", "-", "support", "nvidia", "gpus", "amd", "gpus", "-", "(", "experimental", ")", "prefix", "caching", "support", "-", "(", "experimental", ")", "multi-lora", "support", "vllm", "seamlessly", "support", "many", "hugging", "face", "model", ",", "including", "following", "architecture", ":", "-", "aquila", "&", "aquila2", "(", "`", "baai/aquilachat2-7b", "`", ",", "`", "baai/aquilachat2-34b", "`", ",", "`", "baai/aquila-7b", "`", ",", "`", "baai/aquilachat-7b", "`", ",", "etc", ".", ")", "-", "baichuan", "&", "baichuan2", "(", "`", "baichuan-inc/baichuan2-13b-chat", "`", ",", "`", "baichuan-inc/baichuan-7b", "`", ",", "etc", ".", ")", "-", "bloom", "(", "`", "bigscience/bloom", "`", ",", "`", "bigscience/bloomz", "`", ",", "etc", ".", ")", "-", "chatglm", "(", "`", "thudm/chatglm2-6b", "`", ",", "`", "thudm/chatglm3-6b", "`", ",", "etc", ".", ")", "-", "decilm", "(", "`", "deci/decilm-7b", "`", ",", "`", "deci/decilm-7b-instruct", "`", ",", "etc", ".", ")", "-", "falcon", "(", "`", "tiiuae/falcon-7b", "`", ",", "`", "tiiuae/falcon-40b", "`", ",", "`", "tiiuae/falcon-rw-7b", "`", ",", "etc", ".", ")", "-", "gpt-2", "(", "`", "gpt2", "`", ",", "`", "gpt2-xl", "`", ",", "etc", ".", ")", "-", "gpt", "bigcode", "(", "`", "bigcode/starcoder", "`", ",", "`", "bigcode/gpt_bigcode-santacoder", "`", ",", "etc", ".", ")", "-", "gpt-j", "(", "`", "eleutherai/gpt-j-6b", "`", ",", "`", "nomic-ai/gpt4all-j", "`", ",", "etc", ".", ")", "-", "gpt-neox", "(", "`", "eleutherai/gpt-neox-20b", "`", ",", "`", "databricks/dolly-v2-12b", "`", ",", "`", "stabilityai/stablelm-tuned-alpha-7b", "`", ",", "etc", ".", ")", "-", "internlm", "(", "`", "internlm/internlm-7b", "`", ",", "`", "internlm/internlm-chat-7b", "`", ",", "etc", ".", ")", "-", "internlm2", "(", "`", "internlm/internlm2-7b", "`", ",", "`", "internlm/internlm2-chat-7b", "`", ",", "etc", ".", ")", "-", "llama", "&", "llama-2", "(", "`", "meta-llama/llama-2-70b-hf", "`", ",", "`", "lmsys/vicuna-13b-v1.3", "`", ",", "`", "young-geng/koala", "`", ",", "`", "openlm-research/open_llama_13b", "`", ",", "etc", ".", ")", "-", "mistral", "(", "`", "mistralai/mistral-7b-v0.1", "`", ",", "`", "mistralai/mistral-7b-instruct-v0.1", "`", ",", "etc", ".", ")", "-", "mixtral", "(", "`", "mistralai/mixtral-8x7b-v0.1", "`", ",", "`", "mistralai/mixtral-8x7b-instruct-v0.1", "`", ",", "etc", ".", ")", "-", "mpt", "(", "`", "mosaicml/mpt-7b", "`", ",", "`", "mosaicml/mpt-30b", "`", ",", "etc", ".", ")", "-", "opt", "(", "`", "facebook/opt-66b", "`", ",", "`", "facebook/opt-iml-max-30b", "`", ",", "etc", ".", ")", "-", "phi", "(", "`", "microsoft/phi-1_5", "`", ",", "`", "microsoft/phi-2", "`", ",", "etc", ".", ")", "-", "qwen", "(", "`", "qwen/qwen-7b", "`", ",", "`", "qwen/qwen-7b-chat", "`", ",", "etc", ".", ")", "-", "qwen2", "(", "`", "qwen/qwen2-7b-beta", "`", ",", "`", "qwen/qwen-7b-chat-beta", "`", ",", "etc", ".", ")", "-", "stablelm", "(", "`", "stabilityai/stablelm-3b-4e1t", "`", ",", "`", "stabilityai/stablelm-base-alpha-7b-v2", "`", ",", "etc", ".", ")", "-", "yi", "(", "`", "01-ai/yi-6b", "`", ",", "`", "01-ai/yi-34b", "`", ",", "etc", ".", ")", "install", "vllm", "pip", "[", "source", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/installation.html", "#", "build-from-source", ")", ":", "``", "`", "bash", "pip", "install", "vllm", "``", "`" ], [ "vllm fast easy-to-use library llm inference serving .", "vllm fast : - state-of-the-art serving throughput - efficient management attention key value memory * * pagedattention * * - continuous batching incoming request - fast model execution cuda/hip graph - quantization : [ gptq ] ( http : //arxiv.org/abs/2210.17323 ) , [ awq ] ( http : //arxiv.org/abs/2306.00978 ) , [ squeezellm ] ( http : //arxiv.org/abs/2306.07629 ) , fp8 kv cache - optimized cuda kernel vllm flexible easy use : - seamless integration popular hugging face model - high-throughput serving various decoding algorithm , including * parallel sampling * , * beam search * , - tensor parallelism support distributed inference - streaming output - openai-compatible api server - support nvidia gpus amd gpus - ( experimental ) prefix caching support - ( experimental ) multi-lora support vllm seamlessly support many hugging face model , including following architecture : - aquila & aquila2 ( ` baai/aquilachat2-7b ` , ` baai/aquilachat2-34b ` , ` baai/aquila-7b ` , ` baai/aquilachat-7b ` , etc . )", "- baichuan & baichuan2 ( ` baichuan-inc/baichuan2-13b-chat ` , ` baichuan-inc/baichuan-7b ` , etc . )", "- bloom ( ` bigscience/bloom ` , ` bigscience/bloomz ` , etc . )", "- chatglm ( ` thudm/chatglm2-6b ` , ` thudm/chatglm3-6b ` , etc . )", "- decilm ( ` deci/decilm-7b ` , ` deci/decilm-7b-instruct ` , etc . )", "- falcon ( ` tiiuae/falcon-7b ` , ` tiiuae/falcon-40b ` , ` tiiuae/falcon-rw-7b ` , etc . )", "- gpt-2 ( ` gpt2 ` , ` gpt2-xl ` , etc . )", "- gpt bigcode ( ` bigcode/starcoder ` , ` bigcode/gpt_bigcode-santacoder ` , etc . )", "- gpt-j ( ` eleutherai/gpt-j-6b ` , ` nomic-ai/gpt4all-j ` , etc . )", "- gpt-neox ( ` eleutherai/gpt-neox-20b ` , ` databricks/dolly-v2-12b ` , ` stabilityai/stablelm-tuned-alpha-7b ` , etc . )", "- internlm ( ` internlm/internlm-7b ` , ` internlm/internlm-chat-7b ` , etc . )", "- internlm2 ( ` internlm/internlm2-7b ` , ` internlm/internlm2-chat-7b ` , etc . )", "- llama & llama-2 ( ` meta-llama/llama-2-70b-hf ` , ` lmsys/vicuna-13b-v1.3 ` , ` young-geng/koala ` , ` openlm-research/open_llama_13b ` , etc . )", "- mistral ( ` mistralai/mistral-7b-v0.1 ` , ` mistralai/mistral-7b-instruct-v0.1 ` , etc . )", "- mixtral ( ` mistralai/mixtral-8x7b-v0.1 ` , ` mistralai/mixtral-8x7b-instruct-v0.1 ` , etc . )", "- mpt ( ` mosaicml/mpt-7b ` , ` mosaicml/mpt-30b ` , etc . )", "- opt ( ` facebook/opt-66b ` , ` facebook/opt-iml-max-30b ` , etc . )", "- phi ( ` microsoft/phi-1_5 ` , ` microsoft/phi-2 ` , etc . )", "- qwen ( ` qwen/qwen-7b ` , ` qwen/qwen-7b-chat ` , etc . )", "- qwen2 ( ` qwen/qwen2-7b-beta ` , ` qwen/qwen-7b-chat-beta ` , etc . )", "- stablelm ( ` stabilityai/stablelm-3b-4e1t ` , ` stabilityai/stablelm-base-alpha-7b-v2 ` , etc . )", "- yi ( ` 01-ai/yi-6b ` , ` 01-ai/yi-34b ` , etc . )", "install vllm pip [ source ] ( http : //vllm.readthedocs.io/en/latest/getting_started/installation.html # build-from-source ) : `` ` bash pip install vllm `` `" ] ]
[ [ "vllm", "fast", "easy-to-use", "library", "llm", "inference", "serving", ".", "vllm", "fast", ":", "-", "state-of-the-art", "serving", "throughput", "-", "efficient", "management", "attention", "key", "value", "memory", "*", "*", "pagedattention", "*", "*", "-", "continuous", "batching", "incoming", "request", "-", "fast", "model", "execution", "cuda/hip", "graph", "-", "quantization", ":", "[", "gptq", "]", "(", "http", ":", "//arxiv.org/abs/2210.17323", ")", ",", "[", "awq", "]", "(", "http", ":", "//arxiv.org/abs/2306.00978", ")", ",", "[", "squeezellm", "]", "(", "http", ":", "//arxiv.org/abs/2306.07629", ")", ",", "fp8", "kv", "cache", "-", "optimized", "cuda", "kernel", "vllm", "flexible", "easy", "use", ":", "-", "seamless", "integration", "popular", "hugging", "face", "model", "-", "high-throughput", "serving", "various", "decoding", "algorithm", ",", "including", "*", "parallel", "sampling", "*", ",", "*", "beam", "search", "*", ",", "-", "tensor", "parallelism", "support", "distributed", "inference", "-", "streaming", "output", "-", "openai-compatible", "api", "server", "-", "support", "nvidia", "gpus", "amd", "gpus", "-", "(", "experimental", ")", "prefix", "caching", "support", "-", "(", "experimental", ")", "multi-lora", "support", "vllm", "seamlessly", "support", "many", "hugging", "face", "model", ",", "including", "following", "architecture", ":", "-", "aquila", "&", "aquila2", "(", "`", "baai/aquilachat2-7b", "`", ",", "`", "baai/aquilachat2-34b", "`", ",", "`", "baai/aquila-7b", "`", ",", "`", "baai/aquilachat-7b", "`", ",", "etc", ".", ")", "-", "baichuan", "&", "baichuan2", "(", "`", "baichuan-inc/baichuan2-13b-chat", "`", ",", "`", "baichuan-inc/baichuan-7b", "`", ",", "etc", ".", ")", "-", "bloom", "(", "`", "bigscience/bloom", "`", ",", "`", "bigscience/bloomz", "`", ",", "etc", ".", ")", "-", "chatglm", "(", "`", "thudm/chatglm2-6b", "`", ",", "`", "thudm/chatglm3-6b", "`", ",", "etc", ".", ")", "-", "decilm", "(", "`", "deci/decilm-7b", "`", ",", "`", "deci/decilm-7b-instruct", "`", ",", "etc", ".", ")", "-", "falcon", "(", "`", "tiiuae/falcon-7b", "`", ",", "`", "tiiuae/falcon-40b", "`", ",", "`", "tiiuae/falcon-rw-7b", "`", ",", "etc", ".", ")", "-", "gpt-2", "(", "`", "gpt2", "`", ",", "`", "gpt2-xl", "`", ",", "etc", ".", ")", "-", "gpt", "bigcode", "(", "`", "bigcode/starcoder", "`", ",", "`", "bigcode/gpt_bigcode-santacoder", "`", ",", "etc", ".", ")", "-", "gpt-j", "(", "`", "eleutherai/gpt-j-6b", "`", ",", "`", "nomic-ai/gpt4all-j", "`", ",", "etc", ".", ")", "-", "gpt-neox", "(", "`", "eleutherai/gpt-neox-20b", "`", ",", "`", "databricks/dolly-v2-12b", "`", ",", "`", "stabilityai/stablelm-tuned-alpha-7b", "`", ",", "etc", ".", ")", "-", "internlm", "(", "`", "internlm/internlm-7b", "`", ",", "`", "internlm/internlm-chat-7b", "`", ",", "etc", ".", ")", "-", "internlm2", "(", "`", "internlm/internlm2-7b", "`", ",", "`", "internlm/internlm2-chat-7b", "`", ",", "etc", ".", ")", "-", "llama", "&", "llama-2", "(", "`", "meta-llama/llama-2-70b-hf", "`", ",", "`", "lmsys/vicuna-13b-v1.3", "`", ",", "`", "young-geng/koala", "`", ",", "`", "openlm-research/open_llama_13b", "`", ",", "etc", ".", ")", "-", "mistral", "(", "`", "mistralai/mistral-7b-v0.1", "`", ",", "`", "mistralai/mistral-7b-instruct-v0.1", "`", ",", "etc", ".", ")", "-", "mixtral", "(", "`", "mistralai/mixtral-8x7b-v0.1", "`", ",", "`", "mistralai/mixtral-8x7b-instruct-v0.1", "`", ",", "etc", ".", ")", "-", "mpt", "(", "`", "mosaicml/mpt-7b", "`", ",", "`", "mosaicml/mpt-30b", "`", ",", "etc", ".", ")", "-", "opt", "(", "`", "facebook/opt-66b", "`", ",", "`", "facebook/opt-iml-max-30b", "`", ",", "etc", ".", ")", "-", "phi", "(", "`", "microsoft/phi-1_5", "`", ",", "`", "microsoft/phi-2", "`", ",", "etc", ".", ")", "-", "qwen", "(", "`", "qwen/qwen-7b", "`", ",", "`", "qwen/qwen-7b-chat", "`", ",", "etc", ".", ")", "-", "qwen2", "(", "`", "qwen/qwen2-7b-beta", "`", ",", "`", "qwen/qwen-7b-chat-beta", "`", ",", "etc", ".", ")", "-", "stablelm", "(", "`", "stabilityai/stablelm-3b-4e1t", "`", ",", "`", "stabilityai/stablelm-base-alpha-7b-v2", "`", ",", "etc", ".", ")", "-", "yi", "(", "`", "01-ai/yi-6b", "`", ",", "`", "01-ai/yi-34b", "`", ",", "etc", ".", ")", "install", "vllm", "pip", "[", "source", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/installation.html", "#", "build-from-source", ")", ":", "``", "`", "bash", "pip", "install", "vllm", "``", "`" ], [ "vllm fast easy-to-use library llm inference serving .", "vllm fast : - state-of-the-art serving throughput - efficient management attention key value memory * * pagedattention * * - continuous batching incoming request - fast model execution cuda/hip graph - quantization : [ gptq ] ( http : //arxiv.org/abs/2210.17323 ) , [ awq ] ( http : //arxiv.org/abs/2306.00978 ) , [ squeezellm ] ( http : //arxiv.org/abs/2306.07629 ) , fp8 kv cache - optimized cuda kernel vllm flexible easy use : - seamless integration popular hugging face model - high-throughput serving various decoding algorithm , including * parallel sampling * , * beam search * , - tensor parallelism support distributed inference - streaming output - openai-compatible api server - support nvidia gpus amd gpus - ( experimental ) prefix caching support - ( experimental ) multi-lora support vllm seamlessly support many hugging face model , including following architecture : - aquila & aquila2 ( ` baai/aquilachat2-7b ` , ` baai/aquilachat2-34b ` , ` baai/aquila-7b ` , ` baai/aquilachat-7b ` , etc . )", "- baichuan & baichuan2 ( ` baichuan-inc/baichuan2-13b-chat ` , ` baichuan-inc/baichuan-7b ` , etc . )", "- bloom ( ` bigscience/bloom ` , ` bigscience/bloomz ` , etc . )", "- chatglm ( ` thudm/chatglm2-6b ` , ` thudm/chatglm3-6b ` , etc . )", "- decilm ( ` deci/decilm-7b ` , ` deci/decilm-7b-instruct ` , etc . )", "- falcon ( ` tiiuae/falcon-7b ` , ` tiiuae/falcon-40b ` , ` tiiuae/falcon-rw-7b ` , etc . )", "- gpt-2 ( ` gpt2 ` , ` gpt2-xl ` , etc . )", "- gpt bigcode ( ` bigcode/starcoder ` , ` bigcode/gpt_bigcode-santacoder ` , etc . )", "- gpt-j ( ` eleutherai/gpt-j-6b ` , ` nomic-ai/gpt4all-j ` , etc . )", "- gpt-neox ( ` eleutherai/gpt-neox-20b ` , ` databricks/dolly-v2-12b ` , ` stabilityai/stablelm-tuned-alpha-7b ` , etc . )", "- internlm ( ` internlm/internlm-7b ` , ` internlm/internlm-chat-7b ` , etc . )", "- internlm2 ( ` internlm/internlm2-7b ` , ` internlm/internlm2-chat-7b ` , etc . )", "- llama & llama-2 ( ` meta-llama/llama-2-70b-hf ` , ` lmsys/vicuna-13b-v1.3 ` , ` young-geng/koala ` , ` openlm-research/open_llama_13b ` , etc . )", "- mistral ( ` mistralai/mistral-7b-v0.1 ` , ` mistralai/mistral-7b-instruct-v0.1 ` , etc . )", "- mixtral ( ` mistralai/mixtral-8x7b-v0.1 ` , ` mistralai/mixtral-8x7b-instruct-v0.1 ` , etc . )", "- mpt ( ` mosaicml/mpt-7b ` , ` mosaicml/mpt-30b ` , etc . )", "- opt ( ` facebook/opt-66b ` , ` facebook/opt-iml-max-30b ` , etc . )", "- phi ( ` microsoft/phi-1_5 ` , ` microsoft/phi-2 ` , etc . )", "- qwen ( ` qwen/qwen-7b ` , ` qwen/qwen-7b-chat ` , etc . )", "- qwen2 ( ` qwen/qwen2-7b-beta ` , ` qwen/qwen-7b-chat-beta ` , etc . )", "- stablelm ( ` stabilityai/stablelm-3b-4e1t ` , ` stabilityai/stablelm-base-alpha-7b-v2 ` , etc . )", "- yi ( ` 01-ai/yi-6b ` , ` 01-ai/yi-34b ` , etc . )", "install vllm pip [ source ] ( http : //vllm.readthedocs.io/en/latest/getting_started/installation.html # build-from-source ) : `` ` bash pip install vllm `` `" ] ]
About vLLM is a fast and easy-to-use library for LLM inference and serving. vLLM is fast with: - State-of-the-art serving throughput - Efficient management of attention key and value memory with **PagedAttention** - Continuous batching of incoming requests - Fast model execution with CUDA/HIP graph - Quantization: [GPTQ](https://arxiv.org/abs/2210.17323), [AWQ](https://arxiv.org/abs/2306.00978), [SqueezeLLM](https://arxiv.org/abs/2306.07629), FP8 KV Cache - Optimized CUDA kernels vLLM is flexible and easy to use with: - Seamless integration with popular Hugging Face models - High-throughput serving with various decoding algorithms, including *parallel sampling*, *beam search*, and more - Tensor parallelism support for distributed inference - Streaming outputs - OpenAI-compatible API server - Support NVIDIA GPUs and AMD GPUs - (Experimental) Prefix caching support - (Experimental) Multi-lora support vLLM seamlessly supports many Hugging Face models, including the following architectures: - Aquila & Aquila2 (`BAAI/AquilaChat2-7B`, `BAAI/AquilaChat2-34B`, `BAAI/Aquila-7B`, `BAAI/AquilaChat-7B`, etc.) - Baichuan & Baichuan2 (`baichuan-inc/Baichuan2-13B-Chat`, `baichuan-inc/Baichuan-7B`, etc.) - BLOOM (`bigscience/bloom`, `bigscience/bloomz`, etc.) - ChatGLM (`THUDM/chatglm2-6b`, `THUDM/chatglm3-6b`, etc.) - DeciLM (`Deci/DeciLM-7B`, `Deci/DeciLM-7B-instruct`, etc.) - Falcon (`tiiuae/falcon-7b`, `tiiuae/falcon-40b`, `tiiuae/falcon-rw-7b`, etc.) - GPT-2 (`gpt2`, `gpt2-xl`, etc.) - GPT BigCode (`bigcode/starcoder`, `bigcode/gpt_bigcode-santacoder`, etc.) - GPT-J (`EleutherAI/gpt-j-6b`, `nomic-ai/gpt4all-j`, etc.) - GPT-NeoX (`EleutherAI/gpt-neox-20b`, `databricks/dolly-v2-12b`, `stabilityai/stablelm-tuned-alpha-7b`, etc.) - InternLM (`internlm/internlm-7b`, `internlm/internlm-chat-7b`, etc.) - InternLM2 (`internlm/internlm2-7b`, `internlm/internlm2-chat-7b`, etc.) - LLaMA & LLaMA-2 (`meta-llama/Llama-2-70b-hf`, `lmsys/vicuna-13b-v1.3`, `young-geng/koala`, `openlm-research/open_llama_13b`, etc.) - Mistral (`mistralai/Mistral-7B-v0.1`, `mistralai/Mistral-7B-Instruct-v0.1`, etc.) - Mixtral (`mistralai/Mixtral-8x7B-v0.1`, `mistralai/Mixtral-8x7B-Instruct-v0.1`, etc.) - MPT (`mosaicml/mpt-7b`, `mosaicml/mpt-30b`, etc.) - OPT (`facebook/opt-66b`, `facebook/opt-iml-max-30b`, etc.) - Phi (`microsoft/phi-1_5`, `microsoft/phi-2`, etc.) - Qwen (`Qwen/Qwen-7B`, `Qwen/Qwen-7B-Chat`, etc.) - Qwen2 (`Qwen/Qwen2-7B-beta`, `Qwen/Qwen-7B-Chat-beta`, etc.) - StableLM(`stabilityai/stablelm-3b-4e1t`, `stabilityai/stablelm-base-alpha-7b-v2`, etc.) - Yi (`01-ai/Yi-6B`, `01-ai/Yi-34B`, etc.) Install vLLM with pip or [from source](https://vllm.readthedocs.io/en/latest/getting_started/installation.html#build-from-source): ```bash pip install vllm ```
https://github.com/vllm-project/vllm
0
[ "gpt", "inference", "llama", "llm", "llm-serving", "llmops", "mlops", "model-serving", "pytorch", "transformer" ]
https://raw.githubusercontent.com/vllm-project/vllm/main/README.md
[ [ "getting", "started", "visit", "[", "documentation", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/", ")", "get", "started", ".", "-", "[", "installation", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/installation.html", ")", "-", "[", "quickstart", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/quickstart.html", ")", "-", "[", "supported", "model", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/models/supported_models.html", ")" ], [ "getting started visit [ documentation ] ( http : //vllm.readthedocs.io/en/latest/ ) get started .", "- [ installation ] ( http : //vllm.readthedocs.io/en/latest/getting_started/installation.html ) - [ quickstart ] ( http : //vllm.readthedocs.io/en/latest/getting_started/quickstart.html ) - [ supported model ] ( http : //vllm.readthedocs.io/en/latest/models/supported_models.html )" ] ]
[ [ "getting", "started", "visit", "[", "documentation", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/", ")", "get", "started", ".", "-", "[", "installation", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/installation.html", ")", "-", "[", "quickstart", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/getting_started/quickstart.html", ")", "-", "[", "supported", "model", "]", "(", "http", ":", "//vllm.readthedocs.io/en/latest/models/supported_models.html", ")" ], [ "getting started visit [ documentation ] ( http : //vllm.readthedocs.io/en/latest/ ) get started .", "- [ installation ] ( http : //vllm.readthedocs.io/en/latest/getting_started/installation.html ) - [ quickstart ] ( http : //vllm.readthedocs.io/en/latest/getting_started/quickstart.html ) - [ supported model ] ( http : //vllm.readthedocs.io/en/latest/models/supported_models.html )" ] ]
Getting Started Visit our [documentation](https://vllm.readthedocs.io/en/latest/) to get started. - [Installation](https://vllm.readthedocs.io/en/latest/getting_started/installation.html) - [Quickstart](https://vllm.readthedocs.io/en/latest/getting_started/quickstart.html) - [Supported Models](https://vllm.readthedocs.io/en/latest/models/supported_models.html)
https://github.com/vllm-project/vllm
-1
[ "gpt", "inference", "llama", "llm", "llm-serving", "llmops", "mlops", "model-serving", "pytorch", "transformer" ]
https://raw.githubusercontent.com/vllm-project/vllm/main/README.md
[ [ "contributing", "welcome", "value", "contribution", "collaboration", ".", "please", "check", "[", "contributing.md", "]", "(", "./contributing.md", ")", "get", "involved", "." ], [ "contributing welcome value contribution collaboration .", "please check [ contributing.md ] ( ./contributing.md ) get involved ." ] ]
[ [ "contributing", "welcome", "value", "contribution", "collaboration", ".", "please", "check", "[", "contributing.md", "]", "(", "./contributing.md", ")", "get", "involved", "." ], [ "contributing welcome value contribution collaboration .", "please check [ contributing.md ] ( ./contributing.md ) get involved ." ] ]
Contributing We welcome and value any contributions and collaborations. Please check out [CONTRIBUTING.md](./CONTRIBUTING.md) for how to get involved.
https://github.com/vllm-project/vllm
-1
[ "gpt", "inference", "llama", "llm", "llm-serving", "llmops", "mlops", "model-serving", "pytorch", "transformer" ]
https://raw.githubusercontent.com/huggingface/peft/main/README.md
[ [ "<", "!", "--", "-", "copyright", "2023", "huggingface", "team", ".", "right", "reserved", ".", "licensed", "apache", "license", ",", "version", "2.0", "(", "``", "license", "''", ")", ";", "may", "use", "file", "except", "compliance", "license", ".", "may", "obtain", "copy", "license", "http", ":", "//www.apache.org/licenses/license-2.0", "unless", "required", "applicable", "law", "agreed", "writing", ",", "software", "distributed", "license", "distributed", "``", "''", "basis", ",", "without", "warranty", "condition", "kind", ",", "either", "express", "implied", ".", "see", "license", "specific", "language", "governing", "permission", "limitation", "license", ".", "--", ">", "<", "h1", "align=", "''", "center", "''", ">", "<", "p", ">", "🤗", "peft", "<", "/p", ">", "<", "/h1", ">", "<", "h3", "align=", "''", "center", "''", ">", "<", "p", ">", "state-of-the-art", "parameter-efficient", "fine-tuning", "(", "peft", ")", "method", "<", "/p", ">", "<", "/h3", ">", "fine-tuning", "large", "pretrained", "model", "often", "prohibitively", "costly", "due", "scale", ".", "parameter-efficient", "fine-tuning", "(", "peft", ")", "method", "enable", "efficient", "adaptation", "large", "pretrained", "model", "various", "downstream", "application", "fine-tuning", "small", "number", "(", "extra", ")", "model", "parameter", "instead", "model", "'s", "parameter", ".", "significantly", "decrease", "computational", "storage", "cost", ".", "recent", "state-of-the-art", "peft", "technique", "achieve", "performance", "comparable", "fully", "fine-tuned", "model", ".", "peft", "integrated", "transformer", "easy", "model", "training", "inference", ",", "diffuser", "conveniently", "managing", "different", "adapter", ",", "accelerate", "distributed", "training", "inference", "really", "big", "model", ".", ">", "[", "!", "tip", "]", ">", "visit", "[", "peft", "]", "(", "http", ":", "//huggingface.co/peft", ")", "organization", "read", "peft", "method", "implemented", "library", "see", "notebook", "demonstrating", "apply", "method", "variety", "downstream", "task", ".", "click", "``", "watch", "repos", "''", "button", "organization", "page", "notified", "newly", "implemented", "method", "notebook", "!", "check", "peft", "adapter", "api", "reference", "section", "list", "supported", "peft", "method", ",", "read", "[", "adapter", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/adapter", ")", ",", "[", "soft", "prompt", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/prompting", ")", ",", "[", "ia3", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/ia3", ")", "conceptual", "guide", "learn", "method", "work", "." ], [ "< ! -- - copyright 2023 huggingface team .", "right reserved .", "licensed apache license , version 2.0 ( `` license '' ) ; may use file except compliance license .", "may obtain copy license http : //www.apache.org/licenses/license-2.0 unless required applicable law agreed writing , software distributed license distributed `` '' basis , without warranty condition kind , either express implied .", "see license specific language governing permission limitation license .", "-- > < h1 align= '' center '' > < p > 🤗 peft < /p > < /h1 > < h3 align= '' center '' > < p > state-of-the-art parameter-efficient fine-tuning ( peft ) method < /p > < /h3 > fine-tuning large pretrained model often prohibitively costly due scale .", "parameter-efficient fine-tuning ( peft ) method enable efficient adaptation large pretrained model various downstream application fine-tuning small number ( extra ) model parameter instead model 's parameter .", "significantly decrease computational storage cost .", "recent state-of-the-art peft technique achieve performance comparable fully fine-tuned model .", "peft integrated transformer easy model training inference , diffuser conveniently managing different adapter , accelerate distributed training inference really big model .", "> [ ! tip ] > visit [ peft ] ( http : //huggingface.co/peft ) organization read peft method implemented library see notebook demonstrating apply method variety downstream task .", "click `` watch repos '' button organization page notified newly implemented method notebook !", "check peft adapter api reference section list supported peft method , read [ adapter ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/adapter ) , [ soft prompt ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/prompting ) , [ ia3 ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/ia3 ) conceptual guide learn method work ." ] ]
[ [ "<", "!", "--", "-", "copyright", "2023", "huggingface", "team", ".", "right", "reserved", ".", "licensed", "apache", "license", ",", "version", "2.0", "(", "``", "license", "''", ")", ";", "may", "use", "file", "except", "compliance", "license", ".", "may", "obtain", "copy", "license", "http", ":", "//www.apache.org/licenses/license-2.0", "unless", "required", "applicable", "law", "agreed", "writing", ",", "software", "distributed", "license", "distributed", "``", "''", "basis", ",", "without", "warranty", "condition", "kind", ",", "either", "express", "implied", ".", "see", "license", "specific", "language", "governing", "permission", "limitation", "license", ".", "--", ">", "<", "h1", "align=", "''", "center", "''", ">", "<", "p", ">", "🤗", "peft", "<", "/p", ">", "<", "/h1", ">", "<", "h3", "align=", "''", "center", "''", ">", "<", "p", ">", "state-of-the-art", "parameter-efficient", "fine-tuning", "(", "peft", ")", "method", "<", "/p", ">", "<", "/h3", ">", "fine-tuning", "large", "pretrained", "model", "often", "prohibitively", "costly", "due", "scale", ".", "parameter-efficient", "fine-tuning", "(", "peft", ")", "method", "enable", "efficient", "adaptation", "large", "pretrained", "model", "various", "downstream", "application", "fine-tuning", "small", "number", "(", "extra", ")", "model", "parameter", "instead", "model", "'s", "parameter", ".", "significantly", "decrease", "computational", "storage", "cost", ".", "recent", "state-of-the-art", "peft", "technique", "achieve", "performance", "comparable", "fully", "fine-tuned", "model", ".", "peft", "integrated", "transformer", "easy", "model", "training", "inference", ",", "diffuser", "conveniently", "managing", "different", "adapter", ",", "accelerate", "distributed", "training", "inference", "really", "big", "model", ".", ">", "[", "!", "tip", "]", ">", "visit", "[", "peft", "]", "(", "http", ":", "//huggingface.co/peft", ")", "organization", "read", "peft", "method", "implemented", "library", "see", "notebook", "demonstrating", "apply", "method", "variety", "downstream", "task", ".", "click", "``", "watch", "repos", "''", "button", "organization", "page", "notified", "newly", "implemented", "method", "notebook", "!", "check", "peft", "adapter", "api", "reference", "section", "list", "supported", "peft", "method", ",", "read", "[", "adapter", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/adapter", ")", ",", "[", "soft", "prompt", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/prompting", ")", ",", "[", "ia3", "]", "(", "http", ":", "//huggingface.co/docs/peft/en/conceptual_guides/ia3", ")", "conceptual", "guide", "learn", "method", "work", "." ], [ "< ! -- - copyright 2023 huggingface team .", "right reserved .", "licensed apache license , version 2.0 ( `` license '' ) ; may use file except compliance license .", "may obtain copy license http : //www.apache.org/licenses/license-2.0 unless required applicable law agreed writing , software distributed license distributed `` '' basis , without warranty condition kind , either express implied .", "see license specific language governing permission limitation license .", "-- > < h1 align= '' center '' > < p > 🤗 peft < /p > < /h1 > < h3 align= '' center '' > < p > state-of-the-art parameter-efficient fine-tuning ( peft ) method < /p > < /h3 > fine-tuning large pretrained model often prohibitively costly due scale .", "parameter-efficient fine-tuning ( peft ) method enable efficient adaptation large pretrained model various downstream application fine-tuning small number ( extra ) model parameter instead model 's parameter .", "significantly decrease computational storage cost .", "recent state-of-the-art peft technique achieve performance comparable fully fine-tuned model .", "peft integrated transformer easy model training inference , diffuser conveniently managing different adapter , accelerate distributed training inference really big model .", "> [ ! tip ] > visit [ peft ] ( http : //huggingface.co/peft ) organization read peft method implemented library see notebook demonstrating apply method variety downstream task .", "click `` watch repos '' button organization page notified newly implemented method notebook !", "check peft adapter api reference section list supported peft method , read [ adapter ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/adapter ) , [ soft prompt ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/prompting ) , [ ia3 ] ( http : //huggingface.co/docs/peft/en/conceptual_guides/ia3 ) conceptual guide learn method work ." ] ]
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <h1 align="center"> <p>🤗 PEFT</p></h1> <h3 align="center"> <p>State-of-the-art Parameter-Efficient Fine-Tuning (PEFT) methods</p> </h3> Fine-tuning large pretrained models is often prohibitively costly due to their scale. Parameter-Efficient Fine-Tuning (PEFT) methods enable efficient adaptation of large pretrained models to various downstream applications by only fine-tuning a small number of (extra) model parameters instead of all the model's parameters. This significantly decreases the computational and storage costs. Recent state-of-the-art PEFT techniques achieve performance comparable to fully fine-tuned models. PEFT is integrated with Transformers for easy model training and inference, Diffusers for conveniently managing different adapters, and Accelerate for distributed training and inference for really big models. > [!TIP] > Visit the [PEFT](https://huggingface.co/PEFT) organization to read about the PEFT methods implemented in the library and to see notebooks demonstrating how to apply these methods to a variety of downstream tasks. Click the "Watch repos" button on the organization page to be notified of newly implemented methods and notebooks! Check the PEFT Adapters API Reference section for a list of supported PEFT methods, and read the [Adapters](https://huggingface.co/docs/peft/en/conceptual_guides/adapter), [Soft prompts](https://huggingface.co/docs/peft/en/conceptual_guides/prompting), and [IA3](https://huggingface.co/docs/peft/en/conceptual_guides/ia3) conceptual guides to learn more about how these methods work.
https://github.com/huggingface/peft
-1
[ "adapter", "diffusion", "llm", "lora", "parameter-efficient-learning", "python", "pytorch", "transformers" ]
https://raw.githubusercontent.com/huggingface/peft/main/README.md
[ [ "quickstart", "install", "peft", "pip", ":", "``", "`", "bash", "pip", "install", "peft", "``", "`", "prepare", "model", "training", "peft", "method", "lora", "wrapping", "base", "model", "peft", "configuration", "`", "get_peft_model", "`", ".", "bigscience/mt0-large", "model", ",", "'re", "training", "0.19", "%", "parameter", "!", "``", "`", "python", "transformer", "import", "automodelforseq2seqlm", "peft", "import", "get_peft_config", ",", "get_peft_model", ",", "loraconfig", ",", "tasktype", "model_name_or_path", "=", "``", "bigscience/mt0-large", "''", "tokenizer_name_or_path", "=", "``", "bigscience/mt0-large", "''", "peft_config", "=", "loraconfig", "(", "task_type=tasktype.seq_2_seq_lm", ",", "inference_mode=false", ",", "r=8", ",", "lora_alpha=32", ",", "lora_dropout=0.1", ")", "model", "=", "automodelforseq2seqlm.from_pretrained", "(", "model_name_or_path", ")", "model", "=", "get_peft_model", "(", "model", ",", "peft_config", ")", "model.print_trainable_parameters", "(", ")", "''", "trainable", "params", ":", "2359296", "||", "params", ":", "1231940608", "||", "trainable", "%", ":", "0.19151053100118282", "''", "``", "`", "load", "peft", "model", "inference", ":", "``", "`", "py", "peft", "import", "autopeftmodelforcausallm", "transformer", "import", "autotokenizer", "import", "torch", "model", "=", "autopeftmodelforcausallm.from_pretrained", "(", "``", "ybelkada/opt-350m-lora", "''", ")", ".to", "(", "``", "cuda", "''", ")", "tokenizer", "=", "autotokenizer.from_pretrained", "(", "``", "facebook/opt-350m", "''", ")", "model.eval", "(", ")", "input", "=", "tokenizer", "(", "``", "preheat", "oven", "350", "degree", "place", "cookie", "dough", "''", ",", "return_tensors=", "''", "pt", "''", ")", "output", "=", "model.generate", "(", "input_ids=inputs", "[", "``", "input_ids", "''", "]", ".to", "(", "``", "cuda", "''", ")", ",", "max_new_tokens=50", ")", "print", "(", "tokenizer.batch_decode", "(", "output", ",", "skip_special_tokens=true", ")", "[", "0", "]", ")", "''", "preheat", "oven", "350", "degree", "place", "cookie", "dough", "center", "oven", ".", "large", "bowl", ",", "combine", "flour", ",", "baking", "powder", ",", "baking", "soda", ",", "salt", ",", "cinnamon", ".", "separate", "bowl", ",", "combine", "egg", "yolk", ",", "sugar", ",", "vanilla", ".", "''", "``", "`" ], [ "quickstart install peft pip : `` ` bash pip install peft `` ` prepare model training peft method lora wrapping base model peft configuration ` get_peft_model ` .", "bigscience/mt0-large model , 're training 0.19 % parameter !", "`` ` python transformer import automodelforseq2seqlm peft import get_peft_config , get_peft_model , loraconfig , tasktype model_name_or_path = `` bigscience/mt0-large '' tokenizer_name_or_path = `` bigscience/mt0-large '' peft_config = loraconfig ( task_type=tasktype.seq_2_seq_lm , inference_mode=false , r=8 , lora_alpha=32 , lora_dropout=0.1 ) model = automodelforseq2seqlm.from_pretrained ( model_name_or_path ) model = get_peft_model ( model , peft_config ) model.print_trainable_parameters ( ) '' trainable params : 2359296 || params : 1231940608 || trainable % : 0.19151053100118282 '' `` ` load peft model inference : `` ` py peft import autopeftmodelforcausallm transformer import autotokenizer import torch model = autopeftmodelforcausallm.from_pretrained ( `` ybelkada/opt-350m-lora '' ) .to ( `` cuda '' ) tokenizer = autotokenizer.from_pretrained ( `` facebook/opt-350m '' ) model.eval ( ) input = tokenizer ( `` preheat oven 350 degree place cookie dough '' , return_tensors= '' pt '' ) output = model.generate ( input_ids=inputs [ `` input_ids '' ] .to ( `` cuda '' ) , max_new_tokens=50 ) print ( tokenizer.batch_decode ( output , skip_special_tokens=true ) [ 0 ] ) '' preheat oven 350 degree place cookie dough center oven .", "large bowl , combine flour , baking powder , baking soda , salt , cinnamon .", "separate bowl , combine egg yolk , sugar , vanilla . ''", "`` `" ] ]
[ [ "quickstart", "install", "peft", "pip", ":", "``", "`", "bash", "pip", "install", "peft", "``", "`", "prepare", "model", "training", "peft", "method", "lora", "wrapping", "base", "model", "peft", "configuration", "`", "get_peft_model", "`", ".", "bigscience/mt0-large", "model", ",", "'re", "training", "0.19", "%", "parameter", "!", "``", "`", "python", "transformer", "import", "automodelforseq2seqlm", "peft", "import", "get_peft_config", ",", "get_peft_model", ",", "loraconfig", ",", "tasktype", "model_name_or_path", "=", "``", "bigscience/mt0-large", "''", "tokenizer_name_or_path", "=", "``", "bigscience/mt0-large", "''", "peft_config", "=", "loraconfig", "(", "task_type=tasktype.seq_2_seq_lm", ",", "inference_mode=false", ",", "r=8", ",", "lora_alpha=32", ",", "lora_dropout=0.1", ")", "model", "=", "automodelforseq2seqlm.from_pretrained", "(", "model_name_or_path", ")", "model", "=", "get_peft_model", "(", "model", ",", "peft_config", ")", "model.print_trainable_parameters", "(", ")", "''", "trainable", "params", ":", "2359296", "||", "params", ":", "1231940608", "||", "trainable", "%", ":", "0.19151053100118282", "''", "``", "`", "load", "peft", "model", "inference", ":", "``", "`", "py", "peft", "import", "autopeftmodelforcausallm", "transformer", "import", "autotokenizer", "import", "torch", "model", "=", "autopeftmodelforcausallm.from_pretrained", "(", "``", "ybelkada/opt-350m-lora", "''", ")", ".to", "(", "``", "cuda", "''", ")", "tokenizer", "=", "autotokenizer.from_pretrained", "(", "``", "facebook/opt-350m", "''", ")", "model.eval", "(", ")", "input", "=", "tokenizer", "(", "``", "preheat", "oven", "350", "degree", "place", "cookie", "dough", "''", ",", "return_tensors=", "''", "pt", "''", ")", "output", "=", "model.generate", "(", "input_ids=inputs", "[", "``", "input_ids", "''", "]", ".to", "(", "``", "cuda", "''", ")", ",", "max_new_tokens=50", ")", "print", "(", "tokenizer.batch_decode", "(", "output", ",", "skip_special_tokens=true", ")", "[", "0", "]", ")", "''", "preheat", "oven", "350", "degree", "place", "cookie", "dough", "center", "oven", ".", "large", "bowl", ",", "combine", "flour", ",", "baking", "powder", ",", "baking", "soda", ",", "salt", ",", "cinnamon", ".", "separate", "bowl", ",", "combine", "egg", "yolk", ",", "sugar", ",", "vanilla", ".", "''", "``", "`" ], [ "quickstart install peft pip : `` ` bash pip install peft `` ` prepare model training peft method lora wrapping base model peft configuration ` get_peft_model ` .", "bigscience/mt0-large model , 're training 0.19 % parameter !", "`` ` python transformer import automodelforseq2seqlm peft import get_peft_config , get_peft_model , loraconfig , tasktype model_name_or_path = `` bigscience/mt0-large '' tokenizer_name_or_path = `` bigscience/mt0-large '' peft_config = loraconfig ( task_type=tasktype.seq_2_seq_lm , inference_mode=false , r=8 , lora_alpha=32 , lora_dropout=0.1 ) model = automodelforseq2seqlm.from_pretrained ( model_name_or_path ) model = get_peft_model ( model , peft_config ) model.print_trainable_parameters ( ) '' trainable params : 2359296 || params : 1231940608 || trainable % : 0.19151053100118282 '' `` ` load peft model inference : `` ` py peft import autopeftmodelforcausallm transformer import autotokenizer import torch model = autopeftmodelforcausallm.from_pretrained ( `` ybelkada/opt-350m-lora '' ) .to ( `` cuda '' ) tokenizer = autotokenizer.from_pretrained ( `` facebook/opt-350m '' ) model.eval ( ) input = tokenizer ( `` preheat oven 350 degree place cookie dough '' , return_tensors= '' pt '' ) output = model.generate ( input_ids=inputs [ `` input_ids '' ] .to ( `` cuda '' ) , max_new_tokens=50 ) print ( tokenizer.batch_decode ( output , skip_special_tokens=true ) [ 0 ] ) '' preheat oven 350 degree place cookie dough center oven .", "large bowl , combine flour , baking powder , baking soda , salt , cinnamon .", "separate bowl , combine egg yolk , sugar , vanilla . ''", "`` `" ] ]
Quickstart Install PEFT from pip: ```bash pip install peft ``` Prepare a model for training with a PEFT method such as LoRA by wrapping the base model and PEFT configuration with `get_peft_model`. For the bigscience/mt0-large model, you're only training 0.19% of the parameters! ```python from transformers import AutoModelForSeq2SeqLM from peft import get_peft_config, get_peft_model, LoraConfig, TaskType model_name_or_path = "bigscience/mt0-large" tokenizer_name_or_path = "bigscience/mt0-large" peft_config = LoraConfig( task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() "trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282" ``` To load a PEFT model for inference: ```py from peft import AutoPeftModelForCausalLM from transformers import AutoTokenizer import torch model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora").to("cuda") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") model.eval() inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt") outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) "Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla." ```
https://github.com/huggingface/peft
0
[ "adapter", "diffusion", "llm", "lora", "parameter-efficient-learning", "python", "pytorch", "transformers" ]
https://raw.githubusercontent.com/huggingface/peft/main/README.md
[ [ "quantization", "quantization", "another", "method", "reducing", "memory", "requirement", "model", "representing", "data", "lower", "precision", ".", "combined", "peft", "method", "make", "even", "easier", "train", "load", "llm", "inference", ".", "*", "learn", "finetune", "[", "meta-llama/llama-2-7b-hf", "]", "(", "http", ":", "//huggingface.co/meta-llama/llama-2-7b-hf", ")", "qlora", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", "16gb", "gpu", "[", "finetune", "llm", "consumer", "hardware", "using", "tool", "pytorch", "hugging", "face", "ecosystem", "]", "(", "http", ":", "//pytorch.org/blog/finetune-llms/", ")", "blog", "post", ".", "*", "learn", "finetune", "[", "openai/whisper-large-v2", "]", "(", "http", ":", "//huggingface.co/openai/whisper-large-v2", ")", "model", "multilingual", "automatic", "speech", "recognition", "lora", "8-bit", "quantization", "[", "notebook", "]", "(", "http", ":", "//colab.research.google.com/drive/1dokd_5oujfa0r5ik3sgywjljteo2qlxo", "?", "usp=sharing", ")", "(", "see", "[", "notebook", "]", "(", "http", ":", "//colab.research.google.com/drive/1vhf8yuefqha3y3cpthn6q9evcii9eyzs", "?", "usp=sharing", ")", "instead", "example", "streaming", "dataset", ")", "." ], [ "quantization quantization another method reducing memory requirement model representing data lower precision .", "combined peft method make even easier train load llm inference .", "* learn finetune [ meta-llama/llama-2-7b-hf ] ( http : //huggingface.co/meta-llama/llama-2-7b-hf ) qlora [ trl ] ( http : //huggingface.co/docs/trl/index ) library 16gb gpu [ finetune llm consumer hardware using tool pytorch hugging face ecosystem ] ( http : //pytorch.org/blog/finetune-llms/ ) blog post .", "* learn finetune [ openai/whisper-large-v2 ] ( http : //huggingface.co/openai/whisper-large-v2 ) model multilingual automatic speech recognition lora 8-bit quantization [ notebook ] ( http : //colab.research.google.com/drive/1dokd_5oujfa0r5ik3sgywjljteo2qlxo ? usp=sharing ) ( see [ notebook ] ( http : //colab.research.google.com/drive/1vhf8yuefqha3y3cpthn6q9evcii9eyzs ? usp=sharing ) instead example streaming dataset ) ." ] ]
[ [ "quantization", "quantization", "another", "method", "reducing", "memory", "requirement", "model", "representing", "data", "lower", "precision", ".", "combined", "peft", "method", "make", "even", "easier", "train", "load", "llm", "inference", ".", "*", "learn", "finetune", "[", "meta-llama/llama-2-7b-hf", "]", "(", "http", ":", "//huggingface.co/meta-llama/llama-2-7b-hf", ")", "qlora", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", "16gb", "gpu", "[", "finetune", "llm", "consumer", "hardware", "using", "tool", "pytorch", "hugging", "face", "ecosystem", "]", "(", "http", ":", "//pytorch.org/blog/finetune-llms/", ")", "blog", "post", ".", "*", "learn", "finetune", "[", "openai/whisper-large-v2", "]", "(", "http", ":", "//huggingface.co/openai/whisper-large-v2", ")", "model", "multilingual", "automatic", "speech", "recognition", "lora", "8-bit", "quantization", "[", "notebook", "]", "(", "http", ":", "//colab.research.google.com/drive/1dokd_5oujfa0r5ik3sgywjljteo2qlxo", "?", "usp=sharing", ")", "(", "see", "[", "notebook", "]", "(", "http", ":", "//colab.research.google.com/drive/1vhf8yuefqha3y3cpthn6q9evcii9eyzs", "?", "usp=sharing", ")", "instead", "example", "streaming", "dataset", ")", "." ], [ "quantization quantization another method reducing memory requirement model representing data lower precision .", "combined peft method make even easier train load llm inference .", "* learn finetune [ meta-llama/llama-2-7b-hf ] ( http : //huggingface.co/meta-llama/llama-2-7b-hf ) qlora [ trl ] ( http : //huggingface.co/docs/trl/index ) library 16gb gpu [ finetune llm consumer hardware using tool pytorch hugging face ecosystem ] ( http : //pytorch.org/blog/finetune-llms/ ) blog post .", "* learn finetune [ openai/whisper-large-v2 ] ( http : //huggingface.co/openai/whisper-large-v2 ) model multilingual automatic speech recognition lora 8-bit quantization [ notebook ] ( http : //colab.research.google.com/drive/1dokd_5oujfa0r5ik3sgywjljteo2qlxo ? usp=sharing ) ( see [ notebook ] ( http : //colab.research.google.com/drive/1vhf8yuefqha3y3cpthn6q9evcii9eyzs ? usp=sharing ) instead example streaming dataset ) ." ] ]
Quantization Quantization is another method for reducing the memory requirements of a model by representing the data in a lower precision. It can be combined with PEFT methods to make it even easier to train and load LLMs for inference. * Learn how to finetune [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) with QLoRA and the [TRL](https://huggingface.co/docs/trl/index) library on a 16GB GPU in the [Finetune LLMs on your own consumer hardware using tools from PyTorch and Hugging Face ecosystem](https://pytorch.org/blog/finetune-llms/) blog post. * Learn how to finetune a [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) model for multilingual automatic speech recognition with LoRA and 8-bit quantization in this [notebook](https://colab.research.google.com/drive/1DOkD_5OUjFa0r5Ik3SgywJLJtEo2qLxO?usp=sharing) (see this [notebook](https://colab.research.google.com/drive/1vhF8yueFqha3Y3CpTHN6q9EVcII9EYzs?usp=sharing) instead for an example of streaming a dataset).
https://github.com/huggingface/peft
-1
[ "adapter", "diffusion", "llm", "lora", "parameter-efficient-learning", "python", "pytorch", "transformers" ]
https://raw.githubusercontent.com/huggingface/peft/main/README.md
[ [ "accelerate", "[", "accelerate", "]", "(", "http", ":", "//huggingface.co/docs/accelerate/index", ")", "library", "distributed", "training", "inference", "various", "training", "setup", "hardware", "(", "gpus", ",", "tpus", ",", "apple", "silicon", ",", "etc", ".", ")", ".", "peft", "model", "work", "accelerate", "box", ",", "making", "really", "convenient", "train", "really", "large", "model", "use", "inference", "consumer", "hardware", "limited", "resource", "." ], [ "accelerate [ accelerate ] ( http : //huggingface.co/docs/accelerate/index ) library distributed training inference various training setup hardware ( gpus , tpus , apple silicon , etc . ) .", "peft model work accelerate box , making really convenient train really large model use inference consumer hardware limited resource ." ] ]
[ [ "accelerate", "[", "accelerate", "]", "(", "http", ":", "//huggingface.co/docs/accelerate/index", ")", "library", "distributed", "training", "inference", "various", "training", "setup", "hardware", "(", "gpus", ",", "tpus", ",", "apple", "silicon", ",", "etc", ".", ")", ".", "peft", "model", "work", "accelerate", "box", ",", "making", "really", "convenient", "train", "really", "large", "model", "use", "inference", "consumer", "hardware", "limited", "resource", "." ], [ "accelerate [ accelerate ] ( http : //huggingface.co/docs/accelerate/index ) library distributed training inference various training setup hardware ( gpus , tpus , apple silicon , etc . ) .", "peft model work accelerate box , making really convenient train really large model use inference consumer hardware limited resource ." ] ]
Accelerate [Accelerate](https://huggingface.co/docs/accelerate/index) is a library for distributed training and inference on various training setups and hardware (GPUs, TPUs, Apple Silicon, etc.). PEFT models work with Accelerate out of the box, making it really convenient to train really large models or use them for inference on consumer hardware with limited resources.
https://github.com/huggingface/peft
-1
[ "adapter", "diffusion", "llm", "lora", "parameter-efficient-learning", "python", "pytorch", "transformers" ]
https://raw.githubusercontent.com/huggingface/peft/main/README.md
[ [ "trl", "peft", "also", "applied", "training", "llm", "rlhf", "component", "ranker", "policy", ".", "get", "started", "reading", ":", "*", "[", "fine-tune", "mistral-7b", "model", "direct", "preference", "optimization", "]", "(", "http", ":", "//towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac", ")", "peft", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", "learn", "direct", "preference", "optimization", "(", "dpo", ")", "method", "apply", "llm", ".", "*", "[", "fine-tuning", "20b", "llm", "rlhf", "24gb", "consumer", "gpu", "]", "(", "http", ":", "//huggingface.co/blog/trl-peft", ")", "peft", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", ",", "try", "[", "gpt2-sentiment_peft.ipynb", "]", "(", "http", ":", "//github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb", ")", "notebook", "optimize", "gpt2", "generate", "positive", "movie", "review", ".", "*", "[", "stackllama", ":", "hands-on", "guide", "train", "llama", "rlhf", "]", "(", "http", ":", "//huggingface.co/blog/stackllama", ")", "peft", ",", "try", "[", "stack_llama/scripts", "]", "(", "http", ":", "//github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts", ")", "supervised", "finetuning", ",", "reward", "modeling", ",", "rl", "finetuning", "." ], [ "trl peft also applied training llm rlhf component ranker policy .", "get started reading : * [ fine-tune mistral-7b model direct preference optimization ] ( http : //towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac ) peft [ trl ] ( http : //huggingface.co/docs/trl/index ) library learn direct preference optimization ( dpo ) method apply llm .", "* [ fine-tuning 20b llm rlhf 24gb consumer gpu ] ( http : //huggingface.co/blog/trl-peft ) peft [ trl ] ( http : //huggingface.co/docs/trl/index ) library , try [ gpt2-sentiment_peft.ipynb ] ( http : //github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb ) notebook optimize gpt2 generate positive movie review .", "* [ stackllama : hands-on guide train llama rlhf ] ( http : //huggingface.co/blog/stackllama ) peft , try [ stack_llama/scripts ] ( http : //github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts ) supervised finetuning , reward modeling , rl finetuning ." ] ]
[ [ "trl", "peft", "also", "applied", "training", "llm", "rlhf", "component", "ranker", "policy", ".", "get", "started", "reading", ":", "*", "[", "fine-tune", "mistral-7b", "model", "direct", "preference", "optimization", "]", "(", "http", ":", "//towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac", ")", "peft", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", "learn", "direct", "preference", "optimization", "(", "dpo", ")", "method", "apply", "llm", ".", "*", "[", "fine-tuning", "20b", "llm", "rlhf", "24gb", "consumer", "gpu", "]", "(", "http", ":", "//huggingface.co/blog/trl-peft", ")", "peft", "[", "trl", "]", "(", "http", ":", "//huggingface.co/docs/trl/index", ")", "library", ",", "try", "[", "gpt2-sentiment_peft.ipynb", "]", "(", "http", ":", "//github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb", ")", "notebook", "optimize", "gpt2", "generate", "positive", "movie", "review", ".", "*", "[", "stackllama", ":", "hands-on", "guide", "train", "llama", "rlhf", "]", "(", "http", ":", "//huggingface.co/blog/stackllama", ")", "peft", ",", "try", "[", "stack_llama/scripts", "]", "(", "http", ":", "//github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts", ")", "supervised", "finetuning", ",", "reward", "modeling", ",", "rl", "finetuning", "." ], [ "trl peft also applied training llm rlhf component ranker policy .", "get started reading : * [ fine-tune mistral-7b model direct preference optimization ] ( http : //towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac ) peft [ trl ] ( http : //huggingface.co/docs/trl/index ) library learn direct preference optimization ( dpo ) method apply llm .", "* [ fine-tuning 20b llm rlhf 24gb consumer gpu ] ( http : //huggingface.co/blog/trl-peft ) peft [ trl ] ( http : //huggingface.co/docs/trl/index ) library , try [ gpt2-sentiment_peft.ipynb ] ( http : //github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb ) notebook optimize gpt2 generate positive movie review .", "* [ stackllama : hands-on guide train llama rlhf ] ( http : //huggingface.co/blog/stackllama ) peft , try [ stack_llama/scripts ] ( http : //github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts ) supervised finetuning , reward modeling , rl finetuning ." ] ]
TRL PEFT can also be applied to training LLMs with RLHF components such as the ranker and policy. Get started by reading: * [Fine-tune a Mistral-7b model with Direct Preference Optimization](https://towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library to learn more about the Direct Preference Optimization (DPO) method and how to apply it to a LLM. * [Fine-tuning 20B LLMs with RLHF on a 24GB consumer GPU](https://huggingface.co/blog/trl-peft) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library, and then try out the [gpt2-sentiment_peft.ipynb](https://github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb) notebook to optimize GPT2 to generate positive movie reviews. * [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama) with PEFT, and then try out the [stack_llama/scripts](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts) for supervised finetuning, reward modeling, and RL finetuning.
https://github.com/huggingface/peft
-1
[ "adapter", "diffusion", "llm", "lora", "parameter-efficient-learning", "python", "pytorch", "transformers" ]
https://raw.githubusercontent.com/hiyouga/LLaMA-Factory/main/README.md
[ [ "llama", "board", ":", "one-stop", "web", "ui", "getting", "started", "llama", "factory", "preview", "llama", "board", "*", "*", "[", "🤗", "space", "]", "(", "http", ":", "//huggingface.co/spaces/hiyouga/llama-board", ")", "*", "*", "*", "*", "[", "modelscope", "]", "(", "http", ":", "//modelscope.cn/studios/hiyouga/llama-board", ")", "*", "*", ".", "launch", "llama", "board", "via", "`", "cuda_visible_devices=0", "python", "src/train_web.py", "`", ".", "(", "multiple", "gpus", "supported", "yet", "mode", ")", "example", "altering", "self-cognition", "instruction-tuned", "language", "model", "within", "10", "minute", "single", "gpu", ".", "http", ":", "//github.com/hiyouga/llama-factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1" ], [ "llama board : one-stop web ui getting started llama factory preview llama board * * [ 🤗 space ] ( http : //huggingface.co/spaces/hiyouga/llama-board ) * * * * [ modelscope ] ( http : //modelscope.cn/studios/hiyouga/llama-board ) * * .", "launch llama board via ` cuda_visible_devices=0 python src/train_web.py ` .", "( multiple gpus supported yet mode ) example altering self-cognition instruction-tuned language model within 10 minute single gpu .", "http : //github.com/hiyouga/llama-factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1" ] ]
[ [ "llama", "board", ":", "one-stop", "web", "ui", "getting", "started", "llama", "factory", "preview", "llama", "board", "*", "*", "[", "🤗", "space", "]", "(", "http", ":", "//huggingface.co/spaces/hiyouga/llama-board", ")", "*", "*", "*", "*", "[", "modelscope", "]", "(", "http", ":", "//modelscope.cn/studios/hiyouga/llama-board", ")", "*", "*", ".", "launch", "llama", "board", "via", "`", "cuda_visible_devices=0", "python", "src/train_web.py", "`", ".", "(", "multiple", "gpus", "supported", "yet", "mode", ")", "example", "altering", "self-cognition", "instruction-tuned", "language", "model", "within", "10", "minute", "single", "gpu", ".", "http", ":", "//github.com/hiyouga/llama-factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1" ], [ "llama board : one-stop web ui getting started llama factory preview llama board * * [ 🤗 space ] ( http : //huggingface.co/spaces/hiyouga/llama-board ) * * * * [ modelscope ] ( http : //modelscope.cn/studios/hiyouga/llama-board ) * * .", "launch llama board via ` cuda_visible_devices=0 python src/train_web.py ` .", "( multiple gpus supported yet mode ) example altering self-cognition instruction-tuned language model within 10 minute single gpu .", "http : //github.com/hiyouga/llama-factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1" ] ]
LLaMA Board: A One-stop Web UI for Getting Started with LLaMA Factory Preview LLaMA Board at **[🤗 Spaces](https://huggingface.co/spaces/hiyouga/LLaMA-Board)** or **[ModelScope](https://modelscope.cn/studios/hiyouga/LLaMA-Board)**. Launch LLaMA Board via `CUDA_VISIBLE_DEVICES=0 python src/train_web.py`. (multiple GPUs are not supported yet in this mode) Here is an example of altering the self-cognition of an instruction-tuned language model within 10 minutes on a single GPU. https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1
https://github.com/hiyouga/LLaMA-Factory
-1
[ "agent", "baichuan", "chatglm", "fine-tuning", "generative-ai", "gpt", "instruction-tuning", "language-model", "large-language-models", "llama", "llm", "lora", "mistral", "mixture-of-experts", "peft", "qlora", "quantization", "qwen", "rlhf", "transformers" ]
https://raw.githubusercontent.com/hiyouga/LLaMA-Factory/main/README.md
[ [ "table", "content", "-", "[", "benchmark", "]", "(", "#", "benchmark", ")", "-", "[", "changelog", "]", "(", "#", "changelog", ")", "-", "[", "supported", "model", "]", "(", "#", "supported-models", ")", "-", "[", "supported", "training", "approach", "]", "(", "#", "supported-training-approaches", ")", "-", "[", "provided", "datasets", "]", "(", "#", "provided-datasets", ")", "-", "[", "requirement", "]", "(", "#", "requirement", ")", "-", "[", "getting", "started", "]", "(", "#", "getting-started", ")", "-", "[", "project", "using", "llama", "factory", "]", "(", "#", "projects-using-llama-factory", ")", "-", "[", "license", "]", "(", "#", "license", ")", "-", "[", "citation", "]", "(", "#", "citation", ")", "-", "[", "acknowledgement", "]", "(", "#", "acknowledgement", ")" ], [ "table content - [ benchmark ] ( # benchmark ) - [ changelog ] ( # changelog ) - [ supported model ] ( # supported-models ) - [ supported training approach ] ( # supported-training-approaches ) - [ provided datasets ] ( # provided-datasets ) - [ requirement ] ( # requirement ) - [ getting started ] ( # getting-started ) - [ project using llama factory ] ( # projects-using-llama-factory ) - [ license ] ( # license ) - [ citation ] ( # citation ) - [ acknowledgement ] ( # acknowledgement )" ] ]
[ [ "table", "content", "-", "[", "benchmark", "]", "(", "#", "benchmark", ")", "-", "[", "changelog", "]", "(", "#", "changelog", ")", "-", "[", "supported", "model", "]", "(", "#", "supported-models", ")", "-", "[", "supported", "training", "approach", "]", "(", "#", "supported-training-approaches", ")", "-", "[", "provided", "datasets", "]", "(", "#", "provided-datasets", ")", "-", "[", "requirement", "]", "(", "#", "requirement", ")", "-", "[", "getting", "started", "]", "(", "#", "getting-started", ")", "-", "[", "project", "using", "llama", "factory", "]", "(", "#", "projects-using-llama-factory", ")", "-", "[", "license", "]", "(", "#", "license", ")", "-", "[", "citation", "]", "(", "#", "citation", ")", "-", "[", "acknowledgement", "]", "(", "#", "acknowledgement", ")" ], [ "table content - [ benchmark ] ( # benchmark ) - [ changelog ] ( # changelog ) - [ supported model ] ( # supported-models ) - [ supported training approach ] ( # supported-training-approaches ) - [ provided datasets ] ( # provided-datasets ) - [ requirement ] ( # requirement ) - [ getting started ] ( # getting-started ) - [ project using llama factory ] ( # projects-using-llama-factory ) - [ license ] ( # license ) - [ citation ] ( # citation ) - [ acknowledgement ] ( # acknowledgement )" ] ]
Table of Contents - [Benchmark](#benchmark) - [Changelog](#changelog) - [Supported Models](#supported-models) - [Supported Training Approaches](#supported-training-approaches) - [Provided Datasets](#provided-datasets) - [Requirement](#requirement) - [Getting Started](#getting-started) - [Projects using LLaMA Factory](#projects-using-llama-factory) - [License](#license) - [Citation](#citation) - [Acknowledgement](#acknowledgement)
https://github.com/hiyouga/LLaMA-Factory
-1
[ "agent", "baichuan", "chatglm", "fine-tuning", "generative-ai", "gpt", "instruction-tuning", "language-model", "large-language-models", "llama", "llm", "lora", "mistral", "mixture-of-experts", "peft", "qlora", "quantization", "qwen", "rlhf", "transformers" ]
https://raw.githubusercontent.com/hiyouga/LLaMA-Factory/main/README.md
[ [ "provided", "datasets", "<", "detail", ">", "<", "summary", ">", "pre-training", "datasets", "<", "/summary", ">", "-", "[", "wiki", "demo", "(", "en", ")", "]", "(", "data/wiki_demo.txt", ")", "-", "[", "refinedweb", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/tiiuae/falcon-refinedweb", ")", "-", "[", "redpajama", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/togethercomputer/redpajama-data-v2", ")", "-", "[", "wikipedia", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/olm/olm-wikipedia-20221220", ")", "-", "[", "wikipedia", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered", ")", "-", "[", "pile", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/eleutherai/pile", ")", "-", "[", "skypile", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/skywork/skypile-150b", ")", "-", "[", "stack", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bigcode/the-stack", ")", "-", "[", "starcoder", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bigcode/starcoderdata", ")", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "supervised", "fine-tuning", "datasets", "<", "/summary", ">", "-", "[", "stanford", "alpaca", "(", "en", ")", "]", "(", "http", ":", "//github.com/tatsu-lab/stanford_alpaca", ")", "-", "[", "stanford", "alpaca", "(", "zh", ")", "]", "(", "http", ":", "//github.com/ymcui/chinese-llama-alpaca", ")", "-", "[", "alpaca", "gpt4", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//github.com/instruction-tuning-with-gpt-4/gpt-4-llm", ")", "-", "[", "self", "cognition", "(", "zh", ")", "]", "(", "data/self_cognition.json", ")", "-", "[", "open", "assistant", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/openassistant/oasst1", ")", "-", "[", "sharegpt", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/qingyisi/alpaca-cot/tree/main/chinese-instruction-collection", ")", "-", "[", "guanaco", "dataset", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/josephuscheung/guanacodataset", ")", "-", "[", "belle", "2m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_2m_cn", ")", "-", "[", "belle", "1m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_1m_cn", ")", "-", "[", "belle", "0.5m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_0.5m_cn", ")", "-", "[", "belle", "dialogue", "0.4m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/generated_chat_0.4m", ")", "-", "[", "belle", "school", "math", "0.25m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/school_math_0.25m", ")", "-", "[", "belle", "multiturn", "chat", "0.8m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/multiturn_chat_0.8m", ")", "-", "[", "ultrachat", "(", "en", ")", "]", "(", "http", ":", "//github.com/thunlp/ultrachat", ")", "-", "[", "lima", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/gair/lima", ")", "-", "[", "openplatypus", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/garage-baind/open-platypus", ")", "-", "[", "codealpaca", "20k", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/sahil2801/codealpaca-20k", ")", "-", "[", "alpaca", "cot", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/qingyisi/alpaca-cot", ")", "-", "[", "openorca", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/open-orca/openorca", ")", "-", "[", "mathinstruct", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/tiger-lab/mathinstruct", ")", "-", "[", "firefly", "1.1m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/yeungnlp/firefly-train-1.1m", ")", "-", "[", "wiki", "qa", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/wiki_qa", ")", "-", "[", "web", "qa", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/suolyer/webqa", ")", "-", "[", "webnovel", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/zxbsmk/webnovel_cn", ")", "-", "[", "nectar", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/berkeley-nest/nectar", ")", "-", "[", "deepctrl", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data", ")", "-", "[", "ad", "gen", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/hasturofficial/adgen", ")", "-", "[", "sharegpt", "hyperfiltered", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k", ")", "-", "[", "sharegpt4", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/shibing624/sharegpt_gpt4", ")", "-", "[", "ultrachat", "200k", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/huggingfaceh4/ultrachat_200k", ")", "-", "[", "agentinstruct", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/thudm/agentinstruct", ")", "-", "[", "lmsys", "chat", "1m", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/lmsys/lmsys-chat-1m", ")", "-", "[", "evol", "instruct", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/wizardlm/wizardlm_evol_instruct_v2_196k", ")", "-", "[", "glaive", "function", "calling", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/glaiveai/glaive-function-calling-v2", ")", "-", "[", "open", "assistant", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/oasst_de", ")", "-", "[", "dolly", "15k", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/dolly-15k_de", ")", "-", "[", "alpaca", "gpt4", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de", ")", "-", "[", "openschnabeltier", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/openschnabeltier_de", ")", "-", "[", "evol", "instruct", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/evol-instruct_de", ")", "-", "[", "dolphin", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/dolphin_de", ")", "-", "[", "booksum", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/booksum_de", ")", "-", "[", "airoboros", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de", ")", "-", "[", "ultrachat", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/ultra-chat_de", ")", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "preference", "datasets", "<", "/summary", ">", "-", "[", "hh-rlhf", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/anthropic/hh-rlhf", ")", "-", "[", "open", "assistant", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/openassistant/oasst1", ")", "-", "[", "gpt-4", "generated", "data", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//github.com/instruction-tuning-with-gpt-4/gpt-4-llm", ")", "-", "[", "nectar", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/berkeley-nest/nectar", ")", "-", "[", "orca", "dpo", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de", ")", "<", "/details", ">", "please", "refer", "[", "data/readme.md", "]", "(", "data/readme.md", ")", "detail", ".", "datasets", "require", "confirmation", "using", ",", "recommend", "logging", "hugging", "face", "account", "using", "command", ".", "``", "`", "bash", "pip", "install", "--", "upgrade", "huggingface_hub", "huggingface-cli", "login", "``", "`" ], [ "provided datasets < detail > < summary > pre-training datasets < /summary > - [ wiki demo ( en ) ] ( data/wiki_demo.txt ) - [ refinedweb ( en ) ] ( http : //huggingface.co/datasets/tiiuae/falcon-refinedweb ) - [ redpajama v2 ( en ) ] ( http : //huggingface.co/datasets/togethercomputer/redpajama-data-v2 ) - [ wikipedia ( en ) ] ( http : //huggingface.co/datasets/olm/olm-wikipedia-20221220 ) - [ wikipedia ( zh ) ] ( http : //huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered ) - [ pile ( en ) ] ( http : //huggingface.co/datasets/eleutherai/pile ) - [ skypile ( zh ) ] ( http : //huggingface.co/datasets/skywork/skypile-150b ) - [ stack ( en ) ] ( http : //huggingface.co/datasets/bigcode/the-stack ) - [ starcoder ( en ) ] ( http : //huggingface.co/datasets/bigcode/starcoderdata ) < /details > < detail > < summary > supervised fine-tuning datasets < /summary > - [ stanford alpaca ( en ) ] ( http : //github.com/tatsu-lab/stanford_alpaca ) - [ stanford alpaca ( zh ) ] ( http : //github.com/ymcui/chinese-llama-alpaca ) - [ alpaca gpt4 ( en & zh ) ] ( http : //github.com/instruction-tuning-with-gpt-4/gpt-4-llm ) - [ self cognition ( zh ) ] ( data/self_cognition.json ) - [ open assistant ( multilingual ) ] ( http : //huggingface.co/datasets/openassistant/oasst1 ) - [ sharegpt ( zh ) ] ( http : //huggingface.co/datasets/qingyisi/alpaca-cot/tree/main/chinese-instruction-collection ) - [ guanaco dataset ( multilingual ) ] ( http : //huggingface.co/datasets/josephuscheung/guanacodataset ) - [ belle 2m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_2m_cn ) - [ belle 1m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_1m_cn ) - [ belle 0.5m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_0.5m_cn ) - [ belle dialogue 0.4m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/generated_chat_0.4m ) - [ belle school math 0.25m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/school_math_0.25m ) - [ belle multiturn chat 0.8m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/multiturn_chat_0.8m ) - [ ultrachat ( en ) ] ( http : //github.com/thunlp/ultrachat ) - [ lima ( en ) ] ( http : //huggingface.co/datasets/gair/lima ) - [ openplatypus ( en ) ] ( http : //huggingface.co/datasets/garage-baind/open-platypus ) - [ codealpaca 20k ( en ) ] ( http : //huggingface.co/datasets/sahil2801/codealpaca-20k ) - [ alpaca cot ( multilingual ) ] ( http : //huggingface.co/datasets/qingyisi/alpaca-cot ) - [ openorca ( en ) ] ( http : //huggingface.co/datasets/open-orca/openorca ) - [ mathinstruct ( en ) ] ( http : //huggingface.co/datasets/tiger-lab/mathinstruct ) - [ firefly 1.1m ( zh ) ] ( http : //huggingface.co/datasets/yeungnlp/firefly-train-1.1m ) - [ wiki qa ( en ) ] ( http : //huggingface.co/datasets/wiki_qa ) - [ web qa ( zh ) ] ( http : //huggingface.co/datasets/suolyer/webqa ) - [ webnovel ( zh ) ] ( http : //huggingface.co/datasets/zxbsmk/webnovel_cn ) - [ nectar ( en ) ] ( http : //huggingface.co/datasets/berkeley-nest/nectar ) - [ deepctrl ( en & zh ) ] ( http : //www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data ) - [ ad gen ( zh ) ] ( http : //huggingface.co/datasets/hasturofficial/adgen ) - [ sharegpt hyperfiltered ( en ) ] ( http : //huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k ) - [ sharegpt4 ( en & zh ) ] ( http : //huggingface.co/datasets/shibing624/sharegpt_gpt4 ) - [ ultrachat 200k ( en ) ] ( http : //huggingface.co/datasets/huggingfaceh4/ultrachat_200k ) - [ agentinstruct ( en ) ] ( http : //huggingface.co/datasets/thudm/agentinstruct ) - [ lmsys chat 1m ( en ) ] ( http : //huggingface.co/datasets/lmsys/lmsys-chat-1m ) - [ evol instruct v2 ( en ) ] ( http : //huggingface.co/datasets/wizardlm/wizardlm_evol_instruct_v2_196k ) - [ glaive function calling v2 ( en ) ] ( http : //huggingface.co/datasets/glaiveai/glaive-function-calling-v2 ) - [ open assistant ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/oasst_de ) - [ dolly 15k ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/dolly-15k_de ) - [ alpaca gpt4 ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de ) - [ openschnabeltier ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/openschnabeltier_de ) - [ evol instruct ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/evol-instruct_de ) - [ dolphin ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/dolphin_de ) - [ booksum ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/booksum_de ) - [ airoboros ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de ) - [ ultrachat ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/ultra-chat_de ) < /details > < detail > < summary > preference datasets < /summary > - [ hh-rlhf ( en ) ] ( http : //huggingface.co/datasets/anthropic/hh-rlhf ) - [ open assistant ( multilingual ) ] ( http : //huggingface.co/datasets/openassistant/oasst1 ) - [ gpt-4 generated data ( en & zh ) ] ( http : //github.com/instruction-tuning-with-gpt-4/gpt-4-llm ) - [ nectar ( en ) ] ( http : //huggingface.co/datasets/berkeley-nest/nectar ) - [ orca dpo ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de ) < /details > please refer [ data/readme.md ] ( data/readme.md ) detail .", "datasets require confirmation using , recommend logging hugging face account using command .", "`` ` bash pip install -- upgrade huggingface_hub huggingface-cli login `` `" ] ]
[ [ "provided", "datasets", "<", "detail", ">", "<", "summary", ">", "pre-training", "datasets", "<", "/summary", ">", "-", "[", "wiki", "demo", "(", "en", ")", "]", "(", "data/wiki_demo.txt", ")", "-", "[", "refinedweb", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/tiiuae/falcon-refinedweb", ")", "-", "[", "redpajama", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/togethercomputer/redpajama-data-v2", ")", "-", "[", "wikipedia", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/olm/olm-wikipedia-20221220", ")", "-", "[", "wikipedia", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered", ")", "-", "[", "pile", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/eleutherai/pile", ")", "-", "[", "skypile", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/skywork/skypile-150b", ")", "-", "[", "stack", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bigcode/the-stack", ")", "-", "[", "starcoder", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bigcode/starcoderdata", ")", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "supervised", "fine-tuning", "datasets", "<", "/summary", ">", "-", "[", "stanford", "alpaca", "(", "en", ")", "]", "(", "http", ":", "//github.com/tatsu-lab/stanford_alpaca", ")", "-", "[", "stanford", "alpaca", "(", "zh", ")", "]", "(", "http", ":", "//github.com/ymcui/chinese-llama-alpaca", ")", "-", "[", "alpaca", "gpt4", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//github.com/instruction-tuning-with-gpt-4/gpt-4-llm", ")", "-", "[", "self", "cognition", "(", "zh", ")", "]", "(", "data/self_cognition.json", ")", "-", "[", "open", "assistant", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/openassistant/oasst1", ")", "-", "[", "sharegpt", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/qingyisi/alpaca-cot/tree/main/chinese-instruction-collection", ")", "-", "[", "guanaco", "dataset", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/josephuscheung/guanacodataset", ")", "-", "[", "belle", "2m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_2m_cn", ")", "-", "[", "belle", "1m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_1m_cn", ")", "-", "[", "belle", "0.5m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/train_0.5m_cn", ")", "-", "[", "belle", "dialogue", "0.4m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/generated_chat_0.4m", ")", "-", "[", "belle", "school", "math", "0.25m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/school_math_0.25m", ")", "-", "[", "belle", "multiturn", "chat", "0.8m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/bellegroup/multiturn_chat_0.8m", ")", "-", "[", "ultrachat", "(", "en", ")", "]", "(", "http", ":", "//github.com/thunlp/ultrachat", ")", "-", "[", "lima", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/gair/lima", ")", "-", "[", "openplatypus", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/garage-baind/open-platypus", ")", "-", "[", "codealpaca", "20k", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/sahil2801/codealpaca-20k", ")", "-", "[", "alpaca", "cot", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/qingyisi/alpaca-cot", ")", "-", "[", "openorca", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/open-orca/openorca", ")", "-", "[", "mathinstruct", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/tiger-lab/mathinstruct", ")", "-", "[", "firefly", "1.1m", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/yeungnlp/firefly-train-1.1m", ")", "-", "[", "wiki", "qa", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/wiki_qa", ")", "-", "[", "web", "qa", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/suolyer/webqa", ")", "-", "[", "webnovel", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/zxbsmk/webnovel_cn", ")", "-", "[", "nectar", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/berkeley-nest/nectar", ")", "-", "[", "deepctrl", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data", ")", "-", "[", "ad", "gen", "(", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/hasturofficial/adgen", ")", "-", "[", "sharegpt", "hyperfiltered", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k", ")", "-", "[", "sharegpt4", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//huggingface.co/datasets/shibing624/sharegpt_gpt4", ")", "-", "[", "ultrachat", "200k", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/huggingfaceh4/ultrachat_200k", ")", "-", "[", "agentinstruct", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/thudm/agentinstruct", ")", "-", "[", "lmsys", "chat", "1m", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/lmsys/lmsys-chat-1m", ")", "-", "[", "evol", "instruct", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/wizardlm/wizardlm_evol_instruct_v2_196k", ")", "-", "[", "glaive", "function", "calling", "v2", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/glaiveai/glaive-function-calling-v2", ")", "-", "[", "open", "assistant", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/oasst_de", ")", "-", "[", "dolly", "15k", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/dolly-15k_de", ")", "-", "[", "alpaca", "gpt4", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de", ")", "-", "[", "openschnabeltier", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/openschnabeltier_de", ")", "-", "[", "evol", "instruct", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/evol-instruct_de", ")", "-", "[", "dolphin", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/dolphin_de", ")", "-", "[", "booksum", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/booksum_de", ")", "-", "[", "airoboros", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de", ")", "-", "[", "ultrachat", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/ultra-chat_de", ")", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "preference", "datasets", "<", "/summary", ">", "-", "[", "hh-rlhf", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/anthropic/hh-rlhf", ")", "-", "[", "open", "assistant", "(", "multilingual", ")", "]", "(", "http", ":", "//huggingface.co/datasets/openassistant/oasst1", ")", "-", "[", "gpt-4", "generated", "data", "(", "en", "&", "zh", ")", "]", "(", "http", ":", "//github.com/instruction-tuning-with-gpt-4/gpt-4-llm", ")", "-", "[", "nectar", "(", "en", ")", "]", "(", "http", ":", "//huggingface.co/datasets/berkeley-nest/nectar", ")", "-", "[", "orca", "dpo", "(", "de", ")", "]", "(", "http", ":", "//huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de", ")", "<", "/details", ">", "please", "refer", "[", "data/readme.md", "]", "(", "data/readme.md", ")", "detail", ".", "datasets", "require", "confirmation", "using", ",", "recommend", "logging", "hugging", "face", "account", "using", "command", ".", "``", "`", "bash", "pip", "install", "--", "upgrade", "huggingface_hub", "huggingface-cli", "login", "``", "`" ], [ "provided datasets < detail > < summary > pre-training datasets < /summary > - [ wiki demo ( en ) ] ( data/wiki_demo.txt ) - [ refinedweb ( en ) ] ( http : //huggingface.co/datasets/tiiuae/falcon-refinedweb ) - [ redpajama v2 ( en ) ] ( http : //huggingface.co/datasets/togethercomputer/redpajama-data-v2 ) - [ wikipedia ( en ) ] ( http : //huggingface.co/datasets/olm/olm-wikipedia-20221220 ) - [ wikipedia ( zh ) ] ( http : //huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered ) - [ pile ( en ) ] ( http : //huggingface.co/datasets/eleutherai/pile ) - [ skypile ( zh ) ] ( http : //huggingface.co/datasets/skywork/skypile-150b ) - [ stack ( en ) ] ( http : //huggingface.co/datasets/bigcode/the-stack ) - [ starcoder ( en ) ] ( http : //huggingface.co/datasets/bigcode/starcoderdata ) < /details > < detail > < summary > supervised fine-tuning datasets < /summary > - [ stanford alpaca ( en ) ] ( http : //github.com/tatsu-lab/stanford_alpaca ) - [ stanford alpaca ( zh ) ] ( http : //github.com/ymcui/chinese-llama-alpaca ) - [ alpaca gpt4 ( en & zh ) ] ( http : //github.com/instruction-tuning-with-gpt-4/gpt-4-llm ) - [ self cognition ( zh ) ] ( data/self_cognition.json ) - [ open assistant ( multilingual ) ] ( http : //huggingface.co/datasets/openassistant/oasst1 ) - [ sharegpt ( zh ) ] ( http : //huggingface.co/datasets/qingyisi/alpaca-cot/tree/main/chinese-instruction-collection ) - [ guanaco dataset ( multilingual ) ] ( http : //huggingface.co/datasets/josephuscheung/guanacodataset ) - [ belle 2m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_2m_cn ) - [ belle 1m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_1m_cn ) - [ belle 0.5m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/train_0.5m_cn ) - [ belle dialogue 0.4m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/generated_chat_0.4m ) - [ belle school math 0.25m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/school_math_0.25m ) - [ belle multiturn chat 0.8m ( zh ) ] ( http : //huggingface.co/datasets/bellegroup/multiturn_chat_0.8m ) - [ ultrachat ( en ) ] ( http : //github.com/thunlp/ultrachat ) - [ lima ( en ) ] ( http : //huggingface.co/datasets/gair/lima ) - [ openplatypus ( en ) ] ( http : //huggingface.co/datasets/garage-baind/open-platypus ) - [ codealpaca 20k ( en ) ] ( http : //huggingface.co/datasets/sahil2801/codealpaca-20k ) - [ alpaca cot ( multilingual ) ] ( http : //huggingface.co/datasets/qingyisi/alpaca-cot ) - [ openorca ( en ) ] ( http : //huggingface.co/datasets/open-orca/openorca ) - [ mathinstruct ( en ) ] ( http : //huggingface.co/datasets/tiger-lab/mathinstruct ) - [ firefly 1.1m ( zh ) ] ( http : //huggingface.co/datasets/yeungnlp/firefly-train-1.1m ) - [ wiki qa ( en ) ] ( http : //huggingface.co/datasets/wiki_qa ) - [ web qa ( zh ) ] ( http : //huggingface.co/datasets/suolyer/webqa ) - [ webnovel ( zh ) ] ( http : //huggingface.co/datasets/zxbsmk/webnovel_cn ) - [ nectar ( en ) ] ( http : //huggingface.co/datasets/berkeley-nest/nectar ) - [ deepctrl ( en & zh ) ] ( http : //www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data ) - [ ad gen ( zh ) ] ( http : //huggingface.co/datasets/hasturofficial/adgen ) - [ sharegpt hyperfiltered ( en ) ] ( http : //huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k ) - [ sharegpt4 ( en & zh ) ] ( http : //huggingface.co/datasets/shibing624/sharegpt_gpt4 ) - [ ultrachat 200k ( en ) ] ( http : //huggingface.co/datasets/huggingfaceh4/ultrachat_200k ) - [ agentinstruct ( en ) ] ( http : //huggingface.co/datasets/thudm/agentinstruct ) - [ lmsys chat 1m ( en ) ] ( http : //huggingface.co/datasets/lmsys/lmsys-chat-1m ) - [ evol instruct v2 ( en ) ] ( http : //huggingface.co/datasets/wizardlm/wizardlm_evol_instruct_v2_196k ) - [ glaive function calling v2 ( en ) ] ( http : //huggingface.co/datasets/glaiveai/glaive-function-calling-v2 ) - [ open assistant ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/oasst_de ) - [ dolly 15k ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/dolly-15k_de ) - [ alpaca gpt4 ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de ) - [ openschnabeltier ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/openschnabeltier_de ) - [ evol instruct ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/evol-instruct_de ) - [ dolphin ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/dolphin_de ) - [ booksum ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/booksum_de ) - [ airoboros ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de ) - [ ultrachat ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/ultra-chat_de ) < /details > < detail > < summary > preference datasets < /summary > - [ hh-rlhf ( en ) ] ( http : //huggingface.co/datasets/anthropic/hh-rlhf ) - [ open assistant ( multilingual ) ] ( http : //huggingface.co/datasets/openassistant/oasst1 ) - [ gpt-4 generated data ( en & zh ) ] ( http : //github.com/instruction-tuning-with-gpt-4/gpt-4-llm ) - [ nectar ( en ) ] ( http : //huggingface.co/datasets/berkeley-nest/nectar ) - [ orca dpo ( de ) ] ( http : //huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de ) < /details > please refer [ data/readme.md ] ( data/readme.md ) detail .", "datasets require confirmation using , recommend logging hugging face account using command .", "`` ` bash pip install -- upgrade huggingface_hub huggingface-cli login `` `" ] ]
Provided Datasets <details><summary>Pre-training datasets</summary> - [Wiki Demo (en)](data/wiki_demo.txt) - [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) - [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2) - [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220) - [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered) - [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile) - [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B) - [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack) - [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata) </details> <details><summary>Supervised fine-tuning datasets</summary> - [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca) - [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca) - [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) - [Self Cognition (zh)](data/self_cognition.json) - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1) - [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection) - [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset) - [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN) - [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN) - [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN) - [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M) - [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M) - [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M) - [UltraChat (en)](https://github.com/thunlp/UltraChat) - [LIMA (en)](https://huggingface.co/datasets/GAIR/lima) - [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus) - [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k) - [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT) - [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca) - [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct) - [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M) - [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa) - [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa) - [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn) - [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar) - [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data) - [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen) - [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k) - [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4) - [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k) - [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct) - [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m) - [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k) - [Glaive Function Calling V2 (en)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2) - [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de) - [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de) - [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de) - [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de) - [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de) - [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de) - [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de) - [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de) - [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de) </details> <details><summary>Preference datasets</summary> - [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf) - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1) - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) - [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar) - [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de) </details> Please refer to [data/README.md](data/README.md) for details. Some datasets require confirmation before using them, so we recommend logging in with your Hugging Face account using these commands. ```bash pip install --upgrade huggingface_hub huggingface-cli login ```
https://github.com/hiyouga/LLaMA-Factory
0
[ "agent", "baichuan", "chatglm", "fine-tuning", "generative-ai", "gpt", "instruction-tuning", "language-model", "large-language-models", "llama", "llm", "lora", "mistral", "mixture-of-experts", "peft", "qlora", "quantization", "qwen", "rlhf", "transformers" ]
https://raw.githubusercontent.com/hiyouga/LLaMA-Factory/main/README.md
[ [ "getting", "started" ], [ "getting started" ] ]
[ [ "getting", "started" ], [ "getting started" ] ]
Getting Started
https://github.com/hiyouga/LLaMA-Factory
-1
[ "agent", "baichuan", "chatglm", "fine-tuning", "generative-ai", "gpt", "instruction-tuning", "language-model", "large-language-models", "llama", "llm", "lora", "mistral", "mixture-of-experts", "peft", "qlora", "quantization", "qwen", "rlhf", "transformers" ]
https://raw.githubusercontent.com/hiyouga/LLaMA-Factory/main/README.md
[ [ "dependence", "installation", "(", "optional", ")", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/hiyouga/llama-factory.git", "conda", "create", "-n", "llama_factory", "python=3.10", "conda", "activate", "llama_factory", "cd", "llama-factory", "pip", "install", "-r", "requirements.txt", "``", "`", "want", "enable", "quantized", "lora", "(", "qlora", ")", "window", "platform", ",", "required", "install", "pre-built", "version", "`", "bitsandbytes", "`", "library", ",", "support", "cuda", "11.1", "12.1", ".", "``", "`", "bash", "pip", "install", "http", ":", "//github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl", "``", "`" ], [ "dependence installation ( optional ) `` ` bash git clone http : //github.com/hiyouga/llama-factory.git conda create -n llama_factory python=3.10 conda activate llama_factory cd llama-factory pip install -r requirements.txt `` ` want enable quantized lora ( qlora ) window platform , required install pre-built version ` bitsandbytes ` library , support cuda 11.1 12.1 .", "`` ` bash pip install http : //github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl `` `" ] ]
[ [ "dependence", "installation", "(", "optional", ")", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/hiyouga/llama-factory.git", "conda", "create", "-n", "llama_factory", "python=3.10", "conda", "activate", "llama_factory", "cd", "llama-factory", "pip", "install", "-r", "requirements.txt", "``", "`", "want", "enable", "quantized", "lora", "(", "qlora", ")", "window", "platform", ",", "required", "install", "pre-built", "version", "`", "bitsandbytes", "`", "library", ",", "support", "cuda", "11.1", "12.1", ".", "``", "`", "bash", "pip", "install", "http", ":", "//github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl", "``", "`" ], [ "dependence installation ( optional ) `` ` bash git clone http : //github.com/hiyouga/llama-factory.git conda create -n llama_factory python=3.10 conda activate llama_factory cd llama-factory pip install -r requirements.txt `` ` want enable quantized lora ( qlora ) window platform , required install pre-built version ` bitsandbytes ` library , support cuda 11.1 12.1 .", "`` ` bash pip install http : //github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl `` `" ] ]
Dependence Installation (optional) ```bash git clone https://github.com/hiyouga/LLaMA-Factory.git conda create -n llama_factory python=3.10 conda activate llama_factory cd LLaMA-Factory pip install -r requirements.txt ``` If you want to enable the quantized LoRA (QLoRA) on the Windows platform, you will be required to install a pre-built version of `bitsandbytes` library, which supports CUDA 11.1 to 12.1. ```bash pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl ```
https://github.com/hiyouga/LLaMA-Factory
0
[ "agent", "baichuan", "chatglm", "fine-tuning", "generative-ai", "gpt", "instruction-tuning", "language-model", "large-language-models", "llama", "llm", "lora", "mistral", "mixture-of-experts", "peft", "qlora", "quantization", "qwen", "rlhf", "transformers" ]
https://raw.githubusercontent.com/PaddlePaddle/PaddleNLP/main/README.md
[ [ "pip安装", "``", "`", "shell", "pip", "install", "--", "upgrade", "paddlenlp", "``", "`", "或者可通过以下命令安装最新", "develop", "分支代码:", "``", "`", "shell", "pip", "install", "--", "pre", "--", "upgrade", "paddlenlp", "-f", "http", ":", "//www.paddlepaddle.org.cn/whl/paddlenlp.html", "``", "`", "更多关于paddlepaddle和paddlenlp安装的详细教程请查看", "[", "installation", "]", "(", "./docs/get_started/installation.rst", ")", "。" ], [ "pip安装 `` ` shell pip install -- upgrade paddlenlp `` ` 或者可通过以下命令安装最新 develop 分支代码: `` ` shell pip install -- pre -- upgrade paddlenlp -f http : //www.paddlepaddle.org.cn/whl/paddlenlp.html `` ` 更多关于paddlepaddle和paddlenlp安装的详细教程请查看 [ installation ] ( ./docs/get_started/installation.rst ) 。" ] ]
[ [ "pip安装", "``", "`", "shell", "pip", "install", "--", "upgrade", "paddlenlp", "``", "`", "或者可通过以下命令安装最新", "develop", "分支代码:", "``", "`", "shell", "pip", "install", "--", "pre", "--", "upgrade", "paddlenlp", "-f", "http", ":", "//www.paddlepaddle.org.cn/whl/paddlenlp.html", "``", "`", "更多关于paddlepaddle和paddlenlp安装的详细教程请查看", "[", "installation", "]", "(", "./docs/get_started/installation.rst", ")", "。" ], [ "pip安装 `` ` shell pip install -- upgrade paddlenlp `` ` 或者可通过以下命令安装最新 develop 分支代码: `` ` shell pip install -- pre -- upgrade paddlenlp -f http : //www.paddlepaddle.org.cn/whl/paddlenlp.html `` ` 更多关于paddlepaddle和paddlenlp安装的详细教程请查看 [ installation ] ( ./docs/get_started/installation.rst ) 。" ] ]
pip安装 ```shell pip install --upgrade paddlenlp ``` 或者可通过以下命令安装最新 develop 分支代码: ```shell pip install --pre --upgrade paddlenlp -f https://www.paddlepaddle.org.cn/whl/paddlenlp.html ``` 更多关于PaddlePaddle和PaddleNLP安装的详细教程请查看[Installation](./docs/get_started/installation.rst)。
https://github.com/PaddlePaddle/PaddleNLP
0
[ "bert", "compression", "distributed-training", "document-intelligence", "embedding", "ernie", "information-extraction", "llama", "llm", "neural-search", "nlp", "paddlenlp", "pretrained-models", "question-answering", "search-engine", "semantic-analysis", "sentiment-analysis", "transformers", "uie" ]
https://raw.githubusercontent.com/eosphoros-ai/DB-GPT/main/README.md
[ [ "content", "-", "[", "introduction", "]", "(", "#", "introduction", ")", "-", "[", "install", "]", "(", "#", "install", ")", "-", "[", "feature", "]", "(", "#", "feature", ")", "-", "[", "contribution", "]", "(", "#", "contribution", ")", "-", "[", "contact", "]", "(", "#", "contact-information", ")" ], [ "content - [ introduction ] ( # introduction ) - [ install ] ( # install ) - [ feature ] ( # feature ) - [ contribution ] ( # contribution ) - [ contact ] ( # contact-information )" ] ]
[ [ "content", "-", "[", "introduction", "]", "(", "#", "introduction", ")", "-", "[", "install", "]", "(", "#", "install", ")", "-", "[", "feature", "]", "(", "#", "feature", ")", "-", "[", "contribution", "]", "(", "#", "contribution", ")", "-", "[", "contact", "]", "(", "#", "contact-information", ")" ], [ "content - [ introduction ] ( # introduction ) - [ install ] ( # install ) - [ feature ] ( # feature ) - [ contribution ] ( # contribution ) - [ contact ] ( # contact-information )" ] ]
Contents - [Introduction](#introduction) - [Install](#install) - [Features](#features) - [Contribution](#contribution) - [Contact](#contact-information)
https://github.com/eosphoros-ai/DB-GPT
-1
[ "agents", "bgi", "database", "gpt", "gpt-4", "langchain", "llm", "private", "rag", "security", "vicuna" ]
https://raw.githubusercontent.com/eosphoros-ai/DB-GPT/main/README.md
[ [ "install", "!", "[", "docker", "]", "(", "http", ":", "//img.shields.io/badge/docker-", "%", "230db7ed.svg", "?", "style=for-the-badge", "&", "logo=docker", "&", "logocolor=white", ")", "!", "[", "linux", "]", "(", "http", ":", "//img.shields.io/badge/linux-fcc624", "?", "style=for-the-badge", "&", "logo=linux", "&", "logocolor=black", ")", "!", "[", "macos", "]", "(", "http", ":", "//img.shields.io/badge/mac", "%", "20os-000000", "?", "style=for-the-badge", "&", "logo=macos", "&", "logocolor=f0f0f0", ")", "!", "[", "window", "]", "(", "http", ":", "//img.shields.io/badge/windows-0078d6", "?", "style=for-the-badge", "&", "logo=windows", "&", "logocolor=white", ")", "[", "*", "*", "usage", "tutorial", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/overview", ")", "-", "[", "*", "*", "install", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/installation", ")", "-", "[", "*", "*", "quickstart", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/quickstart", ")", "-", "[", "*", "*", "application", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/operation_manual", ")", "-", "[", "*", "*", "debugging", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/operation_manual/advanced_tutorial/debugging", ")" ], [ "install !", "[ docker ] ( http : //img.shields.io/badge/docker- % 230db7ed.svg ? style=for-the-badge & logo=docker & logocolor=white ) !", "[ linux ] ( http : //img.shields.io/badge/linux-fcc624 ? style=for-the-badge & logo=linux & logocolor=black ) !", "[ macos ] ( http : //img.shields.io/badge/mac % 20os-000000 ? style=for-the-badge & logo=macos & logocolor=f0f0f0 ) !", "[ window ] ( http : //img.shields.io/badge/windows-0078d6 ? style=for-the-badge & logo=windows & logocolor=white ) [ * * usage tutorial * * ] ( http : //docs.dbgpt.site/docs/overview ) - [ * * install * * ] ( http : //docs.dbgpt.site/docs/installation ) - [ * * quickstart * * ] ( http : //docs.dbgpt.site/docs/quickstart ) - [ * * application * * ] ( http : //docs.dbgpt.site/docs/operation_manual ) - [ * * debugging * * ] ( http : //docs.dbgpt.site/docs/operation_manual/advanced_tutorial/debugging )" ] ]
[ [ "install", "!", "[", "docker", "]", "(", "http", ":", "//img.shields.io/badge/docker-", "%", "230db7ed.svg", "?", "style=for-the-badge", "&", "logo=docker", "&", "logocolor=white", ")", "!", "[", "linux", "]", "(", "http", ":", "//img.shields.io/badge/linux-fcc624", "?", "style=for-the-badge", "&", "logo=linux", "&", "logocolor=black", ")", "!", "[", "macos", "]", "(", "http", ":", "//img.shields.io/badge/mac", "%", "20os-000000", "?", "style=for-the-badge", "&", "logo=macos", "&", "logocolor=f0f0f0", ")", "!", "[", "window", "]", "(", "http", ":", "//img.shields.io/badge/windows-0078d6", "?", "style=for-the-badge", "&", "logo=windows", "&", "logocolor=white", ")", "[", "*", "*", "usage", "tutorial", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/overview", ")", "-", "[", "*", "*", "install", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/installation", ")", "-", "[", "*", "*", "quickstart", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/quickstart", ")", "-", "[", "*", "*", "application", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/operation_manual", ")", "-", "[", "*", "*", "debugging", "*", "*", "]", "(", "http", ":", "//docs.dbgpt.site/docs/operation_manual/advanced_tutorial/debugging", ")" ], [ "install !", "[ docker ] ( http : //img.shields.io/badge/docker- % 230db7ed.svg ? style=for-the-badge & logo=docker & logocolor=white ) !", "[ linux ] ( http : //img.shields.io/badge/linux-fcc624 ? style=for-the-badge & logo=linux & logocolor=black ) !", "[ macos ] ( http : //img.shields.io/badge/mac % 20os-000000 ? style=for-the-badge & logo=macos & logocolor=f0f0f0 ) !", "[ window ] ( http : //img.shields.io/badge/windows-0078d6 ? style=for-the-badge & logo=windows & logocolor=white ) [ * * usage tutorial * * ] ( http : //docs.dbgpt.site/docs/overview ) - [ * * install * * ] ( http : //docs.dbgpt.site/docs/installation ) - [ * * quickstart * * ] ( http : //docs.dbgpt.site/docs/quickstart ) - [ * * application * * ] ( http : //docs.dbgpt.site/docs/operation_manual ) - [ * * debugging * * ] ( http : //docs.dbgpt.site/docs/operation_manual/advanced_tutorial/debugging )" ] ]
Install ![Docker](https://img.shields.io/badge/docker-%230db7ed.svg?style=for-the-badge&logo=docker&logoColor=white) ![Linux](https://img.shields.io/badge/Linux-FCC624?style=for-the-badge&logo=linux&logoColor=black) ![macOS](https://img.shields.io/badge/mac%20os-000000?style=for-the-badge&logo=macos&logoColor=F0F0F0) ![Windows](https://img.shields.io/badge/Windows-0078D6?style=for-the-badge&logo=windows&logoColor=white) [**Usage Tutorial**](http://docs.dbgpt.site/docs/overview) - [**Install**](http://docs.dbgpt.site/docs/installation) - [**Quickstart**](http://docs.dbgpt.site/docs/quickstart) - [**Application**](http://docs.dbgpt.site/docs/operation_manual) - [**Debugging**](http://docs.dbgpt.site/docs/operation_manual/advanced_tutorial/debugging)
https://github.com/eosphoros-ai/DB-GPT
-1
[ "agents", "bgi", "database", "gpt", "gpt-4", "langchain", "llm", "private", "rag", "security", "vicuna" ]
https://raw.githubusercontent.com/eosphoros-ai/DB-GPT/main/README.md
[ [ "contribution", "-", "please", "run", "`", "black", ".", "`", "submitting", "code", ".", "-", "check", "detailed", "guideline", "new", "contribution", ",", "please", "refer", "[", "contribute", "]", "(", "http", ":", "//github.com/eosphoros-ai/db-gpt/blob/main/contributing.md", ")" ], [ "contribution - please run ` black . ` submitting code .", "- check detailed guideline new contribution , please refer [ contribute ] ( http : //github.com/eosphoros-ai/db-gpt/blob/main/contributing.md )" ] ]
[ [ "contribution", "-", "please", "run", "`", "black", ".", "`", "submitting", "code", ".", "-", "check", "detailed", "guideline", "new", "contribution", ",", "please", "refer", "[", "contribute", "]", "(", "http", ":", "//github.com/eosphoros-ai/db-gpt/blob/main/contributing.md", ")" ], [ "contribution - please run ` black . ` submitting code .", "- check detailed guideline new contribution , please refer [ contribute ] ( http : //github.com/eosphoros-ai/db-gpt/blob/main/contributing.md )" ] ]
Contribution - Please run `black .` before submitting the code. - To check detailed guidelines for new contributions, please refer [how to contribute](https://github.com/eosphoros-ai/DB-GPT/blob/main/CONTRIBUTING.md)
https://github.com/eosphoros-ai/DB-GPT
-1
[ "agents", "bgi", "database", "gpt", "gpt-4", "langchain", "llm", "private", "rag", "security", "vicuna" ]
https://raw.githubusercontent.com/gventuri/pandas-ai/main/README.md
[ [ "🔧", "quick", "install", "``", "`", "bash", "pip", "install", "pandasai", "``", "`" ], [ "🔧 quick install `` ` bash pip install pandasai `` `" ] ]
[ [ "🔧", "quick", "install", "``", "`", "bash", "pip", "install", "pandasai", "``", "`" ], [ "🔧 quick install `` ` bash pip install pandasai `` `" ] ]
🔧 Quick install ```bash pip install pandasai ```
https://github.com/gventuri/pandas-ai
0
[ "ai", "csv", "data", "data-analysis", "data-science", "gpt-3", "gpt-4", "llm", "pandas", "sql" ]
https://raw.githubusercontent.com/gventuri/pandas-ai/main/README.md
[ [ "🤝", "contributing", "contribution", "welcome", "!", "please", "check", "todos", ",", "feel", "free", "open", "pull", "request", ".", "information", ",", "please", "see", "[", "contributing", "guideline", "]", "(", "contributing.md", ")", ".", "installing", "virtual", "environment", ",", "please", "remember", "install", "`", "pre-commit", "`", "compliant", "standard", ":", "``", "`", "bash", "pre-commit", "install", "``", "`" ], [ "🤝 contributing contribution welcome !", "please check todos , feel free open pull request .", "information , please see [ contributing guideline ] ( contributing.md ) .", "installing virtual environment , please remember install ` pre-commit ` compliant standard : `` ` bash pre-commit install `` `" ] ]
[ [ "🤝", "contributing", "contribution", "welcome", "!", "please", "check", "todos", ",", "feel", "free", "open", "pull", "request", ".", "information", ",", "please", "see", "[", "contributing", "guideline", "]", "(", "contributing.md", ")", ".", "installing", "virtual", "environment", ",", "please", "remember", "install", "`", "pre-commit", "`", "compliant", "standard", ":", "``", "`", "bash", "pre-commit", "install", "``", "`" ], [ "🤝 contributing contribution welcome !", "please check todos , feel free open pull request .", "information , please see [ contributing guideline ] ( contributing.md ) .", "installing virtual environment , please remember install ` pre-commit ` compliant standard : `` ` bash pre-commit install `` `" ] ]
🤝 Contributing Contributions are welcome! Please check out the todos below, and feel free to open a pull request. For more information, please see the [contributing guidelines](CONTRIBUTING.md). After installing the virtual environment, please remember to install `pre-commit` to be compliant with our standards: ```bash pre-commit install ```
https://github.com/gventuri/pandas-ai
-1
[ "ai", "csv", "data", "data-analysis", "data-science", "gpt-3", "gpt-4", "llm", "pandas", "sql" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "h2ogpt", "turn", "★", "⭐", "(", "top-right", "corner", ")", "like", "project", "!", "query", "summarize", "document", "chat", "local", "private", "gpt", "llm", "using", "h2ogpt", ",", "apache", "v2", "open-source", "project", ".", "-", "*", "*", "private", "*", "*", "offline", "database", "document", "[", "(", "pdfs", ",", "excel", ",", "word", ",", "image", ",", "video", "frame", ",", "youtube", ",", "audio", ",", "code", ",", "text", ",", "markdown", ",", "etc", ".", ")", "]", "(", "docs/readme_langchain.md", "#", "supported-datatypes", ")", "-", "*", "*", "persistent", "*", "*", "database", "(", "chroma", ",", "weaviate", ",", "in-memory", "faiss", ")", "using", "accurate", "embeddings", "(", "instructor-large", ",", "all-minilm-l6-v2", ",", "etc", ".", ")", "-", "*", "*", "efficient", "*", "*", "use", "context", "using", "instruct-tuned", "llm", "(", "need", "langchain", "'s", "few-shot", "approach", ")", "-", "*", "*", "parallel", "*", "*", "summarization", "extraction", ",", "reaching", "output", "80", "token", "per", "second", "13b", "llama2", "model", "-", "*", "*", "hyde", "*", "*", "(", "hypothetical", "document", "embeddings", ")", "enhanced", "retrieval", "based", "upon", "llm", "response", "-", "*", "*", "variety", "*", "*", "model", "supported", "(", "llama2", ",", "mistral", ",", "falcon", ",", "vicuna", ",", "wizardlm", ".", "autogptq", ",", "4-bit/8-bit", ",", "lora", ",", "etc", ".", ")", "-", "*", "*", "gpu", "*", "*", "support", "hf", "llama.cpp", "ggml", "model", ",", "*", "*", "cpu", "*", "*", "support", "using", "hf", ",", "llama.cpp", ",", "gpt4all", "model", "-", "*", "*", "attention", "sink", "*", "*", "[", "arbitrarily", "long", "]", "(", "http", ":", "//github.com/tomaarsen/attention_sinks", ")", "generation", "(", "llama-2", ",", "mistral", ",", "mpt", ",", "pythia", ",", "falcon", ",", "etc", ".", ")", "-", "*", "*", "ui", "*", "*", "cli", "streaming", "model", "-", "*", "*", "upload", "*", "*", "*", "*", "view", "*", "*", "document", "ui", "(", "control", "multiple", "collaborative", "personal", "collection", ")", "-", "*", "*", "vision", "llava", "*", "*", "model", "*", "*", "stable", "diffusion", "*", "*", "image", "generation", "-", "*", "*", "voice", "stt", "*", "*", "using", "whisper", "streaming", "audio", "conversion", "-", "*", "*", "voice", "tt", "*", "*", "using", "mit-licensed", "microsoft", "speech", "t5", "multiple", "voice", "streaming", "audio", "conversion", "-", "*", "*", "voice", "tt", "*", "*", "using", "mpl2-licensed", "tt", "including", "voice", "cloning", "streaming", "audio", "conversion", "-", "*", "*", "ai", "assistant", "voice", "control", "mode", "*", "*", "hands-free", "control", "h2ogpt", "chat", "-", "*", "*", "bake-off", "*", "*", "ui", "mode", "many", "model", "time", "-", "*", "*", "easy", "download", "*", "*", "model", "artifact", "control", "model", "like", "llama.cpp", "ui", "-", "*", "*", "authentication", "*", "*", "ui", "user/password", "-", "*", "*", "state", "preservation", "*", "*", "ui", "user/password", "-", "*", "*", "linux", ",", "docker", ",", "macos", ",", "window", "*", "*", "support", "-", "[", "*", "*", "easy", "window", "installer", "*", "*", "]", "(", "#", "windows-1011-64-bit-with-full-document-qa-capability", ")", "window", "10", "64-bit", "(", "cpu/cuda", ")", "-", "[", "*", "*", "easy", "macos", "installer", "*", "*", "]", "(", "#", "macos-cpum1m2-with-full-document-qa-capability", ")", "macos", "(", "cpu/m1/m2", ")", "-", "*", "*", "inference", "server", "*", "*", "support", "(", "hf", "tgi", "server", ",", "vllm", ",", "gradio", ",", "exllama", ",", "replicate", ",", "openai", ",", "azure", "openai", ",", "anthropic", ")", "-", "*", "*", "openai-compliant", "*", "*", "-", "server", "proxy", "api", "(", "h2ogpt", "act", "drop-in-replacement", "openai", "server", ")", "-", "python", "client", "api", "(", "talk", "gradio", "server", ")", "-", "*", "*", "web-search", "*", "*", "integration", "chat", "document", "q/a", "-", "*", "*", "agent", "*", "*", "search", ",", "document", "q/a", ",", "python", "code", ",", "csv", "frame", "(", "experimental", ",", "best", "openai", "currently", ")", "-", "*", "*", "evaluate", "*", "*", "performance", "using", "reward", "model", "-", "*", "*", "quality", "*", "*", "maintained", "1000", "unit", "integration", "test", "taking", "4", "gpu-hours" ], [ "h2ogpt turn ★ ⭐ ( top-right corner ) like project !", "query summarize document chat local private gpt llm using h2ogpt , apache v2 open-source project .", "- * * private * * offline database document [ ( pdfs , excel , word , image , video frame , youtube , audio , code , text , markdown , etc .", ") ] ( docs/readme_langchain.md # supported-datatypes ) - * * persistent * * database ( chroma , weaviate , in-memory faiss ) using accurate embeddings ( instructor-large , all-minilm-l6-v2 , etc . )", "- * * efficient * * use context using instruct-tuned llm ( need langchain 's few-shot approach ) - * * parallel * * summarization extraction , reaching output 80 token per second 13b llama2 model - * * hyde * * ( hypothetical document embeddings ) enhanced retrieval based upon llm response - * * variety * * model supported ( llama2 , mistral , falcon , vicuna , wizardlm .", "autogptq , 4-bit/8-bit , lora , etc . )", "- * * gpu * * support hf llama.cpp ggml model , * * cpu * * support using hf , llama.cpp , gpt4all model - * * attention sink * * [ arbitrarily long ] ( http : //github.com/tomaarsen/attention_sinks ) generation ( llama-2 , mistral , mpt , pythia , falcon , etc . )", "- * * ui * * cli streaming model - * * upload * * * * view * * document ui ( control multiple collaborative personal collection ) - * * vision llava * * model * * stable diffusion * * image generation - * * voice stt * * using whisper streaming audio conversion - * * voice tt * * using mit-licensed microsoft speech t5 multiple voice streaming audio conversion - * * voice tt * * using mpl2-licensed tt including voice cloning streaming audio conversion - * * ai assistant voice control mode * * hands-free control h2ogpt chat - * * bake-off * * ui mode many model time - * * easy download * * model artifact control model like llama.cpp ui - * * authentication * * ui user/password - * * state preservation * * ui user/password - * * linux , docker , macos , window * * support - [ * * easy window installer * * ] ( # windows-1011-64-bit-with-full-document-qa-capability ) window 10 64-bit ( cpu/cuda ) - [ * * easy macos installer * * ] ( # macos-cpum1m2-with-full-document-qa-capability ) macos ( cpu/m1/m2 ) - * * inference server * * support ( hf tgi server , vllm , gradio , exllama , replicate , openai , azure openai , anthropic ) - * * openai-compliant * * - server proxy api ( h2ogpt act drop-in-replacement openai server ) - python client api ( talk gradio server ) - * * web-search * * integration chat document q/a - * * agent * * search , document q/a , python code , csv frame ( experimental , best openai currently ) - * * evaluate * * performance using reward model - * * quality * * maintained 1000 unit integration test taking 4 gpu-hours" ] ]
[ [ "h2ogpt", "turn", "★", "⭐", "(", "top-right", "corner", ")", "like", "project", "!", "query", "summarize", "document", "chat", "local", "private", "gpt", "llm", "using", "h2ogpt", ",", "apache", "v2", "open-source", "project", ".", "-", "*", "*", "private", "*", "*", "offline", "database", "document", "[", "(", "pdfs", ",", "excel", ",", "word", ",", "image", ",", "video", "frame", ",", "youtube", ",", "audio", ",", "code", ",", "text", ",", "markdown", ",", "etc", ".", ")", "]", "(", "docs/readme_langchain.md", "#", "supported-datatypes", ")", "-", "*", "*", "persistent", "*", "*", "database", "(", "chroma", ",", "weaviate", ",", "in-memory", "faiss", ")", "using", "accurate", "embeddings", "(", "instructor-large", ",", "all-minilm-l6-v2", ",", "etc", ".", ")", "-", "*", "*", "efficient", "*", "*", "use", "context", "using", "instruct-tuned", "llm", "(", "need", "langchain", "'s", "few-shot", "approach", ")", "-", "*", "*", "parallel", "*", "*", "summarization", "extraction", ",", "reaching", "output", "80", "token", "per", "second", "13b", "llama2", "model", "-", "*", "*", "hyde", "*", "*", "(", "hypothetical", "document", "embeddings", ")", "enhanced", "retrieval", "based", "upon", "llm", "response", "-", "*", "*", "variety", "*", "*", "model", "supported", "(", "llama2", ",", "mistral", ",", "falcon", ",", "vicuna", ",", "wizardlm", ".", "autogptq", ",", "4-bit/8-bit", ",", "lora", ",", "etc", ".", ")", "-", "*", "*", "gpu", "*", "*", "support", "hf", "llama.cpp", "ggml", "model", ",", "*", "*", "cpu", "*", "*", "support", "using", "hf", ",", "llama.cpp", ",", "gpt4all", "model", "-", "*", "*", "attention", "sink", "*", "*", "[", "arbitrarily", "long", "]", "(", "http", ":", "//github.com/tomaarsen/attention_sinks", ")", "generation", "(", "llama-2", ",", "mistral", ",", "mpt", ",", "pythia", ",", "falcon", ",", "etc", ".", ")", "-", "*", "*", "ui", "*", "*", "cli", "streaming", "model", "-", "*", "*", "upload", "*", "*", "*", "*", "view", "*", "*", "document", "ui", "(", "control", "multiple", "collaborative", "personal", "collection", ")", "-", "*", "*", "vision", "llava", "*", "*", "model", "*", "*", "stable", "diffusion", "*", "*", "image", "generation", "-", "*", "*", "voice", "stt", "*", "*", "using", "whisper", "streaming", "audio", "conversion", "-", "*", "*", "voice", "tt", "*", "*", "using", "mit-licensed", "microsoft", "speech", "t5", "multiple", "voice", "streaming", "audio", "conversion", "-", "*", "*", "voice", "tt", "*", "*", "using", "mpl2-licensed", "tt", "including", "voice", "cloning", "streaming", "audio", "conversion", "-", "*", "*", "ai", "assistant", "voice", "control", "mode", "*", "*", "hands-free", "control", "h2ogpt", "chat", "-", "*", "*", "bake-off", "*", "*", "ui", "mode", "many", "model", "time", "-", "*", "*", "easy", "download", "*", "*", "model", "artifact", "control", "model", "like", "llama.cpp", "ui", "-", "*", "*", "authentication", "*", "*", "ui", "user/password", "-", "*", "*", "state", "preservation", "*", "*", "ui", "user/password", "-", "*", "*", "linux", ",", "docker", ",", "macos", ",", "window", "*", "*", "support", "-", "[", "*", "*", "easy", "window", "installer", "*", "*", "]", "(", "#", "windows-1011-64-bit-with-full-document-qa-capability", ")", "window", "10", "64-bit", "(", "cpu/cuda", ")", "-", "[", "*", "*", "easy", "macos", "installer", "*", "*", "]", "(", "#", "macos-cpum1m2-with-full-document-qa-capability", ")", "macos", "(", "cpu/m1/m2", ")", "-", "*", "*", "inference", "server", "*", "*", "support", "(", "hf", "tgi", "server", ",", "vllm", ",", "gradio", ",", "exllama", ",", "replicate", ",", "openai", ",", "azure", "openai", ",", "anthropic", ")", "-", "*", "*", "openai-compliant", "*", "*", "-", "server", "proxy", "api", "(", "h2ogpt", "act", "drop-in-replacement", "openai", "server", ")", "-", "python", "client", "api", "(", "talk", "gradio", "server", ")", "-", "*", "*", "web-search", "*", "*", "integration", "chat", "document", "q/a", "-", "*", "*", "agent", "*", "*", "search", ",", "document", "q/a", ",", "python", "code", ",", "csv", "frame", "(", "experimental", ",", "best", "openai", "currently", ")", "-", "*", "*", "evaluate", "*", "*", "performance", "using", "reward", "model", "-", "*", "*", "quality", "*", "*", "maintained", "1000", "unit", "integration", "test", "taking", "4", "gpu-hours" ], [ "h2ogpt turn ★ ⭐ ( top-right corner ) like project !", "query summarize document chat local private gpt llm using h2ogpt , apache v2 open-source project .", "- * * private * * offline database document [ ( pdfs , excel , word , image , video frame , youtube , audio , code , text , markdown , etc .", ") ] ( docs/readme_langchain.md # supported-datatypes ) - * * persistent * * database ( chroma , weaviate , in-memory faiss ) using accurate embeddings ( instructor-large , all-minilm-l6-v2 , etc . )", "- * * efficient * * use context using instruct-tuned llm ( need langchain 's few-shot approach ) - * * parallel * * summarization extraction , reaching output 80 token per second 13b llama2 model - * * hyde * * ( hypothetical document embeddings ) enhanced retrieval based upon llm response - * * variety * * model supported ( llama2 , mistral , falcon , vicuna , wizardlm .", "autogptq , 4-bit/8-bit , lora , etc . )", "- * * gpu * * support hf llama.cpp ggml model , * * cpu * * support using hf , llama.cpp , gpt4all model - * * attention sink * * [ arbitrarily long ] ( http : //github.com/tomaarsen/attention_sinks ) generation ( llama-2 , mistral , mpt , pythia , falcon , etc . )", "- * * ui * * cli streaming model - * * upload * * * * view * * document ui ( control multiple collaborative personal collection ) - * * vision llava * * model * * stable diffusion * * image generation - * * voice stt * * using whisper streaming audio conversion - * * voice tt * * using mit-licensed microsoft speech t5 multiple voice streaming audio conversion - * * voice tt * * using mpl2-licensed tt including voice cloning streaming audio conversion - * * ai assistant voice control mode * * hands-free control h2ogpt chat - * * bake-off * * ui mode many model time - * * easy download * * model artifact control model like llama.cpp ui - * * authentication * * ui user/password - * * state preservation * * ui user/password - * * linux , docker , macos , window * * support - [ * * easy window installer * * ] ( # windows-1011-64-bit-with-full-document-qa-capability ) window 10 64-bit ( cpu/cuda ) - [ * * easy macos installer * * ] ( # macos-cpum1m2-with-full-document-qa-capability ) macos ( cpu/m1/m2 ) - * * inference server * * support ( hf tgi server , vllm , gradio , exllama , replicate , openai , azure openai , anthropic ) - * * openai-compliant * * - server proxy api ( h2ogpt act drop-in-replacement openai server ) - python client api ( talk gradio server ) - * * web-search * * integration chat document q/a - * * agent * * search , document q/a , python code , csv frame ( experimental , best openai currently ) - * * evaluate * * performance using reward model - * * quality * * maintained 1000 unit integration test taking 4 gpu-hours" ] ]
h2oGPT Turn ★ into ⭐ (top-right corner) if you like the project! Query and summarize your documents or just chat with local private GPT LLMs using h2oGPT, an Apache V2 open-source project. - **Private** offline database of any documents [(PDFs, Excel, Word, Images, Video Frames, Youtube, Audio, Code, Text, MarkDown, etc.)](docs/README_LangChain.md#supported-datatypes) - **Persistent** database (Chroma, Weaviate, or in-memory FAISS) using accurate embeddings (instructor-large, all-MiniLM-L6-v2, etc.) - **Efficient** use of context using instruct-tuned LLMs (no need for LangChain's few-shot approach) - **Parallel** summarization and extraction, reaching an output of 80 tokens per second with the 13B LLaMa2 model - **HYDE** (Hypothetical Document Embeddings) for enhanced retrieval based upon LLM responses - **Variety** of models supported (LLaMa2, Mistral, Falcon, Vicuna, WizardLM. With AutoGPTQ, 4-bit/8-bit, LORA, etc.) - **GPU** support from HF and LLaMa.cpp GGML models, and **CPU** support using HF, LLaMa.cpp, and GPT4ALL models - **Attention Sinks** for [arbitrarily long](https://github.com/tomaarsen/attention_sinks) generation (LLaMa-2, Mistral, MPT, Pythia, Falcon, etc.) - **UI** or CLI with streaming of all models - **Upload** and **View** documents through the UI (control multiple collaborative or personal collections) - **Vision LLaVa** Model and **Stable Diffusion** Image Generation - **Voice STT** using Whisper with streaming audio conversion - **Voice TTS** using MIT-Licensed Microsoft Speech T5 with multiple voices and Streaming audio conversion - **Voice TTS** using MPL2-Licensed TTS including Voice Cloning and Streaming audio conversion - **AI Assistant Voice Control Mode** for hands-free control of h2oGPT chat - **Bake-off** UI mode against many models at the same time - **Easy Download** of model artifacts and control over models like LLaMa.cpp through the UI - **Authentication** in the UI by user/password - **State Preservation** in the UI by user/password - **Linux, Docker, macOS, and Windows** support - [**Easy Windows Installer**](#windows-1011-64-bit-with-full-document-qa-capability) for Windows 10 64-bit (CPU/CUDA) - [**Easy macOS Installer**](#macos-cpum1m2-with-full-document-qa-capability) for macOS (CPU/M1/M2) - **Inference Servers** support (HF TGI server, vLLM, Gradio, ExLLaMa, Replicate, OpenAI, Azure OpenAI, Anthropic) - **OpenAI-compliant** - Server Proxy API (h2oGPT acts as drop-in-replacement to OpenAI server) - Python client API (to talk to Gradio server) - **Web-Search** integration with Chat and Document Q/A - **Agents** for Search, Document Q/A, Python Code, CSV frames (Experimental, best with OpenAI currently) - **Evaluate** performance using reward models - **Quality** maintained with over 1000 unit and integration tests taking over 4 GPU-hours
https://github.com/h2oai/h2ogpt
-1
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "cu118", "use", "export", "pip_extra_index_url=", "''", "http", ":", "//download.pytorch.org/whl/cu118", "http", ":", "//huggingface.github.io/autogptq-index/whl/cu118", "''", "``", "`", "run", "following", "command", "system", ":", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/h2oai/h2ogpt.git", "cd", "h2ogpt", "pip", "install", "-r", "requirements.txt", "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.txt" ], [ "cu118 use export pip_extra_index_url= '' http : //download.pytorch.org/whl/cu118 http : //huggingface.github.io/autogptq-index/whl/cu118 '' `` ` run following command system : `` ` bash git clone http : //github.com/h2oai/h2ogpt.git cd h2ogpt pip install -r requirements.txt pip install -r reqs_optional/requirements_optional_langchain.txt" ] ]
[ [ "cu118", "use", "export", "pip_extra_index_url=", "''", "http", ":", "//download.pytorch.org/whl/cu118", "http", ":", "//huggingface.github.io/autogptq-index/whl/cu118", "''", "``", "`", "run", "following", "command", "system", ":", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/h2oai/h2ogpt.git", "cd", "h2ogpt", "pip", "install", "-r", "requirements.txt", "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.txt" ], [ "cu118 use export pip_extra_index_url= '' http : //download.pytorch.org/whl/cu118 http : //huggingface.github.io/autogptq-index/whl/cu118 '' `` ` run following command system : `` ` bash git clone http : //github.com/h2oai/h2ogpt.git cd h2ogpt pip install -r requirements.txt pip install -r reqs_optional/requirements_optional_langchain.txt" ] ]
for cu118 use export PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cu118 https://huggingface.github.io/autogptq-index/whl/cu118" ``` Then run the following commands on any system: ```bash git clone https://github.com/h2oai/h2ogpt.git cd h2ogpt pip install -r requirements.txt pip install -r reqs_optional/requirements_optional_langchain.txt
https://github.com/h2oai/h2ogpt
0
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "amd", "roc", ",", "comment-out", "except", "correct", "roc", "wheel", "pip", "install", "-r", "reqs_optional/requirements_optional_gpt4all.txt", "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.urls.txt" ], [ "amd roc , comment-out except correct roc wheel pip install -r reqs_optional/requirements_optional_gpt4all.txt pip install -r reqs_optional/requirements_optional_langchain.urls.txt" ] ]
[ [ "amd", "roc", ",", "comment-out", "except", "correct", "roc", "wheel", "pip", "install", "-r", "reqs_optional/requirements_optional_gpt4all.txt", "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.urls.txt" ], [ "amd roc , comment-out except correct roc wheel pip install -r reqs_optional/requirements_optional_gpt4all.txt pip install -r reqs_optional/requirements_optional_langchain.urls.txt" ] ]
for AMD ROC, comment-out all except the correct ROC wheel pip install -r reqs_optional/requirements_optional_gpt4all.txt pip install -r reqs_optional/requirements_optional_langchain.urls.txt
https://github.com/h2oai/h2ogpt
0
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.gpllike.txt", "python", "generate.py", "--", "base_model=thebloke/zephyr-7b-beta-gguf", "--", "prompt_type=zephyr", "--", "max_seq_len=4096", "``", "`", "next", ",", "go", "browser", "visiting", "[", "http", ":", "//127.0.0.1:7860", "]", "(", "http", ":", "//127.0.0.1:7860", ")", "[", "http", ":", "//localhost:7860", "]", "(", "http", ":", "//localhost:7860", ")", ".", "choose", "13b", "better", "model", "7b", ".", "encounter", "issue", "`", "llama-cpp-python", "`", "package", "try", "compile", "fail", ",", "try", "binary", "wheel", "platform", "linked", "detailed", "instruction", ".", "avx1", "amd", "roc", "system", ",", "edit", "`", "reqs_optional/requirements_optional_gpt4all.txt", "`", "choose", "valid", "package", ".", "recommend", "quantized", "model", "small-gpu", "system", ",", "e.g", ".", "[", "llama-2-7b-chat-gguf", "]", "(", "http", ":", "//huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-7b-chat.q6_k.gguf", ")", "9gb+", "gpu", "memory", "larger", "model", "like", "[", "llama-2-13b-chat-gguf", "]", "(", "http", ":", "//huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-13b-chat.q6_k.gguf", ")", "16gb+", "gpu", "memory", ".", "see", "[", "offline", "]", "(", "docs/readme_offline.md", "#", "tldr", ")", "run", "h2ogpt", "offline", ".", "--", "-", "note", "platform", ",", "package", "doctr", ",", "unstructured", ",", "blip", ",", "stable", "diffusion", ",", "etc", ".", "download", "model", "runtime", "appear", "delay", "operation", "ui", ".", "progress", "appears", "console", "log", "." ], [ "pip install -r reqs_optional/requirements_optional_langchain.gpllike.txt python generate.py -- base_model=thebloke/zephyr-7b-beta-gguf -- prompt_type=zephyr -- max_seq_len=4096 `` ` next , go browser visiting [ http : //127.0.0.1:7860 ] ( http : //127.0.0.1:7860 ) [ http : //localhost:7860 ] ( http : //localhost:7860 ) .", "choose 13b better model 7b .", "encounter issue ` llama-cpp-python ` package try compile fail , try binary wheel platform linked detailed instruction .", "avx1 amd roc system , edit ` reqs_optional/requirements_optional_gpt4all.txt ` choose valid package .", "recommend quantized model small-gpu system , e.g .", "[ llama-2-7b-chat-gguf ] ( http : //huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-7b-chat.q6_k.gguf ) 9gb+ gpu memory larger model like [ llama-2-13b-chat-gguf ] ( http : //huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-13b-chat.q6_k.gguf ) 16gb+ gpu memory .", "see [ offline ] ( docs/readme_offline.md # tldr ) run h2ogpt offline .", "-- - note platform , package doctr , unstructured , blip , stable diffusion , etc .", "download model runtime appear delay operation ui .", "progress appears console log ." ] ]
[ [ "pip", "install", "-r", "reqs_optional/requirements_optional_langchain.gpllike.txt", "python", "generate.py", "--", "base_model=thebloke/zephyr-7b-beta-gguf", "--", "prompt_type=zephyr", "--", "max_seq_len=4096", "``", "`", "next", ",", "go", "browser", "visiting", "[", "http", ":", "//127.0.0.1:7860", "]", "(", "http", ":", "//127.0.0.1:7860", ")", "[", "http", ":", "//localhost:7860", "]", "(", "http", ":", "//localhost:7860", ")", ".", "choose", "13b", "better", "model", "7b", ".", "encounter", "issue", "`", "llama-cpp-python", "`", "package", "try", "compile", "fail", ",", "try", "binary", "wheel", "platform", "linked", "detailed", "instruction", ".", "avx1", "amd", "roc", "system", ",", "edit", "`", "reqs_optional/requirements_optional_gpt4all.txt", "`", "choose", "valid", "package", ".", "recommend", "quantized", "model", "small-gpu", "system", ",", "e.g", ".", "[", "llama-2-7b-chat-gguf", "]", "(", "http", ":", "//huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-7b-chat.q6_k.gguf", ")", "9gb+", "gpu", "memory", "larger", "model", "like", "[", "llama-2-13b-chat-gguf", "]", "(", "http", ":", "//huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-13b-chat.q6_k.gguf", ")", "16gb+", "gpu", "memory", ".", "see", "[", "offline", "]", "(", "docs/readme_offline.md", "#", "tldr", ")", "run", "h2ogpt", "offline", ".", "--", "-", "note", "platform", ",", "package", "doctr", ",", "unstructured", ",", "blip", ",", "stable", "diffusion", ",", "etc", ".", "download", "model", "runtime", "appear", "delay", "operation", "ui", ".", "progress", "appears", "console", "log", "." ], [ "pip install -r reqs_optional/requirements_optional_langchain.gpllike.txt python generate.py -- base_model=thebloke/zephyr-7b-beta-gguf -- prompt_type=zephyr -- max_seq_len=4096 `` ` next , go browser visiting [ http : //127.0.0.1:7860 ] ( http : //127.0.0.1:7860 ) [ http : //localhost:7860 ] ( http : //localhost:7860 ) .", "choose 13b better model 7b .", "encounter issue ` llama-cpp-python ` package try compile fail , try binary wheel platform linked detailed instruction .", "avx1 amd roc system , edit ` reqs_optional/requirements_optional_gpt4all.txt ` choose valid package .", "recommend quantized model small-gpu system , e.g .", "[ llama-2-7b-chat-gguf ] ( http : //huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-7b-chat.q6_k.gguf ) 9gb+ gpu memory larger model like [ llama-2-13b-chat-gguf ] ( http : //huggingface.co/thebloke/llama-2-7b-chat-gguf/resolve/main/llama-2-13b-chat.q6_k.gguf ) 16gb+ gpu memory .", "see [ offline ] ( docs/readme_offline.md # tldr ) run h2ogpt offline .", "-- - note platform , package doctr , unstructured , blip , stable diffusion , etc .", "download model runtime appear delay operation ui .", "progress appears console log ." ] ]
pip install -r reqs_optional/requirements_optional_langchain.gpllike.txt python generate.py --base_model=TheBloke/zephyr-7B-beta-GGUF --prompt_type=zephyr --max_seq_len=4096 ``` Next, go to your browser by visiting [http://127.0.0.1:7860](http://127.0.0.1:7860) or [http://localhost:7860](http://localhost:7860). Choose 13B for a better model than 7B. If you encounter issues with `llama-cpp-python` or other packages that try to compile and fail, try binary wheels for your platform as linked in the detailed instructions below. For AVX1 or AMD ROC systems, edit `reqs_optional/requirements_optional_gpt4all.txt` to choose valid packages. We recommend quantized models for most small-GPU systems, e.g. [LLaMa-2-7B-Chat-GGUF](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q6_K.gguf) for 9GB+ GPU memory or larger models like [LLaMa-2-13B-Chat-GGUF](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-13b-chat.Q6_K.gguf) if you have 16GB+ GPU memory. See [Offline](docs/README_offline.md#tldr) for how to run h2oGPT offline. --- Note that for all platforms, some packages such as DocTR, Unstructured, BLIP, Stable Diffusion, etc. download models at runtime that appear to delay operations in the UI. The progress appears in the console logs.
https://github.com/h2oai/h2ogpt
0
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "window", "10/11", "64-bit", "full", "document", "q/a", "capability", "*", "one-click", "installer", "*", "cpu", "gpu", ":", "download", "[", "h2ogpt", "window", "installer", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/jan2024/h2ogpt_0.0.1.exe", ")", "(", "1.3gb", "file", ")", "*", "installed", ",", "feel", "free", "change", "start", "directory", "icon", "`", "%", "homedrive", "%", "\\", "%", "homepath", "%", "`", "(", "e.g", ".", ")", "`", "%", "homedrive", "%", "\\", "%", "homepath", "%", "\\h2ogpt_data", "`", "created", "file", "(", "like", "database", ")", "go", ".", "path", "saved", "relative", "path", ".", "*", "cpu", ":", "click", "h2ogpt", "icon", "start", "menu", ".", "give", "15", "second", "open", "browser", "many", "optional", "package", "included", ".", "default", ",", "browser", "launch", "actual", "local", "ip", "address", ",", "localhost", ".", "*", "gpu", ":", "starting", ",", "run", "following", "command", "(", "replace", "`", "pseud", "`", "user", ")", ":", "``", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "-m", "pip", "uninstall", "-y", "torch", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "-m", "pip", "install", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/torch-2.1.2", "%", "2bcu118-cp310-cp310-win_amd64.whl", "``", "`", "click", "h2ogpt", "icon", "start", "menu", ".", "give", "20", "second", "open", "browser", "many", "optional", "package", "included", ".", "default", ",", "browser", "launch", "actual", "local", "ip", "address", ",", "localhost", ".", "*", "user", "may", "python", "located", ":", "`", "c", ":", "\\program", "file", "(", "x86", ")", "\\h2ogpt\\python\\python.exe", "`", ".", "*", "debug", "issue", ",", "run", "following", "(", "replace", "`", "pseud", "`", "user", ")", ":", "``", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "``", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\h2ogpt.launch.pyw", "''", "``", "`", "start-up", "exception", "appended", "log", ",", "e.g", ".", "`", "c", ":", "\\users\\pseud\\h2ogpt_exception.log", "`", ".", "*", "control", "startup", ",", "tweak", "python", "startup", "file", ",", "e.g", ".", "user", "`", "pseud", "`", ":", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\pkgs\\win_run_app.py", "`", "*", "python", "code", ",", "set", "envs", "anywhere", "main_h2ogpt", "(", ")", "called", "*", "e.g", ".", "`", "os.environ", "[", "'name", "'", "]", "=", "'value", "'", "`", ",", "e.g", ".", "`", "os.environ", "[", "'n_jobs", "'", "]", "=", "'10", "'", "`", "(", "must", "always", "string", ")", ".", "*", "environment", "variable", "changed", ",", "e.g", ".", ":", "*", "`", "n_jobs", "`", ":", "number", "core", "various", "task", "*", "`", "omp_num_threads", "`", "thread", "count", "llama", "*", "`", "cuda_visible_devices", "`", "gpus", "used", ".", "recommend", "set", "single", "fast", "gpu", ",", "e.g", ".", "`", "cuda_visible_devices=0", "`", "multiple", "gpus", ".", "note", "ui", "control", "gpus", "(", "cpu", "mode", ")", "llama", "model", ".", "*", "cli", "argument", "`", "python", "generate.py", "--", "help", "`", "environment", "variable", "set", "`", "h2ogpt_x", "`", ",", "e.g", ".", "`", "h2ogpt_h2ocolors", "`", "`", "false", "`", ".", "*", "set", "env", "`", "h2ogpt_server_name", "`", "actual", "ip", "address", "lan", "see", "app", ",", "e.g", ".", "`", "h2ogpt_server_name", "`", "`", "192.168.1.172", "`", "allow", "access", "firewall", "window", "defender", "activated", ".", "*", "one", "tweak", "installed", "h2ogpt", "code", ",", "e.g", ".", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt", "`", ".", "*", "terminate", "app", ",", "go", "system", "tab", "click", "admin", "click", "shutdown", "h2ogpt", ".", "*", "startup", "fails", ",", "run", "console", "check", "error", ",", "e.g", ".", "kill", "old", "python", "process", ".", "*", "[", "full", "window", "10/11", "manual", "installation", "script", "]", "(", "docs/readme_windows.md", ")", "*", "single", "`", ".bat", "`", "file", "installation", "(", "skip", "optional", "package", ",", "take", "9gb", "filled", "disk", ")", ".", "*", "recommend", "base", "conda", "env", ",", "allows", "doctr", "requires", "pygobject", "otherwise", "support", "(", "except", "`", "mysys2", "`", "used", "h2ogpt", ")", ".", "*", "also", "allows", "tt", "package", "coqui", ",", "otherwise", "currently", "enabled", "one-click", "installer", ".", "--", "-" ], [ "window 10/11 64-bit full document q/a capability * one-click installer * cpu gpu : download [ h2ogpt window installer ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/jan2024/h2ogpt_0.0.1.exe ) ( 1.3gb file ) * installed , feel free change start directory icon ` % homedrive % \\ % homepath % ` ( e.g . )", "` % homedrive % \\ % homepath % \\h2ogpt_data ` created file ( like database ) go .", "path saved relative path .", "* cpu : click h2ogpt icon start menu .", "give 15 second open browser many optional package included .", "default , browser launch actual local ip address , localhost .", "* gpu : starting , run following command ( replace ` pseud ` user ) : `` ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe -m pip uninstall -y torch c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe -m pip install http : //h2o-release.s3.amazonaws.com/h2ogpt/torch-2.1.2 % 2bcu118-cp310-cp310-win_amd64.whl `` ` click h2ogpt icon start menu .", "give 20 second open browser many optional package included .", "default , browser launch actual local ip address , localhost .", "* user may python located : ` c : \\program file ( x86 ) \\h2ogpt\\python\\python.exe ` .", "* debug issue , run following ( replace ` pseud ` user ) : `` ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe `` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\h2ogpt.launch.pyw '' `` ` start-up exception appended log , e.g .", "` c : \\users\\pseud\\h2ogpt_exception.log ` .", "* control startup , tweak python startup file , e.g .", "user ` pseud ` : ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\pkgs\\win_run_app.py ` * python code , set envs anywhere main_h2ogpt ( ) called * e.g .", "` os.environ [ 'name ' ] = 'value ' ` , e.g .", "` os.environ [ 'n_jobs ' ] = '10 ' ` ( must always string ) .", "* environment variable changed , e.g .", ": * ` n_jobs ` : number core various task * ` omp_num_threads ` thread count llama * ` cuda_visible_devices ` gpus used .", "recommend set single fast gpu , e.g .", "` cuda_visible_devices=0 ` multiple gpus .", "note ui control gpus ( cpu mode ) llama model .", "* cli argument ` python generate.py -- help ` environment variable set ` h2ogpt_x ` , e.g .", "` h2ogpt_h2ocolors ` ` false ` .", "* set env ` h2ogpt_server_name ` actual ip address lan see app , e.g .", "` h2ogpt_server_name ` ` 192.168.1.172 ` allow access firewall window defender activated .", "* one tweak installed h2ogpt code , e.g .", "` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt ` .", "* terminate app , go system tab click admin click shutdown h2ogpt .", "* startup fails , run console check error , e.g .", "kill old python process .", "* [ full window 10/11 manual installation script ] ( docs/readme_windows.md ) * single ` .bat ` file installation ( skip optional package , take 9gb filled disk ) .", "* recommend base conda env , allows doctr requires pygobject otherwise support ( except ` mysys2 ` used h2ogpt ) .", "* also allows tt package coqui , otherwise currently enabled one-click installer .", "-- -" ] ]
[ [ "window", "10/11", "64-bit", "full", "document", "q/a", "capability", "*", "one-click", "installer", "*", "cpu", "gpu", ":", "download", "[", "h2ogpt", "window", "installer", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/jan2024/h2ogpt_0.0.1.exe", ")", "(", "1.3gb", "file", ")", "*", "installed", ",", "feel", "free", "change", "start", "directory", "icon", "`", "%", "homedrive", "%", "\\", "%", "homepath", "%", "`", "(", "e.g", ".", ")", "`", "%", "homedrive", "%", "\\", "%", "homepath", "%", "\\h2ogpt_data", "`", "created", "file", "(", "like", "database", ")", "go", ".", "path", "saved", "relative", "path", ".", "*", "cpu", ":", "click", "h2ogpt", "icon", "start", "menu", ".", "give", "15", "second", "open", "browser", "many", "optional", "package", "included", ".", "default", ",", "browser", "launch", "actual", "local", "ip", "address", ",", "localhost", ".", "*", "gpu", ":", "starting", ",", "run", "following", "command", "(", "replace", "`", "pseud", "`", "user", ")", ":", "``", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "-m", "pip", "uninstall", "-y", "torch", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "-m", "pip", "install", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/torch-2.1.2", "%", "2bcu118-cp310-cp310-win_amd64.whl", "``", "`", "click", "h2ogpt", "icon", "start", "menu", ".", "give", "20", "second", "open", "browser", "many", "optional", "package", "included", ".", "default", ",", "browser", "launch", "actual", "local", "ip", "address", ",", "localhost", ".", "*", "user", "may", "python", "located", ":", "`", "c", ":", "\\program", "file", "(", "x86", ")", "\\h2ogpt\\python\\python.exe", "`", ".", "*", "debug", "issue", ",", "run", "following", "(", "replace", "`", "pseud", "`", "user", ")", ":", "``", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe", "``", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\h2ogpt.launch.pyw", "''", "``", "`", "start-up", "exception", "appended", "log", ",", "e.g", ".", "`", "c", ":", "\\users\\pseud\\h2ogpt_exception.log", "`", ".", "*", "control", "startup", ",", "tweak", "python", "startup", "file", ",", "e.g", ".", "user", "`", "pseud", "`", ":", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt\\pkgs\\win_run_app.py", "`", "*", "python", "code", ",", "set", "envs", "anywhere", "main_h2ogpt", "(", ")", "called", "*", "e.g", ".", "`", "os.environ", "[", "'name", "'", "]", "=", "'value", "'", "`", ",", "e.g", ".", "`", "os.environ", "[", "'n_jobs", "'", "]", "=", "'10", "'", "`", "(", "must", "always", "string", ")", ".", "*", "environment", "variable", "changed", ",", "e.g", ".", ":", "*", "`", "n_jobs", "`", ":", "number", "core", "various", "task", "*", "`", "omp_num_threads", "`", "thread", "count", "llama", "*", "`", "cuda_visible_devices", "`", "gpus", "used", ".", "recommend", "set", "single", "fast", "gpu", ",", "e.g", ".", "`", "cuda_visible_devices=0", "`", "multiple", "gpus", ".", "note", "ui", "control", "gpus", "(", "cpu", "mode", ")", "llama", "model", ".", "*", "cli", "argument", "`", "python", "generate.py", "--", "help", "`", "environment", "variable", "set", "`", "h2ogpt_x", "`", ",", "e.g", ".", "`", "h2ogpt_h2ocolors", "`", "`", "false", "`", ".", "*", "set", "env", "`", "h2ogpt_server_name", "`", "actual", "ip", "address", "lan", "see", "app", ",", "e.g", ".", "`", "h2ogpt_server_name", "`", "`", "192.168.1.172", "`", "allow", "access", "firewall", "window", "defender", "activated", ".", "*", "one", "tweak", "installed", "h2ogpt", "code", ",", "e.g", ".", "`", "c", ":", "\\users\\pseud\\appdata\\local\\programs\\h2ogpt", "`", ".", "*", "terminate", "app", ",", "go", "system", "tab", "click", "admin", "click", "shutdown", "h2ogpt", ".", "*", "startup", "fails", ",", "run", "console", "check", "error", ",", "e.g", ".", "kill", "old", "python", "process", ".", "*", "[", "full", "window", "10/11", "manual", "installation", "script", "]", "(", "docs/readme_windows.md", ")", "*", "single", "`", ".bat", "`", "file", "installation", "(", "skip", "optional", "package", ",", "take", "9gb", "filled", "disk", ")", ".", "*", "recommend", "base", "conda", "env", ",", "allows", "doctr", "requires", "pygobject", "otherwise", "support", "(", "except", "`", "mysys2", "`", "used", "h2ogpt", ")", ".", "*", "also", "allows", "tt", "package", "coqui", ",", "otherwise", "currently", "enabled", "one-click", "installer", ".", "--", "-" ], [ "window 10/11 64-bit full document q/a capability * one-click installer * cpu gpu : download [ h2ogpt window installer ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/jan2024/h2ogpt_0.0.1.exe ) ( 1.3gb file ) * installed , feel free change start directory icon ` % homedrive % \\ % homepath % ` ( e.g . )", "` % homedrive % \\ % homepath % \\h2ogpt_data ` created file ( like database ) go .", "path saved relative path .", "* cpu : click h2ogpt icon start menu .", "give 15 second open browser many optional package included .", "default , browser launch actual local ip address , localhost .", "* gpu : starting , run following command ( replace ` pseud ` user ) : `` ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe -m pip uninstall -y torch c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe -m pip install http : //h2o-release.s3.amazonaws.com/h2ogpt/torch-2.1.2 % 2bcu118-cp310-cp310-win_amd64.whl `` ` click h2ogpt icon start menu .", "give 20 second open browser many optional package included .", "default , browser launch actual local ip address , localhost .", "* user may python located : ` c : \\program file ( x86 ) \\h2ogpt\\python\\python.exe ` .", "* debug issue , run following ( replace ` pseud ` user ) : `` ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\python\\python.exe `` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\h2ogpt.launch.pyw '' `` ` start-up exception appended log , e.g .", "` c : \\users\\pseud\\h2ogpt_exception.log ` .", "* control startup , tweak python startup file , e.g .", "user ` pseud ` : ` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt\\pkgs\\win_run_app.py ` * python code , set envs anywhere main_h2ogpt ( ) called * e.g .", "` os.environ [ 'name ' ] = 'value ' ` , e.g .", "` os.environ [ 'n_jobs ' ] = '10 ' ` ( must always string ) .", "* environment variable changed , e.g .", ": * ` n_jobs ` : number core various task * ` omp_num_threads ` thread count llama * ` cuda_visible_devices ` gpus used .", "recommend set single fast gpu , e.g .", "` cuda_visible_devices=0 ` multiple gpus .", "note ui control gpus ( cpu mode ) llama model .", "* cli argument ` python generate.py -- help ` environment variable set ` h2ogpt_x ` , e.g .", "` h2ogpt_h2ocolors ` ` false ` .", "* set env ` h2ogpt_server_name ` actual ip address lan see app , e.g .", "` h2ogpt_server_name ` ` 192.168.1.172 ` allow access firewall window defender activated .", "* one tweak installed h2ogpt code , e.g .", "` c : \\users\\pseud\\appdata\\local\\programs\\h2ogpt ` .", "* terminate app , go system tab click admin click shutdown h2ogpt .", "* startup fails , run console check error , e.g .", "kill old python process .", "* [ full window 10/11 manual installation script ] ( docs/readme_windows.md ) * single ` .bat ` file installation ( skip optional package , take 9gb filled disk ) .", "* recommend base conda env , allows doctr requires pygobject otherwise support ( except ` mysys2 ` used h2ogpt ) .", "* also allows tt package coqui , otherwise currently enabled one-click installer .", "-- -" ] ]
Windows 10/11 64-bit with full document Q/A capability * One-Click Installer * CPU or GPU: Download [h2oGPT Windows Installer](https://h2o-release.s3.amazonaws.com/h2ogpt/Jan2024/h2oGPT_0.0.1.exe) (1.3GB file) * Once installed, feel free to change start directory for icon from `%HOMEDRIVE%\%HOMEPATH%` to (e.g.) `%HOMEDRIVE%\%HOMEPATH%\h2ogpt_data` so all created files (like database) go there. All paths saved are relative to this path. * CPU: Click the h2oGPT icon in the Start menu. Give it about 15 seconds to open in a browser if many optional packages are included. By default, the browser will launch with the actual local IP address, not localhost. * GPU: Before starting, run the following commands (replace `pseud` with your user): ``` C:\Users\pseud\AppData\Local\Programs\h2oGPT\Python\python.exe -m pip uninstall -y torch C:\Users\pseud\AppData\Local\Programs\h2oGPT\Python\python.exe -m pip install https://h2o-release.s3.amazonaws.com/h2ogpt/torch-2.1.2%2Bcu118-cp310-cp310-win_amd64.whl ``` Now click the h2oGPT icon in the Start menu. Give it about 20 seconds to open in a browser if many optional packages are included. By default, the browser will launch with the actual local IP address, not localhost. * Some other users may have python located here: `C:\Program Files (x86)\h2oGPT\Python\python.exe`. * To debug any issues, run the following (replace `pseud` with your user): ``` C:\Users\pseud\AppData\Local\Programs\h2oGPT\Python\python.exe "C:\Users\pseud\AppData\Local\Programs\h2oGPT\h2oGPT.launch.pyw" ``` Any start-up exceptions are appended to log, e.g. `C:\Users\pseud\h2ogpt_exception.log`. * To control startup, tweak the python startup file, e.g. for user `pseud`: `C:\Users\pseud\AppData\Local\Programs\h2oGPT\pkgs\win_run_app.py` * In this Python code, set ENVs anywhere before main_h2ogpt() is called * E.g. `os.environ['name'] = 'value'`, e.g. `os.environ['n_jobs'] = '10'` (must be always a string). * Environment variables can be changed, e.g.: * `n_jobs`: number of cores for various tasks * `OMP_NUM_THREADS` thread count for LLaMa * `CUDA_VISIBLE_DEVICES` which GPUs are used. Recommend set to single fast GPU, e.g. `CUDA_VISIBLE_DEVICES=0` if have multiple GPUs. Note that UI cannot control which GPUs (or CPU mode) for LLaMa models. * Any CLI argument from `python generate.py --help` with environment variable set as `h2ogpt_x`, e.g. `h2ogpt_h2ocolors` to `False`. * Set env `h2ogpt_server_name` to actual IP address for LAN to see app, e.g. `h2ogpt_server_name` to `192.168.1.172` and allow access through firewall if have Windows Defender activated. * One can tweak installed h2oGPT code at, e.g. `C:\Users\pseud\AppData\Local\Programs\h2oGPT`. * To terminate the app, go to System Tab and click Admin and click Shutdown h2oGPT. * If startup fails, run as console and check for errors, e.g. and kill any old Python processes. * [Full Windows 10/11 Manual Installation Script](docs/README_WINDOWS.md) * Single `.bat` file for installation (if you do not skip any optional packages, takes about 9GB filled on disk). * Recommend base Conda env, which allows for DocTR that requires pygobject that has otherwise no support (except `mysys2` that cannot be used by h2oGPT). * Also allows for the TTS package by Coqui, which is otherwise not currently enabled in the one-click installer. ---
https://github.com/h2oai/h2ogpt
0
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "linux", "(", "cpu/cuda", ")", "full", "document", "q/a", "capability", "*", "[", "docker", "build", "run", "doc", "]", "(", "docs/readme_docker.md", ")", "*", "[", "linux", "manual", "install", "run", "doc", "]", "(", "docs/readme_linux.md", ")", "--", "-" ], [ "linux ( cpu/cuda ) full document q/a capability * [ docker build run doc ] ( docs/readme_docker.md ) * [ linux manual install run doc ] ( docs/readme_linux.md ) -- -" ] ]
[ [ "linux", "(", "cpu/cuda", ")", "full", "document", "q/a", "capability", "*", "[", "docker", "build", "run", "doc", "]", "(", "docs/readme_docker.md", ")", "*", "[", "linux", "manual", "install", "run", "doc", "]", "(", "docs/readme_linux.md", ")", "--", "-" ], [ "linux ( cpu/cuda ) full document q/a capability * [ docker build run doc ] ( docs/readme_docker.md ) * [ linux manual install run doc ] ( docs/readme_linux.md ) -- -" ] ]
Linux (CPU/CUDA) with full document Q/A capability * [Docker Build and Run Docs](docs/README_DOCKER.md) * [Linux Manual Install and Run Docs](docs/README_LINUX.md) ---
https://github.com/h2oai/h2ogpt
-1
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "macos", "(", "cpu/m1/m2", ")", "full", "document", "q/a", "capability", "*", "one-click", "installers", "(", "experimental", "subject", "change", ")", "nov", "08", ",", "2023", "-", "[", "h2ogpt-osx-m1-cpu", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-cpu", ")", "-", "[", "h2ogpt-osx-m1-gpu", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-gpu", ")", "download", "runnable", "file", "open", "finder", ".", "take", "minute", "unpack", "run", "application", ".", "one-click", "installers", "experimental", ".", "report", "issue", "step", "reproduce", "http", ":", "//github.com/h2oai/h2ogpt/issues", ".", "*", "*", "note", ":", "*", "*", "app", "bundle", "unsigned", ".", "experience", "issue", "running", "app", ",", "run", "following", "command", ":", "``", "`", "bash", "$", "xattr", "-dr", "com.apple.quarantine", "{", "file-path", "}", "/h2ogpt-osx-m1-gpu", "$", "chmod", "+x", "{", "file-path", "}", "/h2ogpt-osx-m1-gpu", "``", "`", "*", "[", "macos", "manual", "install", "run", "doc", "]", "(", "docs/readme_macos.md", ")", "--", "-" ], [ "macos ( cpu/m1/m2 ) full document q/a capability * one-click installers ( experimental subject change ) nov 08 , 2023 - [ h2ogpt-osx-m1-cpu ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-cpu ) - [ h2ogpt-osx-m1-gpu ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-gpu ) download runnable file open finder .", "take minute unpack run application .", "one-click installers experimental .", "report issue step reproduce http : //github.com/h2oai/h2ogpt/issues .", "* * note : * * app bundle unsigned .", "experience issue running app , run following command : `` ` bash $ xattr -dr com.apple.quarantine { file-path } /h2ogpt-osx-m1-gpu $ chmod +x { file-path } /h2ogpt-osx-m1-gpu `` ` * [ macos manual install run doc ] ( docs/readme_macos.md ) -- -" ] ]
[ [ "macos", "(", "cpu/m1/m2", ")", "full", "document", "q/a", "capability", "*", "one-click", "installers", "(", "experimental", "subject", "change", ")", "nov", "08", ",", "2023", "-", "[", "h2ogpt-osx-m1-cpu", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-cpu", ")", "-", "[", "h2ogpt-osx-m1-gpu", "]", "(", "http", ":", "//h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-gpu", ")", "download", "runnable", "file", "open", "finder", ".", "take", "minute", "unpack", "run", "application", ".", "one-click", "installers", "experimental", ".", "report", "issue", "step", "reproduce", "http", ":", "//github.com/h2oai/h2ogpt/issues", ".", "*", "*", "note", ":", "*", "*", "app", "bundle", "unsigned", ".", "experience", "issue", "running", "app", ",", "run", "following", "command", ":", "``", "`", "bash", "$", "xattr", "-dr", "com.apple.quarantine", "{", "file-path", "}", "/h2ogpt-osx-m1-gpu", "$", "chmod", "+x", "{", "file-path", "}", "/h2ogpt-osx-m1-gpu", "``", "`", "*", "[", "macos", "manual", "install", "run", "doc", "]", "(", "docs/readme_macos.md", ")", "--", "-" ], [ "macos ( cpu/m1/m2 ) full document q/a capability * one-click installers ( experimental subject change ) nov 08 , 2023 - [ h2ogpt-osx-m1-cpu ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-cpu ) - [ h2ogpt-osx-m1-gpu ] ( http : //h2o-release.s3.amazonaws.com/h2ogpt/nov2023/h2ogpt-osx-m1-gpu ) download runnable file open finder .", "take minute unpack run application .", "one-click installers experimental .", "report issue step reproduce http : //github.com/h2oai/h2ogpt/issues .", "* * note : * * app bundle unsigned .", "experience issue running app , run following command : `` ` bash $ xattr -dr com.apple.quarantine { file-path } /h2ogpt-osx-m1-gpu $ chmod +x { file-path } /h2ogpt-osx-m1-gpu `` ` * [ macos manual install run doc ] ( docs/readme_macos.md ) -- -" ] ]
macOS (CPU/M1/M2) with full document Q/A capability * One-click Installers (Experimental and subject to changes) Nov 08, 2023 - [h2ogpt-osx-m1-cpu](https://h2o-release.s3.amazonaws.com/h2ogpt/Nov2023/h2ogpt-osx-m1-cpu) - [h2ogpt-osx-m1-gpu](https://h2o-release.s3.amazonaws.com/h2ogpt/Nov2023/h2ogpt-osx-m1-gpu) Download the runnable file and open it from the Finder. It will take a few minutes to unpack and run the application. These one-click installers are experimental. Report any issues with steps to reproduce at https://github.com/h2oai/h2ogpt/issues. **Note:** The app bundle is unsigned. If you experience any issues with running the app, run the following commands: ```bash $ xattr -dr com.apple.quarantine {file-path}/h2ogpt-osx-m1-gpu $ chmod +x {file-path}/h2ogpt-osx-m1-gpu ``` * [macOS Manual Install and Run Docs](docs/README_MACOS.md) ---
https://github.com/h2oai/h2ogpt
-1
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "doc", "guide", "<", "!", "--", "cat", "readme.md", "|", "./gh-md-toc", "-", "help", "heavily", "processed", "--", ">", "*", "[", "get", "started", "]", "(", "#", "get-started", ")", "*", "[", "linux", "(", "cpu", "cuda", ")", "]", "(", "docs/readme_linux.md", ")", "*", "[", "macos", "(", "cpu", "m1/m2", ")", "]", "(", "docs/readme_macos.md", ")", "*", "[", "window", "10/11", "(", "cpu", "cuda", ")", "]", "(", "docs/readme_windows.md", ")", "*", "[", "gpu", "(", "cuda", ",", "autogptq", ",", "exllama", ")", "running", "detail", "]", "(", "docs/readme_gpu.md", ")", "*", "[", "cpu", "running", "detail", "]", "(", "docs/readme_cpu.md", ")", "*", "[", "cli", "chat", "]", "(", "docs/readme_cli.md", ")", "*", "[", "gradio", "ui", "]", "(", "docs/readme_ui.md", ")", "*", "[", "client", "api", "(", "gradio", ",", "openai-compliant", ")", "]", "(", "docs/readme_client.md", ")", "*", "[", "inference", "server", "(", "hf", "tgi", "server", ",", "vllm", ",", "gradio", ",", "exllama", ",", "replicate", ",", "openai", ",", "azure", "openai", ")", "]", "(", "docs/readme_inferenceservers.md", ")", "*", "[", "python", "wheel", "]", "(", "docs/readme_wheel.md", ")", "*", "[", "offline", "installation", "]", "(", "docs/readme_offline.md", ")", "*", "[", "low", "memory", "]", "(", "docs/faq.md", "#", "low-memory-mode", ")", "*", "[", "docker", "]", "(", "docs/readme_docker.md", ")", "*", "[", "langchain", "document", "support", "]", "(", "docs/readme_langchain.md", ")", "*", "[", "compare", "privategpt", "et", "al", ".", "]", "(", "docs/readme_langchain.md", "#", "what-is-h2ogpts-langchain-integration-like", ")", "*", "[", "roadmap", "]", "(", "#", "roadmap", ")", "*", "[", "development", "]", "(", "#", "development", ")", "*", "[", "help", "]", "(", "#", "help", ")", "*", "[", "langchain", "file", "type", "supported", "]", "(", "docs/readme_langchain.md", "#", "supported-datatypes", ")", "*", "[", "cli", "database", "control", "]", "(", "docs/readme_langchain.md", "#", "database-creation", ")", "*", "[", "faq", "]", "(", "docs/faq.md", ")", "*", "[", "model", "usage", "note", "]", "(", "docs/faq.md", "#", "model-usage-notes", ")", "*", "[", "adding", "llm", "model", "(", "including", "using", "gguf", "attention", "sink", ")", "]", "(", "docs/faq.md", "#", "adding-models", ")", "*", "[", "adding", "embedding", "model", "]", "(", "docs/faq.md", "#", "add-new-embedding-model", ")", "*", "[", "adding", "prompt", "]", "(", "docs/faq.md", "#", "adding-prompt-templates", ")", "*", "[", "in-context", "learning", "]", "(", "docs/faq.md", "#", "in-context-learning-via-prompt-engineering", ")", "*", "[", "multiple", "gpus", "]", "(", "docs/faq.md", "#", "multiple-gpus", ")", "*", "[", "low-memory", "usage", "]", "(", "docs/faq.md", "#", "low-memory-mode", ")", "*", "[", "environment", "variable", "]", "(", "docs/faq.md", "#", "what-envs-can-i-pass-to-control-h2ogpt", ")", "*", "[", "http", "access", "server", "client", "]", "(", "docs/faq.md", "#", "https-access-for-server-and-client", ")", "*", "[", "useful", "link", "]", "(", "docs/links.md", ")", "*", "[", "fine-tuning", "]", "(", "docs/finetune.md", ")", "*", "[", "triton", "]", "(", "docs/triton.md", ")", "*", "[", "commercial", "viability", "]", "(", "docs/faq.md", "#", "commercial-viability", ")", "*", "[", "acknowledgement", "]", "(", "#", "acknowledgement", ")", "*", "[", "h2o.ai", "?", "]", "(", "#", "why-h2oai", ")", "*", "[", "disclaimer", "]", "(", "#", "disclaimer", ")" ], [ "doc guide < ! -- cat readme.md | ./gh-md-toc - help heavily processed -- > * [ get started ] ( # get-started ) * [ linux ( cpu cuda ) ] ( docs/readme_linux.md ) * [ macos ( cpu m1/m2 ) ] ( docs/readme_macos.md ) * [ window 10/11 ( cpu cuda ) ] ( docs/readme_windows.md ) * [ gpu ( cuda , autogptq , exllama ) running detail ] ( docs/readme_gpu.md ) * [ cpu running detail ] ( docs/readme_cpu.md ) * [ cli chat ] ( docs/readme_cli.md ) * [ gradio ui ] ( docs/readme_ui.md ) * [ client api ( gradio , openai-compliant ) ] ( docs/readme_client.md ) * [ inference server ( hf tgi server , vllm , gradio , exllama , replicate , openai , azure openai ) ] ( docs/readme_inferenceservers.md ) * [ python wheel ] ( docs/readme_wheel.md ) * [ offline installation ] ( docs/readme_offline.md ) * [ low memory ] ( docs/faq.md # low-memory-mode ) * [ docker ] ( docs/readme_docker.md ) * [ langchain document support ] ( docs/readme_langchain.md ) * [ compare privategpt et al .", "] ( docs/readme_langchain.md # what-is-h2ogpts-langchain-integration-like ) * [ roadmap ] ( # roadmap ) * [ development ] ( # development ) * [ help ] ( # help ) * [ langchain file type supported ] ( docs/readme_langchain.md # supported-datatypes ) * [ cli database control ] ( docs/readme_langchain.md # database-creation ) * [ faq ] ( docs/faq.md ) * [ model usage note ] ( docs/faq.md # model-usage-notes ) * [ adding llm model ( including using gguf attention sink ) ] ( docs/faq.md # adding-models ) * [ adding embedding model ] ( docs/faq.md # add-new-embedding-model ) * [ adding prompt ] ( docs/faq.md # adding-prompt-templates ) * [ in-context learning ] ( docs/faq.md # in-context-learning-via-prompt-engineering ) * [ multiple gpus ] ( docs/faq.md # multiple-gpus ) * [ low-memory usage ] ( docs/faq.md # low-memory-mode ) * [ environment variable ] ( docs/faq.md # what-envs-can-i-pass-to-control-h2ogpt ) * [ http access server client ] ( docs/faq.md # https-access-for-server-and-client ) * [ useful link ] ( docs/links.md ) * [ fine-tuning ] ( docs/finetune.md ) * [ triton ] ( docs/triton.md ) * [ commercial viability ] ( docs/faq.md # commercial-viability ) * [ acknowledgement ] ( # acknowledgement ) * [ h2o.ai ?", "] ( # why-h2oai ) * [ disclaimer ] ( # disclaimer )" ] ]
[ [ "doc", "guide", "<", "!", "--", "cat", "readme.md", "|", "./gh-md-toc", "-", "help", "heavily", "processed", "--", ">", "*", "[", "get", "started", "]", "(", "#", "get-started", ")", "*", "[", "linux", "(", "cpu", "cuda", ")", "]", "(", "docs/readme_linux.md", ")", "*", "[", "macos", "(", "cpu", "m1/m2", ")", "]", "(", "docs/readme_macos.md", ")", "*", "[", "window", "10/11", "(", "cpu", "cuda", ")", "]", "(", "docs/readme_windows.md", ")", "*", "[", "gpu", "(", "cuda", ",", "autogptq", ",", "exllama", ")", "running", "detail", "]", "(", "docs/readme_gpu.md", ")", "*", "[", "cpu", "running", "detail", "]", "(", "docs/readme_cpu.md", ")", "*", "[", "cli", "chat", "]", "(", "docs/readme_cli.md", ")", "*", "[", "gradio", "ui", "]", "(", "docs/readme_ui.md", ")", "*", "[", "client", "api", "(", "gradio", ",", "openai-compliant", ")", "]", "(", "docs/readme_client.md", ")", "*", "[", "inference", "server", "(", "hf", "tgi", "server", ",", "vllm", ",", "gradio", ",", "exllama", ",", "replicate", ",", "openai", ",", "azure", "openai", ")", "]", "(", "docs/readme_inferenceservers.md", ")", "*", "[", "python", "wheel", "]", "(", "docs/readme_wheel.md", ")", "*", "[", "offline", "installation", "]", "(", "docs/readme_offline.md", ")", "*", "[", "low", "memory", "]", "(", "docs/faq.md", "#", "low-memory-mode", ")", "*", "[", "docker", "]", "(", "docs/readme_docker.md", ")", "*", "[", "langchain", "document", "support", "]", "(", "docs/readme_langchain.md", ")", "*", "[", "compare", "privategpt", "et", "al", ".", "]", "(", "docs/readme_langchain.md", "#", "what-is-h2ogpts-langchain-integration-like", ")", "*", "[", "roadmap", "]", "(", "#", "roadmap", ")", "*", "[", "development", "]", "(", "#", "development", ")", "*", "[", "help", "]", "(", "#", "help", ")", "*", "[", "langchain", "file", "type", "supported", "]", "(", "docs/readme_langchain.md", "#", "supported-datatypes", ")", "*", "[", "cli", "database", "control", "]", "(", "docs/readme_langchain.md", "#", "database-creation", ")", "*", "[", "faq", "]", "(", "docs/faq.md", ")", "*", "[", "model", "usage", "note", "]", "(", "docs/faq.md", "#", "model-usage-notes", ")", "*", "[", "adding", "llm", "model", "(", "including", "using", "gguf", "attention", "sink", ")", "]", "(", "docs/faq.md", "#", "adding-models", ")", "*", "[", "adding", "embedding", "model", "]", "(", "docs/faq.md", "#", "add-new-embedding-model", ")", "*", "[", "adding", "prompt", "]", "(", "docs/faq.md", "#", "adding-prompt-templates", ")", "*", "[", "in-context", "learning", "]", "(", "docs/faq.md", "#", "in-context-learning-via-prompt-engineering", ")", "*", "[", "multiple", "gpus", "]", "(", "docs/faq.md", "#", "multiple-gpus", ")", "*", "[", "low-memory", "usage", "]", "(", "docs/faq.md", "#", "low-memory-mode", ")", "*", "[", "environment", "variable", "]", "(", "docs/faq.md", "#", "what-envs-can-i-pass-to-control-h2ogpt", ")", "*", "[", "http", "access", "server", "client", "]", "(", "docs/faq.md", "#", "https-access-for-server-and-client", ")", "*", "[", "useful", "link", "]", "(", "docs/links.md", ")", "*", "[", "fine-tuning", "]", "(", "docs/finetune.md", ")", "*", "[", "triton", "]", "(", "docs/triton.md", ")", "*", "[", "commercial", "viability", "]", "(", "docs/faq.md", "#", "commercial-viability", ")", "*", "[", "acknowledgement", "]", "(", "#", "acknowledgement", ")", "*", "[", "h2o.ai", "?", "]", "(", "#", "why-h2oai", ")", "*", "[", "disclaimer", "]", "(", "#", "disclaimer", ")" ], [ "doc guide < ! -- cat readme.md | ./gh-md-toc - help heavily processed -- > * [ get started ] ( # get-started ) * [ linux ( cpu cuda ) ] ( docs/readme_linux.md ) * [ macos ( cpu m1/m2 ) ] ( docs/readme_macos.md ) * [ window 10/11 ( cpu cuda ) ] ( docs/readme_windows.md ) * [ gpu ( cuda , autogptq , exllama ) running detail ] ( docs/readme_gpu.md ) * [ cpu running detail ] ( docs/readme_cpu.md ) * [ cli chat ] ( docs/readme_cli.md ) * [ gradio ui ] ( docs/readme_ui.md ) * [ client api ( gradio , openai-compliant ) ] ( docs/readme_client.md ) * [ inference server ( hf tgi server , vllm , gradio , exllama , replicate , openai , azure openai ) ] ( docs/readme_inferenceservers.md ) * [ python wheel ] ( docs/readme_wheel.md ) * [ offline installation ] ( docs/readme_offline.md ) * [ low memory ] ( docs/faq.md # low-memory-mode ) * [ docker ] ( docs/readme_docker.md ) * [ langchain document support ] ( docs/readme_langchain.md ) * [ compare privategpt et al .", "] ( docs/readme_langchain.md # what-is-h2ogpts-langchain-integration-like ) * [ roadmap ] ( # roadmap ) * [ development ] ( # development ) * [ help ] ( # help ) * [ langchain file type supported ] ( docs/readme_langchain.md # supported-datatypes ) * [ cli database control ] ( docs/readme_langchain.md # database-creation ) * [ faq ] ( docs/faq.md ) * [ model usage note ] ( docs/faq.md # model-usage-notes ) * [ adding llm model ( including using gguf attention sink ) ] ( docs/faq.md # adding-models ) * [ adding embedding model ] ( docs/faq.md # add-new-embedding-model ) * [ adding prompt ] ( docs/faq.md # adding-prompt-templates ) * [ in-context learning ] ( docs/faq.md # in-context-learning-via-prompt-engineering ) * [ multiple gpus ] ( docs/faq.md # multiple-gpus ) * [ low-memory usage ] ( docs/faq.md # low-memory-mode ) * [ environment variable ] ( docs/faq.md # what-envs-can-i-pass-to-control-h2ogpt ) * [ http access server client ] ( docs/faq.md # https-access-for-server-and-client ) * [ useful link ] ( docs/links.md ) * [ fine-tuning ] ( docs/finetune.md ) * [ triton ] ( docs/triton.md ) * [ commercial viability ] ( docs/faq.md # commercial-viability ) * [ acknowledgement ] ( # acknowledgement ) * [ h2o.ai ?", "] ( # why-h2oai ) * [ disclaimer ] ( # disclaimer )" ] ]
Docs Guide <!-- cat README.md | ./gh-md-toc - But Help is heavily processed --> * [Get Started](#get-started) * [Linux (CPU or CUDA)](docs/README_LINUX.md) * [macOS (CPU or M1/M2)](docs/README_MACOS.md) * [Windows 10/11 (CPU or CUDA)](docs/README_WINDOWS.md) * [GPU (CUDA, AutoGPTQ, exllama) Running Details](docs/README_GPU.md) * [CPU Running Details](docs/README_CPU.md) * [CLI chat](docs/README_CLI.md) * [Gradio UI](docs/README_ui.md) * [Client API (Gradio, OpenAI-Compliant)](docs/README_CLIENT.md) * [Inference Servers (HF TGI server, vLLM, Gradio, ExLLaMa, Replicate, OpenAI, Azure OpenAI)](docs/README_InferenceServers.md) * [Python Wheel](docs/README_WHEEL.md) * [Offline Installation](docs/README_offline.md) * [Low Memory](docs/FAQ.md#low-memory-mode) * [Docker](docs/README_DOCKER.md) * [LangChain Document Support](docs/README_LangChain.md) * [Compare to PrivateGPT et al.](docs/README_LangChain.md#what-is-h2ogpts-langchain-integration-like) * [Roadmap](#roadmap) * [Development](#development) * [Help](#help) * [LangChain file types supported](docs/README_LangChain.md#supported-datatypes) * [CLI Database control](docs/README_LangChain.md#database-creation) * [FAQ](docs/FAQ.md) * [Model Usage Notes](docs/FAQ.md#model-usage-notes) * [Adding LLM Models (including using GGUF and Attention Sinks)](docs/FAQ.md#adding-models) * [Adding Embedding Models](docs/FAQ.md#add-new-embedding-model) * [Adding Prompts](docs/FAQ.md#adding-prompt-templates) * [In-Context Learning](docs/FAQ.md#in-context-learning-via-prompt-engineering) * [Multiple GPUs](docs/FAQ.md#multiple-gpus) * [Low-Memory Usage](docs/FAQ.md#low-memory-mode) * [Environment Variables](docs/FAQ.md#what-envs-can-i-pass-to-control-h2ogpt) * [HTTPS access for server and client](docs/FAQ.md#https-access-for-server-and-client) * [Useful Links](docs/LINKS.md) * [Fine-Tuning](docs/FINETUNE.md) * [Triton](docs/TRITON.md) * [Commercial viability](docs/FAQ.md#commercial-viability) * [Acknowledgements](#acknowledgements) * [Why H2O.ai?](#why-h2oai) * [Disclaimer](#disclaimer)
https://github.com/h2oai/h2ogpt
-1
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "experimental", "feature", "part", "normal", "installation", "instruction", "experimental", ".", "*", "[", "agent", "]", "(", "docs/readme_agents.md", ")", "--", "alpha", "testing", ".", "optimal", "openai", ",", "also", "fails", "sometimes", "." ], [ "experimental feature part normal installation instruction experimental .", "* [ agent ] ( docs/readme_agents.md ) -- alpha testing .", "optimal openai , also fails sometimes ." ] ]
[ [ "experimental", "feature", "part", "normal", "installation", "instruction", "experimental", ".", "*", "[", "agent", "]", "(", "docs/readme_agents.md", ")", "--", "alpha", "testing", ".", "optimal", "openai", ",", "also", "fails", "sometimes", "." ], [ "experimental feature part normal installation instruction experimental .", "* [ agent ] ( docs/readme_agents.md ) -- alpha testing .", "optimal openai , also fails sometimes ." ] ]
Experimental features These are not part of normal installation instructions and are experimental. * [Agents](docs/README_Agents.md) -- in Alpha testing. Optimal for OpenAI, but that also fails sometimes.
https://github.com/h2oai/h2ogpt
-1
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "development", "-", "create", "development", "environment", "training", "generation", ",", "follow", "[", "installation", "instruction", "]", "(", "docs/install.md", ")", ".", "-", "fine-tune", "llm", "model", "data", ",", "follow", "[", "fine-tuning", "instruction", "]", "(", "docs/finetune.md", ")", ".", "-", "run", "h2ogpt", "test", ":", "``", "`", "bash", "pip", "install", "requirements-parser", "pytest-instafail", "pytest-random-order", "playsound==1.3.0", "pytest", "--", "instafail", "-s", "-v", "test" ], [ "development - create development environment training generation , follow [ installation instruction ] ( docs/install.md ) .", "- fine-tune llm model data , follow [ fine-tuning instruction ] ( docs/finetune.md ) .", "- run h2ogpt test : `` ` bash pip install requirements-parser pytest-instafail pytest-random-order playsound==1.3.0 pytest -- instafail -s -v test" ] ]
[ [ "development", "-", "create", "development", "environment", "training", "generation", ",", "follow", "[", "installation", "instruction", "]", "(", "docs/install.md", ")", ".", "-", "fine-tune", "llm", "model", "data", ",", "follow", "[", "fine-tuning", "instruction", "]", "(", "docs/finetune.md", ")", ".", "-", "run", "h2ogpt", "test", ":", "``", "`", "bash", "pip", "install", "requirements-parser", "pytest-instafail", "pytest-random-order", "playsound==1.3.0", "pytest", "--", "instafail", "-s", "-v", "test" ], [ "development - create development environment training generation , follow [ installation instruction ] ( docs/install.md ) .", "- fine-tune llm model data , follow [ fine-tuning instruction ] ( docs/finetune.md ) .", "- run h2ogpt test : `` ` bash pip install requirements-parser pytest-instafail pytest-random-order playsound==1.3.0 pytest -- instafail -s -v test" ] ]
Development - To create a development environment for training and generation, follow the [installation instructions](docs/INSTALL.md). - To fine-tune any LLM models on your data, follow the [fine-tuning instructions](docs/FINETUNE.md). - To run h2oGPT tests: ```bash pip install requirements-parser pytest-instafail pytest-random-order playsound==1.3.0 pytest --instafail -s -v tests
https://github.com/h2oai/h2ogpt
0
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/h2oai/h2ogpt/main/README.md
[ [ "client", "test", "make", "-c", "client", "setup", "make", "-c", "client", "build", "pytest", "--", "instafail", "-s", "-v", "client/tests" ], [ "client test make -c client setup make -c client build pytest -- instafail -s -v client/tests" ] ]
[ [ "client", "test", "make", "-c", "client", "setup", "make", "-c", "client", "build", "pytest", "--", "instafail", "-s", "-v", "client/tests" ], [ "client test make -c client setup make -c client build pytest -- instafail -s -v client/tests" ] ]
for client tests make -C client setup make -C client build pytest --instafail -s -v client/tests
https://github.com/h2oai/h2ogpt
-1
[ "ai", "chatgpt", "embeddings", "generative", "gpt", "gpt4all", "llama2", "llm", "mixtral", "pdf", "private", "privategpt", "vectorstore" ]
https://raw.githubusercontent.com/ShishirPatil/gorilla/main/README.md
[ [ "gorilla", ":", "large", "language", "model", "connected", "massive", "apis", "[", "[", "project", "website", "]", "(", "http", ":", "//shishirpatil.github.io/gorilla/", ")", "]", "<", "img", "src=", "''", "http", ":", "//github.com/shishirpatil/gorilla/blob/gh-pages/assets/img/logo.png", "''", "width=50", "%", "height=50", "%", ">", "*", "*", ":", "fire", ":", "gorilla", "openfunctions", "*", "*", "drop-in", "alternative", "function", "calling", "!", "[", "release", "blog", "]", "(", "http", ":", "//gorilla.cs.berkeley.edu/blogs/4_open_functions.html", ")", "*", "*", "🟢", "gorilla", "apache", "2.0", "*", "*", "gorilla", "fine-tuned", "mpt", ",", "falcon", ",", "use", "gorilla", "commercially", "obligation", "!", ":", "golf", ":", "*", "*", ":", "rocket", ":", "try", "gorilla", "60", "*", "*", "[", "!", "[", "colab", "]", "(", "http", ":", "//colab.research.google.com/assets/colab-badge.svg", ")", "]", "(", "http", ":", "//colab.research.google.com/drive/1debpsccvlf_aunmd0fwpehfrtdc0qiup", "?", "usp=sharing", ")", ":", "computer", ":", "use", "[", "gorilla", "cli", "]", "(", "http", ":", "//github.com/gorilla-llm/gorilla-cli", ")", "`", "pip", "install", "gorilla-cli", "`", "*", "*", ":", "newspaper_roll", ":", "checkout", "paper", "!", "*", "*", "[", "!", "[", "arxiv", "]", "(", "http", ":", "//img.shields.io/badge/arxiv-2305.15334-", "<", "color", ">", ".svg", "?", "style=flat-square", ")", "]", "(", "http", ":", "//arxiv.org/abs/2305.15334", ")", "*", "*", ":", "wave", ":", "join", "discord", "!", "*", "*", "[", "!", "[", "discord", "]", "(", "http", ":", "//img.shields.io/discord/1111172801899012102", "?", "label=discord", "&", "logo=discord", "&", "logocolor=green", "&", "style=flat-square", ")", "]", "(", "http", ":", "//discord.gg/swtyutaxx3", ")", "`", "gorilla", "`", "enables", "llm", "use", "tool", "invoking", "apis", ".", "given", "natural", "language", "query", ",", "gorilla", "come", "semantically-", "syntactically-", "correct", "api", "invoke", ".", "gorilla", ",", "first", "demonstrate", "use", "llm", "invoke", "1,600+", "(", "growing", ")", "api", "call", "accurately", "reducing", "hallucination", ".", "also", "release", "apibench", ",", "largest", "collection", "apis", ",", "curated", "easy", "trained", "!", "join", "u", ",", "try", "expand", "largest", "api", "store", "teach", "llm", "write", "!", "hop", "discord", ",", "open", "pr", ",", "email", "u", "would", "like", "api", "incorporated", "well", "." ], [ "gorilla : large language model connected massive apis [ [ project website ] ( http : //shishirpatil.github.io/gorilla/ ) ] < img src= '' http : //github.com/shishirpatil/gorilla/blob/gh-pages/assets/img/logo.png '' width=50 % height=50 % > * * : fire : gorilla openfunctions * * drop-in alternative function calling !", "[ release blog ] ( http : //gorilla.cs.berkeley.edu/blogs/4_open_functions.html ) * * 🟢 gorilla apache 2.0 * * gorilla fine-tuned mpt , falcon , use gorilla commercially obligation !", ": golf : * * : rocket : try gorilla 60 * * [ !", "[ colab ] ( http : //colab.research.google.com/assets/colab-badge.svg ) ] ( http : //colab.research.google.com/drive/1debpsccvlf_aunmd0fwpehfrtdc0qiup ? usp=sharing ) : computer : use [ gorilla cli ] ( http : //github.com/gorilla-llm/gorilla-cli ) ` pip install gorilla-cli ` * * : newspaper_roll : checkout paper !", "* * [ !", "[ arxiv ] ( http : //img.shields.io/badge/arxiv-2305.15334- < color > .svg ? style=flat-square ) ] ( http : //arxiv.org/abs/2305.15334 ) * * : wave : join discord !", "* * [ !", "[ discord ] ( http : //img.shields.io/discord/1111172801899012102 ? label=discord & logo=discord & logocolor=green & style=flat-square ) ] ( http : //discord.gg/swtyutaxx3 ) ` gorilla ` enables llm use tool invoking apis .", "given natural language query , gorilla come semantically- syntactically- correct api invoke .", "gorilla , first demonstrate use llm invoke 1,600+ ( growing ) api call accurately reducing hallucination .", "also release apibench , largest collection apis , curated easy trained !", "join u , try expand largest api store teach llm write !", "hop discord , open pr , email u would like api incorporated well ." ] ]
[ [ "gorilla", ":", "large", "language", "model", "connected", "massive", "apis", "[", "[", "project", "website", "]", "(", "http", ":", "//shishirpatil.github.io/gorilla/", ")", "]", "<", "img", "src=", "''", "http", ":", "//github.com/shishirpatil/gorilla/blob/gh-pages/assets/img/logo.png", "''", "width=50", "%", "height=50", "%", ">", "*", "*", ":", "fire", ":", "gorilla", "openfunctions", "*", "*", "drop-in", "alternative", "function", "calling", "!", "[", "release", "blog", "]", "(", "http", ":", "//gorilla.cs.berkeley.edu/blogs/4_open_functions.html", ")", "*", "*", "🟢", "gorilla", "apache", "2.0", "*", "*", "gorilla", "fine-tuned", "mpt", ",", "falcon", ",", "use", "gorilla", "commercially", "obligation", "!", ":", "golf", ":", "*", "*", ":", "rocket", ":", "try", "gorilla", "60", "*", "*", "[", "!", "[", "colab", "]", "(", "http", ":", "//colab.research.google.com/assets/colab-badge.svg", ")", "]", "(", "http", ":", "//colab.research.google.com/drive/1debpsccvlf_aunmd0fwpehfrtdc0qiup", "?", "usp=sharing", ")", ":", "computer", ":", "use", "[", "gorilla", "cli", "]", "(", "http", ":", "//github.com/gorilla-llm/gorilla-cli", ")", "`", "pip", "install", "gorilla-cli", "`", "*", "*", ":", "newspaper_roll", ":", "checkout", "paper", "!", "*", "*", "[", "!", "[", "arxiv", "]", "(", "http", ":", "//img.shields.io/badge/arxiv-2305.15334-", "<", "color", ">", ".svg", "?", "style=flat-square", ")", "]", "(", "http", ":", "//arxiv.org/abs/2305.15334", ")", "*", "*", ":", "wave", ":", "join", "discord", "!", "*", "*", "[", "!", "[", "discord", "]", "(", "http", ":", "//img.shields.io/discord/1111172801899012102", "?", "label=discord", "&", "logo=discord", "&", "logocolor=green", "&", "style=flat-square", ")", "]", "(", "http", ":", "//discord.gg/swtyutaxx3", ")", "`", "gorilla", "`", "enables", "llm", "use", "tool", "invoking", "apis", ".", "given", "natural", "language", "query", ",", "gorilla", "come", "semantically-", "syntactically-", "correct", "api", "invoke", ".", "gorilla", ",", "first", "demonstrate", "use", "llm", "invoke", "1,600+", "(", "growing", ")", "api", "call", "accurately", "reducing", "hallucination", ".", "also", "release", "apibench", ",", "largest", "collection", "apis", ",", "curated", "easy", "trained", "!", "join", "u", ",", "try", "expand", "largest", "api", "store", "teach", "llm", "write", "!", "hop", "discord", ",", "open", "pr", ",", "email", "u", "would", "like", "api", "incorporated", "well", "." ], [ "gorilla : large language model connected massive apis [ [ project website ] ( http : //shishirpatil.github.io/gorilla/ ) ] < img src= '' http : //github.com/shishirpatil/gorilla/blob/gh-pages/assets/img/logo.png '' width=50 % height=50 % > * * : fire : gorilla openfunctions * * drop-in alternative function calling !", "[ release blog ] ( http : //gorilla.cs.berkeley.edu/blogs/4_open_functions.html ) * * 🟢 gorilla apache 2.0 * * gorilla fine-tuned mpt , falcon , use gorilla commercially obligation !", ": golf : * * : rocket : try gorilla 60 * * [ !", "[ colab ] ( http : //colab.research.google.com/assets/colab-badge.svg ) ] ( http : //colab.research.google.com/drive/1debpsccvlf_aunmd0fwpehfrtdc0qiup ? usp=sharing ) : computer : use [ gorilla cli ] ( http : //github.com/gorilla-llm/gorilla-cli ) ` pip install gorilla-cli ` * * : newspaper_roll : checkout paper !", "* * [ !", "[ arxiv ] ( http : //img.shields.io/badge/arxiv-2305.15334- < color > .svg ? style=flat-square ) ] ( http : //arxiv.org/abs/2305.15334 ) * * : wave : join discord !", "* * [ !", "[ discord ] ( http : //img.shields.io/discord/1111172801899012102 ? label=discord & logo=discord & logocolor=green & style=flat-square ) ] ( http : //discord.gg/swtyutaxx3 ) ` gorilla ` enables llm use tool invoking apis .", "given natural language query , gorilla come semantically- syntactically- correct api invoke .", "gorilla , first demonstrate use llm invoke 1,600+ ( growing ) api call accurately reducing hallucination .", "also release apibench , largest collection apis , curated easy trained !", "join u , try expand largest api store teach llm write !", "hop discord , open pr , email u would like api incorporated well ." ] ]
Gorilla: Large Language Model Connected with Massive APIs [[Project Website](https://shishirpatil.github.io/gorilla/)] <img src="https://github.com/ShishirPatil/gorilla/blob/gh-pages/assets/img/logo.png" width=50% height=50%> **:fire: Gorilla OpenFunctions** is a drop-in alternative for function calling! [Release Blog](https://gorilla.cs.berkeley.edu/blogs/4_open_functions.html) **🟢 Gorilla is Apache 2.0** With Gorilla being fine-tuned on MPT, and Falcon, you can use Gorilla commercially with no obligations! :golf: **:rocket: Try Gorilla in 60s** [![Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1DEBPsccVLF_aUnmD0FwPeHFrtdC0QIUP?usp=sharing) :computer: Use [Gorilla in your CLI](https://github.com/gorilla-llm/gorilla-cli) with `pip install gorilla-cli` **:newspaper_roll: Checkout our paper!** [![arXiv](https://img.shields.io/badge/arXiv-2305.15334-<COLOR>.svg?style=flat-square)](https://arxiv.org/abs/2305.15334) **:wave: Join our Discord!** [![Discord](https://img.shields.io/discord/1111172801899012102?label=Discord&logo=discord&logoColor=green&style=flat-square)](https://discord.gg/SwTyuTAxX3) `Gorilla` enables LLMs to use tools by invoking APIs. Given a natural language query, Gorilla comes up with the semantically- and syntactically- correct API to invoke. With Gorilla, we are the first to demonstrate how to use LLMs to invoke 1,600+ (and growing) API calls accurately while reducing hallucination. We also release APIBench, the largest collection of APIs, curated and easy to be trained on! Join us, as we try to expand the largest API store and teach LLMs how to write them! Hop on our Discord, or open a PR, or email us if you would like to have your API incorporated as well.
https://github.com/ShishirPatil/gorilla
0
[ "api", "api-documentation", "chatgpt", "claude-api", "gpt-4-api", "llm", "openai-api", "openai-functions" ]
https://raw.githubusercontent.com/ShishirPatil/gorilla/main/README.md
[ [ "repository", "organization", "repository", "organization", "shown", ".", "-", "`", "data", "`", "folder", "contains", "evaluation", "apis", "`", "(", "apibench", ")", "`", "community", "contributed", "apis", ".", "-", "`", "eval", "`", "folder", "contains", "evaluation", "code", "well", "gorilla", "output", ".", "-", "`", "inference", "`", "folder", "contains", "inference", "code", "running", "gorilla", "locally", ".", "-", "<", "span", "style=", "''", "color", ":", "hr", "''", ">", "[", "coming", "soon", "!", "]", "<", "/span", ">", "`", "train", "`", "folder", "contains", "training", "code", "associated", "gorilla", "finetuning", ".", "dataset", "collection", ",", "1640", "api", "documentation", "`", "data/api", "`", ".", "also", "include", "`", "apibench", "`", "dataset", "created", "self-instruct", "`", "data/apibench", "`", ".", "evaluation", ",", "convert", "llm-friendly", "chat", "format", ",", "question", "`", "eval/eval-data/questions", "`", ",", "corresponding", "response", "`", "eval/eval-data/responses", "`", ".", "also", "included", "evaluation", "script", "`", "eval/eval-scripts", "`", ".", "would", "entirely", "sufficient", "train", "gorilla", ",", "reproduce", "result", ".", "please", "see", "[", "evaluation", "]", "(", "http", ":", "//github.com/shishirpatil/gorilla/tree/main/eval", ")", "detail", "use", "evaluation", "pipeline", ".", "additionally", ",", "released", "model", "weight", ".", "`", "gorilla-7b-hf-v0", "`", "let", "invoke", "925", "hugging", "face", "apis", ".", "similarly", ",", "`", "gorilla-7b-tf-v0", "`", "`", "gorilla-7b-th-v0", "`", "626", "(", "exhaustive", ")", "tensorflow", "v2", ",", "94", "(", "exhaustive", ")", "torch", "hub", "apis", ".", "`", "gorilla-mpt-7b-hf-v0", "`", "`", "gorilla-falcon-7b-hf-v0", "`", "apache", "2.0", "licensed", "model", "(", "commercially", "usable", ")", "fine-tuned", "mpt-7b", "falcon-7b", "respectively", ".", "release", "model", "three", "combined", "generic", "chat", "capability", "community", "contributed", "apis", "soon", "scale", "serving", "infrastructure", ".", "run", "gorilla", "locally", "instruction", "`", "inference/", "`", "sub-directory", ",", "also", "provide", "hosted", "gorilla", "chat", "completion", "api", "(", "see", "colab", ")", "!", "suggestion", ",", "run", "issue", "please", "feel", "free", "reach", "u", "either", "discord", "email", "raise", "github", "issue", ".", "``", "`", "gorilla", "├──", "data", "│", "├──", "api", "(", "tf/hf/th", "apis", "used", "generating", "apibench", ")", "│", "│", "├──", "{", "api_name", "}", "_api.jsonl", "│", "├──", "apibench", "(", "evaluating", "llm", "model", ")", "v-1.0", "│", "│", "├──", "{", "api_name", "}", "_train.jsonl", ",", "{", "api_name", "}", "_eval.jsonl", "|", "|──", "apizoo", "(", "contributed", "community", "-", "evolving", ")", "│", "|", "├──", "username1.json", "│", "│", "├──", "username2.json", "│", "│", "├──", "...", "├──", "eval", "│", "├──", "readme.md", "│", "├──", "get_llm_responses.py", "│", "├──", "eval-scripts", "│", "│", "├──", "ast_eval_", "{", "api_name", "}", ".py", "│", "├──", "eval-data", "│", "│", "├──", "question", "│", "│", "│", "├──", "api", "name", "│", "│", "│", "│", "├──", "questions_", "{", "api_name", "}", "_", "{", "eval_metric", "}", ".jsonl", "│", "│", "├──", "response", "│", "│", "│", "├──", "api", "name", "│", "│", "│", "│", "├──", "responses_", "{", "api_name", "}", "_gorilla_ft_", "{", "eval_metric", "}", ".jsonl", "│", "│", "│", "│", "├──", "responses_", "{", "api_name", "}", "_gorilla_rt_", "{", "eval_metric", "}", ".jsonl", "├──", "inference", "│", "├──", "readme.md", "│", "├──", "serve", "│", "│", "├──", "gorilla_cli.py", "│", "│", "├──", "conv_template.py", "├──", "train", "(", "coming", "soon", "!", ")", "``", "`" ], [ "repository organization repository organization shown .", "- ` data ` folder contains evaluation apis ` ( apibench ) ` community contributed apis .", "- ` eval ` folder contains evaluation code well gorilla output .", "- ` inference ` folder contains inference code running gorilla locally .", "- < span style= '' color : hr '' > [ coming soon !", "] < /span > ` train ` folder contains training code associated gorilla finetuning .", "dataset collection , 1640 api documentation ` data/api ` .", "also include ` apibench ` dataset created self-instruct ` data/apibench ` .", "evaluation , convert llm-friendly chat format , question ` eval/eval-data/questions ` , corresponding response ` eval/eval-data/responses ` .", "also included evaluation script ` eval/eval-scripts ` .", "would entirely sufficient train gorilla , reproduce result .", "please see [ evaluation ] ( http : //github.com/shishirpatil/gorilla/tree/main/eval ) detail use evaluation pipeline .", "additionally , released model weight .", "` gorilla-7b-hf-v0 ` let invoke 925 hugging face apis .", "similarly , ` gorilla-7b-tf-v0 ` ` gorilla-7b-th-v0 ` 626 ( exhaustive ) tensorflow v2 , 94 ( exhaustive ) torch hub apis .", "` gorilla-mpt-7b-hf-v0 ` ` gorilla-falcon-7b-hf-v0 ` apache 2.0 licensed model ( commercially usable ) fine-tuned mpt-7b falcon-7b respectively .", "release model three combined generic chat capability community contributed apis soon scale serving infrastructure .", "run gorilla locally instruction ` inference/ ` sub-directory , also provide hosted gorilla chat completion api ( see colab ) !", "suggestion , run issue please feel free reach u either discord email raise github issue .", "`` ` gorilla ├── data │ ├── api ( tf/hf/th apis used generating apibench ) │ │ ├── { api_name } _api.jsonl │ ├── apibench ( evaluating llm model ) v-1.0 │ │ ├── { api_name } _train.jsonl , { api_name } _eval.jsonl | |── apizoo ( contributed community - evolving ) │ | ├── username1.json │ │ ├── username2.json │ │ ├── ... ├── eval │ ├── readme.md │ ├── get_llm_responses.py │ ├── eval-scripts │ │ ├── ast_eval_ { api_name } .py │ ├── eval-data │ │ ├── question │ │ │ ├── api name │ │ │ │ ├── questions_ { api_name } _ { eval_metric } .jsonl │ │ ├── response │ │ │ ├── api name │ │ │ │ ├── responses_ { api_name } _gorilla_ft_ { eval_metric } .jsonl │ │ │ │ ├── responses_ { api_name } _gorilla_rt_ { eval_metric } .jsonl ├── inference │ ├── readme.md │ ├── serve │ │ ├── gorilla_cli.py │ │ ├── conv_template.py ├── train ( coming soon ! )", "`` `" ] ]
[ [ "repository", "organization", "repository", "organization", "shown", ".", "-", "`", "data", "`", "folder", "contains", "evaluation", "apis", "`", "(", "apibench", ")", "`", "community", "contributed", "apis", ".", "-", "`", "eval", "`", "folder", "contains", "evaluation", "code", "well", "gorilla", "output", ".", "-", "`", "inference", "`", "folder", "contains", "inference", "code", "running", "gorilla", "locally", ".", "-", "<", "span", "style=", "''", "color", ":", "hr", "''", ">", "[", "coming", "soon", "!", "]", "<", "/span", ">", "`", "train", "`", "folder", "contains", "training", "code", "associated", "gorilla", "finetuning", ".", "dataset", "collection", ",", "1640", "api", "documentation", "`", "data/api", "`", ".", "also", "include", "`", "apibench", "`", "dataset", "created", "self-instruct", "`", "data/apibench", "`", ".", "evaluation", ",", "convert", "llm-friendly", "chat", "format", ",", "question", "`", "eval/eval-data/questions", "`", ",", "corresponding", "response", "`", "eval/eval-data/responses", "`", ".", "also", "included", "evaluation", "script", "`", "eval/eval-scripts", "`", ".", "would", "entirely", "sufficient", "train", "gorilla", ",", "reproduce", "result", ".", "please", "see", "[", "evaluation", "]", "(", "http", ":", "//github.com/shishirpatil/gorilla/tree/main/eval", ")", "detail", "use", "evaluation", "pipeline", ".", "additionally", ",", "released", "model", "weight", ".", "`", "gorilla-7b-hf-v0", "`", "let", "invoke", "925", "hugging", "face", "apis", ".", "similarly", ",", "`", "gorilla-7b-tf-v0", "`", "`", "gorilla-7b-th-v0", "`", "626", "(", "exhaustive", ")", "tensorflow", "v2", ",", "94", "(", "exhaustive", ")", "torch", "hub", "apis", ".", "`", "gorilla-mpt-7b-hf-v0", "`", "`", "gorilla-falcon-7b-hf-v0", "`", "apache", "2.0", "licensed", "model", "(", "commercially", "usable", ")", "fine-tuned", "mpt-7b", "falcon-7b", "respectively", ".", "release", "model", "three", "combined", "generic", "chat", "capability", "community", "contributed", "apis", "soon", "scale", "serving", "infrastructure", ".", "run", "gorilla", "locally", "instruction", "`", "inference/", "`", "sub-directory", ",", "also", "provide", "hosted", "gorilla", "chat", "completion", "api", "(", "see", "colab", ")", "!", "suggestion", ",", "run", "issue", "please", "feel", "free", "reach", "u", "either", "discord", "email", "raise", "github", "issue", ".", "``", "`", "gorilla", "├──", "data", "│", "├──", "api", "(", "tf/hf/th", "apis", "used", "generating", "apibench", ")", "│", "│", "├──", "{", "api_name", "}", "_api.jsonl", "│", "├──", "apibench", "(", "evaluating", "llm", "model", ")", "v-1.0", "│", "│", "├──", "{", "api_name", "}", "_train.jsonl", ",", "{", "api_name", "}", "_eval.jsonl", "|", "|──", "apizoo", "(", "contributed", "community", "-", "evolving", ")", "│", "|", "├──", "username1.json", "│", "│", "├──", "username2.json", "│", "│", "├──", "...", "├──", "eval", "│", "├──", "readme.md", "│", "├──", "get_llm_responses.py", "│", "├──", "eval-scripts", "│", "│", "├──", "ast_eval_", "{", "api_name", "}", ".py", "│", "├──", "eval-data", "│", "│", "├──", "question", "│", "│", "│", "├──", "api", "name", "│", "│", "│", "│", "├──", "questions_", "{", "api_name", "}", "_", "{", "eval_metric", "}", ".jsonl", "│", "│", "├──", "response", "│", "│", "│", "├──", "api", "name", "│", "│", "│", "│", "├──", "responses_", "{", "api_name", "}", "_gorilla_ft_", "{", "eval_metric", "}", ".jsonl", "│", "│", "│", "│", "├──", "responses_", "{", "api_name", "}", "_gorilla_rt_", "{", "eval_metric", "}", ".jsonl", "├──", "inference", "│", "├──", "readme.md", "│", "├──", "serve", "│", "│", "├──", "gorilla_cli.py", "│", "│", "├──", "conv_template.py", "├──", "train", "(", "coming", "soon", "!", ")", "``", "`" ], [ "repository organization repository organization shown .", "- ` data ` folder contains evaluation apis ` ( apibench ) ` community contributed apis .", "- ` eval ` folder contains evaluation code well gorilla output .", "- ` inference ` folder contains inference code running gorilla locally .", "- < span style= '' color : hr '' > [ coming soon !", "] < /span > ` train ` folder contains training code associated gorilla finetuning .", "dataset collection , 1640 api documentation ` data/api ` .", "also include ` apibench ` dataset created self-instruct ` data/apibench ` .", "evaluation , convert llm-friendly chat format , question ` eval/eval-data/questions ` , corresponding response ` eval/eval-data/responses ` .", "also included evaluation script ` eval/eval-scripts ` .", "would entirely sufficient train gorilla , reproduce result .", "please see [ evaluation ] ( http : //github.com/shishirpatil/gorilla/tree/main/eval ) detail use evaluation pipeline .", "additionally , released model weight .", "` gorilla-7b-hf-v0 ` let invoke 925 hugging face apis .", "similarly , ` gorilla-7b-tf-v0 ` ` gorilla-7b-th-v0 ` 626 ( exhaustive ) tensorflow v2 , 94 ( exhaustive ) torch hub apis .", "` gorilla-mpt-7b-hf-v0 ` ` gorilla-falcon-7b-hf-v0 ` apache 2.0 licensed model ( commercially usable ) fine-tuned mpt-7b falcon-7b respectively .", "release model three combined generic chat capability community contributed apis soon scale serving infrastructure .", "run gorilla locally instruction ` inference/ ` sub-directory , also provide hosted gorilla chat completion api ( see colab ) !", "suggestion , run issue please feel free reach u either discord email raise github issue .", "`` ` gorilla ├── data │ ├── api ( tf/hf/th apis used generating apibench ) │ │ ├── { api_name } _api.jsonl │ ├── apibench ( evaluating llm model ) v-1.0 │ │ ├── { api_name } _train.jsonl , { api_name } _eval.jsonl | |── apizoo ( contributed community - evolving ) │ | ├── username1.json │ │ ├── username2.json │ │ ├── ... ├── eval │ ├── readme.md │ ├── get_llm_responses.py │ ├── eval-scripts │ │ ├── ast_eval_ { api_name } .py │ ├── eval-data │ │ ├── question │ │ │ ├── api name │ │ │ │ ├── questions_ { api_name } _ { eval_metric } .jsonl │ │ ├── response │ │ │ ├── api name │ │ │ │ ├── responses_ { api_name } _gorilla_ft_ { eval_metric } .jsonl │ │ │ │ ├── responses_ { api_name } _gorilla_rt_ { eval_metric } .jsonl ├── inference │ ├── readme.md │ ├── serve │ │ ├── gorilla_cli.py │ │ ├── conv_template.py ├── train ( coming soon ! )", "`` `" ] ]
Repository Organization Our repository organization is shown below. - The `data` folder contains all the evaluation APIs `(APIBench)` and the community contributed APIs. - The `eval` folder contains all our evaluation code as well as the Gorilla outputs. - The `inference` folder contains all the inference code for running Gorilla locally. - <span style="color:hr">[Coming Soon!]</span> The `train` folder contains all the training code associated with Gorilla finetuning. For our dataset collections, all the 1640 API documentation is in `data/api`. We also include the `APIBench` dataset created by self-instruct in `data/apibench`. For evaluation, we convert this into a LLM-friendly chat format, and the questions are in `eval/eval-data/questions`, and the corresponding responses are in `eval/eval-data/responses`. We have also included the evaluation scripts are in `eval/eval-scripts`. This would be entirely sufficient to train Gorilla yourself, and reproduce our results. Please see [evaluation](https://github.com/ShishirPatil/gorilla/tree/main/eval) for the details on how to use our evaluation pipeline. Additionally, we have released all the model weights. `gorilla-7b-hf-v0` lets you invoke over 925 Hugging Face APIs. Similarly, `gorilla-7b-tf-v0` and `gorilla-7b-th-v0` have 626 (exhaustive) Tensorflow v2, and 94 (exhaustive) Torch Hub APIs. `gorilla-mpt-7b-hf-v0` and `gorilla-falcon-7b-hf-v0` are Apache 2.0 licensed models (commercially usable) fine-tuned on MPT-7B and Falcon-7B respectively. We will release a model with all three combined with generic chat capability and community contributed APIs as soon as we can scale our serving infrastructure. You can run Gorilla locally from instructions in the `inference/` sub-directory, or we also provide a hosted Gorilla chat completion API (see Colab)! If you have any suggestions, or if you run into any issues please feel free to reach out to us either through Discord or email or raise a Github issue. ``` gorilla ├── data │ ├── api (TF/HF/TH APIs used in generating apibench) │ │ ├── {api_name}_api.jsonl │ ├── apibench (Evaluating LLM models) v-1.0 │ │ ├── {api_name}_train.jsonl, {api_name}_eval.jsonl | |── apizoo (Contributed by the community - evolving) │ | ├── username1.json │ │ ├── username2.json │ │ ├── ... ├── eval │ ├── README.md │ ├── get_llm_responses.py │ ├── eval-scripts │ │ ├── ast_eval_{api_name}.py │ ├── eval-data │ │ ├── questions │ │ │ ├── API name │ │ │ │ ├── questions_{api_name}_{eval_metric}.jsonl │ │ ├── responses │ │ │ ├── API name │ │ │ │ ├── responses_{api_name}_Gorilla_FT_{eval_metric}.jsonl │ │ │ │ ├── responses_{api_name}_Gorilla_RT_{eval_metric}.jsonl ├── inference │ ├── README.md │ ├── serve │ │ ├── gorilla_cli.py │ │ ├── conv_template.py ├── train (Coming Soon!) ```
https://github.com/ShishirPatil/gorilla
-1
[ "api", "api-documentation", "chatgpt", "claude-api", "gpt-4-api", "llm", "openai-api", "openai-functions" ]
https://raw.githubusercontent.com/ShishirPatil/gorilla/main/README.md
[ [ "contributing", "api", "aim", "build", "open-source", ",", "one-stop-shop", "apis", ",", "llm", "interact", "!", "suggestion", "contribution", "welcome", "!", "please", "see", "detail", "[", "contribute", "]", "(", "http", ":", "//github.com/shishirpatil/gorilla/tree/main/data/readme.md", ")", ".", "always", "remain", "open", "source", "." ], [ "contributing api aim build open-source , one-stop-shop apis , llm interact !", "suggestion contribution welcome !", "please see detail [ contribute ] ( http : //github.com/shishirpatil/gorilla/tree/main/data/readme.md ) .", "always remain open source ." ] ]
[ [ "contributing", "api", "aim", "build", "open-source", ",", "one-stop-shop", "apis", ",", "llm", "interact", "!", "suggestion", "contribution", "welcome", "!", "please", "see", "detail", "[", "contribute", "]", "(", "http", ":", "//github.com/shishirpatil/gorilla/tree/main/data/readme.md", ")", ".", "always", "remain", "open", "source", "." ], [ "contributing api aim build open-source , one-stop-shop apis , llm interact !", "suggestion contribution welcome !", "please see detail [ contribute ] ( http : //github.com/shishirpatil/gorilla/tree/main/data/readme.md ) .", "always remain open source ." ] ]
Contributing Your API We aim to build an open-source, one-stop-shop for all APIs, LLMs can interact with! Any suggestions and contributions are welcome! Please see the details on [how to contribute](https://github.com/ShishirPatil/gorilla/tree/main/data/README.md). THIS WILL ALWAYS REMAIN OPEN SOURCE.
https://github.com/ShishirPatil/gorilla
-1
[ "api", "api-documentation", "chatgpt", "claude-api", "gpt-4-api", "llm", "openai-api", "openai-functions" ]
https://raw.githubusercontent.com/ShishirPatil/gorilla/main/README.md
[ [ "faq", "(", ")", "1", ".", "would", "like", "use", "gorilla", "commercially", ".", "going", "apache", "2.0", "licensed", "version", "?", "yes", "!", "model", "use", "commercially", "without", "obligation", ".", "2", ".", "use", "gorilla", "langchain", ",", "toolformer", ",", "autogpt", "etc", "?", "absolutely", "!", "'ve", "highlighted", "great", "aspect", "tool", ".", "gorilla", "end-to-end", "model", ",", "specifically", "tailored", "serve", "correct", "api", "call", "without", "requiring", "additional", "coding", ".", "'s", "designed", "work", "part", "wider", "ecosystem", "flexibly", "integrated", "tool", ".", "langchain", ",", "versatile", "developer", "tool", ".", "``", "agent", "''", "efficiently", "swap", "llm", ",", "gorilla", "included", ",", "making", "highly", "adaptable", "solution", "various", "need", ".", "autogpt", ",", "hand", ",", "concentrate", "art", "prompting", "gpt", "series", "model", ".", "'s", "worth", "noting", "gorilla", ",", "fully", "fine-tuned", "model", ",", "consistently", "show", "remarkable", "accuracy", ",", "lower", "hallucination", ",", "outperforming", "gpt-4", "making", "specific", "api", "call", ".", ",", "come", "toolformer", ",", "toolformer", "zero", "select", "set", "tool", ",", "providing", "specialized", "functionality", ".", "gorilla", ",", "contrast", ",", "capacity", "manage", "thousand", "api", "call", ",", "offering", "broader", "coverage", "extensive", "range", "tool", ".", "beauty", "tool", "truly", "shine", "collaborate", ",", "complementing", "'s", "strength", "capability", "create", "even", "powerful", "comprehensive", "solution", ".", "contribution", "make", "difference", ".", "enthusiastically", "welcome", "input", "refine", "enhance", "tool", ".", "3", ".", "train", "gorilla", "model", "?", "release", "training", "code", "soon", "get", "gpus", "test", "finalize", "pipeline", ".", "given", "demand", "hosted", "end-points", ",", "dedicated", "gpus", "serve", "model", ".", "would", "like", "help", "resource", "get", "touch", "!" ], [ "faq ( ) 1 .", "would like use gorilla commercially .", "going apache 2.0 licensed version ?", "yes !", "model use commercially without obligation .", "2 .", "use gorilla langchain , toolformer , autogpt etc ?", "absolutely !", "'ve highlighted great aspect tool .", "gorilla end-to-end model , specifically tailored serve correct api call without requiring additional coding .", "'s designed work part wider ecosystem flexibly integrated tool .", "langchain , versatile developer tool .", "`` agent '' efficiently swap llm , gorilla included , making highly adaptable solution various need .", "autogpt , hand , concentrate art prompting gpt series model .", "'s worth noting gorilla , fully fine-tuned model , consistently show remarkable accuracy , lower hallucination , outperforming gpt-4 making specific api call .", ", come toolformer , toolformer zero select set tool , providing specialized functionality .", "gorilla , contrast , capacity manage thousand api call , offering broader coverage extensive range tool .", "beauty tool truly shine collaborate , complementing 's strength capability create even powerful comprehensive solution .", "contribution make difference .", "enthusiastically welcome input refine enhance tool .", "3 .", "train gorilla model ?", "release training code soon get gpus test finalize pipeline .", "given demand hosted end-points , dedicated gpus serve model .", "would like help resource get touch !" ] ]
[ [ "faq", "(", ")", "1", ".", "would", "like", "use", "gorilla", "commercially", ".", "going", "apache", "2.0", "licensed", "version", "?", "yes", "!", "model", "use", "commercially", "without", "obligation", ".", "2", ".", "use", "gorilla", "langchain", ",", "toolformer", ",", "autogpt", "etc", "?", "absolutely", "!", "'ve", "highlighted", "great", "aspect", "tool", ".", "gorilla", "end-to-end", "model", ",", "specifically", "tailored", "serve", "correct", "api", "call", "without", "requiring", "additional", "coding", ".", "'s", "designed", "work", "part", "wider", "ecosystem", "flexibly", "integrated", "tool", ".", "langchain", ",", "versatile", "developer", "tool", ".", "``", "agent", "''", "efficiently", "swap", "llm", ",", "gorilla", "included", ",", "making", "highly", "adaptable", "solution", "various", "need", ".", "autogpt", ",", "hand", ",", "concentrate", "art", "prompting", "gpt", "series", "model", ".", "'s", "worth", "noting", "gorilla", ",", "fully", "fine-tuned", "model", ",", "consistently", "show", "remarkable", "accuracy", ",", "lower", "hallucination", ",", "outperforming", "gpt-4", "making", "specific", "api", "call", ".", ",", "come", "toolformer", ",", "toolformer", "zero", "select", "set", "tool", ",", "providing", "specialized", "functionality", ".", "gorilla", ",", "contrast", ",", "capacity", "manage", "thousand", "api", "call", ",", "offering", "broader", "coverage", "extensive", "range", "tool", ".", "beauty", "tool", "truly", "shine", "collaborate", ",", "complementing", "'s", "strength", "capability", "create", "even", "powerful", "comprehensive", "solution", ".", "contribution", "make", "difference", ".", "enthusiastically", "welcome", "input", "refine", "enhance", "tool", ".", "3", ".", "train", "gorilla", "model", "?", "release", "training", "code", "soon", "get", "gpus", "test", "finalize", "pipeline", ".", "given", "demand", "hosted", "end-points", ",", "dedicated", "gpus", "serve", "model", ".", "would", "like", "help", "resource", "get", "touch", "!" ], [ "faq ( ) 1 .", "would like use gorilla commercially .", "going apache 2.0 licensed version ?", "yes !", "model use commercially without obligation .", "2 .", "use gorilla langchain , toolformer , autogpt etc ?", "absolutely !", "'ve highlighted great aspect tool .", "gorilla end-to-end model , specifically tailored serve correct api call without requiring additional coding .", "'s designed work part wider ecosystem flexibly integrated tool .", "langchain , versatile developer tool .", "`` agent '' efficiently swap llm , gorilla included , making highly adaptable solution various need .", "autogpt , hand , concentrate art prompting gpt series model .", "'s worth noting gorilla , fully fine-tuned model , consistently show remarkable accuracy , lower hallucination , outperforming gpt-4 making specific api call .", ", come toolformer , toolformer zero select set tool , providing specialized functionality .", "gorilla , contrast , capacity manage thousand api call , offering broader coverage extensive range tool .", "beauty tool truly shine collaborate , complementing 's strength capability create even powerful comprehensive solution .", "contribution make difference .", "enthusiastically welcome input refine enhance tool .", "3 .", "train gorilla model ?", "release training code soon get gpus test finalize pipeline .", "given demand hosted end-points , dedicated gpus serve model .", "would like help resource get touch !" ] ]
FAQ(s) 1. I would like to use Gorilla commercially. Is there going to be a Apache 2.0 licensed version? Yes! We now have models that you can use commercially without any obligations. 2. Can we use Gorilla with Langchain, Toolformer, AutoGPT etc? Absolutely! You've highlighted a great aspect of our tools. Gorilla is an end-to-end model, specifically tailored to serve correct API calls without requiring any additional coding. It's designed to work as part of a wider ecosystem and can be flexibly integrated with other tools. Langchain, is a versatile developer tool. Its "agents" can efficiently swap in any LLM, Gorilla included, making it a highly adaptable solution for various needs. AutoGPT, on the other hand, concentrates on the art of prompting GPT series models. It's worth noting that Gorilla, as a fully fine-tuned model, consistently shows remarkable accuracy, and lowers hallucination, outperforming GPT-4 in making specific API calls. Now, when it comes to ToolFormer, Toolformer zeroes in on a select set of tools, providing specialized functionalities. Gorilla, in contrast, has the capacity to manage thousands of API calls, offering a broader coverage over a more extensive range of tools. The beauty of these tools truly shines when they collaborate, complementing each other's strengths and capabilities to create an even more powerful and comprehensive solution. This is where your contribution can make a difference. We enthusiastically welcome any inputs to further refine and enhance these tools. 3. How to train your own Gorilla models? We will release the training code as soon as we can get GPUs to test and finalize the pipeline. Given the demand for our hosted end-points, we have dedicated all of our GPUs to serve the models. If you would like to help with resources get in touch!
https://github.com/ShishirPatil/gorilla
-1
[ "api", "api-documentation", "chatgpt", "claude-api", "gpt-4-api", "llm", "openai-api", "openai-functions" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "quickstart", ",", "provide", "simple", "example", "show", "use", "qwen-chat", "🤖", "modelscope", "🤗", "transformer", ".", "use", "pre-built", "docker", "image", "skip", "environment", "setup", "step", ",", "see", "section", "[", "``", "using", "pre-built", "docker", "image", "''", "]", "(", "#", "-docker", ")", "detail", ".", "using", "docker", ",", "please", "make", "sure", "setup", "environment", "installed", "required", "package", ".", "make", "sure", "meet", "requirement", ",", "install", "dependent", "library", ".", "``", "`", "bash", "pip", "install", "-r", "requirements.txt", "``", "`", "device", "support", "fp16", "bf16", ",", "recommend", "installing", "[", "flash-attention", "]", "(", "http", ":", "//github.com/dao-ailab/flash-attention", ")", "(", "*", "*", "support", "flash", "attention", "2", ".", "*", "*", ")", "higher", "efficiency", "lower", "memory", "usage", ".", "(", "*", "*", "flash-attention", "optional", "project", "run", "normally", "without", "installing", "*", "*", ")", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/dao-ailab/flash-attention", "cd", "flash-attention", "&", "&", "pip", "install", "." ], [ "quickstart , provide simple example show use qwen-chat 🤖 modelscope 🤗 transformer .", "use pre-built docker image skip environment setup step , see section [ `` using pre-built docker image '' ] ( # -docker ) detail .", "using docker , please make sure setup environment installed required package .", "make sure meet requirement , install dependent library .", "`` ` bash pip install -r requirements.txt `` ` device support fp16 bf16 , recommend installing [ flash-attention ] ( http : //github.com/dao-ailab/flash-attention ) ( * * support flash attention 2 .", "* * ) higher efficiency lower memory usage .", "( * * flash-attention optional project run normally without installing * * ) `` ` bash git clone http : //github.com/dao-ailab/flash-attention cd flash-attention & & pip install ." ] ]
[ [ "quickstart", ",", "provide", "simple", "example", "show", "use", "qwen-chat", "🤖", "modelscope", "🤗", "transformer", ".", "use", "pre-built", "docker", "image", "skip", "environment", "setup", "step", ",", "see", "section", "[", "``", "using", "pre-built", "docker", "image", "''", "]", "(", "#", "-docker", ")", "detail", ".", "using", "docker", ",", "please", "make", "sure", "setup", "environment", "installed", "required", "package", ".", "make", "sure", "meet", "requirement", ",", "install", "dependent", "library", ".", "``", "`", "bash", "pip", "install", "-r", "requirements.txt", "``", "`", "device", "support", "fp16", "bf16", ",", "recommend", "installing", "[", "flash-attention", "]", "(", "http", ":", "//github.com/dao-ailab/flash-attention", ")", "(", "*", "*", "support", "flash", "attention", "2", ".", "*", "*", ")", "higher", "efficiency", "lower", "memory", "usage", ".", "(", "*", "*", "flash-attention", "optional", "project", "run", "normally", "without", "installing", "*", "*", ")", "``", "`", "bash", "git", "clone", "http", ":", "//github.com/dao-ailab/flash-attention", "cd", "flash-attention", "&", "&", "pip", "install", "." ], [ "quickstart , provide simple example show use qwen-chat 🤖 modelscope 🤗 transformer .", "use pre-built docker image skip environment setup step , see section [ `` using pre-built docker image '' ] ( # -docker ) detail .", "using docker , please make sure setup environment installed required package .", "make sure meet requirement , install dependent library .", "`` ` bash pip install -r requirements.txt `` ` device support fp16 bf16 , recommend installing [ flash-attention ] ( http : //github.com/dao-ailab/flash-attention ) ( * * support flash attention 2 .", "* * ) higher efficiency lower memory usage .", "( * * flash-attention optional project run normally without installing * * ) `` ` bash git clone http : //github.com/dao-ailab/flash-attention cd flash-attention & & pip install ." ] ]
Quickstart Below, we provide simple examples to show how to use Qwen-Chat with 🤖 ModelScope and 🤗 Transformers. You can use our pre-built docker images to skip most of the environment setup steps, see Section ["Using Pre-built Docker Images"](#-docker) for more details. If not using docker, please make sure you have setup the environment and installed the required packages. Make sure you meet the above requirements, and then install the dependent libraries. ```bash pip install -r requirements.txt ``` If your device supports fp16 or bf16, we recommend installing [flash-attention](https://github.com/Dao-AILab/flash-attention) (**we support flash attention 2 now.**) for higher efficiency and lower memory usage. (**flash-attention is optional and the project can run normally without installing it**) ```bash git clone https://github.com/Dao-AILab/flash-attention cd flash-attention && pip install .
https://github.com/QwenLM/Qwen
0
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "optional", ".", "installing", "might", "slow", "." ], [ "optional .", "installing might slow ." ] ]
[ [ "optional", ".", "installing", "might", "slow", "." ], [ "optional .", "installing might slow ." ] ]
Below are optional. Installing them might be slow.
https://github.com/QwenLM/Qwen
-1
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "pip", "install", "csrc/layer_norm" ], [ "pip install csrc/layer_norm" ] ]
[ [ "pip", "install", "csrc/layer_norm" ], [ "pip install csrc/layer_norm" ] ]
pip install csrc/layer_norm
https://github.com/QwenLM/Qwen
0
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "pip", "install", "csrc/rotary", "``", "`", "start", "modelscope", "transformer", "." ], [ "pip install csrc/rotary `` ` start modelscope transformer ." ] ]
[ [ "pip", "install", "csrc/rotary", "``", "`", "start", "modelscope", "transformer", "." ], [ "pip install csrc/rotary `` ` start modelscope transformer ." ] ]
pip install csrc/rotary ``` Now you can start with ModelScope or Transformers.
https://github.com/QwenLM/Qwen
0
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "dashscope", "simple", "way", "use", "qwen", "apis", "dashscope", "api", "service", "alibaba", "cloud", ".", "give", "introduction", "usage", ".", "additionally", ",", "provide", "script", "deploy", "openai-style", "api", "server", ".", "dashscope", "large", "language", "model", "api", "service", "provided", "alibaba", "cloud", ",", "support", "qwen", ".", "note", "model", "behind", "dashscope", "in-house", "version", "temporarily", "without", "detail", "provided", ".", "service", "include", "`", "qwen-turbo", "`", "`", "qwen-plus", "`", ",", "former", "one", "run", "faster", "latter", "achieves", "better", "performance", ".", "information", ",", "visit", "documentation", "[", "]", "(", "http", ":", "//dashscope.aliyun.com", ")", ".", "please", "head", "official", "website", "[", "link", "]", "(", "http", ":", "//help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key", "?", "spm=a2c4g.11186623.0.0.6c2774fahtfxdn", ")", "create", "dashscope", "account", "obtain", "api", "key", "(", "ak", ")", ".", "recommend", "setting", "ak", "environment", "variable", ":", "``", "`", "bash", "export", "dashscope_api_key=", "''", "your_dashscope_api_key", "''", "``", "`", "please", "install", "package", "click", "[", "]", "(", "http", ":", "//help.aliyun.com/zh/dashscope/developer-reference/install-dashscope-sdk", ")", "documentation", ".", "use", "python", ",", "install", "dashscope", "pip", ":", "``", "`", "bash", "pip", "install", "dashscope", "``", "`", "use", "java", "sdk", ",", "install", "way", ":", "``", "`", "xml", "<", "!", "--", "http", ":", "//mvnrepository.com/artifact/com.alibaba/dashscope-sdk-java", "--", ">", "<", "dependency", ">", "<", "groupid", ">", "com.alibaba", "<", "/groupid", ">", "<", "artifactid", ">", "dashscope-sdk-java", "<", "/artifactid", ">", "<", "version", ">", "the-latest-version", "<", "/version", ">", "<", "/dependency", ">", "``", "`", "simplest", "way", "use", "dashscope", "usage", "message", ",", "similar", "openai", "api", ".", "example", "demonstrated", ":", "``", "`", "python", "import", "random", "http", "import", "httpstatus", "dashscope", "import", "generation", "def", "call_with_messages", "(", ")", ":", "message", "=", "[", "{", "'role", "'", ":", "'system", "'", ",", "'content", "'", ":", "'you", "helpful", "assistant", ".", "'", "}", ",", "{", "'role", "'", ":", "'user", "'", ",", "'content", "'", ":", "'如何做西红柿鸡蛋?", "'", "}", "]", "gen", "=", "generation", "(", ")", "response", "=", "gen.call", "(", "generation.models.qwen_turbo", ",", "messages=messages", ",", "seed=random.randint", "(", "1", ",", "10000", ")", "," ], [ "dashscope simple way use qwen apis dashscope api service alibaba cloud .", "give introduction usage .", "additionally , provide script deploy openai-style api server .", "dashscope large language model api service provided alibaba cloud , support qwen .", "note model behind dashscope in-house version temporarily without detail provided .", "service include ` qwen-turbo ` ` qwen-plus ` , former one run faster latter achieves better performance .", "information , visit documentation [ ] ( http : //dashscope.aliyun.com ) .", "please head official website [ link ] ( http : //help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key ? spm=a2c4g.11186623.0.0.6c2774fahtfxdn ) create dashscope account obtain api key ( ak ) .", "recommend setting ak environment variable : `` ` bash export dashscope_api_key= '' your_dashscope_api_key '' `` ` please install package click [ ] ( http : //help.aliyun.com/zh/dashscope/developer-reference/install-dashscope-sdk ) documentation .", "use python , install dashscope pip : `` ` bash pip install dashscope `` ` use java sdk , install way : `` ` xml < ! -- http : //mvnrepository.com/artifact/com.alibaba/dashscope-sdk-java -- > < dependency > < groupid > com.alibaba < /groupid > < artifactid > dashscope-sdk-java < /artifactid > < version > the-latest-version < /version > < /dependency > `` ` simplest way use dashscope usage message , similar openai api .", "example demonstrated : `` ` python import random http import httpstatus dashscope import generation def call_with_messages ( ) : message = [ { 'role ' : 'system ' , 'content ' : 'you helpful assistant .", "' } , { 'role ' : 'user ' , 'content ' : '如何做西红柿鸡蛋? ' } ] gen = generation ( ) response = gen.call ( generation.models.qwen_turbo , messages=messages , seed=random.randint ( 1 , 10000 ) ," ] ]
[ [ "dashscope", "simple", "way", "use", "qwen", "apis", "dashscope", "api", "service", "alibaba", "cloud", ".", "give", "introduction", "usage", ".", "additionally", ",", "provide", "script", "deploy", "openai-style", "api", "server", ".", "dashscope", "large", "language", "model", "api", "service", "provided", "alibaba", "cloud", ",", "support", "qwen", ".", "note", "model", "behind", "dashscope", "in-house", "version", "temporarily", "without", "detail", "provided", ".", "service", "include", "`", "qwen-turbo", "`", "`", "qwen-plus", "`", ",", "former", "one", "run", "faster", "latter", "achieves", "better", "performance", ".", "information", ",", "visit", "documentation", "[", "]", "(", "http", ":", "//dashscope.aliyun.com", ")", ".", "please", "head", "official", "website", "[", "link", "]", "(", "http", ":", "//help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key", "?", "spm=a2c4g.11186623.0.0.6c2774fahtfxdn", ")", "create", "dashscope", "account", "obtain", "api", "key", "(", "ak", ")", ".", "recommend", "setting", "ak", "environment", "variable", ":", "``", "`", "bash", "export", "dashscope_api_key=", "''", "your_dashscope_api_key", "''", "``", "`", "please", "install", "package", "click", "[", "]", "(", "http", ":", "//help.aliyun.com/zh/dashscope/developer-reference/install-dashscope-sdk", ")", "documentation", ".", "use", "python", ",", "install", "dashscope", "pip", ":", "``", "`", "bash", "pip", "install", "dashscope", "``", "`", "use", "java", "sdk", ",", "install", "way", ":", "``", "`", "xml", "<", "!", "--", "http", ":", "//mvnrepository.com/artifact/com.alibaba/dashscope-sdk-java", "--", ">", "<", "dependency", ">", "<", "groupid", ">", "com.alibaba", "<", "/groupid", ">", "<", "artifactid", ">", "dashscope-sdk-java", "<", "/artifactid", ">", "<", "version", ">", "the-latest-version", "<", "/version", ">", "<", "/dependency", ">", "``", "`", "simplest", "way", "use", "dashscope", "usage", "message", ",", "similar", "openai", "api", ".", "example", "demonstrated", ":", "``", "`", "python", "import", "random", "http", "import", "httpstatus", "dashscope", "import", "generation", "def", "call_with_messages", "(", ")", ":", "message", "=", "[", "{", "'role", "'", ":", "'system", "'", ",", "'content", "'", ":", "'you", "helpful", "assistant", ".", "'", "}", ",", "{", "'role", "'", ":", "'user", "'", ",", "'content", "'", ":", "'如何做西红柿鸡蛋?", "'", "}", "]", "gen", "=", "generation", "(", ")", "response", "=", "gen.call", "(", "generation.models.qwen_turbo", ",", "messages=messages", ",", "seed=random.randint", "(", "1", ",", "10000", ")", "," ], [ "dashscope simple way use qwen apis dashscope api service alibaba cloud .", "give introduction usage .", "additionally , provide script deploy openai-style api server .", "dashscope large language model api service provided alibaba cloud , support qwen .", "note model behind dashscope in-house version temporarily without detail provided .", "service include ` qwen-turbo ` ` qwen-plus ` , former one run faster latter achieves better performance .", "information , visit documentation [ ] ( http : //dashscope.aliyun.com ) .", "please head official website [ link ] ( http : //help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key ? spm=a2c4g.11186623.0.0.6c2774fahtfxdn ) create dashscope account obtain api key ( ak ) .", "recommend setting ak environment variable : `` ` bash export dashscope_api_key= '' your_dashscope_api_key '' `` ` please install package click [ ] ( http : //help.aliyun.com/zh/dashscope/developer-reference/install-dashscope-sdk ) documentation .", "use python , install dashscope pip : `` ` bash pip install dashscope `` ` use java sdk , install way : `` ` xml < ! -- http : //mvnrepository.com/artifact/com.alibaba/dashscope-sdk-java -- > < dependency > < groupid > com.alibaba < /groupid > < artifactid > dashscope-sdk-java < /artifactid > < version > the-latest-version < /version > < /dependency > `` ` simplest way use dashscope usage message , similar openai api .", "example demonstrated : `` ` python import random http import httpstatus dashscope import generation def call_with_messages ( ) : message = [ { 'role ' : 'system ' , 'content ' : 'you helpful assistant .", "' } , { 'role ' : 'user ' , 'content ' : '如何做西红柿鸡蛋? ' } ] gen = generation ( ) response = gen.call ( generation.models.qwen_turbo , messages=messages , seed=random.randint ( 1 , 10000 ) ," ] ]
DashScope The most simple way to use Qwen through APIs is DashScope API service through Alibaba Cloud. We give an introduction to the usage. Additionally, we provide a script for you to deploy an OpenAI-style API on your own servers. DashScope is the large language model API service provided by Alibaba Cloud, which now supports Qwen. Note that the models behind DashScope are in-house versions temporarily without details provided. The services include `qwen-turbo` and `qwen-plus`, where the former one runs faster and the latter achieves better performance. For more information, visit the documentation [here](https://dashscope.aliyun.com). Please head to the official website [link](https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key?spm=a2c4g.11186623.0.0.6c2774fahtfXdn) to create a DashScope account and obtain the API key (AK). We recommend setting the AK with an environment variable: ```bash export DASHSCOPE_API_KEY="YOUR_DASHSCOPE_API_KEY" ``` Then please install the packages and click [here](https://help.aliyun.com/zh/dashscope/developer-reference/install-dashscope-sdk) for the documentation. If you use Python, you can install DashScope with pip: ```bash pip install dashscope ``` If you use JAVA SDK, you can install it in this way: ```xml <!-- https://mvnrepository.com/artifact/com.alibaba/dashscope-sdk-java --> <dependency> <groupId>com.alibaba</groupId> <artifactId>dashscope-sdk-java</artifactId> <version>the-latest-version</version> </dependency> ``` The simplest way to use DashScope is the usage with messages, which is similar to OpenAI API. The example is demonstrated below: ```python import random from http import HTTPStatus from dashscope import Generation def call_with_messages(): messages = [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': '如何做西红柿鸡蛋?'}] gen = Generation() response = gen.call( Generation.Models.qwen_turbo, messages=messages, seed=random.randint(1, 10000),
https://github.com/QwenLM/Qwen
0
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "gptq", "provide", "solution", "based", "[", "autogptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", ",", "release", "int4", "int8", "quantized", "model", ",", "achieve", "nearly", "lossless", "model", "effect", "improved", "performance", "memory", "cost", "inference", "speed", ".", "demonstrate", "use", "provided", "quantized", "model", "inference", ".", "start", ",", "make", "sure", "meet", "requirement", "auto-gptq", "(", "e.g.", ",", "torch", "2.0", ",", "transformer", "4.32.0", ",", "etc", ".", ")", "install", "required", "package", ":", "``", "`", "bash", "pip", "install", "auto-gptq", "optimum", "``", "`", "meet", "problem", "installing", "`", "auto-gptq", "`", ",", "advise", "check", "official", "[", "repo", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "find", "wheel", ".", ">", "note", ":", "pre-compiled", "`", "auto-gptq", "`", "package", "strongly", "depend", "version", "`", "torch", "`", "cuda", "version", ".", "moreover", ",", "due", "recent", "update", ",", ">", "may", "also", "encounter", "unsupported", "version", "error", "`", "transformer", "`", ",", "`", "optimum", "`", ",", "`", "peft", "`", ".", ">", "recommend", "using", "latest", "version", "meeting", "following", "requirement", ":", ">", "-", "torch==2.1", "auto-gptq", ">", "=0.5.1", "transformer", ">", "=4.35.0", "optimum", ">", "=1.14.0", "peft", ">", "=0.6.1", ">", "-", "torch", ">", "=2.0", ",", "<", "2.1", "auto-gptq", "<", "0.5.0", "transformer", "<", "4.35.0", "optimum", "<", "1.14.0", "peft", ">", "=0.5.0", ",", "<", "0.6.0", "load", "quantized", "model", "easily", "run", "inference", "usual", ":", "``", "`", "python" ], [ "gptq provide solution based [ autogptq ] ( http : //github.com/panqiwei/autogptq ) , release int4 int8 quantized model , achieve nearly lossless model effect improved performance memory cost inference speed .", "demonstrate use provided quantized model inference .", "start , make sure meet requirement auto-gptq ( e.g. , torch 2.0 , transformer 4.32.0 , etc . )", "install required package : `` ` bash pip install auto-gptq optimum `` ` meet problem installing ` auto-gptq ` , advise check official [ repo ] ( http : //github.com/panqiwei/autogptq ) find wheel .", "> note : pre-compiled ` auto-gptq ` package strongly depend version ` torch ` cuda version .", "moreover , due recent update , > may also encounter unsupported version error ` transformer ` , ` optimum ` , ` peft ` .", "> recommend using latest version meeting following requirement : > - torch==2.1 auto-gptq > =0.5.1 transformer > =4.35.0 optimum > =1.14.0 peft > =0.6.1 > - torch > =2.0 , < 2.1 auto-gptq < 0.5.0 transformer < 4.35.0 optimum < 1.14.0 peft > =0.5.0 , < 0.6.0 load quantized model easily run inference usual : `` ` python" ] ]
[ [ "gptq", "provide", "solution", "based", "[", "autogptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", ",", "release", "int4", "int8", "quantized", "model", ",", "achieve", "nearly", "lossless", "model", "effect", "improved", "performance", "memory", "cost", "inference", "speed", ".", "demonstrate", "use", "provided", "quantized", "model", "inference", ".", "start", ",", "make", "sure", "meet", "requirement", "auto-gptq", "(", "e.g.", ",", "torch", "2.0", ",", "transformer", "4.32.0", ",", "etc", ".", ")", "install", "required", "package", ":", "``", "`", "bash", "pip", "install", "auto-gptq", "optimum", "``", "`", "meet", "problem", "installing", "`", "auto-gptq", "`", ",", "advise", "check", "official", "[", "repo", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "find", "wheel", ".", ">", "note", ":", "pre-compiled", "`", "auto-gptq", "`", "package", "strongly", "depend", "version", "`", "torch", "`", "cuda", "version", ".", "moreover", ",", "due", "recent", "update", ",", ">", "may", "also", "encounter", "unsupported", "version", "error", "`", "transformer", "`", ",", "`", "optimum", "`", ",", "`", "peft", "`", ".", ">", "recommend", "using", "latest", "version", "meeting", "following", "requirement", ":", ">", "-", "torch==2.1", "auto-gptq", ">", "=0.5.1", "transformer", ">", "=4.35.0", "optimum", ">", "=1.14.0", "peft", ">", "=0.6.1", ">", "-", "torch", ">", "=2.0", ",", "<", "2.1", "auto-gptq", "<", "0.5.0", "transformer", "<", "4.35.0", "optimum", "<", "1.14.0", "peft", ">", "=0.5.0", ",", "<", "0.6.0", "load", "quantized", "model", "easily", "run", "inference", "usual", ":", "``", "`", "python" ], [ "gptq provide solution based [ autogptq ] ( http : //github.com/panqiwei/autogptq ) , release int4 int8 quantized model , achieve nearly lossless model effect improved performance memory cost inference speed .", "demonstrate use provided quantized model inference .", "start , make sure meet requirement auto-gptq ( e.g. , torch 2.0 , transformer 4.32.0 , etc . )", "install required package : `` ` bash pip install auto-gptq optimum `` ` meet problem installing ` auto-gptq ` , advise check official [ repo ] ( http : //github.com/panqiwei/autogptq ) find wheel .", "> note : pre-compiled ` auto-gptq ` package strongly depend version ` torch ` cuda version .", "moreover , due recent update , > may also encounter unsupported version error ` transformer ` , ` optimum ` , ` peft ` .", "> recommend using latest version meeting following requirement : > - torch==2.1 auto-gptq > =0.5.1 transformer > =4.35.0 optimum > =1.14.0 peft > =0.6.1 > - torch > =2.0 , < 2.1 auto-gptq < 0.5.0 transformer < 4.35.0 optimum < 1.14.0 peft > =0.5.0 , < 0.6.0 load quantized model easily run inference usual : `` ` python" ] ]
GPTQ We provide a solution based on [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ), and release the Int4 and Int8 quantized models, which achieve nearly lossless model effects but improved performance on both memory costs and inference speed. Here we demonstrate how to use our provided quantized models for inference. Before you start, make sure you meet the requirements of auto-gptq (e.g., torch 2.0 and above, transformers 4.32.0 and above, etc.) and install the required packages: ```bash pip install auto-gptq optimum ``` If you meet problems installing `auto-gptq`, we advise you to check out the official [repo](https://github.com/PanQiWei/AutoGPTQ) to find a wheel. > Note: The pre-compiled `auto-gptq` packages strongly depend on the version of `torch` and its CUDA version. Moreover, due to recent update, > you may also encounter unsupported version errors from `transformers`, `optimum`, or `peft`. > We recommend using the latest versions meeting the following requirements: > - torch==2.1 auto-gptq>=0.5.1 transformers>=4.35.0 optimum>=1.14.0 peft>=0.6.1 > - torch>=2.0,<2.1 auto-gptq<0.5.0 transformers<4.35.0 optimum<1.14.0 peft>=0.5.0,<0.6.0 Then you can load the quantized model easily and run inference as same as usual: ```python
https://github.com/QwenLM/Qwen
0
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "usage", "provide", "official", "training", "script", ",", "`", "finetune.py", "`", ",", "user", "finetune", "pretrained", "model", "downstream", "application", "simple", "fashion", ".", "additionally", ",", "provide", "shell", "script", "launch", "finetuning", "worry", ".", "script", "support", "training", "[", "deepspeed", "]", "(", "http", ":", "//github.com/microsoft/deepspeed", ")", "[", "fsdp", "]", "(", "http", ":", "//engineering.fb.com/2021/07/15/open-source/fsdp/", ")", ".", "shell", "script", "provide", "use", "deepspeed", "(", "note", ":", "may", "conflict", "latest", "version", "pydantic", "use", "make", "sure", "`", "pydantic", "<", "2.0", "`", ")", "peft", ".", "install", ":", "``", "`", "bash", "pip", "install", "peft", "deepspeed", "``", "`", "prepare", "training", "data", ",", "need", "put", "sample", "list", "save", "json", "file", ".", "sample", "dictionary", "consisting", "id", "list", "conversation", ".", "simple", "example", "list", "1", "sample", ":", "``", "`", "json", "[", "{", "``", "id", "''", ":", "``", "identity_0", "''", ",", "``", "conversation", "''", ":", "[", "{", "``", "''", ":", "``", "user", "''", ",", "``", "value", "''", ":", "``", "你好", "''", "}", ",", "{", "``", "''", ":", "``", "assistant", "''", ",", "``", "value", "''", ":", "``", "我是一个语言模型,我叫通义千问。", "''", "}", "]", "}", "]", "``", "`", "data", "preparation", ",", "use", "provided", "shell", "script", "run", "finetuning", ".", "remember", "specify", "path", "data", "file", ",", "`", "$", "data", "`", ".", "finetuning", "script", "allow", "perform", ":", "-", "full-parameter", "finetuning", "-", "lora", "-", "q-lora", "full-parameter", "finetuning", "requires", "updating", "parameter", "whole", "training", "process", ".", "launch", "training", ",", "run", "following", "script", ":", "``", "`", "bash" ], [ "usage provide official training script , ` finetune.py ` , user finetune pretrained model downstream application simple fashion .", "additionally , provide shell script launch finetuning worry .", "script support training [ deepspeed ] ( http : //github.com/microsoft/deepspeed ) [ fsdp ] ( http : //engineering.fb.com/2021/07/15/open-source/fsdp/ ) .", "shell script provide use deepspeed ( note : may conflict latest version pydantic use make sure ` pydantic < 2.0 ` ) peft .", "install : `` ` bash pip install peft deepspeed `` ` prepare training data , need put sample list save json file .", "sample dictionary consisting id list conversation .", "simple example list 1 sample : `` ` json [ { `` id '' : `` identity_0 '' , `` conversation '' : [ { `` '' : `` user '' , `` value '' : `` 你好 '' } , { `` '' : `` assistant '' , `` value '' : `` 我是一个语言模型,我叫通义千问。 '' } ] } ] `` ` data preparation , use provided shell script run finetuning .", "remember specify path data file , ` $ data ` .", "finetuning script allow perform : - full-parameter finetuning - lora - q-lora full-parameter finetuning requires updating parameter whole training process .", "launch training , run following script : `` ` bash" ] ]
[ [ "usage", "provide", "official", "training", "script", ",", "`", "finetune.py", "`", ",", "user", "finetune", "pretrained", "model", "downstream", "application", "simple", "fashion", ".", "additionally", ",", "provide", "shell", "script", "launch", "finetuning", "worry", ".", "script", "support", "training", "[", "deepspeed", "]", "(", "http", ":", "//github.com/microsoft/deepspeed", ")", "[", "fsdp", "]", "(", "http", ":", "//engineering.fb.com/2021/07/15/open-source/fsdp/", ")", ".", "shell", "script", "provide", "use", "deepspeed", "(", "note", ":", "may", "conflict", "latest", "version", "pydantic", "use", "make", "sure", "`", "pydantic", "<", "2.0", "`", ")", "peft", ".", "install", ":", "``", "`", "bash", "pip", "install", "peft", "deepspeed", "``", "`", "prepare", "training", "data", ",", "need", "put", "sample", "list", "save", "json", "file", ".", "sample", "dictionary", "consisting", "id", "list", "conversation", ".", "simple", "example", "list", "1", "sample", ":", "``", "`", "json", "[", "{", "``", "id", "''", ":", "``", "identity_0", "''", ",", "``", "conversation", "''", ":", "[", "{", "``", "''", ":", "``", "user", "''", ",", "``", "value", "''", ":", "``", "你好", "''", "}", ",", "{", "``", "''", ":", "``", "assistant", "''", ",", "``", "value", "''", ":", "``", "我是一个语言模型,我叫通义千问。", "''", "}", "]", "}", "]", "``", "`", "data", "preparation", ",", "use", "provided", "shell", "script", "run", "finetuning", ".", "remember", "specify", "path", "data", "file", ",", "`", "$", "data", "`", ".", "finetuning", "script", "allow", "perform", ":", "-", "full-parameter", "finetuning", "-", "lora", "-", "q-lora", "full-parameter", "finetuning", "requires", "updating", "parameter", "whole", "training", "process", ".", "launch", "training", ",", "run", "following", "script", ":", "``", "`", "bash" ], [ "usage provide official training script , ` finetune.py ` , user finetune pretrained model downstream application simple fashion .", "additionally , provide shell script launch finetuning worry .", "script support training [ deepspeed ] ( http : //github.com/microsoft/deepspeed ) [ fsdp ] ( http : //engineering.fb.com/2021/07/15/open-source/fsdp/ ) .", "shell script provide use deepspeed ( note : may conflict latest version pydantic use make sure ` pydantic < 2.0 ` ) peft .", "install : `` ` bash pip install peft deepspeed `` ` prepare training data , need put sample list save json file .", "sample dictionary consisting id list conversation .", "simple example list 1 sample : `` ` json [ { `` id '' : `` identity_0 '' , `` conversation '' : [ { `` '' : `` user '' , `` value '' : `` 你好 '' } , { `` '' : `` assistant '' , `` value '' : `` 我是一个语言模型,我叫通义千问。 '' } ] } ] `` ` data preparation , use provided shell script run finetuning .", "remember specify path data file , ` $ data ` .", "finetuning script allow perform : - full-parameter finetuning - lora - q-lora full-parameter finetuning requires updating parameter whole training process .", "launch training , run following script : `` ` bash" ] ]
Usage Now we provide the official training script, `finetune.py`, for users to finetune the pretrained model for downstream applications in a simple fashion. Additionally, we provide shell scripts to launch finetuning with no worries. This script supports the training with [DeepSpeed](https://github.com/microsoft/DeepSpeed) and [FSDP](https://engineering.fb.com/2021/07/15/open-source/fsdp/). The shell scripts that we provide use DeepSpeed (Note: this may have conflicts with the latest version of pydantic and you should use make sure `pydantic<2.0`) and Peft. You can install them by: ```bash pip install peft deepspeed ``` To prepare your training data, you need to put all the samples into a list and save it to a json file. Each sample is a dictionary consisting of an id and a list for conversation. Below is a simple example list with 1 sample: ```json [ { "id": "identity_0", "conversations": [ { "from": "user", "value": "你好" }, { "from": "assistant", "value": "我是一个语言模型,我叫通义千问。" } ] } ] ``` After data preparation, you can use the provided shell scripts to run finetuning. Remember to specify the path to the data file, `$DATA`. The finetuning scripts allow you to perform: - Full-parameter finetuning - LoRA - Q-LoRA Full-parameter finetuning requires updating all parameters in the whole training process. To launch your training, run the following script: ```bash
https://github.com/QwenLM/Qwen
0
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "distributed", "training", ".", "provide", "single-gpu", "training", "script", "insufficient", "gpu", "memory", "break", "training", ".", "bash", "finetune/finetune_ds.sh", "``", "`", "remember", "specify", "correct", "model", "name", "path", ",", "data", "path", ",", "well", "output", "directory", "shell", "script", ".", "another", "thing", "notice", "use", "deepspeed", "zero", "3", "script", ".", "want", "make", "change", ",", "remove", "argument", "`", "--", "deepspeed", "`", "make", "change", "deepspeed", "configuration", "json", "file", "based", "requirement", ".", "additionally", ",", "script", "support", "mixed-precision", "training", ",", "thus", "use", "`", "--", "bf16", "true", "`", "`", "--", "fp16", "true", "`", ".", "remember", "use", "deepspeed", "use", "fp16", "due", "mixed", "precision", "training", ".", "empirically", "advise", "use", "bf16", "make", "training", "consistent", "pretraining", "alignment", "machine", "support", "bf16", ",", "thus", "use", "default", ".", "similarly", ",", "run", "lora", ",", "use", "another", "script", "run", "shown", ".", "start", ",", "make", "sure", "installed", "`", "peft", "`", ".", "also", ",", "need", "specify", "path", "model", ",", "data", ",", "output", ".", "advise", "use", "absolute", "path", "pretrained", "model", ".", "lora", "save", "adapter", "absolute", "path", "adapter", "configuration", "json", "file", "used", "finding", "pretrained", "model", "load", ".", "also", ",", "script", "support", "bf16", "fp16", ".", "``", "`", "bash" ], [ "distributed training .", "provide single-gpu training script insufficient gpu memory break training .", "bash finetune/finetune_ds.sh `` ` remember specify correct model name path , data path , well output directory shell script .", "another thing notice use deepspeed zero 3 script .", "want make change , remove argument ` -- deepspeed ` make change deepspeed configuration json file based requirement .", "additionally , script support mixed-precision training , thus use ` -- bf16 true ` ` -- fp16 true ` .", "remember use deepspeed use fp16 due mixed precision training .", "empirically advise use bf16 make training consistent pretraining alignment machine support bf16 , thus use default .", "similarly , run lora , use another script run shown .", "start , make sure installed ` peft ` .", "also , need specify path model , data , output .", "advise use absolute path pretrained model .", "lora save adapter absolute path adapter configuration json file used finding pretrained model load .", "also , script support bf16 fp16 .", "`` ` bash" ] ]
[ [ "distributed", "training", ".", "provide", "single-gpu", "training", "script", "insufficient", "gpu", "memory", "break", "training", ".", "bash", "finetune/finetune_ds.sh", "``", "`", "remember", "specify", "correct", "model", "name", "path", ",", "data", "path", ",", "well", "output", "directory", "shell", "script", ".", "another", "thing", "notice", "use", "deepspeed", "zero", "3", "script", ".", "want", "make", "change", ",", "remove", "argument", "`", "--", "deepspeed", "`", "make", "change", "deepspeed", "configuration", "json", "file", "based", "requirement", ".", "additionally", ",", "script", "support", "mixed-precision", "training", ",", "thus", "use", "`", "--", "bf16", "true", "`", "`", "--", "fp16", "true", "`", ".", "remember", "use", "deepspeed", "use", "fp16", "due", "mixed", "precision", "training", ".", "empirically", "advise", "use", "bf16", "make", "training", "consistent", "pretraining", "alignment", "machine", "support", "bf16", ",", "thus", "use", "default", ".", "similarly", ",", "run", "lora", ",", "use", "another", "script", "run", "shown", ".", "start", ",", "make", "sure", "installed", "`", "peft", "`", ".", "also", ",", "need", "specify", "path", "model", ",", "data", ",", "output", ".", "advise", "use", "absolute", "path", "pretrained", "model", ".", "lora", "save", "adapter", "absolute", "path", "adapter", "configuration", "json", "file", "used", "finding", "pretrained", "model", "load", ".", "also", ",", "script", "support", "bf16", "fp16", ".", "``", "`", "bash" ], [ "distributed training .", "provide single-gpu training script insufficient gpu memory break training .", "bash finetune/finetune_ds.sh `` ` remember specify correct model name path , data path , well output directory shell script .", "another thing notice use deepspeed zero 3 script .", "want make change , remove argument ` -- deepspeed ` make change deepspeed configuration json file based requirement .", "additionally , script support mixed-precision training , thus use ` -- bf16 true ` ` -- fp16 true ` .", "remember use deepspeed use fp16 due mixed precision training .", "empirically advise use bf16 make training consistent pretraining alignment machine support bf16 , thus use default .", "similarly , run lora , use another script run shown .", "start , make sure installed ` peft ` .", "also , need specify path model , data , output .", "advise use absolute path pretrained model .", "lora save adapter absolute path adapter configuration json file used finding pretrained model load .", "also , script support bf16 fp16 .", "`` ` bash" ] ]
Distributed training. We do not provide single-GPU training script as the insufficient GPU memory will break down the training. bash finetune/finetune_ds.sh ``` Remember to specify the correct model name or path, the data path, as well as the output directory in the shell scripts. Another thing to notice is that we use DeepSpeed ZeRO 3 in this script. If you want to make changes, just remove the argument `--deepspeed` or make changes in the DeepSpeed configuration json file based on your requirements. Additionally, this script supports mixed-precision training, and thus you can use `--bf16 True` or `--fp16 True`. Remember to use DeepSpeed when you use fp16 due to mixed precision training. Empirically we advise you to use bf16 to make your training consistent with our pretraining and alignment if your machine supports bf16, and thus we use it by default. Similarly, to run LoRA, use another script to run as shown below. Before you start, make sure that you have installed `peft`. Also, you need to specify your paths to your model, data, and output. We advise you to use absolute path for your pretrained model. This is because LoRA only saves the adapter and the absolute path in the adapter configuration json file is used for finding out the pretrained model to load. Also, this script support both bf16 and fp16. ```bash
https://github.com/QwenLM/Qwen
-1
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "distributed", "training", "bash", "finetune/finetune_lora_ds.sh", "``", "`", "comparison", "full-parameter", "finetuning", ",", "lora", "(", "[", "paper", "]", "(", "http", ":", "//arxiv.org/abs/2106.09685", ")", ")", "update", "parameter", "adapter", "layer", "keep", "original", "large", "language", "model", "layer", "frozen", ".", "allows", "much", "fewer", "memory", "cost", "thus", "fewer", "computation", "cost", ".", "note", "use", "lora", "finetune", "base", "language", "model", ",", "e.g.", ",", "qwen-7b", ",", "instead", "chat", "model", ",", "e.g.", ",", "qwen-7b-chat", ",", "script", "automatically", "switch", "embedding", "output", "layer", "trainable", "parameter", ".", "base", "language", "model", "knowledge", "special", "token", "brought", "chatml", "format", ".", "thus", "layer", "updated", "model", "understand", "predict", "token", ".", "another", "word", ",", "training", "brings", "special", "token", "lora", ",", "set", "layer", "trainable", "parameter", "setting", "`", "modules_to_save", "`", "inside", "code", ".", "also", ",", "parameter", "trainable", ",", "available", "use", "zero", "3", ",", "use", "zero", "2", "script", "default", ".", "new", "trainable", "parameter", ",", "switch", "zero", "3", "changing", "deepspeed", "configuration", "file", ".", "additionally", ",", "find", "significant", "gap", "memory", "footprint", "lora", "without", "trainable", "parameter", ".", "therefore", ",", "trouble", "memory", ",", "advise", "lora", "finetune", "chat", "model", ".", "check", "profile", "information", ".", "still", "suffer", "insufficient", "memory", ",", "consider", "q-lora", "(", "[", "paper", "]", "(", "http", ":", "//arxiv.org/abs/2305.14314", ")", ")", ",", "us", "quantized", "large", "language", "model", "technique", "paged", "attention", "allow", "even", "fewer", "memory", "cost", ".", "note", ":", "run", "single-gpu", "q-lora", "training", ",", "may", "need", "install", "`", "mpi4py", "`", "`", "pip", "`", "`", "conda", "`", ".", "run", "q-lora", ",", "directly", "run", "following", "script", ":", "``", "`", "bash" ], [ "distributed training bash finetune/finetune_lora_ds.sh `` ` comparison full-parameter finetuning , lora ( [ paper ] ( http : //arxiv.org/abs/2106.09685 ) ) update parameter adapter layer keep original large language model layer frozen .", "allows much fewer memory cost thus fewer computation cost .", "note use lora finetune base language model , e.g. , qwen-7b , instead chat model , e.g. , qwen-7b-chat , script automatically switch embedding output layer trainable parameter .", "base language model knowledge special token brought chatml format .", "thus layer updated model understand predict token .", "another word , training brings special token lora , set layer trainable parameter setting ` modules_to_save ` inside code .", "also , parameter trainable , available use zero 3 , use zero 2 script default .", "new trainable parameter , switch zero 3 changing deepspeed configuration file .", "additionally , find significant gap memory footprint lora without trainable parameter .", "therefore , trouble memory , advise lora finetune chat model .", "check profile information .", "still suffer insufficient memory , consider q-lora ( [ paper ] ( http : //arxiv.org/abs/2305.14314 ) ) , us quantized large language model technique paged attention allow even fewer memory cost .", "note : run single-gpu q-lora training , may need install ` mpi4py ` ` pip ` ` conda ` .", "run q-lora , directly run following script : `` ` bash" ] ]
[ [ "distributed", "training", "bash", "finetune/finetune_lora_ds.sh", "``", "`", "comparison", "full-parameter", "finetuning", ",", "lora", "(", "[", "paper", "]", "(", "http", ":", "//arxiv.org/abs/2106.09685", ")", ")", "update", "parameter", "adapter", "layer", "keep", "original", "large", "language", "model", "layer", "frozen", ".", "allows", "much", "fewer", "memory", "cost", "thus", "fewer", "computation", "cost", ".", "note", "use", "lora", "finetune", "base", "language", "model", ",", "e.g.", ",", "qwen-7b", ",", "instead", "chat", "model", ",", "e.g.", ",", "qwen-7b-chat", ",", "script", "automatically", "switch", "embedding", "output", "layer", "trainable", "parameter", ".", "base", "language", "model", "knowledge", "special", "token", "brought", "chatml", "format", ".", "thus", "layer", "updated", "model", "understand", "predict", "token", ".", "another", "word", ",", "training", "brings", "special", "token", "lora", ",", "set", "layer", "trainable", "parameter", "setting", "`", "modules_to_save", "`", "inside", "code", ".", "also", ",", "parameter", "trainable", ",", "available", "use", "zero", "3", ",", "use", "zero", "2", "script", "default", ".", "new", "trainable", "parameter", ",", "switch", "zero", "3", "changing", "deepspeed", "configuration", "file", ".", "additionally", ",", "find", "significant", "gap", "memory", "footprint", "lora", "without", "trainable", "parameter", ".", "therefore", ",", "trouble", "memory", ",", "advise", "lora", "finetune", "chat", "model", ".", "check", "profile", "information", ".", "still", "suffer", "insufficient", "memory", ",", "consider", "q-lora", "(", "[", "paper", "]", "(", "http", ":", "//arxiv.org/abs/2305.14314", ")", ")", ",", "us", "quantized", "large", "language", "model", "technique", "paged", "attention", "allow", "even", "fewer", "memory", "cost", ".", "note", ":", "run", "single-gpu", "q-lora", "training", ",", "may", "need", "install", "`", "mpi4py", "`", "`", "pip", "`", "`", "conda", "`", ".", "run", "q-lora", ",", "directly", "run", "following", "script", ":", "``", "`", "bash" ], [ "distributed training bash finetune/finetune_lora_ds.sh `` ` comparison full-parameter finetuning , lora ( [ paper ] ( http : //arxiv.org/abs/2106.09685 ) ) update parameter adapter layer keep original large language model layer frozen .", "allows much fewer memory cost thus fewer computation cost .", "note use lora finetune base language model , e.g. , qwen-7b , instead chat model , e.g. , qwen-7b-chat , script automatically switch embedding output layer trainable parameter .", "base language model knowledge special token brought chatml format .", "thus layer updated model understand predict token .", "another word , training brings special token lora , set layer trainable parameter setting ` modules_to_save ` inside code .", "also , parameter trainable , available use zero 3 , use zero 2 script default .", "new trainable parameter , switch zero 3 changing deepspeed configuration file .", "additionally , find significant gap memory footprint lora without trainable parameter .", "therefore , trouble memory , advise lora finetune chat model .", "check profile information .", "still suffer insufficient memory , consider q-lora ( [ paper ] ( http : //arxiv.org/abs/2305.14314 ) ) , us quantized large language model technique paged attention allow even fewer memory cost .", "note : run single-gpu q-lora training , may need install ` mpi4py ` ` pip ` ` conda ` .", "run q-lora , directly run following script : `` ` bash" ] ]
Distributed training bash finetune/finetune_lora_ds.sh ``` In comparison with full-parameter finetuning, LoRA ([paper](https://arxiv.org/abs/2106.09685)) only updates the parameters of adapter layers but keeps the original large language model layers frozen. This allows much fewer memory costs and thus fewer computation costs. Note that if you use LoRA to finetune the base language model, e.g., Qwen-7B, instead of chat models, e.g., Qwen-7B-Chat, the script automatically switches the embedding and output layer as trainable parameters. This is because the base language model has no knowledge of special tokens brought by ChatML format. Thus these layers should be updated for the model to understand and predict the tokens. Or in another word, if your training brings in special tokens in LoRA, you should set the layers to trainable parameters by setting `modules_to_save` inside the code. Also, if we have these parameters trainable, it is not available to use ZeRO 3, and this is why we use ZeRO 2 in the script by default. If you do not have new trainable parameters, you can switch to ZeRO 3 by changing the DeepSpeed configuration file. Additionally, we find that there is a significant gap between the memory footprint of LoRA with and without these trainable parameters. Therefore, if you have trouble with memory, we advise you to LoRA finetune the chat models. Check the profile below for more information. If you still suffer from insufficient memory, you can consider Q-LoRA ([paper](https://arxiv.org/abs/2305.14314)), which uses the quantized large language model and other techniques such as paged attention to allow even fewer memory costs. Note: to run single-GPU Q-LoRA training, you may need to install `mpi4py` through `pip` or `conda`. To run Q-LoRA, directly run the following script: ```bash
https://github.com/QwenLM/Qwen
-1
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "quantize", "fine-tuned", "model", "section", "applies", "full-parameter/lora", "fine-tuned", "model", ".", "(", "note", ":", "need", "quantize", "q-lora", "fine-tuned", "model", "already", "quantized", ".", ")", "use", "lora", ",", "please", "follow", "instruction", "merge", "model", "quantization", ".", "recommend", "using", "[", "auto_gptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "quantize", "finetuned", "model", ".", "``", "`", "bash", "pip", "install", "auto-gptq", "optimum", "``", "`", "note", ":", "currently", "autogptq", "bug", "referred", "[", "issue", "]", "(", "http", ":", "//github.com/panqiwei/autogptq/issues/370", ")", ".", "[", "workaround", "pr", "]", "(", "http", ":", "//github.com/panqiwei/autogptq/pull/495", ")", ",", "pull", "branch", "install", "source", ".", "first", ",", "prepare", "calibration", "data", ".", "reuse", "fine-tuning", "data", ",", "use", "data", "following", "format", ".", "second", ",", "run", "following", "script", ":", "``", "`", "bash", "python", "run_gptq.py", "\\", "--", "model_name_or_path", "$", "your_lora_model_path", "\\", "--", "data_path", "$", "data", "\\", "--", "out_path", "$", "output_path", "\\", "--", "bit", "4" ], [ "quantize fine-tuned model section applies full-parameter/lora fine-tuned model .", "( note : need quantize q-lora fine-tuned model already quantized . )", "use lora , please follow instruction merge model quantization .", "recommend using [ auto_gptq ] ( http : //github.com/panqiwei/autogptq ) quantize finetuned model .", "`` ` bash pip install auto-gptq optimum `` ` note : currently autogptq bug referred [ issue ] ( http : //github.com/panqiwei/autogptq/issues/370 ) .", "[ workaround pr ] ( http : //github.com/panqiwei/autogptq/pull/495 ) , pull branch install source .", "first , prepare calibration data .", "reuse fine-tuning data , use data following format .", "second , run following script : `` ` bash python run_gptq.py \\ -- model_name_or_path $ your_lora_model_path \\ -- data_path $ data \\ -- out_path $ output_path \\ -- bit 4" ] ]
[ [ "quantize", "fine-tuned", "model", "section", "applies", "full-parameter/lora", "fine-tuned", "model", ".", "(", "note", ":", "need", "quantize", "q-lora", "fine-tuned", "model", "already", "quantized", ".", ")", "use", "lora", ",", "please", "follow", "instruction", "merge", "model", "quantization", ".", "recommend", "using", "[", "auto_gptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "quantize", "finetuned", "model", ".", "``", "`", "bash", "pip", "install", "auto-gptq", "optimum", "``", "`", "note", ":", "currently", "autogptq", "bug", "referred", "[", "issue", "]", "(", "http", ":", "//github.com/panqiwei/autogptq/issues/370", ")", ".", "[", "workaround", "pr", "]", "(", "http", ":", "//github.com/panqiwei/autogptq/pull/495", ")", ",", "pull", "branch", "install", "source", ".", "first", ",", "prepare", "calibration", "data", ".", "reuse", "fine-tuning", "data", ",", "use", "data", "following", "format", ".", "second", ",", "run", "following", "script", ":", "``", "`", "bash", "python", "run_gptq.py", "\\", "--", "model_name_or_path", "$", "your_lora_model_path", "\\", "--", "data_path", "$", "data", "\\", "--", "out_path", "$", "output_path", "\\", "--", "bit", "4" ], [ "quantize fine-tuned model section applies full-parameter/lora fine-tuned model .", "( note : need quantize q-lora fine-tuned model already quantized . )", "use lora , please follow instruction merge model quantization .", "recommend using [ auto_gptq ] ( http : //github.com/panqiwei/autogptq ) quantize finetuned model .", "`` ` bash pip install auto-gptq optimum `` ` note : currently autogptq bug referred [ issue ] ( http : //github.com/panqiwei/autogptq/issues/370 ) .", "[ workaround pr ] ( http : //github.com/panqiwei/autogptq/pull/495 ) , pull branch install source .", "first , prepare calibration data .", "reuse fine-tuning data , use data following format .", "second , run following script : `` ` bash python run_gptq.py \\ -- model_name_or_path $ your_lora_model_path \\ -- data_path $ data \\ -- out_path $ output_path \\ -- bit 4" ] ]
Quantize Fine-tuned Models This section applies to full-parameter/LoRA fine-tuned models. (Note: You do not need to quantize the Q-LoRA fine-tuned model because it is already quantized.) If you use LoRA, please follow the above instructions to merge your model before quantization. We recommend using [auto_gptq](https://github.com/PanQiWei/AutoGPTQ) to quantize the finetuned model. ```bash pip install auto-gptq optimum ``` Note: Currently AutoGPTQ has a bug referred in [this issue](https://github.com/PanQiWei/AutoGPTQ/issues/370). Here is a [workaround PR](https://github.com/PanQiWei/AutoGPTQ/pull/495), and you can pull this branch and install from the source. First, prepare the calibration data. You can reuse the fine-tuning data, or use other data following the same format. Second, run the following script: ```bash python run_gptq.py \ --model_name_or_path $YOUR_LORA_MODEL_PATH \ --data_path $DATA \ --out_path $OUTPUT_PATH \ --bits 4
https://github.com/QwenLM/Qwen
0
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]