[ [ "profiling", "memory", "speed", "profile", "gpu", "memory", "training", "speed", "lora", "(", "lora", "(", "emb", ")", "refers", "training", "embedding", "output", "layer", ",", "lora", "trainable", "embedding", "output", "layer", ")", "q-lora", "setup", "single-gpu", "training", ".", "test", ",", "experiment", "single", "a100-sxm4-80g", "gpu", ",", "use", "cuda", "11.8", "pytorch", "2.0", ".", "flash", "attention", "2", "applied", ".", "uniformly", "use", "batch", "size", "1", "gradient", "accumulation", "8", ".", "profile", "memory", "(", "gb", ")", "speed", "(", "s/iter", ")", "input", "different", "length", ",", "namely", "256", ",", "512", ",", "1024", ",", "2048", ",", "4096", ",", "8192", ".", "also", "report", "statistic", "full-parameter", "finetuning", "qwen-7b", "2", "a100", "gpus", ".", "report", "statistic", "256", ",", "512", ",", "1024", "token", "due", "limitation", "gpu", "memory", ".", "qwen-7b", ",", "also", "test", "performance", "multinode", "finetuning", ".", "experiment", "using", "two", "server", ",", "containing", "two", "a100-sxm4-80g", "gpus", ",", "rest", "configuration", "qwen-7b", "experiment", ".", "result", "multinode", "finetuning", "marked", "lora", "(", "multinode", ")", "table", ".", "qwen-72b", ",", "experiment", "two", "way", ":", "1", ")", "lora", "fintuning", "+", "deepspeed", "zero", "3", "4", "a100-sxm4-80g", "gpus", "2", ")", "qlora", "(", "int4", ")", "fine-tuning", "single", "a100-sxm4-80g", "gpu", ".", "note", "oom", "occurs", "4", "a100-sxm4-80g", "gpus", "lora", "(", "emb", ")", "fine-tuning", "lora", "fine-tuning", "without", "deepspeed", "zero", "3", "(", "pas", "`", "--", "deepspeed", "finetune/ds_config_zero3.json", "`", "[", "`", "finetune/finetune_lora_ds.sh", "`", "]", "(", "finetune/finetune_lora_ds.sh", ")", "enable", "deepspeed", "zero", "3", ")", ".", "statistic", "listed", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "model", "size", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "method", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "#", "node", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "#", "gpus", "per", "node", "<", "/th", ">", "<", "th", "colspan=", "''", "6", "''", "align=", "''", "center", "''", ">", "sequence", "length", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "256", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "512", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "1024", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "2048", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "4096", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "8192", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "4", "''", ">", "1.8b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.7g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "7.4g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "8.4g", "/", "1.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.0g", "/", "1.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "16.2g", "/", "3.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.8g", "/", "6.8s/it", "<", 
"/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "13.7g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "14.0g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "14.0g", "/", "1.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.1g", "/", "1.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "19.7g", "/", "3.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.7g", "/", "7.0s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "5.8g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.0g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.6g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "7.8g", "/", "2.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "10.2g", "/", "3.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.8g", "/", "6.5s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "full-parameter", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "47.1g", "/", "2.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.3g", "/", "5.6s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "5", "''", ">", "7b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "20.1g", "/", "1.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "20.4g", "/", "1.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.5g", "/", "2.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.8g", "/", "5.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "29.7g", "/", "10.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "36.6g", "/", "21.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "33.7g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.1g", "/", "1.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.2g", "/", "2.9s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.1g", "/", "5.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "39.2g", "/", "10.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.5g", "/", 
"21.7s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.5g", "/", "3.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.5g", "/", "3.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "12.3g", "/", "3.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "13.9g", "/", "7.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "16.9g", "/", "11.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.5g", "/", "22.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "full-parameter", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "139.2g", "/", "4.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "148.0g", "/", "4.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "162.0g", "/", "4.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "multinode", ")", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.7g", "/", "2.09s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "77.6g", "/", "3.16s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "84.9g", "/", "5.17s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "95.1g", "/", "9.25s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "121.1g", "/", "18.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "155.5g", "/", "37.4s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "3", "''", ">", "14b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.6g", "/", "1.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.1g", "/", "2.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.3g", "/", "4.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "37.4g", "/", "8.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "42.5g", "/", "17.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "55.2g", "/", "36.0s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.2", "/", "1.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.1g", "/", "2.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.5g", "/", "4.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "54.1g", "/", "8.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "56.8g", "/", "17.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.7g", "/", "36.3s/it", "<", "/td", 
">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.7g", "/", "5.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.4g", "/", "6.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.9g", "/", "8.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "19.9g", "/", "11.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.0g", "/", "20.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.9g", "/", "38.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "72b", "<", "/th", ">", "<", "td", ">", "lora", "+", "deepspeed", "zero3", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "215.4g", "/", "17.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "217.7g", "/", "20.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "222.6g", "/", "29.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "228.8g", "/", "45.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "249.0g", "/", "83.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "289.2g", "/", "161.5s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "61.4g", "/", "27.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "61.4g", "/", "31.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "62.9g", "/", "41.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "64.1g", "/", "59.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "68.0g", "/", "97.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "75.6g", "/", "179.8s/it", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "<", "br", ">" ], [ "profiling memory speed profile gpu memory training speed lora ( lora ( emb ) refers training embedding output layer , lora trainable embedding output layer ) q-lora setup single-gpu training .", "test , experiment single a100-sxm4-80g gpu , use cuda 11.8 pytorch 2.0 .", "flash attention 2 applied .", "uniformly use batch size 1 gradient accumulation 8 .", "profile memory ( gb ) speed ( s/iter ) input different length , namely 256 , 512 , 1024 , 2048 , 4096 , 8192 .", "also report statistic full-parameter finetuning qwen-7b 2 a100 gpus .", "report statistic 256 , 512 , 1024 token due limitation gpu memory .", "qwen-7b , also test performance multinode finetuning .", "experiment using two server , containing two a100-sxm4-80g gpus , rest configuration qwen-7b experiment .", "result multinode finetuning marked lora ( multinode ) table .", "qwen-72b , experiment two way : 1 ) lora fintuning + deepspeed zero 3 4 a100-sxm4-80g gpus 2 ) qlora ( int4 ) fine-tuning single a100-sxm4-80g gpu .", "note oom occurs 4 a100-sxm4-80g gpus lora ( emb ) fine-tuning lora fine-tuning without deepspeed zero 3 ( pas ` -- deepspeed finetune/ds_config_zero3.json ` [ ` finetune/finetune_lora_ds.sh ` ] ( finetune/finetune_lora_ds.sh ) enable deepspeed zero 3 ) .", "statistic listed : < table > < tr > < 
th rowspan= '' 2 '' > model size < /th > < th rowspan= '' 2 '' > method < /th > < th rowspan= '' 2 '' > # node < /th > < th rowspan= '' 2 '' > # gpus per node < /th > < th colspan= '' 6 '' align= '' center '' > sequence length < /th > < /tr > < tr > < th align= '' center '' > 256 < /th > < th align= '' center '' > 512 < /th > < th align= '' center '' > 1024 < /th > < th align= '' center '' > 2048 < /th > < th align= '' center '' > 4096 < /th > < th align= '' center '' > 8192 < /th > < /tr > < tr > < th rowspan= '' 4 '' > 1.8b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 6.7g / 1.0s/it < /td > < td align= '' center '' > 7.4g / 1.0s/it < /td > < td align= '' center '' > 8.4g / 1.1s/it < /td > < td align= '' center '' > 11.0g / 1.7s/it < /td > < td align= '' center '' > 16.2g / 3.3s/it < /td > < td align= '' center '' > 21.8g / 6.8s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 13.7g / 1.0s/it < /td > < td align= '' center '' > 14.0g / 1.0s/it < /td > < td align= '' center '' > 14.0g / 1.1s/it < /td > < td align= '' center '' > 15.1g / 1.8s/it < /td > < td align= '' center '' > 19.7g / 3.4s/it < /td > < td align= '' center '' > 27.7g / 7.0s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 5.8g / 1.4s/it < /td > < td align= '' center '' > 6.0g / 1.4s/it < /td > < td align= '' center '' > 6.6g / 1.4s/it < /td > < td align= '' center '' > 7.8g / 2.0s/it < /td > < td align= '' center '' > 10.2g / 3.4s/it < /td > < td align= '' center '' > 15.8g / 6.5s/it < /td > < /tr > < tr > < td > full-parameter < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 43.5g / 2.1s/it < /td > < td align= '' center '' > 43.5g / 2.2s/it < /td > < td align= '' center '' > 43.5g / 2.2s/it < /td > < td align= '' center '' > 43.5g / 2.3s/it < /td > < td align= '' center '' > 47.1g / 2.8s/it < /td > < td align= '' center '' > 48.3g / 5.6s/it < /td > < /tr > < tr > < th rowspan= '' 5 '' > 7b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 20.1g / 1.2s/it < /td > < td align= '' center '' > 20.4g / 1.5s/it < /td > < td align= '' center '' > 21.5g / 2.8s/it < /td > < td align= '' center '' > 23.8g / 5.2s/it < /td > < td align= '' center '' > 29.7g / 10.1s/it < /td > < td align= '' center '' > 36.6g / 21.3s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 33.7g / 1.4s/it < /td > < td align= '' center '' > 34.1g / 1.6s/it < /td > < td align= '' center '' > 35.2g / 2.9s/it < /td > < td align= '' center '' > 35.1g / 5.3s/it < /td > < td align= '' center '' > 39.2g / 10.3s/it < /td > < td align= '' center '' > 48.5g / 21.7s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 11.5g / 3.0s/it < /td > < td align= '' center '' > 11.5g / 3.0s/it < /td > < td align= '' center '' > 12.3g / 3.5s/it < /td > < td align= '' center '' > 13.9g / 7.0s/it < /td > < td align= '' center '' > 16.9g / 11.6s/it < /td > < td align= '' center '' > 23.5g / 22.3s/it < /td > < /tr > < tr > < td > full-parameter < /td > < td > 1 < /td > < td > 2 < /td > < td align= '' center '' > 139.2g / 4.0s/it < /td > < td align= '' center '' > 148.0g / 4.0s/it < /td > < td align= '' center '' > 162.0g / 4.5s/it < /td > < td align= '' center '' > - < /td > < td align= '' center '' > - < /td > < td align= '' center '' > - < 
/td > < /tr > < tr > < td > lora ( multinode ) < /td > < td > 2 < /td > < td > 2 < /td > < td align= '' center '' > 74.7g / 2.09s/it < /td > < td align= '' center '' > 77.6g / 3.16s/it < /td > < td align= '' center '' > 84.9g / 5.17s/it < /td > < td align= '' center '' > 95.1g / 9.25s/it < /td > < td align= '' center '' > 121.1g / 18.1s/it < /td > < td align= '' center '' > 155.5g / 37.4s/it < /td > < /tr > < tr > < th rowspan= '' 3 '' > 14b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 34.6g / 1.6s/it < /td > < td align= '' center '' > 35.1g / 2.4s/it < /td > < td align= '' center '' > 35.3g / 4.4s/it < /td > < td align= '' center '' > 37.4g / 8.4s/it < /td > < td align= '' center '' > 42.5g / 17.0s/it < /td > < td align= '' center '' > 55.2g / 36.0s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 51.2 / 1.7s/it < /td > < td align= '' center '' > 51.1g / 2.6s/it < /td > < td align= '' center '' > 51.5g / 4.6s/it < /td > < td align= '' center '' > 54.1g / 8.6s/it < /td > < td align= '' center '' > 56.8g / 17.2s/it < /td > < td align= '' center '' > 67.7g / 36.3s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 18.7g / 5.3s/it < /td > < td align= '' center '' > 18.4g / 6.3s/it < /td > < td align= '' center '' > 18.9g / 8.2s/it < /td > < td align= '' center '' > 19.9g / 11.8s/it < /td > < td align= '' center '' > 23.0g / 20.1s/it < /td > < td align= '' center '' > 27.9g / 38.3s/it < /td > < /tr > < tr > < th rowspan= '' 2 '' > 72b < /th > < td > lora + deepspeed zero3 < /td > < td > 1 < /td > < td > 4 < /td > < td align= '' center '' > 215.4g / 17.6s/it < /td > < td align= '' center '' > 217.7g / 20.5s/it < /td > < td align= '' center '' > 222.6g / 29.4s/it < /td > < td align= '' center '' > 228.8g / 45.7s/it < /td > < td align= '' center '' > 249.0g / 83.4s/it < /td > < td align= '' center '' > 289.2g / 161.5s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 61.4g / 27.4s/it < /td > < td align= '' center '' > 61.4g / 31.5s/it < /td > < td align= '' center '' > 62.9g / 41.4s/it < /td > < td align= '' center '' > 64.1g / 59.5s/it < /td > < td align= '' center '' > 68.0g / 97.7s/it < /td > < td align= '' center '' > 75.6g / 179.8s/it < /td > < /tr > < /table > < br >" ] ]
[ [ "profiling", "memory", "speed", "profile", "gpu", "memory", "training", "speed", "lora", "(", "lora", "(", "emb", ")", "refers", "training", "embedding", "output", "layer", ",", "lora", "trainable", "embedding", "output", "layer", ")", "q-lora", "setup", "single-gpu", "training", ".", "test", ",", "experiment", "single", "a100-sxm4-80g", "gpu", ",", "use", "cuda", "11.8", "pytorch", "2.0", ".", "flash", "attention", "2", "applied", ".", "uniformly", "use", "batch", "size", "1", "gradient", "accumulation", "8", ".", "profile", "memory", "(", "gb", ")", "speed", "(", "s/iter", ")", "input", "different", "length", ",", "namely", "256", ",", "512", ",", "1024", ",", "2048", ",", "4096", ",", "8192", ".", "also", "report", "statistic", "full-parameter", "finetuning", "qwen-7b", "2", "a100", "gpus", ".", "report", "statistic", "256", ",", "512", ",", "1024", "token", "due", "limitation", "gpu", "memory", ".", "qwen-7b", ",", "also", "test", "performance", "multinode", "finetuning", ".", "experiment", "using", "two", "server", ",", "containing", "two", "a100-sxm4-80g", "gpus", ",", "rest", "configuration", "qwen-7b", "experiment", ".", "result", "multinode", "finetuning", "marked", "lora", "(", "multinode", ")", "table", ".", "qwen-72b", ",", "experiment", "two", "way", ":", "1", ")", "lora", "fintuning", "+", "deepspeed", "zero", "3", "4", "a100-sxm4-80g", "gpus", "2", ")", "qlora", "(", "int4", ")", "fine-tuning", "single", "a100-sxm4-80g", "gpu", ".", "note", "oom", "occurs", "4", "a100-sxm4-80g", "gpus", "lora", "(", "emb", ")", "fine-tuning", "lora", "fine-tuning", "without", "deepspeed", "zero", "3", "(", "pas", "`", "--", "deepspeed", "finetune/ds_config_zero3.json", "`", "[", "`", "finetune/finetune_lora_ds.sh", "`", "]", "(", "finetune/finetune_lora_ds.sh", ")", "enable", "deepspeed", "zero", "3", ")", ".", "statistic", "listed", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "model", "size", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "method", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "#", "node", "<", "/th", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "#", "gpus", "per", "node", "<", "/th", ">", "<", "th", "colspan=", "''", "6", "''", "align=", "''", "center", "''", ">", "sequence", "length", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "256", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "512", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "1024", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "2048", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "4096", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "8192", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "4", "''", ">", "1.8b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.7g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "7.4g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "8.4g", "/", "1.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.0g", "/", "1.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "16.2g", "/", "3.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.8g", "/", "6.8s/it", "<", 
"/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "13.7g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "14.0g", "/", "1.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "14.0g", "/", "1.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.1g", "/", "1.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "19.7g", "/", "3.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.7g", "/", "7.0s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "5.8g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.0g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "6.6g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "7.8g", "/", "2.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "10.2g", "/", "3.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.8g", "/", "6.5s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "full-parameter", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.5g", "/", "2.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "47.1g", "/", "2.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.3g", "/", "5.6s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "5", "''", ">", "7b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "20.1g", "/", "1.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "20.4g", "/", "1.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.5g", "/", "2.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.8g", "/", "5.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "29.7g", "/", "10.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "36.6g", "/", "21.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "33.7g", "/", "1.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.1g", "/", "1.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.2g", "/", "2.9s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.1g", "/", "5.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "39.2g", "/", "10.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.5g", "/", 
"21.7s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.5g", "/", "3.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.5g", "/", "3.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "12.3g", "/", "3.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "13.9g", "/", "7.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "16.9g", "/", "11.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.5g", "/", "22.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "full-parameter", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "139.2g", "/", "4.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "148.0g", "/", "4.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "162.0g", "/", "4.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "-", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "multinode", ")", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", ">", "2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.7g", "/", "2.09s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "77.6g", "/", "3.16s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "84.9g", "/", "5.17s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "95.1g", "/", "9.25s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "121.1g", "/", "18.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "155.5g", "/", "37.4s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "3", "''", ">", "14b", "<", "/th", ">", "<", "td", ">", "lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.6g", "/", "1.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.1g", "/", "2.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "35.3g", "/", "4.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "37.4g", "/", "8.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "42.5g", "/", "17.0s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "55.2g", "/", "36.0s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "lora", "(", "emb", ")", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.2", "/", "1.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.1g", "/", "2.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "51.5g", "/", "4.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "54.1g", "/", "8.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "56.8g", "/", "17.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.7g", "/", "36.3s/it", "<", "/td", 
">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.7g", "/", "5.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.4g", "/", "6.3s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "18.9g", "/", "8.2s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "19.9g", "/", "11.8s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.0g", "/", "20.1s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.9g", "/", "38.3s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", ">", "72b", "<", "/th", ">", "<", "td", ">", "lora", "+", "deepspeed", "zero3", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "215.4g", "/", "17.6s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "217.7g", "/", "20.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "222.6g", "/", "29.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "228.8g", "/", "45.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "249.0g", "/", "83.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "289.2g", "/", "161.5s/it", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "q-lora", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", ">", "1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "61.4g", "/", "27.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "61.4g", "/", "31.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "62.9g", "/", "41.4s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "64.1g", "/", "59.5s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "68.0g", "/", "97.7s/it", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "75.6g", "/", "179.8s/it", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "<", "br", ">" ], [ "profiling memory speed profile gpu memory training speed lora ( lora ( emb ) refers training embedding output layer , lora trainable embedding output layer ) q-lora setup single-gpu training .", "test , experiment single a100-sxm4-80g gpu , use cuda 11.8 pytorch 2.0 .", "flash attention 2 applied .", "uniformly use batch size 1 gradient accumulation 8 .", "profile memory ( gb ) speed ( s/iter ) input different length , namely 256 , 512 , 1024 , 2048 , 4096 , 8192 .", "also report statistic full-parameter finetuning qwen-7b 2 a100 gpus .", "report statistic 256 , 512 , 1024 token due limitation gpu memory .", "qwen-7b , also test performance multinode finetuning .", "experiment using two server , containing two a100-sxm4-80g gpus , rest configuration qwen-7b experiment .", "result multinode finetuning marked lora ( multinode ) table .", "qwen-72b , experiment two way : 1 ) lora fintuning + deepspeed zero 3 4 a100-sxm4-80g gpus 2 ) qlora ( int4 ) fine-tuning single a100-sxm4-80g gpu .", "note oom occurs 4 a100-sxm4-80g gpus lora ( emb ) fine-tuning lora fine-tuning without deepspeed zero 3 ( pas ` -- deepspeed finetune/ds_config_zero3.json ` [ ` finetune/finetune_lora_ds.sh ` ] ( finetune/finetune_lora_ds.sh ) enable deepspeed zero 3 ) .", "statistic listed : < table > < tr > < 
th rowspan= '' 2 '' > model size < /th > < th rowspan= '' 2 '' > method < /th > < th rowspan= '' 2 '' > # node < /th > < th rowspan= '' 2 '' > # gpus per node < /th > < th colspan= '' 6 '' align= '' center '' > sequence length < /th > < /tr > < tr > < th align= '' center '' > 256 < /th > < th align= '' center '' > 512 < /th > < th align= '' center '' > 1024 < /th > < th align= '' center '' > 2048 < /th > < th align= '' center '' > 4096 < /th > < th align= '' center '' > 8192 < /th > < /tr > < tr > < th rowspan= '' 4 '' > 1.8b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 6.7g / 1.0s/it < /td > < td align= '' center '' > 7.4g / 1.0s/it < /td > < td align= '' center '' > 8.4g / 1.1s/it < /td > < td align= '' center '' > 11.0g / 1.7s/it < /td > < td align= '' center '' > 16.2g / 3.3s/it < /td > < td align= '' center '' > 21.8g / 6.8s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 13.7g / 1.0s/it < /td > < td align= '' center '' > 14.0g / 1.0s/it < /td > < td align= '' center '' > 14.0g / 1.1s/it < /td > < td align= '' center '' > 15.1g / 1.8s/it < /td > < td align= '' center '' > 19.7g / 3.4s/it < /td > < td align= '' center '' > 27.7g / 7.0s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 5.8g / 1.4s/it < /td > < td align= '' center '' > 6.0g / 1.4s/it < /td > < td align= '' center '' > 6.6g / 1.4s/it < /td > < td align= '' center '' > 7.8g / 2.0s/it < /td > < td align= '' center '' > 10.2g / 3.4s/it < /td > < td align= '' center '' > 15.8g / 6.5s/it < /td > < /tr > < tr > < td > full-parameter < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 43.5g / 2.1s/it < /td > < td align= '' center '' > 43.5g / 2.2s/it < /td > < td align= '' center '' > 43.5g / 2.2s/it < /td > < td align= '' center '' > 43.5g / 2.3s/it < /td > < td align= '' center '' > 47.1g / 2.8s/it < /td > < td align= '' center '' > 48.3g / 5.6s/it < /td > < /tr > < tr > < th rowspan= '' 5 '' > 7b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 20.1g / 1.2s/it < /td > < td align= '' center '' > 20.4g / 1.5s/it < /td > < td align= '' center '' > 21.5g / 2.8s/it < /td > < td align= '' center '' > 23.8g / 5.2s/it < /td > < td align= '' center '' > 29.7g / 10.1s/it < /td > < td align= '' center '' > 36.6g / 21.3s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 33.7g / 1.4s/it < /td > < td align= '' center '' > 34.1g / 1.6s/it < /td > < td align= '' center '' > 35.2g / 2.9s/it < /td > < td align= '' center '' > 35.1g / 5.3s/it < /td > < td align= '' center '' > 39.2g / 10.3s/it < /td > < td align= '' center '' > 48.5g / 21.7s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 11.5g / 3.0s/it < /td > < td align= '' center '' > 11.5g / 3.0s/it < /td > < td align= '' center '' > 12.3g / 3.5s/it < /td > < td align= '' center '' > 13.9g / 7.0s/it < /td > < td align= '' center '' > 16.9g / 11.6s/it < /td > < td align= '' center '' > 23.5g / 22.3s/it < /td > < /tr > < tr > < td > full-parameter < /td > < td > 1 < /td > < td > 2 < /td > < td align= '' center '' > 139.2g / 4.0s/it < /td > < td align= '' center '' > 148.0g / 4.0s/it < /td > < td align= '' center '' > 162.0g / 4.5s/it < /td > < td align= '' center '' > - < /td > < td align= '' center '' > - < /td > < td align= '' center '' > - < 
/td > < /tr > < tr > < td > lora ( multinode ) < /td > < td > 2 < /td > < td > 2 < /td > < td align= '' center '' > 74.7g / 2.09s/it < /td > < td align= '' center '' > 77.6g / 3.16s/it < /td > < td align= '' center '' > 84.9g / 5.17s/it < /td > < td align= '' center '' > 95.1g / 9.25s/it < /td > < td align= '' center '' > 121.1g / 18.1s/it < /td > < td align= '' center '' > 155.5g / 37.4s/it < /td > < /tr > < tr > < th rowspan= '' 3 '' > 14b < /th > < td > lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 34.6g / 1.6s/it < /td > < td align= '' center '' > 35.1g / 2.4s/it < /td > < td align= '' center '' > 35.3g / 4.4s/it < /td > < td align= '' center '' > 37.4g / 8.4s/it < /td > < td align= '' center '' > 42.5g / 17.0s/it < /td > < td align= '' center '' > 55.2g / 36.0s/it < /td > < /tr > < tr > < td > lora ( emb ) < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 51.2 / 1.7s/it < /td > < td align= '' center '' > 51.1g / 2.6s/it < /td > < td align= '' center '' > 51.5g / 4.6s/it < /td > < td align= '' center '' > 54.1g / 8.6s/it < /td > < td align= '' center '' > 56.8g / 17.2s/it < /td > < td align= '' center '' > 67.7g / 36.3s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 18.7g / 5.3s/it < /td > < td align= '' center '' > 18.4g / 6.3s/it < /td > < td align= '' center '' > 18.9g / 8.2s/it < /td > < td align= '' center '' > 19.9g / 11.8s/it < /td > < td align= '' center '' > 23.0g / 20.1s/it < /td > < td align= '' center '' > 27.9g / 38.3s/it < /td > < /tr > < tr > < th rowspan= '' 2 '' > 72b < /th > < td > lora + deepspeed zero3 < /td > < td > 1 < /td > < td > 4 < /td > < td align= '' center '' > 215.4g / 17.6s/it < /td > < td align= '' center '' > 217.7g / 20.5s/it < /td > < td align= '' center '' > 222.6g / 29.4s/it < /td > < td align= '' center '' > 228.8g / 45.7s/it < /td > < td align= '' center '' > 249.0g / 83.4s/it < /td > < td align= '' center '' > 289.2g / 161.5s/it < /td > < /tr > < tr > < td > q-lora < /td > < td > 1 < /td > < td > 1 < /td > < td align= '' center '' > 61.4g / 27.4s/it < /td > < td align= '' center '' > 61.4g / 31.5s/it < /td > < td align= '' center '' > 62.9g / 41.4s/it < /td > < td align= '' center '' > 64.1g / 59.5s/it < /td > < td align= '' center '' > 68.0g / 97.7s/it < /td > < td align= '' center '' > 75.6g / 179.8s/it < /td > < /tr > < /table > < br >" ] ]
### Profiling of Memory and Speed

We profile the GPU memory and training speed of both LoRA (LoRA (emb) refers to training the embedding and output layer, while plain LoRA keeps the embedding and output layer frozen) and Q-LoRA in the setup of single-GPU training. In this test, we experiment on a single A100-SXM4-80G GPU, and we use CUDA 11.8 and PyTorch 2.0. Flash Attention 2 is applied. We uniformly use a batch size of 1 and gradient accumulation of 8. We profile the memory (GB) and speed (s/iter) of inputs of different lengths, namely 256, 512, 1024, 2048, 4096, and 8192. We also report the statistics of full-parameter finetuning with Qwen-7B on 2 A100 GPUs. For this full-parameter setup, we only report the statistics of 256, 512, and 1024 tokens due to the limitation of GPU memory.

For Qwen-7B, we also test the performance of multinode finetuning. We experiment using two servers, each containing two A100-SXM4-80G GPUs, and the rest of the configurations are the same as in the other Qwen-7B experiments. The results of multinode finetuning are marked as LoRA (multinode) in the table.

For Qwen-72B, we experiment in two ways: 1) LoRA finetuning + DeepSpeed ZeRO 3 on 4 A100-SXM4-80G GPUs and 2) Q-LoRA (Int4) fine-tuning on a single A100-SXM4-80G GPU. Note that OOM occurs on 4 A100-SXM4-80G GPUs both with LoRA (emb) fine-tuning and with LoRA fine-tuning without DeepSpeed ZeRO 3 (you can pass `--deepspeed finetune/ds_config_zero3.json` to [`finetune/finetune_lora_ds.sh`](finetune/finetune_lora_ds.sh) to enable DeepSpeed ZeRO 3).

The statistics are listed below:

<table>
<tr>
    <th rowspan="2">Model Size</th><th rowspan="2">Method</th><th rowspan="2">#Nodes</th><th rowspan="2">#GPUs per node</th><th colspan="6" align="center">Sequence Length</th>
</tr>
<tr>
    <th align="center">256</th><th align="center">512</th><th align="center">1024</th><th align="center">2048</th><th align="center">4096</th><th align="center">8192</th>
</tr>
<tr>
    <th rowspan="4">1.8B</th><td>LoRA</td><td>1</td><td>1</td><td align="center">6.7G / 1.0s/it</td><td align="center">7.4G / 1.0s/it</td><td align="center">8.4G / 1.1s/it</td><td align="center">11.0G / 1.7s/it</td><td align="center">16.2G / 3.3s/it</td><td align="center">21.8G / 6.8s/it</td>
</tr>
<tr>
    <td>LoRA (emb)</td><td>1</td><td>1</td><td align="center">13.7G / 1.0s/it</td><td align="center">14.0G / 1.0s/it</td><td align="center">14.0G / 1.1s/it</td><td align="center">15.1G / 1.8s/it</td><td align="center">19.7G / 3.4s/it</td><td align="center">27.7G / 7.0s/it</td>
</tr>
<tr>
    <td>Q-LoRA</td><td>1</td><td>1</td><td align="center">5.8G / 1.4s/it</td><td align="center">6.0G / 1.4s/it</td><td align="center">6.6G / 1.4s/it</td><td align="center">7.8G / 2.0s/it</td><td align="center">10.2G / 3.4s/it</td><td align="center">15.8G / 6.5s/it</td>
</tr>
<tr>
    <td>Full-parameter</td><td>1</td><td>1</td><td align="center">43.5G / 2.1s/it</td><td align="center">43.5G / 2.2s/it</td><td align="center">43.5G / 2.2s/it</td><td align="center">43.5G / 2.3s/it</td><td align="center">47.1G / 2.8s/it</td><td align="center">48.3G / 5.6s/it</td>
</tr>
<tr>
    <th rowspan="5">7B</th><td>LoRA</td><td>1</td><td>1</td><td align="center">20.1G / 1.2s/it</td><td align="center">20.4G / 1.5s/it</td><td align="center">21.5G / 2.8s/it</td><td align="center">23.8G / 5.2s/it</td><td align="center">29.7G / 10.1s/it</td><td align="center">36.6G / 21.3s/it</td>
</tr>
<tr>
    <td>LoRA (emb)</td><td>1</td><td>1</td><td align="center">33.7G / 1.4s/it</td><td align="center">34.1G / 1.6s/it</td><td align="center">35.2G / 2.9s/it</td><td align="center">35.1G / 5.3s/it</td><td align="center">39.2G / 10.3s/it</td><td align="center">48.5G / 21.7s/it</td>
</tr>
<tr>
    <td>Q-LoRA</td><td>1</td><td>1</td><td align="center">11.5G / 3.0s/it</td><td align="center">11.5G / 3.0s/it</td><td align="center">12.3G / 3.5s/it</td><td align="center">13.9G / 7.0s/it</td><td align="center">16.9G / 11.6s/it</td><td align="center">23.5G / 22.3s/it</td>
</tr>
<tr>
    <td>Full-parameter</td><td>1</td><td>2</td><td align="center">139.2G / 4.0s/it</td><td align="center">148.0G / 4.0s/it</td><td align="center">162.0G / 4.5s/it</td><td align="center">-</td><td align="center">-</td><td align="center">-</td>
</tr>
<tr>
    <td>LoRA (multinode)</td><td>2</td><td>2</td><td align="center">74.7G / 2.09s/it</td><td align="center">77.6G / 3.16s/it</td><td align="center">84.9G / 5.17s/it</td><td align="center">95.1G / 9.25s/it</td><td align="center">121.1G / 18.1s/it</td><td align="center">155.5G / 37.4s/it</td>
</tr>
<tr>
    <th rowspan="3">14B</th><td>LoRA</td><td>1</td><td>1</td><td align="center">34.6G / 1.6s/it</td><td align="center">35.1G / 2.4s/it</td><td align="center">35.3G / 4.4s/it</td><td align="center">37.4G / 8.4s/it</td><td align="center">42.5G / 17.0s/it</td><td align="center">55.2G / 36.0s/it</td>
</tr>
<tr>
    <td>LoRA (emb)</td><td>1</td><td>1</td><td align="center">51.2G / 1.7s/it</td><td align="center">51.1G / 2.6s/it</td><td align="center">51.5G / 4.6s/it</td><td align="center">54.1G / 8.6s/it</td><td align="center">56.8G / 17.2s/it</td><td align="center">67.7G / 36.3s/it</td>
</tr>
<tr>
    <td>Q-LoRA</td><td>1</td><td>1</td><td align="center">18.7G / 5.3s/it</td><td align="center">18.4G / 6.3s/it</td><td align="center">18.9G / 8.2s/it</td><td align="center">19.9G / 11.8s/it</td><td align="center">23.0G / 20.1s/it</td><td align="center">27.9G / 38.3s/it</td>
</tr>
<tr>
    <th rowspan="2">72B</th><td>LoRA + DeepSpeed ZeRO3</td><td>1</td><td>4</td><td align="center">215.4G / 17.6s/it</td><td align="center">217.7G / 20.5s/it</td><td align="center">222.6G / 29.4s/it</td><td align="center">228.8G / 45.7s/it</td><td align="center">249.0G / 83.4s/it</td><td align="center">289.2G / 161.5s/it</td>
</tr>
<tr>
    <td>Q-LoRA</td><td>1</td><td>1</td><td align="center">61.4G / 27.4s/it</td><td align="center">61.4G / 31.5s/it</td><td align="center">62.9G / 41.4s/it</td><td align="center">64.1G / 59.5s/it</td><td align="center">68.0G / 97.7s/it</td><td align="center">75.6G / 179.8s/it</td>
</tr>
</table>
<br>
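For the 72B LoRA + DeepSpeed ZeRO 3 setting, the note above names the flag and config file involved. Below is a minimal sketch of wiring them together; whether `finetune/finetune_lora_ds.sh` forwards extra command-line flags in this way is an assumption, so verify it against the script's argument handling first.

```bash
# Hedged sketch: enable DeepSpeed ZeRO 3 for the LoRA run by passing the config
# mentioned above to the distributed LoRA script (verify the script accepts
# pass-through flags like this before relying on it).
bash finetune/finetune_lora_ds.sh --deepspeed finetune/ds_config_zero3.json
```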
### vLLM

For deployment and fast inference, we suggest using vLLM. If you use CUDA 12.1 and PyTorch 2.1, you can directly use the following command to install vLLM.

```bash
pip install vllm
# The lines below support Int4 quantization (Int8 will be supported soon).
# The installation is slower (~10 minutes).
git clone https://github.com/QwenLM/vllm-gptq
cd vllm-gptq
pip install -e .
```

Otherwise, please refer to the official vLLM [Installation Instructions](https://docs.vllm.ai/en/latest/getting_started/installation.html), or our [vLLM repo for GPTQ quantization](https://github.com/QwenLM/vllm-gptq).
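After installation, a quick way to check that vLLM can serve a Qwen checkpoint is its built-in OpenAI-compatible server. The sketch below uses standard vLLM options; the model id `Qwen/Qwen-7B-Chat` and the port are illustrative choices rather than something this README prescribes.

```bash
# Serve a Qwen chat model with vLLM's OpenAI-compatible server
# (model id and port here are illustrative).
python -m vllm.entrypoints.openai.api_server \
    --model Qwen/Qwen-7B-Chat \
    --trust-remote-code \
    --port 8000
```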
### vLLM + Web Demo / OpenAI-like API

You can use FastChat to launch a web demo or an OpenAI API server. First, install FastChat:

```bash
pip install "fschat[model_worker,webui]"
```

To run Qwen with vLLM and FastChat, you need to launch a controller first:

```bash
python -m fastchat.serve.controller
```

Then you can launch the model worker, which loads your model for inference. For single-GPU inference, you can directly run:

```bash
python -m fastchat.serve.vllm_worker --model-path $model_path --trust-remote-code --dtype bfloat16
```
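Once the controller and worker are up, FastChat's stock entrypoints can expose them as an OpenAI-compatible API or as a Gradio web demo. This is a minimal sketch assuming FastChat's standard `openai_api_server` and `gradio_web_server` modules; the host and port values are illustrative defaults.

```bash
# OpenAI-compatible API server on top of the running controller/worker
# (host/port are illustrative).
python -m fastchat.serve.openai_api_server --host localhost --port 8000

# Or launch the Gradio web demo instead.
python -m fastchat.serve.gradio_web_server
```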
### Web UI

We provide code for users to build a web UI demo (thanks to @wysaid). Before you start, make sure you install the following packages:

```
pip install -r requirements_web_demo.txt
```

Then run the command below and click on the generated link:

```bash
python web_demo.py
```

<p align="center">
    <br>
    <img src="assets/web_demo.gif" width="600" />
    <br>
</p>
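If you need to change how the demo is served, `web_demo.py` may accept switches similar to the `-c` / `--cpu-only` options that `openai_api.py` uses in the next section; the flags below are assumptions, so confirm them with `python web_demo.py --help` before use.

```bash
# Assumed flags (verify with --help): illustrative checkpoint path, custom port,
# and a public share link.
python web_demo.py -c Qwen/Qwen-7B-Chat --server-port 8000 --share
```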
API We provide methods to deploy a local API based on the OpenAI API (thanks to @hanpenggit). Before you start, install the required packages: ```bash pip install fastapi uvicorn "openai<1.0" pydantic sse_starlette ``` Then run the command below to deploy your API: ```bash python openai_api.py ``` You can change the arguments, e.g., `-c` for the checkpoint name or path, `--cpu-only` for CPU-only deployment, etc. If you run into problems when launching the API deployment, updating the packages to their latest versions will probably solve them. Using the API is also simple. See the example below: ```python import openai openai.api_base = "http://localhost:8000/v1" openai.api_key = "none"
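The client example above is cut off after the configuration lines; a hedged continuation, using the legacy `openai<1.0` interface implied by the pinned dependency, might look like the following. The model name `"Qwen"` and the prompts are illustrative, and the local server ignores the API key.

```python
# Hedged continuation of the client example: chat completions against the
# locally deployed OpenAI-style API, using the legacy openai<1.0 interface.
import openai

openai.api_base = "http://localhost:8000/v1"
openai.api_key = "none"  # the local server does not check the key

# Non-streaming chat completion.
response = openai.ChatCompletion.create(
    model="Qwen",
    messages=[{"role": "user", "content": "Tell me a short joke."}],
    stream=False,
)
print(response.choices[0].message.content)

# Streaming variant: tokens arrive as incremental deltas.
for chunk in openai.ChatCompletion.create(
    model="Qwen",
    messages=[{"role": "user", "content": "Hi!"}],
    stream=True,
):
    delta = chunk.choices[0].delta
    if delta.get("content"):
        print(delta["content"], end="", flush=True)
```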
https://github.com/QwenLM/Qwen
0
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "🐳", "docker", "simplify", "deployment", "process", ",", "provide", "docker", "image", "pre-built", "environment", ":", "[", "qwenllm/qwen", "]", "(", "http", ":", "//hub.docker.com/r/qwenllm/qwen", ")", ".", "need", "install", "driver", "download", "model", "file", "launch", "demo", ",", "deploy", "openai", "api", ",", "finetune", "model", "." ], [ "🐳 docker simplify deployment process , provide docker image pre-built environment : [ qwenllm/qwen ] ( http : //hub.docker.com/r/qwenllm/qwen ) .", "need install driver download model file launch demo , deploy openai api , finetune model ." ] ]
[ [ "🐳", "docker", "simplify", "deployment", "process", ",", "provide", "docker", "image", "pre-built", "environment", ":", "[", "qwenllm/qwen", "]", "(", "http", ":", "//hub.docker.com/r/qwenllm/qwen", ")", ".", "need", "install", "driver", "download", "model", "file", "launch", "demo", ",", "deploy", "openai", "api", ",", "finetune", "model", "." ], [ "🐳 docker simplify deployment process , provide docker image pre-built environment : [ qwenllm/qwen ] ( http : //hub.docker.com/r/qwenllm/qwen ) .", "need install driver download model file launch demo , deploy openai api , finetune model ." ] ]
🐳 Docker To simplify the deployment process, we provide Docker images with pre-built environments: [qwenllm/qwen](https://hub.docker.com/r/qwenllm/qwen). You only need to install the driver and download the model files to launch demos, deploy the OpenAI-compatible API, and finetune the model.
https://github.com/QwenLM/Qwen
-1
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "preparation", "1", ".", "install", "correct", "version", "nvidia", "driver", "depending", "image", "use", ":", "-", "`", "qwenllm/qwen", ":", "cu117", "`", "(", "*", "*", "recommend", "*", "*", ")", ":", "`", ">", "=", "515.48.07", "`", "-", "`", "qwenllm/qwen", ":", "cu114", "`", "(", "w/o", "flash-attention", ")", ":", "`", ">", "=", "470.82.01", "`", "-", "`", "qwenllm/qwen", ":", "cu121", "`", ":", "`", ">", "=", "530.30.02", "`", "-", "`", "qwenllm/qwen", ":", "latest", "`", ":", "`", "qwenllm/qwen", ":", "cu117", "`", "2", ".", "install", "configure", "[", "docker", "]", "(", "http", ":", "//docs.docker.com/engine/install/", ")", "[", "nvidia-container-toolkit", "]", "(", "http", ":", "//docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html", ")", ":", "``", "`", "bash" ], [ "preparation 1 .", "install correct version nvidia driver depending image use : - ` qwenllm/qwen : cu117 ` ( * * recommend * * ) : ` > = 515.48.07 ` - ` qwenllm/qwen : cu114 ` ( w/o flash-attention ) : ` > = 470.82.01 ` - ` qwenllm/qwen : cu121 ` : ` > = 530.30.02 ` - ` qwenllm/qwen : latest ` : ` qwenllm/qwen : cu117 ` 2 .", "install configure [ docker ] ( http : //docs.docker.com/engine/install/ ) [ nvidia-container-toolkit ] ( http : //docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html ) : `` ` bash" ] ]
[ [ "preparation", "1", ".", "install", "correct", "version", "nvidia", "driver", "depending", "image", "use", ":", "-", "`", "qwenllm/qwen", ":", "cu117", "`", "(", "*", "*", "recommend", "*", "*", ")", ":", "`", ">", "=", "515.48.07", "`", "-", "`", "qwenllm/qwen", ":", "cu114", "`", "(", "w/o", "flash-attention", ")", ":", "`", ">", "=", "470.82.01", "`", "-", "`", "qwenllm/qwen", ":", "cu121", "`", ":", "`", ">", "=", "530.30.02", "`", "-", "`", "qwenllm/qwen", ":", "latest", "`", ":", "`", "qwenllm/qwen", ":", "cu117", "`", "2", ".", "install", "configure", "[", "docker", "]", "(", "http", ":", "//docs.docker.com/engine/install/", ")", "[", "nvidia-container-toolkit", "]", "(", "http", ":", "//docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html", ")", ":", "``", "`", "bash" ], [ "preparation 1 .", "install correct version nvidia driver depending image use : - ` qwenllm/qwen : cu117 ` ( * * recommend * * ) : ` > = 515.48.07 ` - ` qwenllm/qwen : cu114 ` ( w/o flash-attention ) : ` > = 470.82.01 ` - ` qwenllm/qwen : cu121 ` : ` > = 530.30.02 ` - ` qwenllm/qwen : latest ` : ` qwenllm/qwen : cu117 ` 2 .", "install configure [ docker ] ( http : //docs.docker.com/engine/install/ ) [ nvidia-container-toolkit ] ( http : //docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html ) : `` ` bash" ] ]
Preparation 1. Install the correct version of the NVIDIA driver for the image you intend to use: - `qwenllm/qwen:cu117` (**recommended**): `>= 515.48.07` - `qwenllm/qwen:cu114` (w/o flash-attention): `>= 470.82.01` - `qwenllm/qwen:cu121`: `>= 530.30.02` - `qwenllm/qwen:latest`: same as `qwenllm/qwen:cu117` 2. Install and configure [docker](https://docs.docker.com/engine/install/) and [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html): ```bash
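As a side note, the driver thresholds listed above can be checked programmatically. The snippet below is a hypothetical helper (not part of the repository) that compares the output of `nvidia-smi` against the minimum version required for each image tag.

```python
# Hypothetical helper (not part of the Qwen repo): check whether the local
# NVIDIA driver satisfies the minimum version required by a given image tag,
# using the thresholds listed above. Requires `nvidia-smi` on PATH.
import subprocess

MIN_DRIVER = {
    "qwenllm/qwen:cu117": (515, 48, 7),
    "qwenllm/qwen:cu114": (470, 82, 1),
    "qwenllm/qwen:cu121": (530, 30, 2),
    "qwenllm/qwen:latest": (515, 48, 7),  # same as cu117
}

def installed_driver_version() -> tuple:
    out = subprocess.check_output(
        ["nvidia-smi", "--query-gpu=driver_version", "--format=csv,noheader"],
        text=True,
    )
    # e.g. "535.104.05" -> (535, 104, 5); use the first GPU's driver version.
    return tuple(int(part) for part in out.strip().splitlines()[0].split("."))

def driver_ok(image: str) -> bool:
    return installed_driver_version() >= MIN_DRIVER[image]

if __name__ == "__main__":
    print("qwenllm/qwen:cu117 driver ok:", driver_ok("qwenllm/qwen:cu117"))
```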
https://github.com/QwenLM/Qwen
1
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "test", "docker", "correctly", "installed", "sudo", "docker", "run", "hello-world" ], [ "test docker correctly installed sudo docker run hello-world" ] ]
[ [ "test", "docker", "correctly", "installed", "sudo", "docker", "run", "hello-world" ], [ "test docker correctly installed sudo docker run hello-world" ] ]
test if docker is correctly installed sudo docker run hello-world
https://github.com/QwenLM/Qwen
-1
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "test", "nvidia-container-toolkit", "correctly", "installed", "sudo", "docker", "run", "--", "rm", "--", "runtime=nvidia", "--", "gpus", "ubuntu", "nvidia-smi", "``", "`", "3", ".", "download", "model", "checkpoint", "code", "environment", "(", "see", "[", "]", "(", "#", "downloadmodel", ")", ")", "." ], [ "test nvidia-container-toolkit correctly installed sudo docker run -- rm -- runtime=nvidia -- gpus ubuntu nvidia-smi `` ` 3 .", "download model checkpoint code environment ( see [ ] ( # downloadmodel ) ) ." ] ]
[ [ "test", "nvidia-container-toolkit", "correctly", "installed", "sudo", "docker", "run", "--", "rm", "--", "runtime=nvidia", "--", "gpus", "ubuntu", "nvidia-smi", "``", "`", "3", ".", "download", "model", "checkpoint", "code", "environment", "(", "see", "[", "]", "(", "#", "downloadmodel", ")", ")", "." ], [ "test nvidia-container-toolkit correctly installed sudo docker run -- rm -- runtime=nvidia -- gpus ubuntu nvidia-smi `` ` 3 .", "download model checkpoint code environment ( see [ ] ( # downloadmodel ) ) ." ] ]
test if nvidia-container-toolkit is correctly installed sudo docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi ``` 3. Download the model checkpoints and code to your environment (see [here](#DownloadModel)).
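One common way to fetch the checkpoints, assuming they are pulled from the Hugging Face Hub (ModelScope is an alternative), is sketched below; the local directory is whatever you later point `CHECKPOINT_PATH` at.

```python
# Hedged sketch: download the Qwen-7B-Chat checkpoint from the Hugging Face Hub.
# The target directory is an example path; it becomes CHECKPOINT_PATH below.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="Qwen/Qwen-7B-Chat",
    local_dir="/path/to/Qwen-7B-Chat",
)
print("Checkpoint files downloaded to:", local_dir)
```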
https://github.com/QwenLM/Qwen
1
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "deployment", "use", "qwen-7b-chat", "example", ".", "launching", "web", "demo", "api", ",", "setup", "configuration", "shown", ":", "``", "`", "bash", "image_name=qwenllm/qwen", ":", "cu117", "port=8901", "checkpoint_path=/path/to/qwen-7b-chat" ], [ "deployment use qwen-7b-chat example .", "launching web demo api , setup configuration shown : `` ` bash image_name=qwenllm/qwen : cu117 port=8901 checkpoint_path=/path/to/qwen-7b-chat" ] ]
[ [ "deployment", "use", "qwen-7b-chat", "example", ".", "launching", "web", "demo", "api", ",", "setup", "configuration", "shown", ":", "``", "`", "bash", "image_name=qwenllm/qwen", ":", "cu117", "port=8901", "checkpoint_path=/path/to/qwen-7b-chat" ], [ "deployment use qwen-7b-chat example .", "launching web demo api , setup configuration shown : `` ` bash image_name=qwenllm/qwen : cu117 port=8901 checkpoint_path=/path/to/qwen-7b-chat" ] ]
Deployment Here we use Qwen-7B-Chat as an example. Before launching a web demo or the API, you can set up the configuration as shown below: ```bash IMAGE_NAME=qwenllm/qwen:cu117 PORT=8901 CHECKPOINT_PATH=/path/to/Qwen-7B-Chat
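Once a container serving the OpenAI-style API from `openai_api.py` is running with this configuration (an assumption; the repository's run scripts handle the actual `docker run` invocation), it can be queried from the host roughly as follows. The model name `"Qwen"` and the prompt are illustrative.

```python
# Hedged sketch: query the containerized OpenAI-style API from the host.
# The port matches the PORT variable above; the payload follows the standard
# OpenAI chat-completions shape.
import requests

PORT = 8901
resp = requests.post(
    f"http://localhost:{PORT}/v1/chat/completions",
    json={
        "model": "Qwen",
        "messages": [{"role": "user", "content": "Hello from inside Docker!"}],
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```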
https://github.com/QwenLM/Qwen
-1
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "finetuning", "method", "finetuning", "using", "pre-built", "docker", "image", "basically", "[", "chapter", "]", "(", "#", "finetuning", ")", "(", "already", "installed", "dependency", "image", ")", ":", "following", "example", "single-gpu", "lora", ":", "``", "`", "bash", "image_name=qwenllm/qwen", ":", "cu117", "checkpoint_path=/path/to/qwen-7b" ], [ "finetuning method finetuning using pre-built docker image basically [ chapter ] ( # finetuning ) ( already installed dependency image ) : following example single-gpu lora : `` ` bash image_name=qwenllm/qwen : cu117 checkpoint_path=/path/to/qwen-7b" ] ]
[ [ "finetuning", "method", "finetuning", "using", "pre-built", "docker", "image", "basically", "[", "chapter", "]", "(", "#", "finetuning", ")", "(", "already", "installed", "dependency", "image", ")", ":", "following", "example", "single-gpu", "lora", ":", "``", "`", "bash", "image_name=qwenllm/qwen", ":", "cu117", "checkpoint_path=/path/to/qwen-7b" ], [ "finetuning method finetuning using pre-built docker image basically [ chapter ] ( # finetuning ) ( already installed dependency image ) : following example single-gpu lora : `` ` bash image_name=qwenllm/qwen : cu117 checkpoint_path=/path/to/qwen-7b" ] ]
Finetuning The method of finetuning with the pre-built Docker image is basically the same as in [the chapter above](#Finetuning) (the dependencies are already installed in the image). The following is an example of single-GPU LoRA finetuning: ```bash IMAGE_NAME=qwenllm/qwen:cu117 CHECKPOINT_PATH=/path/to/Qwen-7B
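After the LoRA run finishes, the resulting adapter can be loaded for inference; a hedged sketch is shown below. The adapter directory name is an assumption (it is whatever `--output_dir` was set to in the finetuning script), and the tokenizer is assumed to have been saved alongside the adapter.

```python
# Hedged sketch: load a LoRA adapter produced by the finetuning scripts.
# "output_qwen" is an assumed --output_dir; adjust it to your own run.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

ADAPTER_DIR = "output_qwen"

model = AutoPeftModelForCausalLM.from_pretrained(
    ADAPTER_DIR, device_map="auto", trust_remote_code=True
).eval()
tokenizer = AutoTokenizer.from_pretrained(ADAPTER_DIR, trust_remote_code=True)

# The chat helper is forwarded from the underlying Qwen model.
response, _ = model.chat(tokenizer, "Summarize LoRA in one sentence.", history=None)
print(response)
```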
https://github.com/QwenLM/Qwen
-1
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/QwenLM/Qwen/main/README.md
[ [ "tool", "usage", "qwen-chat", "optimized", "tool", "usage", "function", "calling", "capability", ".", "user", "develop", "agent", ",", "langchain", "application", ",", "even", "augment", "qwen", "python", "code", "interpreter", ".", "provide", "documentation", "implement", "tool", "call", "based", "principle", "react", "prompting", ",", "please", "refer", "[", "react", "example", "]", "(", "examples/react_prompt.md", ")", ".", "based", "principle", ",", "provide", "support", "function", "calling", "[", "openai_api.py", "]", "(", "openai_api.py", ")", ".", "tested", "model", "'s", "tool", "calling", "capability", "open-source", "chinese", "evaluation", "benchmark", "found", "qwen-chat", "consistently", "performs", "well", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "colspan=", "''", "4", "''", "align=", "''", "center", "''", ">", "chinese", "tool-use", "benchmark", "(", "version", "20231206", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "model", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "tool", "selection", "(", "acc.↑", ")", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "tool", "input", "(", "rouge-l↑", ")", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "false", "positive", "error↓", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "98.0", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.953", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.9", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-3.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.5", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.807", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "80.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-1_8b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "85.0", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.839", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-7b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "95.5", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.900", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-14b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "96.9", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.917", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "5.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-72b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "98.2", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.927", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "1.1", "%", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "ass", "qwen", "'s", "ability", "use", "python", "code", "interpreter", "task", "mathematical", "problem", "solving", ",", "data", "visualization", ",", "general-purpose", "task", "file", "handling", "web", "scraping", ",", "created", "open-sourced", "benchmark", "specifically", "designed", "evaluating", 
"capability", ".", "find", "benchmark", "[", "link", "]", "(", "http", ":", "//github.com/qwenlm/qwen-agent/tree/main/benchmark", ")", ".", "observed", "qwen", "performs", "well", "term", "code", "executability", "result", "accuracy", "generating", "code", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "colspan=", "''", "5", "''", "align=", "''", "center", "''", ">", "code", "interpreter", "benchmark", "(", "version", "20231206", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", "align=", "''", "center", "''", ">", "model", "<", "/th", ">", "<", "th", "colspan=", "''", "3", "''", "align=", "''", "center", "''", ">", "accuracy", "code", "execution", "result", "(", "%", ")", "<", "/th", ">", "<", "th", "colspan=", "''", "1", "''", "align=", "''", "center", "''", ">", "executable", "rate", "code", "(", "%", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "math↑", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "visualization-hard↑", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "visualization-easy↑", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "general↑", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "66.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "60.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-3.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "47.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "33.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "55.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "llama2-13b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "8.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "1.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.3", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "codellama-13b-instruct", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "28.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "internlm-20b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "10.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "25.1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "chatglm3-6b", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "54.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "4.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", 
"qwen-1.8b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "25.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "22.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-7b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "41.9", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "38.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.2", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-14b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "58.4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "31.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "45.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-72b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "72.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "41.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "<", "p", "align=", "''", "center", "''", ">", "<", "br", ">", "<", "img", "src=", "''", "assets/code_interpreter_showcase_001.jpg", "''", "/", ">", "<", "br", ">", "<", "p", ">", "<", "br", ">" ], [ "tool usage qwen-chat optimized tool usage function calling capability .", "user develop agent , langchain application , even augment qwen python code interpreter .", "provide documentation implement tool call based principle react prompting , please refer [ react example ] ( examples/react_prompt.md ) .", "based principle , provide support function calling [ openai_api.py ] ( openai_api.py ) .", "tested model 's tool calling capability open-source chinese evaluation benchmark found qwen-chat consistently performs well : < table > < tr > < th colspan= '' 4 '' align= '' center '' > chinese tool-use benchmark ( version 20231206 ) < /th > < /tr > < tr > < th align= '' center '' > model < /th > < th align= '' center '' > tool selection ( acc.↑ ) < /th > < th align= '' center '' > tool input ( rouge-l↑ ) < /th > < th align= '' center '' > false positive error↓ < /th > < /tr > < tr > < td > gpt-4 < /td > < td align= '' center '' > 98.0 % < /td > < td align= '' center '' > 0.953 < /td > < td align= '' center '' > 23.9 % < /td > < /tr > < tr > < td > gpt-3.5 < /td > < td align= '' center '' > 74.5 % < /td > < td align= '' center '' > 0.807 < /td > < td align= '' center '' > 80.6 % < /td > < /tr > < tr > < td > qwen-1_8b-chat < /td > < td align= '' center '' > 85.0 % < /td > < td align= '' center '' > 0.839 < /td > < td align= '' center '' > 27.6 % < /td > < /tr > < tr > < td > qwen-7b-chat < /td > < td align= '' center '' > 95.5 % < /td > < td align= '' center '' > 0.900 < /td > < td align= '' center '' > 11.6 % < /td > < /tr > < tr > < td > qwen-14b-chat < /td > < td align= '' center '' > 96.9 % < /td > < td align= '' center '' > 0.917 < /td > < td align= '' center '' > 5.6 % < /td > < /tr > < tr > < td > qwen-72b-chat < /td > < td align= '' center '' > 98.2 % < /td > < td align= '' center '' > 0.927 < /td > < td align= '' center '' > 1.1 % 
< /td > < /tr > < /table > ass qwen 's ability use python code interpreter task mathematical problem solving , data visualization , general-purpose task file handling web scraping , created open-sourced benchmark specifically designed evaluating capability .", "find benchmark [ link ] ( http : //github.com/qwenlm/qwen-agent/tree/main/benchmark ) .", "observed qwen performs well term code executability result accuracy generating code : < table > < tr > < th colspan= '' 5 '' align= '' center '' > code interpreter benchmark ( version 20231206 ) < /th > < /tr > < tr > < th rowspan= '' 2 '' align= '' center '' > model < /th > < th colspan= '' 3 '' align= '' center '' > accuracy code execution result ( % ) < /th > < th colspan= '' 1 '' align= '' center '' > executable rate code ( % ) < /th > < /tr > < tr > < th align= '' center '' > math↑ < /th > < th align= '' center '' > visualization-hard↑ < /th > < th align= '' center '' > visualization-easy↑ < /th > < th align= '' center '' > general↑ < /th > < /tr > < tr > < td > gpt-4 < /td > < td align= '' center '' > 82.8 < /td > < td align= '' center '' > 66.7 < /td > < td align= '' center '' > 60.8 < /td > < td align= '' center '' > 82.8 < /td > < /tr > < tr > < td > gpt-3.5 < /td > < td align= '' center '' > 47.3 < /td > < td align= '' center '' > 33.3 < /td > < td align= '' center '' > 55.7 < /td > < td align= '' center '' > 74.1 < /td > < /tr > < tr > < td > llama2-13b-chat < /td > < td align= '' center '' > 8.3 < /td > < td align= '' center '' > 1.2 < /td > < td align= '' center '' > 15.2 < /td > < td align= '' center '' > 48.3 < /td > < /tr > < tr > < td > codellama-13b-instruct < /td > < td align= '' center '' > 28.2 < /td > < td align= '' center '' > 15.5 < /td > < td align= '' center '' > 21.5 < /td > < td align= '' center '' > 74.1 < /td > < /tr > < tr > < td > internlm-20b-chat < /td > < td align= '' center '' > 34.6 < /td > < td align= '' center '' > 10.7 < /td > < td align= '' center '' > 25.1 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > chatglm3-6b < /td > < td align= '' center '' > 54.2 < /td > < td align= '' center '' > 4.8 < /td > < td align= '' center '' > 15.2 < /td > < td align= '' center '' > 67.1 < /td > < /tr > < tr > < td > qwen-1.8b-chat < /td > < td align= '' center '' > 25.6 < /td > < td align= '' center '' > 21.4 < /td > < td align= '' center '' > 22.8 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > qwen-7b-chat < /td > < td align= '' center '' > 41.9 < /td > < td align= '' center '' > 23.8 < /td > < td align= '' center '' > 38.0 < /td > < td align= '' center '' > 67.2 < /td > < /tr > < tr > < td > qwen-14b-chat < /td > < td align= '' center '' > 58.4 < /td > < td align= '' center '' > 31.0 < /td > < td align= '' center '' > 45.6 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > qwen-72b-chat < /td > < td align= '' center '' > 72.7 < /td > < td align= '' center '' > 41.7 < /td > < td align= '' center '' > 43.0 < /td > < td align= '' center '' > 82.8 < /td > < /tr > < /table > < p align= '' center '' > < br > < img src= '' assets/code_interpreter_showcase_001.jpg '' / > < br > < p > < br >" ] ]
[ [ "tool", "usage", "qwen-chat", "optimized", "tool", "usage", "function", "calling", "capability", ".", "user", "develop", "agent", ",", "langchain", "application", ",", "even", "augment", "qwen", "python", "code", "interpreter", ".", "provide", "documentation", "implement", "tool", "call", "based", "principle", "react", "prompting", ",", "please", "refer", "[", "react", "example", "]", "(", "examples/react_prompt.md", ")", ".", "based", "principle", ",", "provide", "support", "function", "calling", "[", "openai_api.py", "]", "(", "openai_api.py", ")", ".", "tested", "model", "'s", "tool", "calling", "capability", "open-source", "chinese", "evaluation", "benchmark", "found", "qwen-chat", "consistently", "performs", "well", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "colspan=", "''", "4", "''", "align=", "''", "center", "''", ">", "chinese", "tool-use", "benchmark", "(", "version", "20231206", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "model", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "tool", "selection", "(", "acc.↑", ")", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "tool", "input", "(", "rouge-l↑", ")", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "false", "positive", "error↓", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "98.0", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.953", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.9", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-3.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.5", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.807", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "80.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-1_8b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "85.0", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.839", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "27.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-7b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "95.5", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.900", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "11.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-14b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "96.9", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.917", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "5.6", "%", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-72b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "98.2", "%", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "0.927", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "1.1", "%", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "ass", "qwen", "'s", "ability", "use", "python", "code", "interpreter", "task", "mathematical", "problem", "solving", ",", "data", "visualization", ",", "general-purpose", "task", "file", "handling", "web", "scraping", ",", "created", "open-sourced", "benchmark", "specifically", "designed", "evaluating", 
"capability", ".", "find", "benchmark", "[", "link", "]", "(", "http", ":", "//github.com/qwenlm/qwen-agent/tree/main/benchmark", ")", ".", "observed", "qwen", "performs", "well", "term", "code", "executability", "result", "accuracy", "generating", "code", ":", "<", "table", ">", "<", "tr", ">", "<", "th", "colspan=", "''", "5", "''", "align=", "''", "center", "''", ">", "code", "interpreter", "benchmark", "(", "version", "20231206", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "rowspan=", "''", "2", "''", "align=", "''", "center", "''", ">", "model", "<", "/th", ">", "<", "th", "colspan=", "''", "3", "''", "align=", "''", "center", "''", ">", "accuracy", "code", "execution", "result", "(", "%", ")", "<", "/th", ">", "<", "th", "colspan=", "''", "1", "''", "align=", "''", "center", "''", ">", "executable", "rate", "code", "(", "%", ")", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "th", "align=", "''", "center", "''", ">", "math↑", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "visualization-hard↑", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "visualization-easy↑", "<", "/th", ">", "<", "th", "align=", "''", "center", "''", ">", "general↑", "<", "/th", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "66.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "60.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "gpt-3.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "47.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "33.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "55.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "llama2-13b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "8.3", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "1.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "48.3", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "codellama-13b-instruct", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "28.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.5", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "74.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "internlm-20b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "34.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "10.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "25.1", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "chatglm3-6b", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "54.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "4.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "15.2", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.1", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", 
"qwen-1.8b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "25.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "21.4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "22.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-7b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "41.9", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "23.8", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "38.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "67.2", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-14b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "58.4", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "31.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "45.6", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "65.5", "<", "/td", ">", "<", "/tr", ">", "<", "tr", ">", "<", "td", ">", "qwen-72b-chat", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "72.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "41.7", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "43.0", "<", "/td", ">", "<", "td", "align=", "''", "center", "''", ">", "82.8", "<", "/td", ">", "<", "/tr", ">", "<", "/table", ">", "<", "p", "align=", "''", "center", "''", ">", "<", "br", ">", "<", "img", "src=", "''", "assets/code_interpreter_showcase_001.jpg", "''", "/", ">", "<", "br", ">", "<", "p", ">", "<", "br", ">" ], [ "tool usage qwen-chat optimized tool usage function calling capability .", "user develop agent , langchain application , even augment qwen python code interpreter .", "provide documentation implement tool call based principle react prompting , please refer [ react example ] ( examples/react_prompt.md ) .", "based principle , provide support function calling [ openai_api.py ] ( openai_api.py ) .", "tested model 's tool calling capability open-source chinese evaluation benchmark found qwen-chat consistently performs well : < table > < tr > < th colspan= '' 4 '' align= '' center '' > chinese tool-use benchmark ( version 20231206 ) < /th > < /tr > < tr > < th align= '' center '' > model < /th > < th align= '' center '' > tool selection ( acc.↑ ) < /th > < th align= '' center '' > tool input ( rouge-l↑ ) < /th > < th align= '' center '' > false positive error↓ < /th > < /tr > < tr > < td > gpt-4 < /td > < td align= '' center '' > 98.0 % < /td > < td align= '' center '' > 0.953 < /td > < td align= '' center '' > 23.9 % < /td > < /tr > < tr > < td > gpt-3.5 < /td > < td align= '' center '' > 74.5 % < /td > < td align= '' center '' > 0.807 < /td > < td align= '' center '' > 80.6 % < /td > < /tr > < tr > < td > qwen-1_8b-chat < /td > < td align= '' center '' > 85.0 % < /td > < td align= '' center '' > 0.839 < /td > < td align= '' center '' > 27.6 % < /td > < /tr > < tr > < td > qwen-7b-chat < /td > < td align= '' center '' > 95.5 % < /td > < td align= '' center '' > 0.900 < /td > < td align= '' center '' > 11.6 % < /td > < /tr > < tr > < td > qwen-14b-chat < /td > < td align= '' center '' > 96.9 % < /td > < td align= '' center '' > 0.917 < /td > < td align= '' center '' > 5.6 % < /td > < /tr > < tr > < td > qwen-72b-chat < /td > < td align= '' center '' > 98.2 % < /td > < td align= '' center '' > 0.927 < /td > < td align= '' center '' > 1.1 % 
< /td > < /tr > < /table > ass qwen 's ability use python code interpreter task mathematical problem solving , data visualization , general-purpose task file handling web scraping , created open-sourced benchmark specifically designed evaluating capability .", "find benchmark [ link ] ( http : //github.com/qwenlm/qwen-agent/tree/main/benchmark ) .", "observed qwen performs well term code executability result accuracy generating code : < table > < tr > < th colspan= '' 5 '' align= '' center '' > code interpreter benchmark ( version 20231206 ) < /th > < /tr > < tr > < th rowspan= '' 2 '' align= '' center '' > model < /th > < th colspan= '' 3 '' align= '' center '' > accuracy code execution result ( % ) < /th > < th colspan= '' 1 '' align= '' center '' > executable rate code ( % ) < /th > < /tr > < tr > < th align= '' center '' > math↑ < /th > < th align= '' center '' > visualization-hard↑ < /th > < th align= '' center '' > visualization-easy↑ < /th > < th align= '' center '' > general↑ < /th > < /tr > < tr > < td > gpt-4 < /td > < td align= '' center '' > 82.8 < /td > < td align= '' center '' > 66.7 < /td > < td align= '' center '' > 60.8 < /td > < td align= '' center '' > 82.8 < /td > < /tr > < tr > < td > gpt-3.5 < /td > < td align= '' center '' > 47.3 < /td > < td align= '' center '' > 33.3 < /td > < td align= '' center '' > 55.7 < /td > < td align= '' center '' > 74.1 < /td > < /tr > < tr > < td > llama2-13b-chat < /td > < td align= '' center '' > 8.3 < /td > < td align= '' center '' > 1.2 < /td > < td align= '' center '' > 15.2 < /td > < td align= '' center '' > 48.3 < /td > < /tr > < tr > < td > codellama-13b-instruct < /td > < td align= '' center '' > 28.2 < /td > < td align= '' center '' > 15.5 < /td > < td align= '' center '' > 21.5 < /td > < td align= '' center '' > 74.1 < /td > < /tr > < tr > < td > internlm-20b-chat < /td > < td align= '' center '' > 34.6 < /td > < td align= '' center '' > 10.7 < /td > < td align= '' center '' > 25.1 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > chatglm3-6b < /td > < td align= '' center '' > 54.2 < /td > < td align= '' center '' > 4.8 < /td > < td align= '' center '' > 15.2 < /td > < td align= '' center '' > 67.1 < /td > < /tr > < tr > < td > qwen-1.8b-chat < /td > < td align= '' center '' > 25.6 < /td > < td align= '' center '' > 21.4 < /td > < td align= '' center '' > 22.8 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > qwen-7b-chat < /td > < td align= '' center '' > 41.9 < /td > < td align= '' center '' > 23.8 < /td > < td align= '' center '' > 38.0 < /td > < td align= '' center '' > 67.2 < /td > < /tr > < tr > < td > qwen-14b-chat < /td > < td align= '' center '' > 58.4 < /td > < td align= '' center '' > 31.0 < /td > < td align= '' center '' > 45.6 < /td > < td align= '' center '' > 65.5 < /td > < /tr > < tr > < td > qwen-72b-chat < /td > < td align= '' center '' > 72.7 < /td > < td align= '' center '' > 41.7 < /td > < td align= '' center '' > 43.0 < /td > < td align= '' center '' > 82.8 < /td > < /tr > < /table > < p align= '' center '' > < br > < img src= '' assets/code_interpreter_showcase_001.jpg '' / > < br > < p > < br >" ] ]
Tool Usage Qwen-Chat has been optimized for tool usage and function calling capabilities. Users can develop agents, LangChain applications, and even augment Qwen with a Python Code Interpreter. We provide documentation on how to implement tool calls based on the principle of ReAct Prompting; please refer to [the ReAct example](examples/react_prompt.md). Based on this principle, we provide support for function calling in [openai_api.py](openai_api.py). We have tested the model's tool calling capabilities on our open-source Chinese evaluation benchmark and found that Qwen-Chat consistently performs well: <table> <tr> <th colspan="4" align="center">Chinese Tool-Use Benchmark (Version 20231206)</th> </tr> <tr> <th align="center">Model</th><th align="center">Tool Selection (Acc.↑)</th><th align="center">Tool Input (Rouge-L↑)</th><th align="center">False Positive Error↓</th> </tr> <tr> <td>GPT-4</td><td align="center">98.0%</td><td align="center">0.953</td><td align="center">23.9%</td> </tr> <tr> <td>GPT-3.5</td><td align="center">74.5%</td><td align="center">0.807</td><td align="center">80.6%</td> </tr> <tr> <td>Qwen-1_8B-Chat</td><td align="center">85.0%</td><td align="center">0.839</td><td align="center">27.6%</td> </tr> <tr> <td>Qwen-7B-Chat</td><td align="center">95.5%</td><td align="center">0.900</td><td align="center">11.6%</td> </tr> <tr> <td>Qwen-14B-Chat</td><td align="center">96.9%</td><td align="center">0.917</td><td align="center">5.6%</td> </tr> <tr> <td>Qwen-72B-Chat</td><td align="center">98.2%</td><td align="center">0.927</td><td align="center">1.1%</td> </tr> </table> To assess Qwen's ability to use the Python Code Interpreter for tasks such as mathematical problem solving, data visualization, and other general-purpose tasks such as file handling and web scraping, we have created and open-sourced a benchmark specifically designed for evaluating these capabilities. You can find the benchmark at this [link](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark).
We have observed that Qwen performs well in terms of code executability and result accuracy when generating code: <table> <tr> <th colspan="5" align="center">Code Interpreter Benchmark (Version 20231206)</th> </tr> <tr> <th rowspan="2" align="center">Model</th> <th colspan="3" align="center">Accuracy of Code Execution Results (%)</th> <th colspan="1" align="center">Executable Rate of Code (%)</th> </tr> <tr> <th align="center">Math↑</th><th align="center">Visualization-Hard↑</th><th align="center">Visualization-Easy↑</th><th align="center">General↑</th> </tr> <tr> <td>GPT-4</td> <td align="center">82.8</td> <td align="center">66.7</td> <td align="center">60.8</td> <td align="center">82.8</td> </tr> <tr> <td>GPT-3.5</td> <td align="center">47.3</td> <td align="center">33.3</td> <td align="center">55.7</td> <td align="center">74.1</td> </tr> <tr> <td>LLaMA2-13B-Chat</td> <td align="center">8.3</td> <td align="center">1.2</td> <td align="center">15.2</td> <td align="center">48.3</td> </tr> <tr> <td>CodeLLaMA-13B-Instruct</td> <td align="center">28.2</td> <td align="center">15.5</td> <td align="center">21.5</td> <td align="center">74.1</td> </tr> <tr> <td>InternLM-20B-Chat</td> <td align="center">34.6</td> <td align="center">10.7</td> <td align="center">25.1</td> <td align="center">65.5</td> </tr> <tr> <td>ChatGLM3-6B</td> <td align="center">54.2</td> <td align="center">4.8</td> <td align="center">15.2</td> <td align="center">67.1</td> </tr> <tr> <td>Qwen-1.8B-Chat</td> <td align="center">25.6</td> <td align="center">21.4</td> <td align="center">22.8</td> <td align="center">65.5</td> </tr> <tr> <td>Qwen-7B-Chat</td> <td align="center">41.9</td> <td align="center">23.8</td> <td align="center">38.0</td> <td align="center">67.2</td> </tr> <tr> <td>Qwen-14B-Chat</td> <td align="center">58.4</td> <td align="center">31.0</td> <td align="center">45.6</td> <td align="center">65.5</td> </tr> <tr> <td>Qwen-72B-Chat</td> <td align="center">72.7</td> <td align="center">41.7</td> <td align="center">43.0</td> <td align="center">82.8</td> </tr> </table> <p align="center"> <br> <img src="assets/code_interpreter_showcase_001.jpg" /> <br> <p> <br>
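To make the function-calling support in `openai_api.py` concrete, here is a hedged sketch of a request against the locally deployed API, reusing the legacy `openai<1.0` client from the API section above. The weather function schema is a made-up illustration, not something shipped with the repository.

```python
# Hedged sketch: function calling against the local openai_api.py server.
# The get_current_weather schema is purely illustrative.
import openai

openai.api_base = "http://localhost:8000/v1"
openai.api_key = "none"

functions = [
    {
        "name": "get_current_weather",
        "description": "Get the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    }
]

response = openai.ChatCompletion.create(
    model="Qwen",
    messages=[{"role": "user", "content": "What's the weather like in Beijing?"}],
    functions=functions,
)
message = response.choices[0].message
if message.get("function_call"):
    # The model chose to call the tool; its arguments come back as a JSON string.
    print(message["function_call"]["name"], message["function_call"]["arguments"])
else:
    print(message["content"])
```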
https://github.com/QwenLM/Qwen
-1
[ "chinese", "flash-attention", "large-language-models", "llm", "natural-language-processing", "pretrained-models" ]
https://raw.githubusercontent.com/rasbt/LLMs-from-scratch/main/README.md
[ [ "table", "content", "please", "note", "`", "readme.md", "`", "file", "markdown", "(", "`", ".md", "`", ")", "file", ".", "downloaded", "code", "bundle", "manning", "website", "viewing", "local", "computer", ",", "recommend", "using", "markdown", "editor", "previewer", "proper", "viewing", ".", "n't", "installed", "markdown", "editor", "yet", ",", "[", "marktext", "]", "(", "http", ":", "//www.marktext.cc", ")", "good", "free", "option", ".", "alternatively", ",", "view", "file", "github", "[", "http", ":", "//github.com/rasbt/llms-from-scratch", "]", "(", "http", ":", "//github.com/rasbt/llms-from-scratch", ")", ".", "<", "br", ">", "<", "br", ">", "|", "chapter", "title", "|", "main", "code", "(", "quick", "access", ")", "|", "code", "+", "supplementary", "|", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-|", "|", "ch", "1", ":", "understanding", "large", "language", "model", "|", "code", "|", "code", "|", "|", "ch", "2", ":", "working", "text", "data", "|", "-", "[", "ch02.ipynb", "]", "(", "ch02/01_main-chapter-code/ch02.ipynb", ")", "<", "br/", ">", "-", "[", "dataloader.ipynb", "]", "(", "ch02/01_main-chapter-code/dataloader.ipynb", ")", "(", "summary", ")", "<", "br/", ">", "-", "[", "exercise-solutions.ipynb", "]", "(", "ch02/01_main-chapter-code/exercise-solutions.ipynb", ")", "|", "[", "./ch02", "]", "(", "./ch02", ")", "|", "|", "ch", "3", ":", "coding", "attention", "mechanism", "|", "-", "[", "ch03.ipynb", "]", "(", "ch03/01_main-chapter-code/ch03.ipynb", ")", "<", "br/", ">", "-", "[", "multihead-attention.ipynb", "]", "(", "ch03/01_main-chapter-code/multihead-attention.ipynb", ")", "(", "summary", ")", "|", "[", "./ch03", "]", "(", "./ch03", ")", "|", "|", "ch", "4", ":", "implementing", "gpt", "model", "scratch", "|", "-", "[", "ch04.ipynb", "]", "(", "ch04/01_main-chapter-code/ch04.ipynb", ")", "<", "br/", ">", "-", "[", "gpt.py", "]", "(", "ch04/01_main-chapter-code/gpt.py", ")", "(", "summary", ")", "|", "[", "./ch04", "]", "(", "./ch04", ")", "|", "|", "ch", "5", ":", "pretraining", "unlabeled", "data", "|", "q1", "2024", "|", "...", "|", "|", "ch", "6", ":", "finetuning", "text", "classification", "|", "q2", "2024", "|", "...", "|", "|", "ch", "7", ":", "finetuning", "human", "feedback", "|", "q2", "2024", "|", "...", "|", "|", "ch", "8", ":", "using", "large", "language", "model", "practice", "|", "q2/3", "2024", "|", "...", "|", "|", "appendix", ":", "introduction", "pytorch", "*", "|", "-", "[", "code-part1.ipynb", "]", "(", "appendix-a/03_main-chapter-code/code-part1.ipynb", ")", "<", "br/", ">", "-", "[", "code-part2.ipynb", "]", "(", "appendix-a/03_main-chapter-code/code-part2.ipynb", ")", "<", "br/", ">", "-", "[", "ddp-script.py", "]", "(", "appendix-a/03_main-chapter-code/ddp-script.py", ")", "<", "br/", ">", "-", "[", "exercise-solutions.ipynb", "]", "(", "appendix-a/03_main-chapter-code/exercise-solutions.ipynb", ")", "|", "[", "./appendix-a", "]", "(", "./appendix-a", ")", 
"|", "(", "*", "please", "see", "[", "]", "(", "appendix-a/01_optional-python-setup-preferences", ")", "[", "]", "(", "appendix-a/02_installing-python-libraries", ")", "folder", "need", "guidance", "installing", "python", "python", "package", ".", ")", "<", "br", ">", "<", "br", ">", "<", "img", "src=", "''", "images/mental-model.jpg", "''", "width=", "''", "600px", "''", ">", "(", "mental", "model", "summarizing", "content", "covered", "book", ".", ")" ], [ "table content please note ` readme.md ` file markdown ( ` .md ` ) file .", "downloaded code bundle manning website viewing local computer , recommend using markdown editor previewer proper viewing .", "n't installed markdown editor yet , [ marktext ] ( http : //www.marktext.cc ) good free option .", "alternatively , view file github [ http : //github.com/rasbt/llms-from-scratch ] ( http : //github.com/rasbt/llms-from-scratch ) .", "< br > < br > | chapter title | main code ( quick access ) | code + supplementary | | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -| -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -| | ch 1 : understanding large language model | code | code | | ch 2 : working text data | - [ ch02.ipynb ] ( ch02/01_main-chapter-code/ch02.ipynb ) < br/ > - [ dataloader.ipynb ] ( ch02/01_main-chapter-code/dataloader.ipynb ) ( summary ) < br/ > - [ exercise-solutions.ipynb ] ( ch02/01_main-chapter-code/exercise-solutions.ipynb ) | [ ./ch02 ] ( ./ch02 ) | | ch 3 : coding attention mechanism | - [ ch03.ipynb ] ( ch03/01_main-chapter-code/ch03.ipynb ) < br/ > - [ multihead-attention.ipynb ] ( ch03/01_main-chapter-code/multihead-attention.ipynb ) ( summary ) | [ ./ch03 ] ( ./ch03 ) | | ch 4 : implementing gpt model scratch | - [ ch04.ipynb ] ( ch04/01_main-chapter-code/ch04.ipynb ) < br/ > - [ gpt.py ] ( ch04/01_main-chapter-code/gpt.py ) ( summary ) | [ ./ch04 ] ( ./ch04 ) | | ch 5 : pretraining unlabeled data | q1 2024 | ... | | ch 6 : finetuning text classification | q2 2024 | ... | | ch 7 : finetuning human feedback | q2 2024 | ... | | ch 8 : using large language model practice | q2/3 2024 | ... | | appendix : introduction pytorch * | - [ code-part1.ipynb ] ( appendix-a/03_main-chapter-code/code-part1.ipynb ) < br/ > - [ code-part2.ipynb ] ( appendix-a/03_main-chapter-code/code-part2.ipynb ) < br/ > - [ ddp-script.py ] ( appendix-a/03_main-chapter-code/ddp-script.py ) < br/ > - [ exercise-solutions.ipynb ] ( appendix-a/03_main-chapter-code/exercise-solutions.ipynb ) | [ ./appendix-a ] ( ./appendix-a ) | ( * please see [ ] ( appendix-a/01_optional-python-setup-preferences ) [ ] ( appendix-a/02_installing-python-libraries ) folder need guidance installing python python package . )", "< br > < br > < img src= '' images/mental-model.jpg '' width= '' 600px '' > ( mental model summarizing content covered book . )" ] ]
[ [ "table", "content", "please", "note", "`", "readme.md", "`", "file", "markdown", "(", "`", ".md", "`", ")", "file", ".", "downloaded", "code", "bundle", "manning", "website", "viewing", "local", "computer", ",", "recommend", "using", "markdown", "editor", "previewer", "proper", "viewing", ".", "n't", "installed", "markdown", "editor", "yet", ",", "[", "marktext", "]", "(", "http", ":", "//www.marktext.cc", ")", "good", "free", "option", ".", "alternatively", ",", "view", "file", "github", "[", "http", ":", "//github.com/rasbt/llms-from-scratch", "]", "(", "http", ":", "//github.com/rasbt/llms-from-scratch", ")", ".", "<", "br", ">", "<", "br", ">", "|", "chapter", "title", "|", "main", "code", "(", "quick", "access", ")", "|", "code", "+", "supplementary", "|", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-|", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "--", "-|", "|", "ch", "1", ":", "understanding", "large", "language", "model", "|", "code", "|", "code", "|", "|", "ch", "2", ":", "working", "text", "data", "|", "-", "[", "ch02.ipynb", "]", "(", "ch02/01_main-chapter-code/ch02.ipynb", ")", "<", "br/", ">", "-", "[", "dataloader.ipynb", "]", "(", "ch02/01_main-chapter-code/dataloader.ipynb", ")", "(", "summary", ")", "<", "br/", ">", "-", "[", "exercise-solutions.ipynb", "]", "(", "ch02/01_main-chapter-code/exercise-solutions.ipynb", ")", "|", "[", "./ch02", "]", "(", "./ch02", ")", "|", "|", "ch", "3", ":", "coding", "attention", "mechanism", "|", "-", "[", "ch03.ipynb", "]", "(", "ch03/01_main-chapter-code/ch03.ipynb", ")", "<", "br/", ">", "-", "[", "multihead-attention.ipynb", "]", "(", "ch03/01_main-chapter-code/multihead-attention.ipynb", ")", "(", "summary", ")", "|", "[", "./ch03", "]", "(", "./ch03", ")", "|", "|", "ch", "4", ":", "implementing", "gpt", "model", "scratch", "|", "-", "[", "ch04.ipynb", "]", "(", "ch04/01_main-chapter-code/ch04.ipynb", ")", "<", "br/", ">", "-", "[", "gpt.py", "]", "(", "ch04/01_main-chapter-code/gpt.py", ")", "(", "summary", ")", "|", "[", "./ch04", "]", "(", "./ch04", ")", "|", "|", "ch", "5", ":", "pretraining", "unlabeled", "data", "|", "q1", "2024", "|", "...", "|", "|", "ch", "6", ":", "finetuning", "text", "classification", "|", "q2", "2024", "|", "...", "|", "|", "ch", "7", ":", "finetuning", "human", "feedback", "|", "q2", "2024", "|", "...", "|", "|", "ch", "8", ":", "using", "large", "language", "model", "practice", "|", "q2/3", "2024", "|", "...", "|", "|", "appendix", ":", "introduction", "pytorch", "*", "|", "-", "[", "code-part1.ipynb", "]", "(", "appendix-a/03_main-chapter-code/code-part1.ipynb", ")", "<", "br/", ">", "-", "[", "code-part2.ipynb", "]", "(", "appendix-a/03_main-chapter-code/code-part2.ipynb", ")", "<", "br/", ">", "-", "[", "ddp-script.py", "]", "(", "appendix-a/03_main-chapter-code/ddp-script.py", ")", "<", "br/", ">", "-", "[", "exercise-solutions.ipynb", "]", "(", "appendix-a/03_main-chapter-code/exercise-solutions.ipynb", ")", "|", "[", "./appendix-a", "]", "(", "./appendix-a", ")", 
"|", "(", "*", "please", "see", "[", "]", "(", "appendix-a/01_optional-python-setup-preferences", ")", "[", "]", "(", "appendix-a/02_installing-python-libraries", ")", "folder", "need", "guidance", "installing", "python", "python", "package", ".", ")", "<", "br", ">", "<", "br", ">", "<", "img", "src=", "''", "images/mental-model.jpg", "''", "width=", "''", "600px", "''", ">", "(", "mental", "model", "summarizing", "content", "covered", "book", ".", ")" ], [ "table content please note ` readme.md ` file markdown ( ` .md ` ) file .", "downloaded code bundle manning website viewing local computer , recommend using markdown editor previewer proper viewing .", "n't installed markdown editor yet , [ marktext ] ( http : //www.marktext.cc ) good free option .", "alternatively , view file github [ http : //github.com/rasbt/llms-from-scratch ] ( http : //github.com/rasbt/llms-from-scratch ) .", "< br > < br > | chapter title | main code ( quick access ) | code + supplementary | | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- | -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -| -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -| | ch 1 : understanding large language model | code | code | | ch 2 : working text data | - [ ch02.ipynb ] ( ch02/01_main-chapter-code/ch02.ipynb ) < br/ > - [ dataloader.ipynb ] ( ch02/01_main-chapter-code/dataloader.ipynb ) ( summary ) < br/ > - [ exercise-solutions.ipynb ] ( ch02/01_main-chapter-code/exercise-solutions.ipynb ) | [ ./ch02 ] ( ./ch02 ) | | ch 3 : coding attention mechanism | - [ ch03.ipynb ] ( ch03/01_main-chapter-code/ch03.ipynb ) < br/ > - [ multihead-attention.ipynb ] ( ch03/01_main-chapter-code/multihead-attention.ipynb ) ( summary ) | [ ./ch03 ] ( ./ch03 ) | | ch 4 : implementing gpt model scratch | - [ ch04.ipynb ] ( ch04/01_main-chapter-code/ch04.ipynb ) < br/ > - [ gpt.py ] ( ch04/01_main-chapter-code/gpt.py ) ( summary ) | [ ./ch04 ] ( ./ch04 ) | | ch 5 : pretraining unlabeled data | q1 2024 | ... | | ch 6 : finetuning text classification | q2 2024 | ... | | ch 7 : finetuning human feedback | q2 2024 | ... | | ch 8 : using large language model practice | q2/3 2024 | ... | | appendix : introduction pytorch * | - [ code-part1.ipynb ] ( appendix-a/03_main-chapter-code/code-part1.ipynb ) < br/ > - [ code-part2.ipynb ] ( appendix-a/03_main-chapter-code/code-part2.ipynb ) < br/ > - [ ddp-script.py ] ( appendix-a/03_main-chapter-code/ddp-script.py ) < br/ > - [ exercise-solutions.ipynb ] ( appendix-a/03_main-chapter-code/exercise-solutions.ipynb ) | [ ./appendix-a ] ( ./appendix-a ) | ( * please see [ ] ( appendix-a/01_optional-python-setup-preferences ) [ ] ( appendix-a/02_installing-python-libraries ) folder need guidance installing python python package . )", "< br > < br > < img src= '' images/mental-model.jpg '' width= '' 600px '' > ( mental model summarizing content covered book . )" ] ]
Table of Contents Please note that the `Readme.md` file is a Markdown (`.md`) file. If you have downloaded this code bundle from the Manning website and are viewing it on your local computer, I recommend using a Markdown editor or previewer for proper viewing. If you haven't installed a Markdown editor yet, [MarkText](https://www.marktext.cc) is a good free option. Alternatively, you can view this and other files on GitHub at [https://github.com/rasbt/LLMs-from-scratch](https://github.com/rasbt/LLMs-from-scratch). <br> <br> | Chapter Title | Main Code (for quick access) | All Code + Supplementary | |------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|-------------------------------| | Ch 1: Understanding Large Language Models | No code | No code | | Ch 2: Working with Text Data | - [ch02.ipynb](ch02/01_main-chapter-code/ch02.ipynb)<br/>- [dataloader.ipynb](ch02/01_main-chapter-code/dataloader.ipynb) (summary)<br/>- [exercise-solutions.ipynb](ch02/01_main-chapter-code/exercise-solutions.ipynb) | [./ch02](./ch02) | | Ch 3: Coding Attention Mechanisms | - [ch03.ipynb](ch03/01_main-chapter-code/ch03.ipynb)<br/>- [multihead-attention.ipynb](ch03/01_main-chapter-code/multihead-attention.ipynb) (summary) | [./ch03](./ch03) | | Ch 4: Implementing a GPT Model from Scratch | - [ch04.ipynb](ch04/01_main-chapter-code/ch04.ipynb)<br/>- [gpt.py](ch04/01_main-chapter-code/gpt.py) (summary) | [./ch04](./ch04) | | Ch 5: Pretraining on Unlabeled Data | Q1 2024 | ... | | Ch 6: Finetuning for Text Classification | Q2 2024 | ... | | Ch 7: Finetuning with Human Feedback | Q2 2024 | ... | | Ch 8: Using Large Language Models in Practice | Q2/3 2024 | ... | | Appendix A: Introduction to PyTorch* | - [code-part1.ipynb](appendix-A/03_main-chapter-code/code-part1.ipynb)<br/>- [code-part2.ipynb](appendix-A/03_main-chapter-code/code-part2.ipynb)<br/>- [DDP-script.py](appendix-A/03_main-chapter-code/DDP-script.py)<br/>- [exercise-solutions.ipynb](appendix-A/03_main-chapter-code/exercise-solutions.ipynb) | [./appendix-A](./appendix-A) | (* Please see [this](appendix-A/01_optional-python-setup-preferences) and [this](appendix-A/02_installing-python-libraries) folder if you need more guidance on installing Python and Python packages.) <br> <br> <img src="images/mental-model.jpg" width="600px"> (A mental model summarizing the contents covered in this book.)
https://github.com/rasbt/LLMs-from-scratch
-1
[ "chatgpt", "gpt", "large-language-models", "llm", "python", "pytorch" ]
https://raw.githubusercontent.com/mlc-ai/web-llm/main/README.md
[ [ "customized", "model", "weight", "webllm", "work", "companion", "project", "[", "mlc", "llm", "]", "(", "http", ":", "//github.com/mlc-ai/mlc-llm", ")", ".", "reuses", "model", "artifact", "build", "flow", "mlc", "llm", ",", "please", "check", "[", "mlc", "llm", "document", "]", "(", "http", ":", "//llm.mlc.ai/docs/deploy/javascript.html", ")", "add", "new", "model", "weight", "library", "webllm", ".", ",", "go", "high-level", "idea", ".", "two", "element", "webllm", "package", "enables", "new", "model", "weight", "variant", ".", "-", "model_url", ":", "contains", "url", "model", "artifact", ",", "weight", "meta-data", ".", "-", "model_lib_url", ":", "url", "web", "assembly", "library", "(", "i.e", ".", "wasm", "file", ")", "contains", "executables", "accelerate", "model", "computation", ".", "customizable", "webllm", ".", "``", "`", "typescript", "async", "main", "(", ")", "{", "const", "myllamaurl", "=", "``", "/url/to/my/llama", "''", ";", "const", "appconfig", "=", "{", "``", "model_list", "''", ":", "[", "{", "``", "model_url", "''", ":", "myllamaurl", ",", "``", "local_id", "''", ":", "``", "myllama-3b-v1-q4f32_0", "''", "``", "model_lib_url", "''", ":", "``", "/url/to/myllama3b.wasm", "''", ",", "}", "]", ",", "}", ";", "//", "override", "default", "const", "chatopts", "=", "{", "``", "repetition_penalty", "''", ":", "1.01", "}", ";", "const", "chat", "=", "new", "chatmodule", "(", ")", ";", "//", "load", "prebuilt", "model", "//", "chat", "option", "override", "app", "config", "//", "hood", ",", "load", "model", "myllamaurl", "//", "cache", "browser", "cache", "//", "chat", "also", "load", "model", "library", "``", "/url/to/myllama3b.wasm", "''", ",", "//", "assuming", "compatible", "model", "myllamaurl", ".", "await", "chat.reload", "(", "``", "myllama-3b-v1-q4f32_0", "''", ",", "chatopts", ",", "appconfig", ")", ";", "}", "``", "`", "many", "case", ",", "want", "supply", "model", "weight", "variant", ",", "necessarily", "new", "model", "(", "e.g", ".", "`", "neuralhermes-mistral", "`", "reuse", "`", "mistral", "`", "'s", "model", "library", ";", "`", "wizardmath", "`", "reuse", "`", "llama-2", "`", "'s", "model", "library", ")", ".", "example", "model", "library", "shared", "different", "model", "variant", ",", "see", "`", "examples/simple-chat/src/gh-config.js", "`", ".", "also", "provide", "plethora", "prebuilt", "model", "library", ",", "including", ":", "-", "`", "llama-2-7b-chat-hf-q4f32_1", "`", ":", "llama-7b", "model", ".", "-", "`", "redpajama-incite-chat-3b-v1-q4f32_1", "`", ":", "redpajama-3b", "variant", ".", "-", "`", "mistral-7b-instruct-v0.1-q4f16_1", "`", ":", "mistral-7b", "variant", ".", "-", "many", "[", "binary-mlc-llm-libs", "]", "(", "http", ":", "//github.com/mlc-ai/binary-mlc-llm-libs", ")", "." 
], [ "customized model weight webllm work companion project [ mlc llm ] ( http : //github.com/mlc-ai/mlc-llm ) .", "reuses model artifact build flow mlc llm , please check [ mlc llm document ] ( http : //llm.mlc.ai/docs/deploy/javascript.html ) add new model weight library webllm .", ", go high-level idea .", "two element webllm package enables new model weight variant .", "- model_url : contains url model artifact , weight meta-data .", "- model_lib_url : url web assembly library ( i.e .", "wasm file ) contains executables accelerate model computation .", "customizable webllm .", "`` ` typescript async main ( ) { const myllamaurl = `` /url/to/my/llama '' ; const appconfig = { `` model_list '' : [ { `` model_url '' : myllamaurl , `` local_id '' : `` myllama-3b-v1-q4f32_0 '' `` model_lib_url '' : `` /url/to/myllama3b.wasm '' , } ] , } ; // override default const chatopts = { `` repetition_penalty '' : 1.01 } ; const chat = new chatmodule ( ) ; // load prebuilt model // chat option override app config // hood , load model myllamaurl // cache browser cache // chat also load model library `` /url/to/myllama3b.wasm '' , // assuming compatible model myllamaurl .", "await chat.reload ( `` myllama-3b-v1-q4f32_0 '' , chatopts , appconfig ) ; } `` ` many case , want supply model weight variant , necessarily new model ( e.g .", "` neuralhermes-mistral ` reuse ` mistral ` 's model library ; ` wizardmath ` reuse ` llama-2 ` 's model library ) .", "example model library shared different model variant , see ` examples/simple-chat/src/gh-config.js ` .", "also provide plethora prebuilt model library , including : - ` llama-2-7b-chat-hf-q4f32_1 ` : llama-7b model .", "- ` redpajama-incite-chat-3b-v1-q4f32_1 ` : redpajama-3b variant .", "- ` mistral-7b-instruct-v0.1-q4f16_1 ` : mistral-7b variant .", "- many [ binary-mlc-llm-libs ] ( http : //github.com/mlc-ai/binary-mlc-llm-libs ) ." ] ]
[ [ "customized", "model", "weight", "webllm", "work", "companion", "project", "[", "mlc", "llm", "]", "(", "http", ":", "//github.com/mlc-ai/mlc-llm", ")", ".", "reuses", "model", "artifact", "build", "flow", "mlc", "llm", ",", "please", "check", "[", "mlc", "llm", "document", "]", "(", "http", ":", "//llm.mlc.ai/docs/deploy/javascript.html", ")", "add", "new", "model", "weight", "library", "webllm", ".", ",", "go", "high-level", "idea", ".", "two", "element", "webllm", "package", "enables", "new", "model", "weight", "variant", ".", "-", "model_url", ":", "contains", "url", "model", "artifact", ",", "weight", "meta-data", ".", "-", "model_lib_url", ":", "url", "web", "assembly", "library", "(", "i.e", ".", "wasm", "file", ")", "contains", "executables", "accelerate", "model", "computation", ".", "customizable", "webllm", ".", "``", "`", "typescript", "async", "main", "(", ")", "{", "const", "myllamaurl", "=", "``", "/url/to/my/llama", "''", ";", "const", "appconfig", "=", "{", "``", "model_list", "''", ":", "[", "{", "``", "model_url", "''", ":", "myllamaurl", ",", "``", "local_id", "''", ":", "``", "myllama-3b-v1-q4f32_0", "''", "``", "model_lib_url", "''", ":", "``", "/url/to/myllama3b.wasm", "''", ",", "}", "]", ",", "}", ";", "//", "override", "default", "const", "chatopts", "=", "{", "``", "repetition_penalty", "''", ":", "1.01", "}", ";", "const", "chat", "=", "new", "chatmodule", "(", ")", ";", "//", "load", "prebuilt", "model", "//", "chat", "option", "override", "app", "config", "//", "hood", ",", "load", "model", "myllamaurl", "//", "cache", "browser", "cache", "//", "chat", "also", "load", "model", "library", "``", "/url/to/myllama3b.wasm", "''", ",", "//", "assuming", "compatible", "model", "myllamaurl", ".", "await", "chat.reload", "(", "``", "myllama-3b-v1-q4f32_0", "''", ",", "chatopts", ",", "appconfig", ")", ";", "}", "``", "`", "many", "case", ",", "want", "supply", "model", "weight", "variant", ",", "necessarily", "new", "model", "(", "e.g", ".", "`", "neuralhermes-mistral", "`", "reuse", "`", "mistral", "`", "'s", "model", "library", ";", "`", "wizardmath", "`", "reuse", "`", "llama-2", "`", "'s", "model", "library", ")", ".", "example", "model", "library", "shared", "different", "model", "variant", ",", "see", "`", "examples/simple-chat/src/gh-config.js", "`", ".", "also", "provide", "plethora", "prebuilt", "model", "library", ",", "including", ":", "-", "`", "llama-2-7b-chat-hf-q4f32_1", "`", ":", "llama-7b", "model", ".", "-", "`", "redpajama-incite-chat-3b-v1-q4f32_1", "`", ":", "redpajama-3b", "variant", ".", "-", "`", "mistral-7b-instruct-v0.1-q4f16_1", "`", ":", "mistral-7b", "variant", ".", "-", "many", "[", "binary-mlc-llm-libs", "]", "(", "http", ":", "//github.com/mlc-ai/binary-mlc-llm-libs", ")", "." 
], [ "customized model weight webllm work companion project [ mlc llm ] ( http : //github.com/mlc-ai/mlc-llm ) .", "reuses model artifact build flow mlc llm , please check [ mlc llm document ] ( http : //llm.mlc.ai/docs/deploy/javascript.html ) add new model weight library webllm .", ", go high-level idea .", "two element webllm package enables new model weight variant .", "- model_url : contains url model artifact , weight meta-data .", "- model_lib_url : url web assembly library ( i.e .", "wasm file ) contains executables accelerate model computation .", "customizable webllm .", "`` ` typescript async main ( ) { const myllamaurl = `` /url/to/my/llama '' ; const appconfig = { `` model_list '' : [ { `` model_url '' : myllamaurl , `` local_id '' : `` myllama-3b-v1-q4f32_0 '' `` model_lib_url '' : `` /url/to/myllama3b.wasm '' , } ] , } ; // override default const chatopts = { `` repetition_penalty '' : 1.01 } ; const chat = new chatmodule ( ) ; // load prebuilt model // chat option override app config // hood , load model myllamaurl // cache browser cache // chat also load model library `` /url/to/myllama3b.wasm '' , // assuming compatible model myllamaurl .", "await chat.reload ( `` myllama-3b-v1-q4f32_0 '' , chatopts , appconfig ) ; } `` ` many case , want supply model weight variant , necessarily new model ( e.g .", "` neuralhermes-mistral ` reuse ` mistral ` 's model library ; ` wizardmath ` reuse ` llama-2 ` 's model library ) .", "example model library shared different model variant , see ` examples/simple-chat/src/gh-config.js ` .", "also provide plethora prebuilt model library , including : - ` llama-2-7b-chat-hf-q4f32_1 ` : llama-7b model .", "- ` redpajama-incite-chat-3b-v1-q4f32_1 ` : redpajama-3b variant .", "- ` mistral-7b-instruct-v0.1-q4f16_1 ` : mistral-7b variant .", "- many [ binary-mlc-llm-libs ] ( http : //github.com/mlc-ai/binary-mlc-llm-libs ) ." ] ]
Customized Model Weights WebLLM works as a companion project of [MLC LLM](https://github.com/mlc-ai/mlc-llm). It reuses the model artifacts and build flow of MLC LLM; please check out the [MLC LLM documentation](https://llm.mlc.ai/docs/deploy/javascript.html) on how to add new model weights and libraries to WebLLM. Here, we go over the high-level idea. There are two elements of the WebLLM package that enable new models and weight variants. - model_url: Contains a URL to model artifacts, such as weights and meta-data. - model_lib_url: A URL to the WebAssembly library (i.e. wasm file) that contains the executables to accelerate the model computations. Both are customizable in WebLLM. ```typescript async function main() { const myLlamaUrl = "/url/to/my/llama"; const appConfig = { "model_list": [ { "model_url": myLlamaUrl, "local_id": "MyLlama-3b-v1-q4f32_0", "model_lib_url": "/url/to/myllama3b.wasm", } ], }; // override default const chatOpts = { "repetition_penalty": 1.01 }; const chat = new ChatModule(); // load a prebuilt model // with a chat option override and app config // under the hood, it will load the model from myLlamaUrl // and cache it in the browser cache // The chat will also load the model library from "/url/to/myllama3b.wasm", // assuming that it is compatible with the model in myLlamaUrl. await chat.reload("MyLlama-3b-v1-q4f32_0", chatOpts, appConfig); } ``` In many cases, we only want to supply a model weight variant, but not necessarily a new model (e.g. `NeuralHermes-Mistral` can reuse `Mistral`'s model library; `WizardMath` can reuse `Llama-2`'s model library). For an example of how a model library is shared by different model variants, see `examples/simple-chat/src/gh-config.js`. We also provide a plethora of prebuilt model libraries, including: - `Llama-2-7b-chat-hf-q4f32_1`: Llama-7b models. - `RedPajama-INCITE-Chat-3B-v1-q4f32_1`: RedPajama-3B variants. - `Mistral-7B-Instruct-v0.1-q4f16_1`: Mistral-7B variants. - and many more at [binary-mlc-llm-libs](https://github.com/mlc-ai/binary-mlc-llm-libs).
https://github.com/mlc-ai/web-llm
-1
[ "chatgpt", "deep-learning", "language-model", "llm", "tvm", "webgpu", "webml" ]
https://raw.githubusercontent.com/mlc-ai/web-llm/main/README.md
[ [ "build", "webllm", "package", "source", "note", ":", "n't", "need", "build", "unless", "would", "like", "change", "webllm", "package", ",", "follow", "[", "use", "webllm", "]", "(", "#", "use-web-llm-package", ")", "instead", ".", "webllm", "package", "web", "runtime", "designed", "[", "mlc", "llm", "]", "(", "http", ":", "//github.com/mlc-ai/mlc-llm", ")", ".", "1", ".", "install", "prerequisite", "compilation", ":", "1", ".", "[", "emscripten", "]", "(", "http", ":", "//emscripten.org", ")", ".", "llvm-based", "compiler", "compiles", "c/c++", "source", "code", "webassembly", ".", "-", "follow", "[", "installation", "instruction", "]", "(", "http", ":", "//emscripten.org/docs/getting_started/downloads.html", "#", "installation-instructions-using-the-emsdk-recommended", ")", "install", "latest", "emsdk", ".", "-", "source", "`", "emsdk_env.sh", "`", "`", "source", "path/to/emsdk_env.sh", "`", ",", "`", "emcc", "`", "reachable", "path", "command", "`", "emcc", "`", "work", ".", "4", ".", "install", "jekyll", "following", "[", "official", "guide", "]", "(", "http", ":", "//jekyllrb.com/docs/installation/", ")", ".", "package", "use", "website", ".", "needed", "'re", "using", "nextjs", "(", "see", "next-simple-chat", "example", ")", ".", "5", ".", "install", "jekyll-remote-theme", "command", ".", "try", "[", "gem", "mirror", "]", "(", "http", ":", "//gems.ruby-china.com/", ")", "install", "blocked", ".", "``", "`", "shell", "gem", "install", "jekyll-remote-theme", "``", "`", "verify", "successful", "installation", "trying", "`", "emcc", "`", "`", "jekyll", "`", "terminal", ",", "respectively", ".", "2", ".", "setup", "necessary", "environment", "prepare", "necessary", "dependency", "web", "build", ":", "``", "`", "shell", "./scripts/prep_deps.sh", "``", "`", "3", ".", "buld", "webllm", "package", "``", "`", "shell", "npm", "run", "build", "``", "`", "4", ".", "validate", "sub-packages", "go", "subfolders", "[", "example", "]", "(", "example", ")", "validate", "sub-packages", ".", "use", "parcelv2", "bundling", ".", "although", "parcel", "good", "tracking", "parent", "directory", "change", "sometimes", ".", "make", "change", "webllm", "package", ",", "try", "edit", "`", "package.json", "`", "subfolder", "save", ",", "trigger", "parcel", "rebuild", "." 
], [ "build webllm package source note : n't need build unless would like change webllm package , follow [ use webllm ] ( # use-web-llm-package ) instead .", "webllm package web runtime designed [ mlc llm ] ( http : //github.com/mlc-ai/mlc-llm ) .", "1 .", "install prerequisite compilation : 1 .", "[ emscripten ] ( http : //emscripten.org ) .", "llvm-based compiler compiles c/c++ source code webassembly .", "- follow [ installation instruction ] ( http : //emscripten.org/docs/getting_started/downloads.html # installation-instructions-using-the-emsdk-recommended ) install latest emsdk .", "- source ` emsdk_env.sh ` ` source path/to/emsdk_env.sh ` , ` emcc ` reachable path command ` emcc ` work .", "4 .", "install jekyll following [ official guide ] ( http : //jekyllrb.com/docs/installation/ ) .", "package use website .", "needed 're using nextjs ( see next-simple-chat example ) .", "5 .", "install jekyll-remote-theme command .", "try [ gem mirror ] ( http : //gems.ruby-china.com/ ) install blocked .", "`` ` shell gem install jekyll-remote-theme `` ` verify successful installation trying ` emcc ` ` jekyll ` terminal , respectively .", "2 .", "setup necessary environment prepare necessary dependency web build : `` ` shell ./scripts/prep_deps.sh `` ` 3 .", "buld webllm package `` ` shell npm run build `` ` 4 .", "validate sub-packages go subfolders [ example ] ( example ) validate sub-packages .", "use parcelv2 bundling .", "although parcel good tracking parent directory change sometimes .", "make change webllm package , try edit ` package.json ` subfolder save , trigger parcel rebuild ." ] ]
[ [ "build", "webllm", "package", "source", "note", ":", "n't", "need", "build", "unless", "would", "like", "change", "webllm", "package", ",", "follow", "[", "use", "webllm", "]", "(", "#", "use-web-llm-package", ")", "instead", ".", "webllm", "package", "web", "runtime", "designed", "[", "mlc", "llm", "]", "(", "http", ":", "//github.com/mlc-ai/mlc-llm", ")", ".", "1", ".", "install", "prerequisite", "compilation", ":", "1", ".", "[", "emscripten", "]", "(", "http", ":", "//emscripten.org", ")", ".", "llvm-based", "compiler", "compiles", "c/c++", "source", "code", "webassembly", ".", "-", "follow", "[", "installation", "instruction", "]", "(", "http", ":", "//emscripten.org/docs/getting_started/downloads.html", "#", "installation-instructions-using-the-emsdk-recommended", ")", "install", "latest", "emsdk", ".", "-", "source", "`", "emsdk_env.sh", "`", "`", "source", "path/to/emsdk_env.sh", "`", ",", "`", "emcc", "`", "reachable", "path", "command", "`", "emcc", "`", "work", ".", "4", ".", "install", "jekyll", "following", "[", "official", "guide", "]", "(", "http", ":", "//jekyllrb.com/docs/installation/", ")", ".", "package", "use", "website", ".", "needed", "'re", "using", "nextjs", "(", "see", "next-simple-chat", "example", ")", ".", "5", ".", "install", "jekyll-remote-theme", "command", ".", "try", "[", "gem", "mirror", "]", "(", "http", ":", "//gems.ruby-china.com/", ")", "install", "blocked", ".", "``", "`", "shell", "gem", "install", "jekyll-remote-theme", "``", "`", "verify", "successful", "installation", "trying", "`", "emcc", "`", "`", "jekyll", "`", "terminal", ",", "respectively", ".", "2", ".", "setup", "necessary", "environment", "prepare", "necessary", "dependency", "web", "build", ":", "``", "`", "shell", "./scripts/prep_deps.sh", "``", "`", "3", ".", "buld", "webllm", "package", "``", "`", "shell", "npm", "run", "build", "``", "`", "4", ".", "validate", "sub-packages", "go", "subfolders", "[", "example", "]", "(", "example", ")", "validate", "sub-packages", ".", "use", "parcelv2", "bundling", ".", "although", "parcel", "good", "tracking", "parent", "directory", "change", "sometimes", ".", "make", "change", "webllm", "package", ",", "try", "edit", "`", "package.json", "`", "subfolder", "save", ",", "trigger", "parcel", "rebuild", "." 
], [ "build webllm package source note : n't need build unless would like change webllm package , follow [ use webllm ] ( # use-web-llm-package ) instead .", "webllm package web runtime designed [ mlc llm ] ( http : //github.com/mlc-ai/mlc-llm ) .", "1 .", "install prerequisite compilation : 1 .", "[ emscripten ] ( http : //emscripten.org ) .", "llvm-based compiler compiles c/c++ source code webassembly .", "- follow [ installation instruction ] ( http : //emscripten.org/docs/getting_started/downloads.html # installation-instructions-using-the-emsdk-recommended ) install latest emsdk .", "- source ` emsdk_env.sh ` ` source path/to/emsdk_env.sh ` , ` emcc ` reachable path command ` emcc ` work .", "4 .", "install jekyll following [ official guide ] ( http : //jekyllrb.com/docs/installation/ ) .", "package use website .", "needed 're using nextjs ( see next-simple-chat example ) .", "5 .", "install jekyll-remote-theme command .", "try [ gem mirror ] ( http : //gems.ruby-china.com/ ) install blocked .", "`` ` shell gem install jekyll-remote-theme `` ` verify successful installation trying ` emcc ` ` jekyll ` terminal , respectively .", "2 .", "setup necessary environment prepare necessary dependency web build : `` ` shell ./scripts/prep_deps.sh `` ` 3 .", "buld webllm package `` ` shell npm run build `` ` 4 .", "validate sub-packages go subfolders [ example ] ( example ) validate sub-packages .", "use parcelv2 bundling .", "although parcel good tracking parent directory change sometimes .", "make change webllm package , try edit ` package.json ` subfolder save , trigger parcel rebuild ." ] ]
Build WebLLM Package From Source NOTE: you don't need to build from source unless you would like to change the WebLLM package; otherwise, follow [use WebLLM](#use-web-llm-package) instead. The WebLLM package is a web runtime designed for [MLC LLM](https://github.com/mlc-ai/mlc-llm). 1. Install all the prerequisites for compilation: 1. [emscripten](https://emscripten.org). It is an LLVM-based compiler that compiles C/C++ source code to WebAssembly. - Follow the [installation instructions](https://emscripten.org/docs/getting_started/downloads.html#installation-instructions-using-the-emsdk-recommended) to install the latest emsdk. - Source `emsdk_env.sh` by `source path/to/emsdk_env.sh`, so that `emcc` is reachable from PATH and the command `emcc` works. 2. Install jekyll by following the [official guides](https://jekyllrb.com/docs/installation/). It is the package we use for the website. This is not needed if you're using Next.js (see next-simple-chat in the examples). 3. Install jekyll-remote-theme with the command below. Try a [gem mirror](https://gems.ruby-china.com/) if the installation is blocked. ```shell gem install jekyll-remote-theme ``` We can verify a successful installation by trying out `emcc` and `jekyll` in the terminal, respectively. 2. Set up the necessary environment Prepare all the necessary dependencies for the web build: ```shell ./scripts/prep_deps.sh ``` 3. Build WebLLM Package ```shell npm run build ``` 4. Validate some of the sub-packages You can then go to the subfolders in [examples](examples) to validate some of the sub-packages. We use Parcel v2 for bundling, although Parcel is sometimes not very good at tracking parent directory changes. When you make a change in the WebLLM package, try editing the `package.json` of the subfolder and saving it, which will trigger Parcel to rebuild.
https://github.com/mlc-ai/web-llm
-1
[ "chatgpt", "deep-learning", "language-model", "llm", "tvm", "webgpu", "webml" ]
https://raw.githubusercontent.com/nebuly-ai/nebuly/main/README.md
[ [ "installation", "easiest", "way", "install", "nebuly", "’", "sdk", "via", "`", "pip", "`", ":", "``", "`", "pip", "install", "nebuly", "``", "`", "installed", ",", "authenticate", "nebuly", "platform", "start", "building", "." ], [ "installation easiest way install nebuly ’ sdk via ` pip ` : `` ` pip install nebuly `` ` installed , authenticate nebuly platform start building ." ] ]
[ [ "installation", "easiest", "way", "install", "nebuly", "’", "sdk", "via", "`", "pip", "`", ":", "``", "`", "pip", "install", "nebuly", "``", "`", "installed", ",", "authenticate", "nebuly", "platform", "start", "building", "." ], [ "installation easiest way install nebuly ’ sdk via ` pip ` : `` ` pip install nebuly `` ` installed , authenticate nebuly platform start building ." ] ]
Installation The easiest way to install Nebuly’s SDK is via `pip`: ``` pip install nebuly ``` Once installed, authenticate to the Nebuly platform and start building.
https://github.com/nebuly-ai/nebuly
0
[ "ai", "analytics", "artificial-intelligence", "deeplearning", "large-language-models", "llm" ]
https://raw.githubusercontent.com/LlamaFamily/Llama2-Chinese/main/README.md
[ [ "🍄", "模型量化", "我们对中文微调的模型参数进行了量化,方便以更少的计算资源运行。目前已经在", "[", "hugging", "face", "]", "(", "http", ":", "//huggingface.co/flagalpha", ")", "上传了13b中文微调模型", "[", "flagalpha/llama2-chinese-13b-chat", "]", "(", "http", ":", "//huggingface.co/flagalpha/llama2-chinese-13b-chat", ")", "的4bit压缩版本", "[", "flagalpha/llama2-chinese-13b-chat-4bit", "]", "(", "http", ":", "//huggingface.co/flagalpha/llama2-chinese-13b-chat-4bit", ")", ",具体调用方式如下:", "环境准备:", "``", "`", "pip", "install", "git+https", ":", "//github.com/panqiwei/autogptq.git", "``", "`", "``", "`", "python", "transformer", "import", "autotokenizer", "auto_gptq", "import", "autogptqforcausallm", "model", "=", "autogptqforcausallm.from_quantized", "(", "'flagalpha/llama2-chinese-13b-chat-4bit", "'", ",", "device=", "''", "cuda:0", "''", ")", "tokenizer", "=", "autotokenizer.from_pretrained", "(", "'flagalpha/llama2-chinese-13b-chat-4bit", "'", ",", "use_fast=false", ")", "input_ids", "=", "tokenizer", "(", "[", "'", "<", ">", "human", ":", "怎么登上火星\\n", "<", "/s", ">", "<", ">", "assistant", ":", "'", "]", ",", "return_tensors=", "''", "pt", "''", ",", "add_special_tokens=false", ")", ".input_ids.to", "(", "'cuda", "'", ")", "generate_input", "=", "{", "``", "input_ids", "''", ":", "input_ids", ",", "``", "max_new_tokens", "''", ":512", ",", "``", "do_sample", "''", ":", "true", ",", "``", "top_k", "''", ":50", ",", "``", "top_p", "''", ":0.95", ",", "``", "temperature", "''", ":0.3", ",", "``", "repetition_penalty", "''", ":1.3", ",", "``", "eos_token_id", "''", ":", "tokenizer.eos_token_id", ",", "``", "bos_token_id", "''", ":", "tokenizer.bos_token_id", ",", "``", "pad_token_id", "''", ":", "tokenizer.pad_token_id", "}", "generate_ids", "=", "model.generate", "(", "*", "*", "generate_input", ")", "text", "=", "tokenizer.decode", "(", "generate_ids", "[", "0", "]", ")", "print", "(", "text", ")", "``", "`" ], [ "🍄 模型量化 我们对中文微调的模型参数进行了量化,方便以更少的计算资源运行。目前已经在 [ hugging face ] ( http : //huggingface.co/flagalpha ) 上传了13b中文微调模型 [ flagalpha/llama2-chinese-13b-chat ] ( http : //huggingface.co/flagalpha/llama2-chinese-13b-chat ) 的4bit压缩版本 [ flagalpha/llama2-chinese-13b-chat-4bit ] ( http : //huggingface.co/flagalpha/llama2-chinese-13b-chat-4bit ) ,具体调用方式如下: 环境准备: `` ` pip install git+https : //github.com/panqiwei/autogptq.git `` ` `` ` python transformer import autotokenizer auto_gptq import autogptqforcausallm model = autogptqforcausallm.from_quantized ( 'flagalpha/llama2-chinese-13b-chat-4bit ' , device= '' cuda:0 '' ) tokenizer = autotokenizer.from_pretrained ( 'flagalpha/llama2-chinese-13b-chat-4bit ' , use_fast=false ) input_ids = tokenizer ( [ ' < > human : 怎么登上火星\\n < /s > < > assistant : ' ] , return_tensors= '' pt '' , add_special_tokens=false ) .input_ids.to ( 'cuda ' ) generate_input = { `` input_ids '' : input_ids , `` max_new_tokens '' :512 , `` do_sample '' : true , `` top_k '' :50 , `` top_p '' :0.95 , `` temperature '' :0.3 , `` repetition_penalty '' :1.3 , `` eos_token_id '' : tokenizer.eos_token_id , `` bos_token_id '' : tokenizer.bos_token_id , `` pad_token_id '' : tokenizer.pad_token_id } generate_ids = model.generate ( * * generate_input ) text = tokenizer.decode ( generate_ids [ 0 ] ) print ( text ) `` `" ] ]
[ [ "🍄", "模型量化", "我们对中文微调的模型参数进行了量化,方便以更少的计算资源运行。目前已经在", "[", "hugging", "face", "]", "(", "http", ":", "//huggingface.co/flagalpha", ")", "上传了13b中文微调模型", "[", "flagalpha/llama2-chinese-13b-chat", "]", "(", "http", ":", "//huggingface.co/flagalpha/llama2-chinese-13b-chat", ")", "的4bit压缩版本", "[", "flagalpha/llama2-chinese-13b-chat-4bit", "]", "(", "http", ":", "//huggingface.co/flagalpha/llama2-chinese-13b-chat-4bit", ")", ",具体调用方式如下:", "环境准备:", "``", "`", "pip", "install", "git+https", ":", "//github.com/panqiwei/autogptq.git", "``", "`", "``", "`", "python", "transformer", "import", "autotokenizer", "auto_gptq", "import", "autogptqforcausallm", "model", "=", "autogptqforcausallm.from_quantized", "(", "'flagalpha/llama2-chinese-13b-chat-4bit", "'", ",", "device=", "''", "cuda:0", "''", ")", "tokenizer", "=", "autotokenizer.from_pretrained", "(", "'flagalpha/llama2-chinese-13b-chat-4bit", "'", ",", "use_fast=false", ")", "input_ids", "=", "tokenizer", "(", "[", "'", "<", ">", "human", ":", "怎么登上火星\\n", "<", "/s", ">", "<", ">", "assistant", ":", "'", "]", ",", "return_tensors=", "''", "pt", "''", ",", "add_special_tokens=false", ")", ".input_ids.to", "(", "'cuda", "'", ")", "generate_input", "=", "{", "``", "input_ids", "''", ":", "input_ids", ",", "``", "max_new_tokens", "''", ":512", ",", "``", "do_sample", "''", ":", "true", ",", "``", "top_k", "''", ":50", ",", "``", "top_p", "''", ":0.95", ",", "``", "temperature", "''", ":0.3", ",", "``", "repetition_penalty", "''", ":1.3", ",", "``", "eos_token_id", "''", ":", "tokenizer.eos_token_id", ",", "``", "bos_token_id", "''", ":", "tokenizer.bos_token_id", ",", "``", "pad_token_id", "''", ":", "tokenizer.pad_token_id", "}", "generate_ids", "=", "model.generate", "(", "*", "*", "generate_input", ")", "text", "=", "tokenizer.decode", "(", "generate_ids", "[", "0", "]", ")", "print", "(", "text", ")", "``", "`" ], [ "🍄 模型量化 我们对中文微调的模型参数进行了量化,方便以更少的计算资源运行。目前已经在 [ hugging face ] ( http : //huggingface.co/flagalpha ) 上传了13b中文微调模型 [ flagalpha/llama2-chinese-13b-chat ] ( http : //huggingface.co/flagalpha/llama2-chinese-13b-chat ) 的4bit压缩版本 [ flagalpha/llama2-chinese-13b-chat-4bit ] ( http : //huggingface.co/flagalpha/llama2-chinese-13b-chat-4bit ) ,具体调用方式如下: 环境准备: `` ` pip install git+https : //github.com/panqiwei/autogptq.git `` ` `` ` python transformer import autotokenizer auto_gptq import autogptqforcausallm model = autogptqforcausallm.from_quantized ( 'flagalpha/llama2-chinese-13b-chat-4bit ' , device= '' cuda:0 '' ) tokenizer = autotokenizer.from_pretrained ( 'flagalpha/llama2-chinese-13b-chat-4bit ' , use_fast=false ) input_ids = tokenizer ( [ ' < > human : 怎么登上火星\\n < /s > < > assistant : ' ] , return_tensors= '' pt '' , add_special_tokens=false ) .input_ids.to ( 'cuda ' ) generate_input = { `` input_ids '' : input_ids , `` max_new_tokens '' :512 , `` do_sample '' : true , `` top_k '' :50 , `` top_p '' :0.95 , `` temperature '' :0.3 , `` repetition_penalty '' :1.3 , `` eos_token_id '' : tokenizer.eos_token_id , `` bos_token_id '' : tokenizer.bos_token_id , `` pad_token_id '' : tokenizer.pad_token_id } generate_ids = model.generate ( * * generate_input ) text = tokenizer.decode ( generate_ids [ 0 ] ) print ( text ) `` `" ] ]
🍄 Model Quantization We have quantized the parameters of the Chinese fine-tuned models so that they can run with fewer compute resources. A 4-bit compressed version, [FlagAlpha/Llama2-Chinese-13b-Chat-4bit](https://huggingface.co/FlagAlpha/Llama2-Chinese-13b-Chat-4bit), of the 13B Chinese fine-tuned model [FlagAlpha/Llama2-Chinese-13b-Chat](https://huggingface.co/FlagAlpha/Llama2-Chinese-13b-Chat) has already been uploaded to [Hugging Face](https://huggingface.co/FlagAlpha). It can be used as follows: Environment setup: ``` pip install git+https://github.com/PanQiWei/AutoGPTQ.git ``` ```python from transformers import AutoTokenizer from auto_gptq import AutoGPTQForCausalLM model = AutoGPTQForCausalLM.from_quantized('FlagAlpha/Llama2-Chinese-13b-Chat-4bit', device="cuda:0") tokenizer = AutoTokenizer.from_pretrained('FlagAlpha/Llama2-Chinese-13b-Chat-4bit',use_fast=False) input_ids = tokenizer(['<s>Human: 怎么登上火星\n</s><s>Assistant: '], return_tensors="pt",add_special_tokens=False).input_ids.to('cuda') generate_input = { "input_ids":input_ids, "max_new_tokens":512, "do_sample":True, "top_k":50, "top_p":0.95, "temperature":0.3, "repetition_penalty":1.3, "eos_token_id":tokenizer.eos_token_id, "bos_token_id":tokenizer.bos_token_id, "pad_token_id":tokenizer.pad_token_id } generate_ids = model.generate(**generate_input) text = tokenizer.decode(generate_ids[0]) print(text) ```
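As a small follow-up to the block above (a sketch that assumes the `input_ids`, `generate_ids`, and `tokenizer` variables it defines), you can decode only the newly generated tokens so that the prompt is not echoed back:

```python
# Slice off the prompt tokens before decoding, so only the model's reply is printed.
# Assumes `input_ids`, `generate_ids`, and `tokenizer` from the example above.
prompt_length = input_ids.shape[-1]
new_tokens = generate_ids[0][prompt_length:]
reply = tokenizer.decode(new_tokens, skip_special_tokens=True)
print(reply)
```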
https://github.com/LlamaFamily/Llama2-Chinese
0
[ "finetune", "llama", "llama2", "llm", "lora", "pretrain" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "docker", "provide", "docker", "container", "help", "start", "running", "openllm", ":", "``", "`", "bash", "docker", "run", "--", "rm", "-it", "-p", "3000:3000", "ghcr.io/bentoml/openllm", "start", "facebook/opt-1.3b", "--", "backend", "pt", "``", "`", ">", "[", "!", "note", "]", ">", "given", "access", "gpus", "setup", "[", "nvidia-docker", "]", "(", "http", ":", "//github.com/nvidia/nvidia-container-toolkit", ")", ",", "additionally", "pas", "`", "--", "gpus", "`", ">", "use", "gpu", "faster", "inference", "optimization", ">", "``", "`", "bash", ">", "docker", "run", "--", "rm", "--", "gpus", "-p", "3000:3000", "-it", "ghcr.io/bentoml/openllm", "start", "huggingfaceh4/zephyr-7b-beta", "--", "backend", "vllm", ">", "``", "`" ], [ "docker provide docker container help start running openllm : `` ` bash docker run -- rm -it -p 3000:3000 ghcr.io/bentoml/openllm start facebook/opt-1.3b -- backend pt `` ` > [ ! note ] > given access gpus setup [ nvidia-docker ] ( http : //github.com/nvidia/nvidia-container-toolkit ) , additionally pas ` -- gpus ` > use gpu faster inference optimization > `` ` bash > docker run -- rm -- gpus -p 3000:3000 -it ghcr.io/bentoml/openllm start huggingfaceh4/zephyr-7b-beta -- backend vllm > `` `" ] ]
[ [ "docker", "provide", "docker", "container", "help", "start", "running", "openllm", ":", "``", "`", "bash", "docker", "run", "--", "rm", "-it", "-p", "3000:3000", "ghcr.io/bentoml/openllm", "start", "facebook/opt-1.3b", "--", "backend", "pt", "``", "`", ">", "[", "!", "note", "]", ">", "given", "access", "gpus", "setup", "[", "nvidia-docker", "]", "(", "http", ":", "//github.com/nvidia/nvidia-container-toolkit", ")", ",", "additionally", "pas", "`", "--", "gpus", "`", ">", "use", "gpu", "faster", "inference", "optimization", ">", "``", "`", "bash", ">", "docker", "run", "--", "rm", "--", "gpus", "-p", "3000:3000", "-it", "ghcr.io/bentoml/openllm", "start", "huggingfaceh4/zephyr-7b-beta", "--", "backend", "vllm", ">", "``", "`" ], [ "docker provide docker container help start running openllm : `` ` bash docker run -- rm -it -p 3000:3000 ghcr.io/bentoml/openllm start facebook/opt-1.3b -- backend pt `` ` > [ ! note ] > given access gpus setup [ nvidia-docker ] ( http : //github.com/nvidia/nvidia-container-toolkit ) , additionally pas ` -- gpus ` > use gpu faster inference optimization > `` ` bash > docker run -- rm -- gpus -p 3000:3000 -it ghcr.io/bentoml/openllm start huggingfaceh4/zephyr-7b-beta -- backend vllm > `` `" ] ]
Docker We provide a Docker container that helps you start running OpenLLM: ```bash docker run --rm -it -p 3000:3000 ghcr.io/bentoml/openllm start facebook/opt-1.3b --backend pt ``` > [!NOTE] > Given you have access to GPUs and have set up [nvidia-docker](https://github.com/NVIDIA/nvidia-container-toolkit), you can additionally pass in `--gpus` > to use GPUs for faster inference and optimization >```bash > docker run --rm --gpus all -p 3000:3000 -it ghcr.io/bentoml/openllm start HuggingFaceH4/zephyr-7b-beta --backend vllm > ```
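Once the container is running, you can also talk to the server from Python. This is only a sketch: it assumes this OpenLLM release ships the `openllm.client.HTTPClient` helper (check your installed version's docs if the import differs) and that the server started above is listening on port 3000.

```python
import openllm

# Connect to the OpenLLM server started by the docker command above.
# `openllm.client.HTTPClient` is assumed to exist in this OpenLLM release.
client = openllm.client.HTTPClient("http://localhost:3000")

# Send a prompt and print the generated text.
print(client.query("Explain the difference between a llama and an alpaca."))
```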
https://github.com/bentoml/OpenLLM
1
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "🏃", "get", "started", "following", "provides", "instruction", "get", "started", "openllm", "locally", "." ], [ "🏃 get started following provides instruction get started openllm locally ." ] ]
[ [ "🏃", "get", "started", "following", "provides", "instruction", "get", "started", "openllm", "locally", "." ], [ "🏃 get started following provides instruction get started openllm locally ." ] ]
🏃 Get started The following provides instructions for how to get started with OpenLLM locally.
https://github.com/bentoml/OpenLLM
-1
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "prerequisite", "installed", "python", "3.8", "(", "later", ")", "`", "pip", "`", ".", "highly", "recommend", "using", "[", "virtual", "environment", "]", "(", "http", ":", "//docs.python.org/3/library/venv.html", ")", "prevent", "package", "conflict", "." ], [ "prerequisite installed python 3.8 ( later ) ` pip ` .", "highly recommend using [ virtual environment ] ( http : //docs.python.org/3/library/venv.html ) prevent package conflict ." ] ]
[ [ "prerequisite", "installed", "python", "3.8", "(", "later", ")", "`", "pip", "`", ".", "highly", "recommend", "using", "[", "virtual", "environment", "]", "(", "http", ":", "//docs.python.org/3/library/venv.html", ")", "prevent", "package", "conflict", "." ], [ "prerequisite installed python 3.8 ( later ) ` pip ` .", "highly recommend using [ virtual environment ] ( http : //docs.python.org/3/library/venv.html ) prevent package conflict ." ] ]
Prerequisites You have installed Python 3.8 (or later) and `pip`. We highly recommend using a [Virtual Environment](https://docs.python.org/3/library/venv.html) to prevent package conflicts.
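If you want to set up that virtual environment without leaving Python, here is a minimal sketch using the standard-library `venv` module (equivalent to running `python -m venv .venv` from a shell):

```python
import venv

# Create a virtual environment in ./.venv with pip available inside it.
venv.create(".venv", with_pip=True)

# Activation still happens in your shell, e.g.:
#   source .venv/bin/activate      (Linux/macOS)
#   .venv\Scripts\activate         (Windows)
# then install OpenLLM inside it with `pip install openllm`.
print("Virtual environment created at ./.venv")
```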
https://github.com/bentoml/OpenLLM
-1
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "install", "openllm", "install", "openllm", "using", "`", "pip", "`", "follows", ":", "``", "`", "bash", "pip", "install", "openllm", "``", "`", "verify", "installation", ",", "run", ":", "``", "`", "bash", "$", "openllm", "-h", "usage", ":", "openllm", "[", "option", "]", "command", "[", "args", "]", "...", "██████╗", "██████╗", "███████╗███╗", "██╗██╗", "██╗", "███╗", "███╗", "██╔═══██╗██╔══██╗██╔════╝████╗", "██║██║", "██║", "████╗", "████║", "██║", "██║██████╔╝█████╗", "██╔██╗", "██║██║", "██║", "██╔████╔██║", "██║", "██║██╔═══╝", "██╔══╝", "██║╚██╗██║██║", "██║", "██║╚██╔╝██║", "╚██████╔╝██║", "███████╗██║", "╚████║███████╗███████╗██║", "╚═╝", "██║", "╚═════╝", "╚═╝", "╚══════╝╚═╝", "╚═══╝╚══════╝╚══════╝╚═╝", "╚═╝", ".", "open", "platform", "operating", "large", "language", "model", "production", ".", "fine-tune", ",", "serve", ",", "deploy", ",", "monitor", "llm", "ease", ".", "option", ":", "-v", ",", "--", "version", "show", "version", "exit", ".", "-h", ",", "--", "help", "show", "message", "exit", ".", "command", ":", "build", "package", "given", "model", "bentollm", ".", "import", "setup", "llm", "interactively", ".", "model", "list", "supported", "model", ".", "prune", "remove", "saved", "model", ",", "(", "optionally", "bentos", ")", "built", "openllm", "locally", ".", "query", "query", "llm", "interactively", ",", "terminal", ".", "start", "start", "llmserver", "supported", "llm", ".", "start-grpc", "start", "grpc", "llmserver", "supported", "llm", ".", "extension", ":", "build-base-container", "base", "image", "builder", "bentollm", ".", "dive-bentos", "dive", "bentollm", ".", "get-containerfile", "return", "containerfile", "given", "bento", ".", "get-prompt", "get", "default", "prompt", "used", "openllm", ".", "list-bentos", "list", "available", "bentos", "built", "openllm", ".", "list-models", "equivalent", "openllm", "model", "...", "playground", "openllm", "playground", ".", "``", "`" ], [ "install openllm install openllm using ` pip ` follows : `` ` bash pip install openllm `` ` verify installation , run : `` ` bash $ openllm -h usage : openllm [ option ] command [ args ] ... ██████╗ ██████╗ ███████╗███╗ ██╗██╗ ██╗ ███╗ ███╗ ██╔═══██╗██╔══██╗██╔════╝████╗ ██║██║ ██║ ████╗ ████║ ██║ ██║██████╔╝█████╗ ██╔██╗ ██║██║ ██║ ██╔████╔██║ ██║ ██║██╔═══╝ ██╔══╝ ██║╚██╗██║██║ ██║ ██║╚██╔╝██║ ╚██████╔╝██║ ███████╗██║ ╚████║███████╗███████╗██║ ╚═╝ ██║ ╚═════╝ ╚═╝ ╚══════╝╚═╝ ╚═══╝╚══════╝╚══════╝╚═╝ ╚═╝ .", "open platform operating large language model production .", "fine-tune , serve , deploy , monitor llm ease .", "option : -v , -- version show version exit .", "-h , -- help show message exit .", "command : build package given model bentollm .", "import setup llm interactively .", "model list supported model .", "prune remove saved model , ( optionally bentos ) built openllm locally .", "query query llm interactively , terminal .", "start start llmserver supported llm .", "start-grpc start grpc llmserver supported llm .", "extension : build-base-container base image builder bentollm .", "dive-bentos dive bentollm .", "get-containerfile return containerfile given bento .", "get-prompt get default prompt used openllm .", "list-bentos list available bentos built openllm .", "list-models equivalent openllm model ... playground openllm playground .", "`` `" ] ]
[ [ "install", "openllm", "install", "openllm", "using", "`", "pip", "`", "follows", ":", "``", "`", "bash", "pip", "install", "openllm", "``", "`", "verify", "installation", ",", "run", ":", "``", "`", "bash", "$", "openllm", "-h", "usage", ":", "openllm", "[", "option", "]", "command", "[", "args", "]", "...", "██████╗", "██████╗", "███████╗███╗", "██╗██╗", "██╗", "███╗", "███╗", "██╔═══██╗██╔══██╗██╔════╝████╗", "██║██║", "██║", "████╗", "████║", "██║", "██║██████╔╝█████╗", "██╔██╗", "██║██║", "██║", "██╔████╔██║", "██║", "██║██╔═══╝", "██╔══╝", "██║╚██╗██║██║", "██║", "██║╚██╔╝██║", "╚██████╔╝██║", "███████╗██║", "╚████║███████╗███████╗██║", "╚═╝", "██║", "╚═════╝", "╚═╝", "╚══════╝╚═╝", "╚═══╝╚══════╝╚══════╝╚═╝", "╚═╝", ".", "open", "platform", "operating", "large", "language", "model", "production", ".", "fine-tune", ",", "serve", ",", "deploy", ",", "monitor", "llm", "ease", ".", "option", ":", "-v", ",", "--", "version", "show", "version", "exit", ".", "-h", ",", "--", "help", "show", "message", "exit", ".", "command", ":", "build", "package", "given", "model", "bentollm", ".", "import", "setup", "llm", "interactively", ".", "model", "list", "supported", "model", ".", "prune", "remove", "saved", "model", ",", "(", "optionally", "bentos", ")", "built", "openllm", "locally", ".", "query", "query", "llm", "interactively", ",", "terminal", ".", "start", "start", "llmserver", "supported", "llm", ".", "start-grpc", "start", "grpc", "llmserver", "supported", "llm", ".", "extension", ":", "build-base-container", "base", "image", "builder", "bentollm", ".", "dive-bentos", "dive", "bentollm", ".", "get-containerfile", "return", "containerfile", "given", "bento", ".", "get-prompt", "get", "default", "prompt", "used", "openllm", ".", "list-bentos", "list", "available", "bentos", "built", "openllm", ".", "list-models", "equivalent", "openllm", "model", "...", "playground", "openllm", "playground", ".", "``", "`" ], [ "install openllm install openllm using ` pip ` follows : `` ` bash pip install openllm `` ` verify installation , run : `` ` bash $ openllm -h usage : openllm [ option ] command [ args ] ... ██████╗ ██████╗ ███████╗███╗ ██╗██╗ ██╗ ███╗ ███╗ ██╔═══██╗██╔══██╗██╔════╝████╗ ██║██║ ██║ ████╗ ████║ ██║ ██║██████╔╝█████╗ ██╔██╗ ██║██║ ██║ ██╔████╔██║ ██║ ██║██╔═══╝ ██╔══╝ ██║╚██╗██║██║ ██║ ██║╚██╔╝██║ ╚██████╔╝██║ ███████╗██║ ╚████║███████╗███████╗██║ ╚═╝ ██║ ╚═════╝ ╚═╝ ╚══════╝╚═╝ ╚═══╝╚══════╝╚══════╝╚═╝ ╚═╝ .", "open platform operating large language model production .", "fine-tune , serve , deploy , monitor llm ease .", "option : -v , -- version show version exit .", "-h , -- help show message exit .", "command : build package given model bentollm .", "import setup llm interactively .", "model list supported model .", "prune remove saved model , ( optionally bentos ) built openllm locally .", "query query llm interactively , terminal .", "start start llmserver supported llm .", "start-grpc start grpc llmserver supported llm .", "extension : build-base-container base image builder bentollm .", "dive-bentos dive bentollm .", "get-containerfile return containerfile given bento .", "get-prompt get default prompt used openllm .", "list-bentos list available bentos built openllm .", "list-models equivalent openllm model ... playground openllm playground .", "`` `" ] ]
Install OpenLLM Install OpenLLM by using `pip` as follows: ```bash pip install openllm ``` To verify the installation, run: ```bash $ openllm -h Usage: openllm [OPTIONS] COMMAND [ARGS]... ██████╗ ██████╗ ███████╗███╗ ██╗██╗ ██╗ ███╗ ███╗ ██╔═══██╗██╔══██╗██╔════╝████╗ ██║██║ ██║ ████╗ ████║ ██║ ██║██████╔╝█████╗ ██╔██╗ ██║██║ ██║ ██╔████╔██║ ██║ ██║██╔═══╝ ██╔══╝ ██║╚██╗██║██║ ██║ ██║╚██╔╝██║ ╚██████╔╝██║ ███████╗██║ ╚████║███████╗███████╗██║ ╚═╝ ██║ ╚═════╝ ╚═╝ ╚══════╝╚═╝ ╚═══╝╚══════╝╚══════╝╚═╝ ╚═╝. An open platform for operating large language models in production. Fine-tune, serve, deploy, and monitor any LLMs with ease. Options: -v, --version Show the version and exit. -h, --help Show this message and exit. Commands: build Package a given models into a BentoLLM. import Setup LLM interactively. models List all supported models. prune Remove all saved models, (and optionally bentos) built with OpenLLM locally. query Query a LLM interactively, from a terminal. start Start a LLMServer for any supported LLM. start-grpc Start a gRPC LLMServer for any supported LLM. Extensions: build-base-container Base image builder for BentoLLM. dive-bentos Dive into a BentoLLM. get-containerfile Return Containerfile of any given Bento. get-prompt Get the default prompt used by OpenLLM. list-bentos List available bentos built by OpenLLM. list-models This is equivalent to openllm models... playground OpenLLM Playground. ```
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "🧩", "supported", "model", "openllm", "currently", "support", "following", "model", ".", "default", ",", "openllm", "n't", "include", "dependency", "run", "model", ".", "extra", "model-specific", "dependency", "installed", "instruction", ".", "<", "!", "--", "update-readme.py", ":", "start", "--", ">", "<", "detail", ">", "<", "summary", ">", "baichuan", "<", "/summary", ">" ], [ "🧩 supported model openllm currently support following model .", "default , openllm n't include dependency run model .", "extra model-specific dependency installed instruction .", "< ! -- update-readme.py : start -- > < detail > < summary > baichuan < /summary >" ] ]
[ [ "🧩", "supported", "model", "openllm", "currently", "support", "following", "model", ".", "default", ",", "openllm", "n't", "include", "dependency", "run", "model", ".", "extra", "model-specific", "dependency", "installed", "instruction", ".", "<", "!", "--", "update-readme.py", ":", "start", "--", ">", "<", "detail", ">", "<", "summary", ">", "baichuan", "<", "/summary", ">" ], [ "🧩 supported model openllm currently support following model .", "default , openllm n't include dependency run model .", "extra model-specific dependency installed instruction .", "< ! -- update-readme.py : start -- > < detail > < summary > baichuan < /summary >" ] ]
🧩 Supported models OpenLLM currently supports the following models. By default, OpenLLM doesn't include dependencies to run all models. The extra model-specific dependencies can be installed with the instructions below. <!-- update-readme.py: start --> <details> <summary>Baichuan</summary>
https://github.com/bentoml/OpenLLM
-1
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "baichuan", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "baichuan", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "baichuan", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan-7b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "baichuan", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=baichuan", ")", "see", "baichuan-compatible", "model", "." ], [ "quickstart > * * note : * * baichuan requires install : > `` ` bash > pip install `` openllm [ baichuan ] '' > `` ` run following command quickly spin baichuan server : `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan-7b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * baichuan variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=baichuan ) see baichuan-compatible model ." ] ]
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "baichuan", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "baichuan", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "baichuan", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan-7b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "baichuan", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=baichuan", ")", "see", "baichuan-compatible", "model", "." ], [ "quickstart > * * note : * * baichuan requires install : > `` ` bash > pip install `` openllm [ baichuan ] '' > `` ` run following command quickly spin baichuan server : `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan-7b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * baichuan variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=baichuan ) see baichuan-compatible model ." ] ]
Quickstart > **Note:** Baichuan requires installing OpenLLM with: > ```bash > pip install "openllm[baichuan]" > ``` Run the following command to quickly spin up a Baichuan server: ```bash TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan-7b ``` In a different terminal, run the following command to interact with the server: ```bash export OPENLLM_ENDPOINT=http://localhost:3000 openllm query 'What are large language models?' ``` > **Note:** Any Baichuan variants can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=baichuan) to see more Baichuan-compatible models.
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan2-7b-base", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan2-7b-base", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "chatglm", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan2-7b-base -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan2-7b-base -- backend pt `` ` < /details > < detail > < summary > chatglm < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan2-7b-base", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "baichuan-inc/baichuan2-7b-base", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "chatglm", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan2-7b-base -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start baichuan-inc/baichuan2-7b-base -- backend pt `` ` < /details > < detail > < summary > chatglm < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan2-7b-base --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance (a hedged GPU-capability check sketch follows this row). For all production serving use cases, you should choose vLLM. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan2-7b-base --backend pt ``` </details> <details> <summary>ChatGLM</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
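The backend advice in the row above ties vLLM's best serving performance to GPU architectures newer than 8.0 (Ampere and later). A quick way to confirm this before passing `--backend vllm` is to read the compute capability from `nvidia-smi`. This is a minimal sketch, not part of the OpenLLM README: it assumes `nvidia-smi` is installed and that the driver is recent enough to expose the `compute_cap` query field.

```bash
# Sketch: choose the OpenLLM backend from the GPU's compute capability.
# Assumes nvidia-smi is present and its driver supports --query-gpu=compute_cap.
cap=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader 2>/dev/null | head -n1)
if [ -n "$cap" ] && awk -v c="$cap" 'BEGIN { exit !(c >= 8.0) }'; then
  backend=vllm   # Ampere (8.0) or newer: vLLM gives the best serving performance
else
  backend=pt     # older GPU or no GPU detected: fall back to the PyTorch backend
fi
echo "Starting with --backend $backend"
TRUST_REMOTE_CODE=True openllm start baichuan-inc/baichuan2-7b-base --backend "$backend"
```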
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "chatglm", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "chatglm", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "chatglm", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "chatglm", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=chatglm", ")", "see", "chatglm-compatible", "model", "." ], [ "quickstart > * * note : * * chatglm requires install : > `` ` bash > pip install `` openllm [ chatglm ] '' > `` ` run following command quickly spin chatglm server : `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * chatglm variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=chatglm ) see chatglm-compatible model ." ] ]
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "chatglm", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "chatglm", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "chatglm", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "chatglm", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=chatglm", ")", "see", "chatglm-compatible", "model", "." ], [ "quickstart > * * note : * * chatglm requires install : > `` ` bash > pip install `` openllm [ chatglm ] '' > `` ` run following command quickly spin chatglm server : `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * chatglm variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=chatglm ) see chatglm-compatible model ." ] ]
Quickstart > **Note:** ChatGLM requires installing OpenLLM with: > ```bash > pip install "openllm[chatglm]" > ``` Run the following command to quickly spin up a ChatGLM server: ```bash TRUST_REMOTE_CODE=True openllm start thudm/chatglm-6b ``` In a different terminal, run the following command to interact with the server (a hedged raw-HTTP query sketch follows this row): ```bash export OPENLLM_ENDPOINT=http://localhost:3000 openllm query 'What are large language models?' ``` > **Note:** Any ChatGLM variant can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=chatglm) to see more ChatGLM-compatible models.
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
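Besides the `openllm query` helper used in the quickstart above, the running server can also be reached directly over HTTP. The sketch below is an assumption rather than documented API: it presumes the server listens on port 3000 and exposes a `/v1/generate` endpoint accepting a JSON body with a `prompt` field, which matches OpenLLM builds from around this time but may differ in other versions; the BentoML-based server typically serves interactive API docs at its root URL, so check there before relying on the exact path.

```bash
# Sketch: query a running OpenLLM server with plain curl instead of `openllm query`.
# Assumptions: server on localhost:3000 exposing /v1/generate with a "prompt" JSON field;
# verify the exact path and payload against your OpenLLM version's API docs.
curl -s -X POST http://localhost:3000/v1/generate \
  -H 'Content-Type: application/json' \
  -d '{"prompt": "What are large language models?"}'
```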
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "dollyv2", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b -- backend pt `` ` < /details > < detail > < summary > dollyv2 < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "thudm/chatglm-6b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "dollyv2", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start thudm/chatglm-6b -- backend pt `` ` < /details > < detail > < summary > dollyv2 < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash TRUST_REMOTE_CODE=True openllm start thudm/chatglm-6b --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, you should choose vLLM. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash TRUST_REMOTE_CODE=True openllm start thudm/chatglm-6b --backend pt ``` </details> <details> <summary>DollyV2</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "databricks/dolly-v2-3b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "databricks/dolly-v2-3b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "falcon", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start databricks/dolly-v2-3b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start databricks/dolly-v2-3b -- backend pt `` ` < /details > < detail > < summary > falcon < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "databricks/dolly-v2-3b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "databricks/dolly-v2-3b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "falcon", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start databricks/dolly-v2-3b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start databricks/dolly-v2-3b -- backend pt `` ` < /details > < detail > < summary > falcon < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch (a quick backend-availability check sketch follows this row). > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash openllm start databricks/dolly-v2-3b --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, you should choose vLLM. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash openllm start databricks/dolly-v2-3b --backend pt ``` </details> <details> <summary>Falcon</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
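The automatic fallback described in the DollyV2 row above hinges on whether vLLM is importable in the current environment. A quick way to see which backend the default will resolve to is simply to try the import; this is a generic Python availability check, not an OpenLLM command.

```bash
# Check whether the vLLM backend is available before relying on the default selection.
# If the import fails, OpenLLM falls back to the PyTorch backend (`pt`).
if python -c "import vllm" 2>/dev/null; then
  echo "vLLM is importable; the default backend will be vllm"
else
  echo "vLLM is not importable; the default backend will fall back to pt"
fi
```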
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "falcon", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "falcon", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "falcon", "server", ":", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "falcon", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=falcon", ")", "see", "falcon-compatible", "model", "." ], [ "quickstart > * * note : * * falcon requires install : > `` ` bash > pip install `` openllm [ falcon ] '' > `` ` run following command quickly spin falcon server : `` ` bash openllm start tiiuae/falcon-7b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * falcon variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=falcon ) see falcon-compatible model ." ] ]
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "falcon", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "falcon", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "falcon", "server", ":", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "falcon", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=falcon", ")", "see", "falcon-compatible", "model", "." ], [ "quickstart > * * note : * * falcon requires install : > `` ` bash > pip install `` openllm [ falcon ] '' > `` ` run following command quickly spin falcon server : `` ` bash openllm start tiiuae/falcon-7b `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * falcon variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=falcon ) see falcon-compatible model ." ] ]
Quickstart > **Note:** Falcon requires installing OpenLLM with: > ```bash > pip install "openllm[falcon]" > ``` Run the following command to quickly spin up a Falcon server: ```bash openllm start tiiuae/falcon-7b ``` In a different terminal, run the following command to interact with the server (a single-terminal variant sketch follows this row): ```bash export OPENLLM_ENDPOINT=http://localhost:3000 openllm query 'What are large language models?' ``` > **Note:** Any Falcon variant can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=falcon) to see more Falcon-compatible models.
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
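The Falcon quickstart above uses two terminals: one for the server and one for the query. For scripted or single-shell use, one option is to background the server and poll it until it is ready before querying. This sketch assumes the BentoML-based server exposes a `/readyz` readiness endpoint on port 3000, which is the usual BentoML convention but should be verified against your version.

```bash
# Sketch: run the Falcon quickstart from a single shell.
# Assumption: the server exposes /readyz on port 3000 (BentoML health-check convention).
openllm start tiiuae/falcon-7b &
server_pid=$!

export OPENLLM_ENDPOINT=http://localhost:3000
until curl -sf "$OPENLLM_ENDPOINT/readyz" > /dev/null; do
  sleep 5   # model download and loading can take a while; keep polling
done

openllm query 'What are large language models?'
kill "$server_pid"   # stop the background server when done
```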
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "flant5", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start tiiuae/falcon-7b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start tiiuae/falcon-7b -- backend pt `` ` < /details > < detail > < summary > flant5 < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "tiiuae/falcon-7b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "flant5", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start tiiuae/falcon-7b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start tiiuae/falcon-7b -- backend pt `` ` < /details > < detail > < summary > flant5 < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash openllm start tiiuae/falcon-7b --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, you should choose vLLM. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash openllm start tiiuae/falcon-7b --backend pt ``` </details> <details> <summary>FlanT5</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "eleutherai/gpt-neox-20b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "eleutherai/gpt-neox-20b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "llama", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start eleutherai/gpt-neox-20b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start eleutherai/gpt-neox-20b -- backend pt `` ` < /details > < detail > < summary > llama < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "eleutherai/gpt-neox-20b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "eleutherai/gpt-neox-20b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "llama", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start eleutherai/gpt-neox-20b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start eleutherai/gpt-neox-20b -- backend pt `` ` < /details > < detail > < summary > llama < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash openllm start eleutherai/gpt-neox-20b --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, you should choose vLLM. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash openllm start eleutherai/gpt-neox-20b --backend pt ``` </details> <details> <summary>Llama</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "meta-llama/llama-2-70b-chat-hf", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "meta-llama/llama-2-70b-chat-hf", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mistral", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start meta-llama/llama-2-70b-chat-hf -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start meta-llama/llama-2-70b-chat-hf -- backend pt `` ` < /details > < detail > < summary > mistral < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "meta-llama/llama-2-70b-chat-hf", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "meta-llama/llama-2-70b-chat-hf", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mistral", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start meta-llama/llama-2-70b-chat-hf -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start meta-llama/llama-2-70b-chat-hf -- backend pt `` ` < /details > < detail > < summary > mistral < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash openllm start meta-llama/Llama-2-70b-chat-hf --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, you should choose vLLM. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash openllm start meta-llama/Llama-2-70b-chat-hf --backend pt ``` </details> <details> <summary>Mistral</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "huggingfaceh4/zephyr-7b-alpha", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "huggingfaceh4/zephyr-7b-alpha", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mixtral", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start huggingfaceh4/zephyr-7b-alpha -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start huggingfaceh4/zephyr-7b-alpha -- backend pt `` ` < /details > < detail > < summary > mixtral < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "huggingfaceh4/zephyr-7b-alpha", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "huggingfaceh4/zephyr-7b-alpha", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mixtral", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start huggingfaceh4/zephyr-7b-alpha -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start huggingfaceh4/zephyr-7b-alpha -- backend pt `` ` < /details > < detail > < summary > mixtral < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash openllm start HuggingFaceH4/zephyr-7b-alpha --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, you should choose vLLM. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash openllm start HuggingFaceH4/zephyr-7b-alpha --backend pt ``` </details> <details> <summary>Mixtral</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "mistralai/mixtral-8x7b-instruct-v0.1", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "mistralai/mixtral-8x7b-instruct-v0.1", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mpt", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start mistralai/mixtral-8x7b-instruct-v0.1 -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start mistralai/mixtral-8x7b-instruct-v0.1 -- backend pt `` ` < /details > < detail > < summary > mpt < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "mistralai/mixtral-8x7b-instruct-v0.1", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "mistralai/mixtral-8x7b-instruct-v0.1", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "mpt", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start mistralai/mixtral-8x7b-instruct-v0.1 -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start mistralai/mixtral-8x7b-instruct-v0.1 -- backend pt `` ` < /details > < detail > < summary > mpt < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash openllm start mistralai/Mixtral-8x7B-Instruct-v0.1 --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, you should choose vLLM. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash openllm start mistralai/Mixtral-8x7B-Instruct-v0.1 --backend pt ``` </details> <details> <summary>MPT</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "mpt", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "mpt", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "mpt", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b-instruct", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "mpt", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=mpt", ")", "see", "mpt-compatible", "model", "." ], [ "quickstart > * * note : * * mpt requires install : > `` ` bash > pip install `` openllm [ mpt ] '' > `` ` run following command quickly spin mpt server : `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b-instruct `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * mpt variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=mpt ) see mpt-compatible model ." ] ]
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "mpt", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "mpt", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "mpt", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b-instruct", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "mpt", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=mpt", ")", "see", "mpt-compatible", "model", "." ], [ "quickstart > * * note : * * mpt requires install : > `` ` bash > pip install `` openllm [ mpt ] '' > `` ` run following command quickly spin mpt server : `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b-instruct `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * mpt variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=mpt ) see mpt-compatible model ." ] ]
Quickstart > **Note:** MPT requires installing OpenLLM with: > ```bash > pip install "openllm[mpt]" > ``` Run the following command to quickly spin up an MPT server: ```bash TRUST_REMOTE_CODE=True openllm start mosaicml/mpt-7b-instruct ``` In a different terminal, run the following command to interact with the server (a combined-extras install sketch follows this row): ```bash export OPENLLM_ENDPOINT=http://localhost:3000 openllm query 'What are large language models?' ``` > **Note:** Any MPT variant can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=mpt) to see more MPT-compatible models.
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
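Several model families in this README need a model-specific extra (`openllm[mpt]`, `openllm[chatglm]`, `openllm[falcon]`) in addition to the backend extra. Standard pip syntax allows combining extras in a single install; the extras names below are taken from the commands above, and the combination itself is ordinary pip behaviour rather than anything OpenLLM-specific.

```bash
# Install the MPT-specific dependencies and the vLLM backend extra in one step.
pip install "openllm[mpt,vllm]"

# Then start MPT on the vLLM backend, as in the backend rows of this README.
TRUST_REMOTE_CODE=True openllm start mosaicml/mpt-7b-instruct --backend vllm
```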
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "opt", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b -- backend pt `` ` < /details > < detail > < summary > opt < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "mosaicml/mpt-7b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "opt", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start mosaicml/mpt-7b -- backend pt `` ` < /details > < detail > < summary > opt < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash TRUST_REMOTE_CODE=True openllm start mosaicml/mpt-7b --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, you should choose vLLM. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash TRUST_REMOTE_CODE=True openllm start mosaicml/mpt-7b --backend pt ``` </details> <details> <summary>OPT</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "facebook/opt-125m", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "facebook/opt-125m", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "phi", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start facebook/opt-125m -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start facebook/opt-125m -- backend pt `` ` < /details > < detail > < summary > phi < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "facebook/opt-125m", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "facebook/opt-125m", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "phi", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start facebook/opt-125m -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start facebook/opt-125m -- backend pt `` ` < /details > < detail > < summary > phi < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash openllm start facebook/opt-125m --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, vLLM is the recommended choice. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash openllm start facebook/opt-125m --backend pt ``` </details> <details> <summary>Phi</summary>
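The fallback behaviour described above (vLLM if available, otherwise PyTorch) can be approximated with a simple import check. The sketch below only illustrates that decision; it is not OpenLLM's actual backend-detection code.

```python
# Illustrative sketch of the "vLLM if available, otherwise PyTorch" fallback
# described above; OpenLLM's real detection logic may differ.
import importlib.util

backend = "vllm" if importlib.util.find_spec("vllm") is not None else "pt"
print(f"openllm start facebook/opt-125m --backend {backend}")
```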
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-1_5", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-1_5", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "qwen", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start microsoft/phi-1_5 -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start microsoft/phi-1_5 -- backend pt `` ` < /details > < detail > < summary > qwen < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-1_5", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-1_5", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "qwen", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start microsoft/phi-1_5 -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start microsoft/phi-1_5 -- backend pt `` ` < /details > < detail > < summary > qwen < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash TRUST_REMOTE_CODE=True openllm start microsoft/phi-1_5 --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, vLLM is the recommended choice. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash TRUST_REMOTE_CODE=True openllm start microsoft/phi-1_5 --backend pt ``` </details> <details> <summary>Qwen</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "qwen", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "qwen", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "qwen", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "qwen", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=qwen", ")", "see", "qwen-compatible", "model", "." ], [ "quickstart > * * note : * * qwen requires install : > `` ` bash > pip install `` openllm [ qwen ] '' > `` ` run following command quickly spin qwen server : `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * qwen variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=qwen ) see qwen-compatible model ." ] ]
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "qwen", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "qwen", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "qwen", "server", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "qwen", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=qwen", ")", "see", "qwen-compatible", "model", "." ], [ "quickstart > * * note : * * qwen requires install : > `` ` bash > pip install `` openllm [ qwen ] '' > `` ` run following command quickly spin qwen server : `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * qwen variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=qwen ) see qwen-compatible model ." ] ]
Quickstart > **Note:** Qwen requires installing OpenLLM with: > ```bash > pip install "openllm[qwen]" > ``` Run the following command to quickly spin up a Qwen server: ```bash TRUST_REMOTE_CODE=True openllm start qwen/Qwen-7B-Chat ``` In a different terminal, run the following command to interact with the server: ```bash export OPENLLM_ENDPOINT=http://localhost:3000 openllm query 'What are large language models?' ``` > **Note:** Any Qwen variant can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=qwen) to see more Qwen-compatible models.
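Besides `openllm query`, the running server can also be reached over plain HTTP. In the sketch below, only the host and port come from the excerpt above; the `/v1/generate` path and the JSON payload shape are assumptions made for illustration, so check the server's OpenAPI page at `http://localhost:3000` for the real schema.

```python
# Hypothetical sketch: querying the Qwen server over HTTP with the standard library.
# The /v1/generate path and the JSON payload shape are ASSUMPTIONS, not taken from
# the README excerpt above; consult the server's OpenAPI docs for the actual schema.
import json
import urllib.request

payload = json.dumps({"prompt": "What are large language models?"}).encode()
req = urllib.request.Request(
    "http://localhost:3000/v1/generate",  # assumed endpoint path
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode())
```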
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "stablelm", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat -- backend pt `` ` < /details > < detail > < summary > stablelm < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "qwen/qwen-7b-chat", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "stablelm", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start qwen/qwen-7b-chat -- backend pt `` ` < /details > < detail > < summary > stablelm < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash TRUST_REMOTE_CODE=True openllm start qwen/Qwen-7B-Chat --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, vLLM is the recommended choice. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash TRUST_REMOTE_CODE=True openllm start qwen/Qwen-7B-Chat --backend pt ``` </details> <details> <summary>StableLM</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "stabilityai/stablelm-tuned-alpha-3b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "stabilityai/stablelm-tuned-alpha-3b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "starcoder", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start stabilityai/stablelm-tuned-alpha-3b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start stabilityai/stablelm-tuned-alpha-3b -- backend pt `` ` < /details > < detail > < summary > starcoder < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "stabilityai/stablelm-tuned-alpha-3b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "stabilityai/stablelm-tuned-alpha-3b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "starcoder", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start stabilityai/stablelm-tuned-alpha-3b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start stabilityai/stablelm-tuned-alpha-3b -- backend pt `` ` < /details > < detail > < summary > starcoder < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash openllm start stabilityai/stablelm-tuned-alpha-3b --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, vLLM is the recommended choice. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash openllm start stabilityai/stablelm-tuned-alpha-3b --backend pt ``` </details> <details> <summary>StarCoder</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "starcoder", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "starcoder", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "starcoder", "server", ":", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "starcoder", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=starcoder", ")", "see", "starcoder-compatible", "model", "." ], [ "quickstart > * * note : * * starcoder requires install : > `` ` bash > pip install `` openllm [ starcoder ] '' > `` ` run following command quickly spin starcoder server : `` ` bash openllm start bigcode/starcoder `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * starcoder variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=starcoder ) see starcoder-compatible model ." ] ]
[ [ "quickstart", ">", "*", "*", "note", ":", "*", "*", "starcoder", "requires", "install", ":", ">", "``", "`", "bash", ">", "pip", "install", "``", "openllm", "[", "starcoder", "]", "''", ">", "``", "`", "run", "following", "command", "quickly", "spin", "starcoder", "server", ":", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "``", "`", "different", "terminal", ",", "run", "following", "command", "interact", "server", ":", "``", "`", "bash", "export", "openllm_endpoint=http", ":", "//localhost:3000", "openllm", "query", "'what", "large", "language", "model", "?", "'", "``", "`", ">", "*", "*", "note", ":", "*", "*", "starcoder", "variant", "deployed", "openllm", ".", "visit", "[", "huggingface", "model", "hub", "]", "(", "http", ":", "//huggingface.co/models", "?", "sort=trending", "&", "search=starcoder", ")", "see", "starcoder-compatible", "model", "." ], [ "quickstart > * * note : * * starcoder requires install : > `` ` bash > pip install `` openllm [ starcoder ] '' > `` ` run following command quickly spin starcoder server : `` ` bash openllm start bigcode/starcoder `` ` different terminal , run following command interact server : `` ` bash export openllm_endpoint=http : //localhost:3000 openllm query 'what large language model ? '", "`` ` > * * note : * * starcoder variant deployed openllm .", "visit [ huggingface model hub ] ( http : //huggingface.co/models ? sort=trending & search=starcoder ) see starcoder-compatible model ." ] ]
Quickstart > **Note:** StarCoder requires installing OpenLLM with: > ```bash > pip install "openllm[starcoder]" > ``` Run the following command to quickly spin up a StarCoder server: ```bash openllm start bigcode/starcoder ``` In a different terminal, run the following command to interact with the server: ```bash export OPENLLM_ENDPOINT=http://localhost:3000 openllm query 'What are large language models?' ``` > **Note:** Any StarCoder variant can be deployed with OpenLLM. Visit the [HuggingFace Model Hub](https://huggingface.co/models?sort=trending&search=starcoder) to see more StarCoder-compatible models.
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "yi", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start bigcode/starcoder -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start bigcode/starcoder -- backend pt `` ` < /details > < detail > < summary > yi < /summary >" ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "openllm", "start", "bigcode/starcoder", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "yi", "<", "/summary", ">" ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash openllm start bigcode/starcoder -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash openllm start bigcode/starcoder -- backend pt `` ` < /details > < detail > < summary > yi < /summary >" ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash openllm start bigcode/starcoder --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, vLLM is the recommended choice. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash openllm start bigcode/starcoder --backend pt ``` </details> <details> <summary>Yi</summary>
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "01-ai/yi-6b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "01-ai/yi-6b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "!", "--", "update-readme.py", ":", "stop", "--", ">", "model", "integrated", "openllm", "welcome", "contribution", "want", "incorporate", "custom", "llm", "ecosystem", ".", "check", "[", "adding", "new", "model", "guide", "]", "(", "http", ":", "//github.com/bentoml/openllm/blob/main/adding_new_model.md", ")", "learn", "." ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start 01-ai/yi-6b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start 01-ai/yi-6b -- backend pt `` ` < /details > < ! -- update-readme.py : stop -- > model integrated openllm welcome contribution want incorporate custom llm ecosystem .", "check [ adding new model guide ] ( http : //github.com/bentoml/openllm/blob/main/adding_new_model.md ) learn ." ] ]
[ [ "supported", "backends", "openllm", "support", "vllm", "pytorch", "default", "backend", ".", "default", ",", "use", "vllm", "vllm", "available", ",", "otherwise", "fallback", "pytorch", ".", ">", "*", "*", "important", ":", "*", "*", "recommend", "user", "explicitly", "specify", "`", "--", "backend", "`", "choose", "desired", "backend", "run", "model", ".", "access", "gpu", ",", "always", "use", "`", "--", "backend", "vllm", "`", ".", "-", "vllm", "(", "recommended", ")", ":", "install", "vllm", ",", "run", "`", "pip", "install", "``", "openllm", "[", "vllm", "]", "''", "`", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "01-ai/yi-6b", "--", "backend", "vllm", "``", "`", ">", "*", "*", "important", ":", "*", "*", "using", "vllm", "requires", "gpu", "architecture", "newer", "8.0", "get", "best", "performance", "serving", ".", "recommended", "serving", "usecase", "production", ",", "choose", "vllm", "serving", ".", ">", "*", "*", "note", ":", "*", "*", "currently", ",", "adapter", "yet", "supported", "vllm", ".", "-", "pytorch", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "01-ai/yi-6b", "--", "backend", "pt", "``", "`", "<", "/details", ">", "<", "!", "--", "update-readme.py", ":", "stop", "--", ">", "model", "integrated", "openllm", "welcome", "contribution", "want", "incorporate", "custom", "llm", "ecosystem", ".", "check", "[", "adding", "new", "model", "guide", "]", "(", "http", ":", "//github.com/bentoml/openllm/blob/main/adding_new_model.md", ")", "learn", "." ], [ "supported backends openllm support vllm pytorch default backend .", "default , use vllm vllm available , otherwise fallback pytorch .", "> * * important : * * recommend user explicitly specify ` -- backend ` choose desired backend run model .", "access gpu , always use ` -- backend vllm ` .", "- vllm ( recommended ) : install vllm , run ` pip install `` openllm [ vllm ] '' ` `` ` bash trust_remote_code=true openllm start 01-ai/yi-6b -- backend vllm `` ` > * * important : * * using vllm requires gpu architecture newer 8.0 get best performance serving .", "recommended serving usecase production , choose vllm serving .", "> * * note : * * currently , adapter yet supported vllm .", "- pytorch : `` ` bash trust_remote_code=true openllm start 01-ai/yi-6b -- backend pt `` ` < /details > < ! -- update-readme.py : stop -- > model integrated openllm welcome contribution want incorporate custom llm ecosystem .", "check [ adding new model guide ] ( http : //github.com/bentoml/openllm/blob/main/adding_new_model.md ) learn ." ] ]
Supported backends OpenLLM supports vLLM and PyTorch as its default backends. By default, it uses vLLM if vLLM is available and otherwise falls back to PyTorch. > **Important:** We recommend users explicitly specify `--backend` to choose the desired backend to run the model. If you have access to a GPU, always use `--backend vllm`. - vLLM (Recommended): To install vLLM, run `pip install "openllm[vllm]"` ```bash TRUST_REMOTE_CODE=True openllm start 01-ai/Yi-6B --backend vllm ``` > **Important:** Using vLLM requires a GPU with an architecture newer than 8.0 to get the best serving performance. For all production serving use cases, vLLM is the recommended choice. > **Note:** Currently, adapters are not yet supported with vLLM. - PyTorch: ```bash TRUST_REMOTE_CODE=True openllm start 01-ai/Yi-6B --backend pt ``` </details> <!-- update-readme.py: stop --> More models will be integrated with OpenLLM, and we welcome contributions if you want to incorporate your custom LLMs into the ecosystem. Check out the [Adding a New Model Guide](https://github.com/bentoml/OpenLLM/blob/main/ADDING_NEW_MODEL.md) to learn more.
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "pytorch", "backend", "pytorch", "backend", ",", "openllm", "support", "`", "int8", "`", ",", "`", "int4", "`", ",", "`", "gptq", "`", ".", "using", "int8", "int4", "quantization", "`", "bitsandbytes", "`", ",", "use", "following", "command", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-2", "--", "quantize", "int8", "``", "`", "run", "inference", "`", "gptq", "`", ",", "simply", "pas", "`", "--", "quantize", "gptq", "`", ":", "``", "`", "bash", "openllm", "start", "thebloke/llama-2-7b-chat-gptq", "--", "quantize", "gptq", "``", "`", ">", "[", "!", "note", "]", ">", "order", "run", "gptq", ",", "make", "sure", "run", "`", "pip", "install", "``", "openllm", "[", "gptq", "]", "''", "`", ">", "first", "install", "dependency", ".", "gptq", "paper", ",", "recommended", "quantized", "weight", "serving", ".", ">", "see", "[", "autogptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "information", "gptq", "quantization", "." ], [ "pytorch backend pytorch backend , openllm support ` int8 ` , ` int4 ` , ` gptq ` .", "using int8 int4 quantization ` bitsandbytes ` , use following command : `` ` bash trust_remote_code=true openllm start microsoft/phi-2 -- quantize int8 `` ` run inference ` gptq ` , simply pas ` -- quantize gptq ` : `` ` bash openllm start thebloke/llama-2-7b-chat-gptq -- quantize gptq `` ` > [ ! note ] > order run gptq , make sure run ` pip install `` openllm [ gptq ] '' ` > first install dependency .", "gptq paper , recommended quantized weight serving .", "> see [ autogptq ] ( http : //github.com/panqiwei/autogptq ) information gptq quantization ." ] ]
[ [ "pytorch", "backend", "pytorch", "backend", ",", "openllm", "support", "`", "int8", "`", ",", "`", "int4", "`", ",", "`", "gptq", "`", ".", "using", "int8", "int4", "quantization", "`", "bitsandbytes", "`", ",", "use", "following", "command", ":", "``", "`", "bash", "trust_remote_code=true", "openllm", "start", "microsoft/phi-2", "--", "quantize", "int8", "``", "`", "run", "inference", "`", "gptq", "`", ",", "simply", "pas", "`", "--", "quantize", "gptq", "`", ":", "``", "`", "bash", "openllm", "start", "thebloke/llama-2-7b-chat-gptq", "--", "quantize", "gptq", "``", "`", ">", "[", "!", "note", "]", ">", "order", "run", "gptq", ",", "make", "sure", "run", "`", "pip", "install", "``", "openllm", "[", "gptq", "]", "''", "`", ">", "first", "install", "dependency", ".", "gptq", "paper", ",", "recommended", "quantized", "weight", "serving", ".", ">", "see", "[", "autogptq", "]", "(", "http", ":", "//github.com/panqiwei/autogptq", ")", "information", "gptq", "quantization", "." ], [ "pytorch backend pytorch backend , openllm support ` int8 ` , ` int4 ` , ` gptq ` .", "using int8 int4 quantization ` bitsandbytes ` , use following command : `` ` bash trust_remote_code=true openllm start microsoft/phi-2 -- quantize int8 `` ` run inference ` gptq ` , simply pas ` -- quantize gptq ` : `` ` bash openllm start thebloke/llama-2-7b-chat-gptq -- quantize gptq `` ` > [ ! note ] > order run gptq , make sure run ` pip install `` openllm [ gptq ] '' ` > first install dependency .", "gptq paper , recommended quantized weight serving .", "> see [ autogptq ] ( http : //github.com/panqiwei/autogptq ) information gptq quantization ." ] ]
PyTorch backend With the PyTorch backend, OpenLLM supports `int8`, `int4`, and `gptq` quantization. To use int8 or int4 quantization through `bitsandbytes`, use the following command: ```bash TRUST_REMOTE_CODE=True openllm start microsoft/phi-2 --quantize int8 ``` To run inference with `gptq`, simply pass `--quantize gptq`: ```bash openllm start TheBloke/Llama-2-7B-Chat-GPTQ --quantize gptq ``` > [!NOTE] > In order to run GPTQ, make sure you run `pip install "openllm[gptq]"` > first to install the dependency. As recommended in the GPTQ paper, the weights should be quantized before serving. > See [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) for more information on GPTQ quantization.
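For readers curious what `--quantize int8` roughly corresponds to underneath, the sketch below loads the same model in 8-bit directly with `transformers` and `bitsandbytes`. This is an assumption about the general mechanism, not OpenLLM's internal implementation, and it requires a CUDA GPU plus the `bitsandbytes` package.

```python
# Rough sketch of 8-bit loading via transformers + bitsandbytes; an assumption about
# what `--quantize int8` maps to under the hood, not OpenLLM's internal code.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
    trust_remote_code=True,
)
```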
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "🐍", "python", "sdk", "llm", "instantiated", "`", "openllm.llm", "`", ":", "``", "`", "python", "import", "openllm", "llm", "=", "openllm.llm", "(", "'microsoft/phi-2", "'", ")", "``", "`", "main", "inference", "api", "streaming", "`", "generate_iterator", "`", "method", ":", "``", "`", "python", "async", "generation", "llm.generate_iterator", "(", "'what", "meaning", "life", "?", "'", ")", ":", "print", "(", "generation.outputs", "[", "0", "]", ".text", ")", "``", "`", ">", "[", "!", "note", "]", ">", "motivation", "behind", "making", "`", "llm.generate_iterator", "`", "async", "generator", "provide", "support", "continuous", "batching", "vllm", "backend", ".", "async", "endpoint", ",", "prompt", ">", "added", "correctly", "request", "queue", "process", "vllm", "backend", ".", "also", "_one-shot_", "`", "generate", "`", "method", ":", "``", "`", "python", "await", "llm.generate", "(", "'what", "meaning", "life", "?", "'", ")", "``", "`", "method", "easy", "use", "one-shot", "generation", "use", "case", ",", "merely", "served", "example", "use", "`", "llm.generate_iterator", "`", "us", "`", "generate_iterator", "`", "hood", ".", ">", "[", "!", "important", "]", ">", "need", "call", "code", "synchronous", "context", ",", "use", "`", "asyncio.run", "`", "wrap", "async", "function", ":", ">", ">", "``", "`", "python", ">", "import", "asyncio", ">", "async", "def", "generate", "(", "prompt", ",", "*", "*", "attrs", ")", ":", "return", "await", "llm.generate", "(", "prompt", ",", "*", "*", "attrs", ")", ">", "asyncio.run", "(", "generate", "(", "``", "meaning", "life", "''", ",", "temperature=0.23", ")", ")", ">", "``", "`" ], [ "🐍 python sdk llm instantiated ` openllm.llm ` : `` ` python import openllm llm = openllm.llm ( 'microsoft/phi-2 ' ) `` ` main inference api streaming ` generate_iterator ` method : `` ` python async generation llm.generate_iterator ( 'what meaning life ?", "' ) : print ( generation.outputs [ 0 ] .text ) `` ` > [ ! note ] > motivation behind making ` llm.generate_iterator ` async generator provide support continuous batching vllm backend .", "async endpoint , prompt > added correctly request queue process vllm backend .", "also _one-shot_ ` generate ` method : `` ` python await llm.generate ( 'what meaning life ? ' )", "`` ` method easy use one-shot generation use case , merely served example use ` llm.generate_iterator ` us ` generate_iterator ` hood .", "> [ ! important ] > need call code synchronous context , use ` asyncio.run ` wrap async function : > > `` ` python > import asyncio > async def generate ( prompt , * * attrs ) : return await llm.generate ( prompt , * * attrs ) > asyncio.run ( generate ( `` meaning life '' , temperature=0.23 ) ) > `` `" ] ]
[ [ "🐍", "python", "sdk", "llm", "instantiated", "`", "openllm.llm", "`", ":", "``", "`", "python", "import", "openllm", "llm", "=", "openllm.llm", "(", "'microsoft/phi-2", "'", ")", "``", "`", "main", "inference", "api", "streaming", "`", "generate_iterator", "`", "method", ":", "``", "`", "python", "async", "generation", "llm.generate_iterator", "(", "'what", "meaning", "life", "?", "'", ")", ":", "print", "(", "generation.outputs", "[", "0", "]", ".text", ")", "``", "`", ">", "[", "!", "note", "]", ">", "motivation", "behind", "making", "`", "llm.generate_iterator", "`", "async", "generator", "provide", "support", "continuous", "batching", "vllm", "backend", ".", "async", "endpoint", ",", "prompt", ">", "added", "correctly", "request", "queue", "process", "vllm", "backend", ".", "also", "_one-shot_", "`", "generate", "`", "method", ":", "``", "`", "python", "await", "llm.generate", "(", "'what", "meaning", "life", "?", "'", ")", "``", "`", "method", "easy", "use", "one-shot", "generation", "use", "case", ",", "merely", "served", "example", "use", "`", "llm.generate_iterator", "`", "us", "`", "generate_iterator", "`", "hood", ".", ">", "[", "!", "important", "]", ">", "need", "call", "code", "synchronous", "context", ",", "use", "`", "asyncio.run", "`", "wrap", "async", "function", ":", ">", ">", "``", "`", "python", ">", "import", "asyncio", ">", "async", "def", "generate", "(", "prompt", ",", "*", "*", "attrs", ")", ":", "return", "await", "llm.generate", "(", "prompt", ",", "*", "*", "attrs", ")", ">", "asyncio.run", "(", "generate", "(", "``", "meaning", "life", "''", ",", "temperature=0.23", ")", ")", ">", "``", "`" ], [ "🐍 python sdk llm instantiated ` openllm.llm ` : `` ` python import openllm llm = openllm.llm ( 'microsoft/phi-2 ' ) `` ` main inference api streaming ` generate_iterator ` method : `` ` python async generation llm.generate_iterator ( 'what meaning life ?", "' ) : print ( generation.outputs [ 0 ] .text ) `` ` > [ ! note ] > motivation behind making ` llm.generate_iterator ` async generator provide support continuous batching vllm backend .", "async endpoint , prompt > added correctly request queue process vllm backend .", "also _one-shot_ ` generate ` method : `` ` python await llm.generate ( 'what meaning life ? ' )", "`` ` method easy use one-shot generation use case , merely served example use ` llm.generate_iterator ` us ` generate_iterator ` hood .", "> [ ! important ] > need call code synchronous context , use ` asyncio.run ` wrap async function : > > `` ` python > import asyncio > async def generate ( prompt , * * attrs ) : return await llm.generate ( prompt , * * attrs ) > asyncio.run ( generate ( `` meaning life '' , temperature=0.23 ) ) > `` `" ] ]
🐍 Python SDK Each LLM can be instantiated with `openllm.LLM`: ```python import openllm llm = openllm.LLM('microsoft/phi-2') ``` The main inference API is the streaming `generate_iterator` method: ```python async for generation in llm.generate_iterator('What is the meaning of life?'): print(generation.outputs[0].text) ``` > [!NOTE] > The motivation behind making `llm.generate_iterator` an async generator is to support continuous batching with the vLLM backend. With async endpoints, each prompt > is added correctly to the request queue and processed by the vLLM backend. There is also a _one-shot_ `generate` method: ```python await llm.generate('What is the meaning of life?') ``` This method is easy to use for one-shot generation use cases, but it merely serves as an example of how to use `llm.generate_iterator`, since it uses `generate_iterator` under the hood. > [!IMPORTANT] > If you need to call your code in a synchronous context, you can use `asyncio.run` to wrap an async function: > > ```python > import asyncio > async def generate(prompt, **attrs): return await llm.generate(prompt, **attrs) > asyncio.run(generate("The meaning of life is", temperature=0.23)) > ```
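Putting the pieces from the excerpt above into one runnable script, the sketch below instantiates the LLM and drives the streaming generator from a synchronous entry point with `asyncio.run`. It only combines the calls already shown in the excerpt; no additional OpenLLM APIs are assumed.

```python
# Self-contained sketch combining the calls shown above: openllm.LLM,
# the async generate_iterator stream, and asyncio.run as the sync entry point.
import asyncio
import openllm

llm = openllm.LLM('microsoft/phi-2')

async def main() -> None:
    # Stream tokens as they are generated and print them on one line.
    async for generation in llm.generate_iterator('What is the meaning of life?'):
        print(generation.outputs[0].text, end='', flush=True)
    print()

asyncio.run(main())
```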
https://github.com/bentoml/OpenLLM
-1
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/bentoml/OpenLLM/main/README.md
[ [ "transformer", "agent", "openllm", "seamlessly", "integrates", "[", "transformer", "agent", "]", "(", "http", ":", "//huggingface.co/docs/transformers/transformers_agents", ")", ".", ">", "[", "!", "warning", "]", ">", "transformer", "agent", "still", "experimental", "stage", ".", ">", "recommended", "install", "openllm", "`", "pip", "install", "-r", "nightly-requirements.txt", "`", ">", "get", "latest", "api", "update", "huggingface", "agent", ".", "``", "`", "python", "import", "transformer", "agent", "=", "transformers.hfagent", "(", "'http", ":", "//localhost:3000/hf/agent", "'", ")" ], [ "transformer agent openllm seamlessly integrates [ transformer agent ] ( http : //huggingface.co/docs/transformers/transformers_agents ) .", "> [ ! warning ] > transformer agent still experimental stage .", "> recommended install openllm ` pip install -r nightly-requirements.txt ` > get latest api update huggingface agent .", "`` ` python import transformer agent = transformers.hfagent ( 'http : //localhost:3000/hf/agent ' )" ] ]
[ [ "transformer", "agent", "openllm", "seamlessly", "integrates", "[", "transformer", "agent", "]", "(", "http", ":", "//huggingface.co/docs/transformers/transformers_agents", ")", ".", ">", "[", "!", "warning", "]", ">", "transformer", "agent", "still", "experimental", "stage", ".", ">", "recommended", "install", "openllm", "`", "pip", "install", "-r", "nightly-requirements.txt", "`", ">", "get", "latest", "api", "update", "huggingface", "agent", ".", "``", "`", "python", "import", "transformer", "agent", "=", "transformers.hfagent", "(", "'http", ":", "//localhost:3000/hf/agent", "'", ")" ], [ "transformer agent openllm seamlessly integrates [ transformer agent ] ( http : //huggingface.co/docs/transformers/transformers_agents ) .", "> [ ! warning ] > transformer agent still experimental stage .", "> recommended install openllm ` pip install -r nightly-requirements.txt ` > get latest api update huggingface agent .", "`` ` python import transformer agent = transformers.hfagent ( 'http : //localhost:3000/hf/agent ' )" ] ]
Transformers Agents OpenLLM seamlessly integrates with [Transformers Agents](https://huggingface.co/docs/transformers/transformers_agents). > [!WARNING] > The Transformers Agents API is still at an experimental stage. It is > recommended to install OpenLLM with `pip install -r nightly-requirements.txt` > to get the latest API updates for the HuggingFace agent. ```python import transformers agent = transformers.HfAgent('http://localhost:3000/hf/agent')
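The excerpt above stops right after constructing the agent. A natural follow-up, sketched below, is to call the agent's `run` method from the experimental transformers agents API; the prompt and the `text=` keyword argument are purely illustrative, not taken from the README.

```python
# Hedged continuation of the snippet above: transformers' experimental HfAgent
# exposes a `run` method; the prompt and `text=` argument here are illustrative only.
import transformers

agent = transformers.HfAgent('http://localhost:3000/hf/agent')
result = agent.run("Is the following text positive or negative?", text="OpenLLM makes serving LLMs easy.")
print(result)
```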
https://github.com/bentoml/OpenLLM
0
[ "ai", "bentoml", "falcon", "fine-tuning", "llama", "llama2", "llm", "llm-inference", "llm-ops", "llm-serving", "llmops", "mistral", "ml", "mlops", "model-inference", "mpt", "open-source-llm", "openllm", "stablelm", "vicuna" ]
https://raw.githubusercontent.com/cpacker/MemGPT/main/README.md
[ [ "quick", "setup", "join", "<", "href=", "''", "http", ":", "//discord.gg/9geqrxmvye", "''", ">", "discord", "<", "/a", ">", "<", "/strong", ">", "message", "memgpt", "bot", "(", "`", "#", "memgpt", "`", "channel", ")", ".", "run", "following", "command", "(", "messaged", "``", "memgpt", "bot", "''", ")", ":", "*", "`", "/profile", "`", "(", "create", "profile", ")", "*", "`", "/key", "`", "(", "enter", "openai", "key", ")", "*", "`", "/create", "`", "(", "create", "memgpt", "chatbot", ")", "make", "sure", "privacy", "setting", "server", "open", "memgpt", "bot", "dm", ":", "\\", "memgpt", "→", "privacy", "setting", "→", "direct", "message", "set", "<", "div", "align=", "''", "center", "''", ">", "<", "img", "src=", "''", "http", ":", "//research.memgpt.ai/assets/img/discord/dm_settings.png", "''", "alt=", "''", "set", "dm", "setting", "memgpt", "server", "open", "memgpt", "memgpt", "bot", "message", "''", "width=", "''", "400", "''", ">", "<", "/div", ">", "see", "full", "list", "available", "command", "enter", "`", "/", "`", "message", "box", ".", "<", "div", "align=", "''", "center", "''", ">", "<", "img", "src=", "''", "http", ":", "//research.memgpt.ai/assets/img/discord/slash_commands.png", "''", "alt=", "''", "memgpt", "bot", "slash", "command", "''", "width=", "''", "400", "''", ">", "<", "/div", ">" ], [ "quick setup join < href= '' http : //discord.gg/9geqrxmvye '' > discord < /a > < /strong > message memgpt bot ( ` # memgpt ` channel ) .", "run following command ( messaged `` memgpt bot '' ) : * ` /profile ` ( create profile ) * ` /key ` ( enter openai key ) * ` /create ` ( create memgpt chatbot ) make sure privacy setting server open memgpt bot dm : \\ memgpt → privacy setting → direct message set < div align= '' center '' > < img src= '' http : //research.memgpt.ai/assets/img/discord/dm_settings.png '' alt= '' set dm setting memgpt server open memgpt memgpt bot message '' width= '' 400 '' > < /div > see full list available command enter ` / ` message box .", "< div align= '' center '' > < img src= '' http : //research.memgpt.ai/assets/img/discord/slash_commands.png '' alt= '' memgpt bot slash command '' width= '' 400 '' > < /div >" ] ]
[ [ "quick", "setup", "join", "<", "href=", "''", "http", ":", "//discord.gg/9geqrxmvye", "''", ">", "discord", "<", "/a", ">", "<", "/strong", ">", "message", "memgpt", "bot", "(", "`", "#", "memgpt", "`", "channel", ")", ".", "run", "following", "command", "(", "messaged", "``", "memgpt", "bot", "''", ")", ":", "*", "`", "/profile", "`", "(", "create", "profile", ")", "*", "`", "/key", "`", "(", "enter", "openai", "key", ")", "*", "`", "/create", "`", "(", "create", "memgpt", "chatbot", ")", "make", "sure", "privacy", "setting", "server", "open", "memgpt", "bot", "dm", ":", "\\", "memgpt", "→", "privacy", "setting", "→", "direct", "message", "set", "<", "div", "align=", "''", "center", "''", ">", "<", "img", "src=", "''", "http", ":", "//research.memgpt.ai/assets/img/discord/dm_settings.png", "''", "alt=", "''", "set", "dm", "setting", "memgpt", "server", "open", "memgpt", "memgpt", "bot", "message", "''", "width=", "''", "400", "''", ">", "<", "/div", ">", "see", "full", "list", "available", "command", "enter", "`", "/", "`", "message", "box", ".", "<", "div", "align=", "''", "center", "''", ">", "<", "img", "src=", "''", "http", ":", "//research.memgpt.ai/assets/img/discord/slash_commands.png", "''", "alt=", "''", "memgpt", "bot", "slash", "command", "''", "width=", "''", "400", "''", ">", "<", "/div", ">" ], [ "quick setup join < href= '' http : //discord.gg/9geqrxmvye '' > discord < /a > < /strong > message memgpt bot ( ` # memgpt ` channel ) .", "run following command ( messaged `` memgpt bot '' ) : * ` /profile ` ( create profile ) * ` /key ` ( enter openai key ) * ` /create ` ( create memgpt chatbot ) make sure privacy setting server open memgpt bot dm : \\ memgpt → privacy setting → direct message set < div align= '' center '' > < img src= '' http : //research.memgpt.ai/assets/img/discord/dm_settings.png '' alt= '' set dm setting memgpt server open memgpt memgpt bot message '' width= '' 400 '' > < /div > see full list available command enter ` / ` message box .", "< div align= '' center '' > < img src= '' http : //research.memgpt.ai/assets/img/discord/slash_commands.png '' alt= '' memgpt bot slash command '' width= '' 400 '' > < /div >" ] ]
Quick setup Join the <a href="https://discord.gg/9GEQrxmVyE">Discord</a> and message the MemGPT bot (in the `#memgpt` channel). Then run the following commands (messaged to "MemGPT Bot"): * `/profile` (to create your profile) * `/key` (to enter your OpenAI key) * `/create` (to create a MemGPT chatbot) Make sure your privacy settings on this server are open so that MemGPT Bot can DM you: MemGPT → Privacy Settings → Direct Messages set to ON <div align="center"> <img src="https://research.memgpt.ai/assets/img/discord/dm_settings.png" alt="set DMs settings on MemGPT server to be open in MemGPT so that MemGPT Bot can message you" width="400"> </div> You can see the full list of available commands when you enter `/` into the message box. <div align="center"> <img src="https://research.memgpt.ai/assets/img/discord/slash_commands.png" alt="MemGPT Bot slash commands" width="400"> </div>
https://github.com/cpacker/MemGPT
-1
[ "chat", "chatbot", "gpt", "gpt-4", "llm", "llm-agent" ]
https://raw.githubusercontent.com/cpacker/MemGPT/main/README.md
[ [ "running", "memgpt", "locally", "install", "memgpt", ":", "``", "`", "sh", "pip", "install", "-u", "pymemgpt", "``", "`", ",", "run", "memgpt", "start", "chatting", "memgpt", "agent", ":", "``", "`", "sh", "memgpt", "run", "``", "`", "'re", "running", "memgpt", "first", "time", ",", "'ll", "see", "two", "quickstart", "option", ":", "1", ".", "*", "*", "openai", "*", "*", ":", "select", "'d", "like", "run", "memgpt", "openai", "model", "like", "gpt-4", "(", "requires", "openai", "api", "key", ")", "2", ".", "*", "*", "memgpt", "free", "endpoint", "*", "*", ":", "select", "'d", "like", "try", "memgpt", "top", "open", "llm", "free", "(", "currently", "variant", "mixtral", "8x7b", "!", ")", "neither", "option", "require", "llm", "running", "machine", ".", "'d", "like", "run", "memgpt", "custom", "llm", "setup", "(", "openai", "azure", ")", ",", "select", "*", "*", "*", "*", "proceed", "advanced", "setup", "." ], [ "running memgpt locally install memgpt : `` ` sh pip install -u pymemgpt `` ` , run memgpt start chatting memgpt agent : `` ` sh memgpt run `` ` 're running memgpt first time , 'll see two quickstart option : 1 .", "* * openai * * : select 'd like run memgpt openai model like gpt-4 ( requires openai api key ) 2 .", "* * memgpt free endpoint * * : select 'd like try memgpt top open llm free ( currently variant mixtral 8x7b ! )", "neither option require llm running machine .", "'d like run memgpt custom llm setup ( openai azure ) , select * * * * proceed advanced setup ." ] ]
[ [ "running", "memgpt", "locally", "install", "memgpt", ":", "``", "`", "sh", "pip", "install", "-u", "pymemgpt", "``", "`", ",", "run", "memgpt", "start", "chatting", "memgpt", "agent", ":", "``", "`", "sh", "memgpt", "run", "``", "`", "'re", "running", "memgpt", "first", "time", ",", "'ll", "see", "two", "quickstart", "option", ":", "1", ".", "*", "*", "openai", "*", "*", ":", "select", "'d", "like", "run", "memgpt", "openai", "model", "like", "gpt-4", "(", "requires", "openai", "api", "key", ")", "2", ".", "*", "*", "memgpt", "free", "endpoint", "*", "*", ":", "select", "'d", "like", "try", "memgpt", "top", "open", "llm", "free", "(", "currently", "variant", "mixtral", "8x7b", "!", ")", "neither", "option", "require", "llm", "running", "machine", ".", "'d", "like", "run", "memgpt", "custom", "llm", "setup", "(", "openai", "azure", ")", ",", "select", "*", "*", "*", "*", "proceed", "advanced", "setup", "." ], [ "running memgpt locally install memgpt : `` ` sh pip install -u pymemgpt `` ` , run memgpt start chatting memgpt agent : `` ` sh memgpt run `` ` 're running memgpt first time , 'll see two quickstart option : 1 .", "* * openai * * : select 'd like run memgpt openai model like gpt-4 ( requires openai api key ) 2 .", "* * memgpt free endpoint * * : select 'd like try memgpt top open llm free ( currently variant mixtral 8x7b ! )", "neither option require llm running machine .", "'d like run memgpt custom llm setup ( openai azure ) , select * * * * proceed advanced setup ." ] ]
Running MemGPT locally Install MemGPT: ```sh pip install -U pymemgpt ``` Now, you can run MemGPT and start chatting with a MemGPT agent with: ```sh memgpt run ``` If you're running MemGPT for the first time, you'll see two quickstart options: 1. **OpenAI**: select this if you'd like to run MemGPT with OpenAI models like GPT-4 (requires an OpenAI API key) 2. **MemGPT Free Endpoint**: select this if you'd like to try MemGPT on a top open LLM for free (currently variants of Mixtral 8x7b!) Neither of these options requires you to have an LLM running on your own machine. If you'd like to run MemGPT with your custom LLM setup (or on OpenAI Azure), select **Other** to proceed to the advanced setup.
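If you choose the OpenAI option, MemGPT asks for your API key interactively. A minimal sketch of supplying it via the environment instead (this assumes MemGPT reads the standard `OPENAI_API_KEY` variable; the key value is a placeholder):

```python
# Launch `memgpt run` with the OpenAI key provided through the environment
# rather than typing it at the interactive prompt.
import os
import subprocess

os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder key (assumption: MemGPT honors this variable)
subprocess.run(["memgpt", "run"], check=True)
```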
https://github.com/cpacker/MemGPT
0
[ "chat", "chatbot", "gpt", "gpt-4", "llm", "llm-agent" ]
https://raw.githubusercontent.com/cpacker/MemGPT/main/README.md
[ [ "advanced", "setup", "reconfigure", "memgpt", "'s", "default", "setting", "running", ":", "``", "`", "sh", "memgpt", "configure", "``", "`" ], [ "advanced setup reconfigure memgpt 's default setting running : `` ` sh memgpt configure `` `" ] ]
[ [ "advanced", "setup", "reconfigure", "memgpt", "'s", "default", "setting", "running", ":", "``", "`", "sh", "memgpt", "configure", "``", "`" ], [ "advanced setup reconfigure memgpt 's default setting running : `` ` sh memgpt configure `` `" ] ]
Advanced setup You can reconfigure MemGPT's default settings by running: ```sh memgpt configure ```
https://github.com/cpacker/MemGPT
-1
[ "chat", "chatbot", "gpt", "gpt-4", "llm", "llm-agent" ]
https://raw.githubusercontent.com/cpacker/MemGPT/main/README.md
[ [ "installing", "source", "install", "memgpt", "source", ",", "start", "cloning", "repo", ":", "``", "`", "sh", "git", "clone", "git", "@", "github.com", ":", "cpacker/memgpt.git", "``", "`", "navigate", "main", "`", "memgpt", "`", "directory", ",", ":", "``", "`", "sh", "pip", "install", "-e", ".", "``", "`", ",", "able", "run", "`", "memgpt", "`", "command-line", "using", "downloaded", "source", "code", ".", "dependency", "issue", "using", "`", "pip", "install", "-e", ".", "`", ",", "recommend", "install", "package", "using", "poetry", "(", "see", ")", ".", "installing", "memgpt", "source", "using", "poetry", "ensure", "using", "exact", "package", "version", "tested", "production", "build", ".", "<", "detail", ">", "<", "summary", ">", "<", "strong", ">", "installing", "source", "(", "using", "poetry", ")", "<", "/strong", ">", "<", "/summary", ">", "first", ",", "install", "poetry", "using", "[", "official", "instruction", "]", "(", "http", ":", "//python-poetry.org/docs/", "#", "installing-with-the-official-installer", ")", ".", ",", "install", "memgpt", "source", ":", "``", "`", "sh", "git", "clone", "git", "@", "github.com", ":", "cpacker/memgpt.git", "poetry", "shell", "poetry", "install", "``", "`", "<", "/details", ">" ], [ "installing source install memgpt source , start cloning repo : `` ` sh git clone git @ github.com : cpacker/memgpt.git `` ` navigate main ` memgpt ` directory , : `` ` sh pip install -e .", "`` ` , able run ` memgpt ` command-line using downloaded source code .", "dependency issue using ` pip install -e . ` , recommend install package using poetry ( see ) .", "installing memgpt source using poetry ensure using exact package version tested production build .", "< detail > < summary > < strong > installing source ( using poetry ) < /strong > < /summary > first , install poetry using [ official instruction ] ( http : //python-poetry.org/docs/ # installing-with-the-official-installer ) .", ", install memgpt source : `` ` sh git clone git @ github.com : cpacker/memgpt.git poetry shell poetry install `` ` < /details >" ] ]
[ [ "installing", "source", "install", "memgpt", "source", ",", "start", "cloning", "repo", ":", "``", "`", "sh", "git", "clone", "git", "@", "github.com", ":", "cpacker/memgpt.git", "``", "`", "navigate", "main", "`", "memgpt", "`", "directory", ",", ":", "``", "`", "sh", "pip", "install", "-e", ".", "``", "`", ",", "able", "run", "`", "memgpt", "`", "command-line", "using", "downloaded", "source", "code", ".", "dependency", "issue", "using", "`", "pip", "install", "-e", ".", "`", ",", "recommend", "install", "package", "using", "poetry", "(", "see", ")", ".", "installing", "memgpt", "source", "using", "poetry", "ensure", "using", "exact", "package", "version", "tested", "production", "build", ".", "<", "detail", ">", "<", "summary", ">", "<", "strong", ">", "installing", "source", "(", "using", "poetry", ")", "<", "/strong", ">", "<", "/summary", ">", "first", ",", "install", "poetry", "using", "[", "official", "instruction", "]", "(", "http", ":", "//python-poetry.org/docs/", "#", "installing-with-the-official-installer", ")", ".", ",", "install", "memgpt", "source", ":", "``", "`", "sh", "git", "clone", "git", "@", "github.com", ":", "cpacker/memgpt.git", "poetry", "shell", "poetry", "install", "``", "`", "<", "/details", ">" ], [ "installing source install memgpt source , start cloning repo : `` ` sh git clone git @ github.com : cpacker/memgpt.git `` ` navigate main ` memgpt ` directory , : `` ` sh pip install -e .", "`` ` , able run ` memgpt ` command-line using downloaded source code .", "dependency issue using ` pip install -e . ` , recommend install package using poetry ( see ) .", "installing memgpt source using poetry ensure using exact package version tested production build .", "< detail > < summary > < strong > installing source ( using poetry ) < /strong > < /summary > first , install poetry using [ official instruction ] ( http : //python-poetry.org/docs/ # installing-with-the-official-installer ) .", ", install memgpt source : `` ` sh git clone git @ github.com : cpacker/memgpt.git poetry shell poetry install `` ` < /details >" ] ]
Installing from source To install MemGPT from source, start by cloning the repo: ```sh git clone git@github.com:cpacker/MemGPT.git ``` Then navigate to the main `MemGPT` directory, and do: ```sh pip install -e . ``` Now, you should be able to run `memgpt` from the command-line using the downloaded source code. If you are having dependency issues using `pip install -e .`, we recommend you install the package using Poetry (see below). Installing MemGPT from source using Poetry will ensure that you are using exact package versions that have been tested for the production build. <details> <summary> <strong>Installing from source (using Poetry)</strong> </summary> First, install Poetry using [the official instructions here](https://python-poetry.org/docs/#installing-with-the-official-installer). Then, you can install MemGPT from source with: ```sh git clone git@github.com:cpacker/MemGPT.git poetry shell poetry install ``` </details>
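After an editable install, a quick way to confirm that Python is picking up the source checkout (a minimal check; `pymemgpt` is the distribution name used in the install commands above, and `memgpt` is assumed to be the import name exposed by the repo):

```python
# Sanity-check the source install: import the package and report the installed version.
from importlib.metadata import version

import memgpt  # assumed import name for the pymemgpt distribution

print("pymemgpt version:", version("pymemgpt"))
print("loaded from:", memgpt.__file__)  # should point into your cloned MemGPT directory
```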
https://github.com/cpacker/MemGPT
0
[ "chat", "chatbot", "gpt", "gpt-4", "llm", "llm-agent" ]
https://raw.githubusercontent.com/embedchain/embedchain/main/README.md
[ [ "🔧", "quick", "install" ], [ "🔧 quick install" ] ]
[ [ "🔧", "quick", "install" ], [ "🔧 quick install" ] ]
🔧 Quick install
https://github.com/embedchain/embedchain
-1
[ "ai", "application", "chatbots", "chatgpt", "embeddings", "llm", "python", "rag", "vector-database" ]
https://raw.githubusercontent.com/embedchain/embedchain/main/README.md
[ [ "python", "api", "``", "`", "bash", "pip", "install", "embedchain", "``", "`" ], [ "python api `` ` bash pip install embedchain `` `" ] ]
[ [ "python", "api", "``", "`", "bash", "pip", "install", "embedchain", "``", "`" ], [ "python api `` ` bash pip install embedchain `` `" ] ]
Python API ```bash pip install embedchain ```
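Once installed, the Python API can be exercised in a few lines. The following is a hedged sketch of the documented quickstart pattern; the source URL and question are illustrative, and the default configuration is assumed to use OpenAI (so an `OPENAI_API_KEY` must be available in the environment):

```python
import os
from embedchain import App

os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder; the default app uses OpenAI models

app = App()  # default app with a local vector store
app.add("https://en.wikipedia.org/wiki/Elon_Musk")  # example data source (any supported data type)
print(app.query("What companies does Elon Musk run?"))
```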
https://github.com/embedchain/embedchain
0
[ "ai", "application", "chatbots", "chatgpt", "embeddings", "llm", "python", "rag", "vector-database" ]
https://raw.githubusercontent.com/embedchain/embedchain/main/README.md
[ [ "📖", "documentation", "comprehensive", "guide", "api", "documentation", "available", "help", "get", "embedchain", ":", "-", "[", "introduction", "]", "(", "http", ":", "//docs.embedchain.ai/get-started/introduction", "#", "what-is-embedchain", ")", "-", "[", "getting", "started", "]", "(", "http", ":", "//docs.embedchain.ai/get-started/quickstart", ")", "-", "[", "example", "]", "(", "http", ":", "//docs.embedchain.ai/examples", ")", "-", "[", "supported", "data", "type", "]", "(", "http", ":", "//docs.embedchain.ai/components/data-sources/overview", ")" ], [ "📖 documentation comprehensive guide api documentation available help get embedchain : - [ introduction ] ( http : //docs.embedchain.ai/get-started/introduction # what-is-embedchain ) - [ getting started ] ( http : //docs.embedchain.ai/get-started/quickstart ) - [ example ] ( http : //docs.embedchain.ai/examples ) - [ supported data type ] ( http : //docs.embedchain.ai/components/data-sources/overview )" ] ]
[ [ "📖", "documentation", "comprehensive", "guide", "api", "documentation", "available", "help", "get", "embedchain", ":", "-", "[", "introduction", "]", "(", "http", ":", "//docs.embedchain.ai/get-started/introduction", "#", "what-is-embedchain", ")", "-", "[", "getting", "started", "]", "(", "http", ":", "//docs.embedchain.ai/get-started/quickstart", ")", "-", "[", "example", "]", "(", "http", ":", "//docs.embedchain.ai/examples", ")", "-", "[", "supported", "data", "type", "]", "(", "http", ":", "//docs.embedchain.ai/components/data-sources/overview", ")" ], [ "📖 documentation comprehensive guide api documentation available help get embedchain : - [ introduction ] ( http : //docs.embedchain.ai/get-started/introduction # what-is-embedchain ) - [ getting started ] ( http : //docs.embedchain.ai/get-started/quickstart ) - [ example ] ( http : //docs.embedchain.ai/examples ) - [ supported data type ] ( http : //docs.embedchain.ai/components/data-sources/overview )" ] ]
📖 Documentation Comprehensive guides and API documentation are available to help you get the most out of Embedchain: - [Introduction](https://docs.embedchain.ai/get-started/introduction#what-is-embedchain) - [Getting Started](https://docs.embedchain.ai/get-started/quickstart) - [Examples](https://docs.embedchain.ai/examples) - [Supported data types](https://docs.embedchain.ai/components/data-sources/overview)
https://github.com/embedchain/embedchain
-1
[ "ai", "application", "chatbots", "chatgpt", "embeddings", "llm", "python", "rag", "vector-database" ]
https://raw.githubusercontent.com/activeloopai/deeplake/main/README.md
[ [ "🚀", "install", "deep", "lake", "deep", "lake", "installed", "using", "pip", ":", "``", "`", "sh", "pip3", "install", "deeplake", "``", "`", "*", "*", "default", ",", "deep", "lake", "install", "dependency", "audio", ",", "video", ",", "google-cloud", ",", "feature", ".", "detail", "installation", "option", "[", "available", "]", "(", "http", ":", "//docs.deeplake.ai/en/latest/installation.html", ")", ".", "*", "*" ], [ "🚀 install deep lake deep lake installed using pip : `` ` sh pip3 install deeplake `` ` * * default , deep lake install dependency audio , video , google-cloud , feature .", "detail installation option [ available ] ( http : //docs.deeplake.ai/en/latest/installation.html ) .", "* *" ] ]
[ [ "🚀", "install", "deep", "lake", "deep", "lake", "installed", "using", "pip", ":", "``", "`", "sh", "pip3", "install", "deeplake", "``", "`", "*", "*", "default", ",", "deep", "lake", "install", "dependency", "audio", ",", "video", ",", "google-cloud", ",", "feature", ".", "detail", "installation", "option", "[", "available", "]", "(", "http", ":", "//docs.deeplake.ai/en/latest/installation.html", ")", ".", "*", "*" ], [ "🚀 install deep lake deep lake installed using pip : `` ` sh pip3 install deeplake `` ` * * default , deep lake install dependency audio , video , google-cloud , feature .", "detail installation option [ available ] ( http : //docs.deeplake.ai/en/latest/installation.html ) .", "* *" ] ]
🚀 How to install Deep Lake Deep Lake can be installed using pip: ```sh pip3 install deeplake ``` **By default, Deep Lake does not install dependencies for audio, video, google-cloud, and other features. Details on all installation options are [available here](https://docs.deeplake.ai/en/latest/Installation.html).**
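To give a feel for the API after installation, here is a minimal sketch using the Deep Lake v3 Python interface (the dataset path and tensor name are made up for illustration; nothing is written to disk or the cloud):

```python
import numpy as np
import deeplake

# Create an in-memory dataset, add one tensor, and append a sample to it.
ds = deeplake.empty("mem://quickstart")
ds.create_tensor("embeddings")
ds.embeddings.append(np.random.rand(8).astype("float32"))

print(len(ds), list(ds.tensors))  # 1 sample, one tensor named "embeddings"
```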
https://github.com/activeloopai/deeplake
-1
[ "ai", "computer-vision", "cv", "data-science", "data-version-control", "datalake", "datasets", "deep-learning", "image-processing", "langchain", "large-language-models", "llm", "machine-learning", "ml", "mlops", "python", "pytorch", "tensorflow", "vector-database", "vector-search" ]
https://raw.githubusercontent.com/activeloopai/deeplake/main/README.md
[ [ "-", "[", "vector", "store", "getting", "started", "guide", "]", "(", "http", ":", "//docs.activeloop.ai/getting-started/vector-store", ")" ], [ "- [ vector store getting started guide ] ( http : //docs.activeloop.ai/getting-started/vector-store )" ] ]
[ [ "-", "[", "vector", "store", "getting", "started", "guide", "]", "(", "http", ":", "//docs.activeloop.ai/getting-started/vector-store", ")" ], [ "- [ vector store getting started guide ] ( http : //docs.activeloop.ai/getting-started/vector-store )" ] ]
- [Vector Store Getting Started Guide](https://docs.activeloop.ai/getting-started/vector-store)
https://github.com/activeloopai/deeplake
-1
[ "ai", "computer-vision", "cv", "data-science", "data-version-control", "datalake", "datasets", "deep-learning", "image-processing", "langchain", "large-language-models", "llm", "machine-learning", "ml", "mlops", "python", "pytorch", "tensorflow", "vector-database", "vector-search" ]
https://raw.githubusercontent.com/activeloopai/deeplake/main/README.md
[ [ "-", "[", "deep", "learning", "getting", "started", "guide", "]", "(", "http", ":", "//docs.activeloop.ai/getting-started/deep-learning", ")" ], [ "- [ deep learning getting started guide ] ( http : //docs.activeloop.ai/getting-started/deep-learning )" ] ]
[ [ "-", "[", "deep", "learning", "getting", "started", "guide", "]", "(", "http", ":", "//docs.activeloop.ai/getting-started/deep-learning", ")" ], [ "- [ deep learning getting started guide ] ( http : //docs.activeloop.ai/getting-started/deep-learning )" ] ]
- [Deep Learning Getting Started Guide](https://docs.activeloop.ai/getting-started/deep-learning)
https://github.com/activeloopai/deeplake
-1
[ "ai", "computer-vision", "cv", "data-science", "data-version-control", "datalake", "datasets", "deep-learning", "image-processing", "langchain", "large-language-models", "llm", "machine-learning", "ml", "mlops", "python", "pytorch", "tensorflow", "vector-database", "vector-search" ]
https://raw.githubusercontent.com/activeloopai/deeplake/main/README.md
[ [ "📚", "documentation", "getting", "started", "guide", ",", "example", ",", "tutorial", ",", "api", "reference", ",", "useful", "information", "found", "[", "documentation", "page", "]", "(", "http", ":", "//docs.activeloop.ai/", "?", "utm_source=github", "&", "utm_medium=repo", "&", "utm_campaign=readme", ")", "." ], [ "📚 documentation getting started guide , example , tutorial , api reference , useful information found [ documentation page ] ( http : //docs.activeloop.ai/ ? utm_source=github & utm_medium=repo & utm_campaign=readme ) ." ] ]
[ [ "📚", "documentation", "getting", "started", "guide", ",", "example", ",", "tutorial", ",", "api", "reference", ",", "useful", "information", "found", "[", "documentation", "page", "]", "(", "http", ":", "//docs.activeloop.ai/", "?", "utm_source=github", "&", "utm_medium=repo", "&", "utm_campaign=readme", ")", "." ], [ "📚 documentation getting started guide , example , tutorial , api reference , useful information found [ documentation page ] ( http : //docs.activeloop.ai/ ? utm_source=github & utm_medium=repo & utm_campaign=readme ) ." ] ]
📚 Documentation Getting started guides, examples, tutorials, API reference, and other useful information can be found on our [documentation page](http://docs.activeloop.ai/?utm_source=github&utm_medium=repo&utm_campaign=readme).
https://github.com/activeloopai/deeplake
-1
[ "ai", "computer-vision", "cv", "data-science", "data-version-control", "datalake", "datasets", "deep-learning", "image-processing", "langchain", "large-language-models", "llm", "machine-learning", "ml", "mlops", "python", "pytorch", "tensorflow", "vector-database", "vector-search" ]
https://raw.githubusercontent.com/activeloopai/deeplake/main/README.md
[ [ "disclaimer", "<", "detail", ">", "<", "summary", ">", "<", "b", ">", "dataset", "license", "<", "/b", ">", "<", "/summary", ">", "deep", "lake", "user", "may", "access", "variety", "publicly", "available", "datasets", ".", "host", "distribute", "datasets", ",", "vouch", "quality", "fairness", ",", "claim", "license", "use", "datasets", ".", "responsibility", "determine", "whether", "permission", "use", "datasets", "license", ".", "'re", "dataset", "owner", "want", "dataset", "included", "library", ",", "please", "get", "touch", "[", "github", "issue", "]", "(", "http", ":", "//github.com/activeloopai/deeplake/issues/new", ")", ".", "thank", "contribution", "ml", "community", "!", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "<", "b", ">", "usage", "tracking", "<", "/b", ">", "<", "/summary", ">", "default", ",", "collect", "usage", "data", "using", "bugout", "(", "'s", "[", "code", "]", "(", "http", ":", "//github.com/activeloopai/deeplake/blob/853456a314b4fb5623c936c825601097b0685119/deeplake/__init__.py", "#", "l24", ")", ")", ".", "collect", "user", "data", "anonymized", "ip", "address", "data", ",", "log", "deep", "lake", "library", "'s", "action", ".", "help", "team", "understand", "tool", "used", "build", "feature", "matter", "!", "register", "activeloop", ",", "data", "longer", "anonymous", ".", "always", "opt-out", "reporting", "setting", "environmental", "variable", "``", "`", "bugger_off", "``", "`", "``", "`", "true", "``", "`", ":", "<", "/details", ">" ], [ "disclaimer < detail > < summary > < b > dataset license < /b > < /summary > deep lake user may access variety publicly available datasets .", "host distribute datasets , vouch quality fairness , claim license use datasets .", "responsibility determine whether permission use datasets license .", "'re dataset owner want dataset included library , please get touch [ github issue ] ( http : //github.com/activeloopai/deeplake/issues/new ) .", "thank contribution ml community !", "< /details > < detail > < summary > < b > usage tracking < /b > < /summary > default , collect usage data using bugout ( 's [ code ] ( http : //github.com/activeloopai/deeplake/blob/853456a314b4fb5623c936c825601097b0685119/deeplake/__init__.py # l24 ) ) .", "collect user data anonymized ip address data , log deep lake library 's action .", "help team understand tool used build feature matter !", "register activeloop , data longer anonymous .", "always opt-out reporting setting environmental variable `` ` bugger_off `` ` `` ` true `` ` : < /details >" ] ]
[ [ "disclaimer", "<", "detail", ">", "<", "summary", ">", "<", "b", ">", "dataset", "license", "<", "/b", ">", "<", "/summary", ">", "deep", "lake", "user", "may", "access", "variety", "publicly", "available", "datasets", ".", "host", "distribute", "datasets", ",", "vouch", "quality", "fairness", ",", "claim", "license", "use", "datasets", ".", "responsibility", "determine", "whether", "permission", "use", "datasets", "license", ".", "'re", "dataset", "owner", "want", "dataset", "included", "library", ",", "please", "get", "touch", "[", "github", "issue", "]", "(", "http", ":", "//github.com/activeloopai/deeplake/issues/new", ")", ".", "thank", "contribution", "ml", "community", "!", "<", "/details", ">", "<", "detail", ">", "<", "summary", ">", "<", "b", ">", "usage", "tracking", "<", "/b", ">", "<", "/summary", ">", "default", ",", "collect", "usage", "data", "using", "bugout", "(", "'s", "[", "code", "]", "(", "http", ":", "//github.com/activeloopai/deeplake/blob/853456a314b4fb5623c936c825601097b0685119/deeplake/__init__.py", "#", "l24", ")", ")", ".", "collect", "user", "data", "anonymized", "ip", "address", "data", ",", "log", "deep", "lake", "library", "'s", "action", ".", "help", "team", "understand", "tool", "used", "build", "feature", "matter", "!", "register", "activeloop", ",", "data", "longer", "anonymous", ".", "always", "opt-out", "reporting", "setting", "environmental", "variable", "``", "`", "bugger_off", "``", "`", "``", "`", "true", "``", "`", ":", "<", "/details", ">" ], [ "disclaimer < detail > < summary > < b > dataset license < /b > < /summary > deep lake user may access variety publicly available datasets .", "host distribute datasets , vouch quality fairness , claim license use datasets .", "responsibility determine whether permission use datasets license .", "'re dataset owner want dataset included library , please get touch [ github issue ] ( http : //github.com/activeloopai/deeplake/issues/new ) .", "thank contribution ml community !", "< /details > < detail > < summary > < b > usage tracking < /b > < /summary > default , collect usage data using bugout ( 's [ code ] ( http : //github.com/activeloopai/deeplake/blob/853456a314b4fb5623c936c825601097b0685119/deeplake/__init__.py # l24 ) ) .", "collect user data anonymized ip address data , log deep lake library 's action .", "help team understand tool used build feature matter !", "register activeloop , data longer anonymous .", "always opt-out reporting setting environmental variable `` ` bugger_off `` ` `` ` true `` ` : < /details >" ] ]
Disclaimers <details> <summary><b> Dataset Licenses</b></summary> Deep Lake users may have access to a variety of publicly available datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have a license to use the datasets. It is your responsibility to determine whether you have permission to use the datasets under their license. If you're a dataset owner and do not want your dataset to be included in this library, please get in touch through a [GitHub issue](https://github.com/activeloopai/deeplake/issues/new). Thank you for your contribution to the ML community! </details> <details> <summary><b> Usage Tracking</b></summary> By default, we collect usage data using Bugout (here's the [code](https://github.com/activeloopai/deeplake/blob/853456a314b4fb5623c936c825601097b0685119/deeplake/__init__.py#L24) that does it). It does not collect user data other than anonymized IP address data, and it only logs the Deep Lake library's own actions. This helps our team understand how the tool is used and how to build features that matter to you! After you register with Activeloop, data is no longer anonymous. You can always opt out of reporting by setting the environment variable ```BUGGER_OFF``` to ```True```: </details>
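The colon above is where the original README shows its snippet; as a hedged example, the variable can be set from Python before Deep Lake is imported (setting it in your shell before launching works just as well):

```python
import os

os.environ["BUGGER_OFF"] = "True"  # opt out of anonymous usage reporting
import deeplake  # import after the variable is set so the setting takes effect
```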
https://github.com/activeloopai/deeplake
-1
[ "ai", "computer-vision", "cv", "data-science", "data-version-control", "datalake", "datasets", "deep-learning", "image-processing", "langchain", "large-language-models", "llm", "machine-learning", "ml", "mlops", "python", "pytorch", "tensorflow", "vector-database", "vector-search" ]
https://raw.githubusercontent.com/microsoft/TypeChat/main/README.md
[ [ "getting", "started", "install", "typechat", ":", "``", "`", "npm", "install", "typechat", "``", "`", "also", "build", "typechat", "source", ":", "``", "`", "git", "clone", "http", ":", "//github.com/microsoft/typechat", "cd", "typechat", "npm", "run", "build", "``", "`", "see", "typechat", "action", ",", "recommend", "exploring", "[", "typechat", "example", "project", "]", "(", "./examples", ")", ".", "try", "local", "machine", "github", "codespace", ".", "learn", "typechat", ",", "visit", "[", "documentation", "]", "(", "http", ":", "//microsoft.github.io/typechat", ")", "includes", "information", "typechat", "get", "started", "." ], [ "getting started install typechat : `` ` npm install typechat `` ` also build typechat source : `` ` git clone http : //github.com/microsoft/typechat cd typechat npm run build `` ` see typechat action , recommend exploring [ typechat example project ] ( ./examples ) .", "try local machine github codespace .", "learn typechat , visit [ documentation ] ( http : //microsoft.github.io/typechat ) includes information typechat get started ." ] ]
[ [ "getting", "started", "install", "typechat", ":", "``", "`", "npm", "install", "typechat", "``", "`", "also", "build", "typechat", "source", ":", "``", "`", "git", "clone", "http", ":", "//github.com/microsoft/typechat", "cd", "typechat", "npm", "run", "build", "``", "`", "see", "typechat", "action", ",", "recommend", "exploring", "[", "typechat", "example", "project", "]", "(", "./examples", ")", ".", "try", "local", "machine", "github", "codespace", ".", "learn", "typechat", ",", "visit", "[", "documentation", "]", "(", "http", ":", "//microsoft.github.io/typechat", ")", "includes", "information", "typechat", "get", "started", "." ], [ "getting started install typechat : `` ` npm install typechat `` ` also build typechat source : `` ` git clone http : //github.com/microsoft/typechat cd typechat npm run build `` ` see typechat action , recommend exploring [ typechat example project ] ( ./examples ) .", "try local machine github codespace .", "learn typechat , visit [ documentation ] ( http : //microsoft.github.io/typechat ) includes information typechat get started ." ] ]
Getting Started Install TypeChat: ``` npm install typechat ``` You can also build TypeChat from source: ``` git clone https://github.com/microsoft/TypeChat cd TypeChat npm run build ``` To see TypeChat in action, we recommend exploring the [TypeChat example projects](./examples). You can try them on your local machine or in a GitHub Codespace. To learn more about TypeChat, visit the [documentation](https://microsoft.github.io/TypeChat) which includes more information on TypeChat and how to get started.
https://github.com/microsoft/TypeChat
2
[ "ai", "llm", "natural-language", "types" ]
https://raw.githubusercontent.com/mistralai/mistral-src/main/README.md
[ [ "installation", "``", "`", "pip", "install", "-r", "requirements.txt", "``", "`" ], [ "installation `` ` pip install -r requirements.txt `` `" ] ]
[ [ "installation", "``", "`", "pip", "install", "-r", "requirements.txt", "``", "`" ], [ "installation `` ` pip install -r requirements.txt `` `" ] ]
Installation ``` pip install -r requirements.txt ```
https://github.com/mistralai/mistral-src
0
[ "llm", "llm-inference", "mistralai" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "llama", "2", "fine-tuning", "/", "inference", "recipe", ",", "example", ",", "benchmark", "demo", "apps", "*", "*", "[", "update", "feb.", "5", ",", "2024", "]", "added", "support", "code", "llama", "70b", "instruct", "example", "[", "inference", "script", "]", "(", "./examples/code_llama/code_instruct_example.py", ")", ".", "detail", "formatting", "prompt", "code", "llama", "70b", "instruct", "model", "please", "refer", "[", "document", "]", "(", "./docs/inference.md", ")", "*", "*", ".", "*", "*", "[", "update", "dec.", "28", ",", "2023", "]", "added", "support", "llama", "guard", "safety", "checker", "example", "inference", "script", "also", "standalone", "inference", "example", "script", "prompt", "formatting", ".", "detail", "[", "]", "(", "./examples/llama_guard/readme.md", ")", ".", "detail", "formatting", "data", "fine", "tuning", "llama", "guard", ",", "provide", "script", "sample", "usage", "[", "]", "(", "./src/llama_recipes/data/llama_guard/readme.md", ")", ".", "*", "*", "*", "*", "[", "update", "dec", "14", ",", "2023", "]", "recently", "released", "series", "llama", "2", "demo", "apps", "[", "]", "(", "./demo_apps", ")", ".", "apps", "show", "run", "llama", "(", "locally", ",", "cloud", ",", "on-prem", ")", ",", "use", "azure", "llama", "2", "api", "(", "model-as-a-service", ")", ",", "ask", "llama", "question", "general", "custom", "data", "(", "pdf", ",", "db", ",", "live", ")", ",", "integrate", "llama", "whatsapp", "messenger", ",", "implement", "end-to-end", "chatbot", "rag", "(", "retrieval", "augmented", "generation", ")", ".", "*", "*", "'llama-recipes", "'", "repository", "companion", "[", "llama", "2", "model", "]", "(", "http", ":", "//github.com/facebookresearch/llama", ")", ".", "goal", "repository", "provide", "example", "quickly", "get", "started", "fine-tuning", "domain", "adaptation", "run", "inference", "fine-tuned", "model", ".", "ease", "use", ",", "example", "use", "hugging", "face", "converted", "version", "model", ".", "see", "step", "conversion", "model", "[", "]", "(", "#", "model-conversion-to-hugging-face", ")", ".", "addition", ",", "also", "provide", "number", "demo", "apps", ",", "showcase", "llama", "2", "usage", "along", "ecosystem", "solution", "run", "llama", "2", "locally", ",", "cloud", ",", "on-prem", ".", "llama", "2", "new", "technology", "carry", "potential", "risk", "use", ".", "testing", "conducted", "date", "—", "could", "—", "cover", "scenario", ".", "order", "help", "developer", "address", "risk", ",", "created", "[", "responsible", "use", "guide", "]", "(", "http", ":", "//github.com/facebookresearch/llama/blob/main/responsible-use-guide.pdf", ")", ".", "detail", "found", "research", "paper", "well", ".", "downloading", "model", ",", "follow", "instruction", "[", "llama", "2", "repo", "]", "(", "http", ":", "//github.com/facebookresearch/llama", ")", "." ], [ "llama 2 fine-tuning / inference recipe , example , benchmark demo apps * * [ update feb. 5 , 2024 ] added support code llama 70b instruct example [ inference script ] ( ./examples/code_llama/code_instruct_example.py ) .", "detail formatting prompt code llama 70b instruct model please refer [ document ] ( ./docs/inference.md ) * * .", "* * [ update dec. 
28 , 2023 ] added support llama guard safety checker example inference script also standalone inference example script prompt formatting .", "detail [ ] ( ./examples/llama_guard/readme.md ) .", "detail formatting data fine tuning llama guard , provide script sample usage [ ] ( ./src/llama_recipes/data/llama_guard/readme.md ) .", "* * * * [ update dec 14 , 2023 ] recently released series llama 2 demo apps [ ] ( ./demo_apps ) .", "apps show run llama ( locally , cloud , on-prem ) , use azure llama 2 api ( model-as-a-service ) , ask llama question general custom data ( pdf , db , live ) , integrate llama whatsapp messenger , implement end-to-end chatbot rag ( retrieval augmented generation ) .", "* * 'llama-recipes ' repository companion [ llama 2 model ] ( http : //github.com/facebookresearch/llama ) .", "goal repository provide example quickly get started fine-tuning domain adaptation run inference fine-tuned model .", "ease use , example use hugging face converted version model .", "see step conversion model [ ] ( # model-conversion-to-hugging-face ) .", "addition , also provide number demo apps , showcase llama 2 usage along ecosystem solution run llama 2 locally , cloud , on-prem .", "llama 2 new technology carry potential risk use .", "testing conducted date — could — cover scenario .", "order help developer address risk , created [ responsible use guide ] ( http : //github.com/facebookresearch/llama/blob/main/responsible-use-guide.pdf ) .", "detail found research paper well .", "downloading model , follow instruction [ llama 2 repo ] ( http : //github.com/facebookresearch/llama ) ." ] ]
[ [ "llama", "2", "fine-tuning", "/", "inference", "recipe", ",", "example", ",", "benchmark", "demo", "apps", "*", "*", "[", "update", "feb.", "5", ",", "2024", "]", "added", "support", "code", "llama", "70b", "instruct", "example", "[", "inference", "script", "]", "(", "./examples/code_llama/code_instruct_example.py", ")", ".", "detail", "formatting", "prompt", "code", "llama", "70b", "instruct", "model", "please", "refer", "[", "document", "]", "(", "./docs/inference.md", ")", "*", "*", ".", "*", "*", "[", "update", "dec.", "28", ",", "2023", "]", "added", "support", "llama", "guard", "safety", "checker", "example", "inference", "script", "also", "standalone", "inference", "example", "script", "prompt", "formatting", ".", "detail", "[", "]", "(", "./examples/llama_guard/readme.md", ")", ".", "detail", "formatting", "data", "fine", "tuning", "llama", "guard", ",", "provide", "script", "sample", "usage", "[", "]", "(", "./src/llama_recipes/data/llama_guard/readme.md", ")", ".", "*", "*", "*", "*", "[", "update", "dec", "14", ",", "2023", "]", "recently", "released", "series", "llama", "2", "demo", "apps", "[", "]", "(", "./demo_apps", ")", ".", "apps", "show", "run", "llama", "(", "locally", ",", "cloud", ",", "on-prem", ")", ",", "use", "azure", "llama", "2", "api", "(", "model-as-a-service", ")", ",", "ask", "llama", "question", "general", "custom", "data", "(", "pdf", ",", "db", ",", "live", ")", ",", "integrate", "llama", "whatsapp", "messenger", ",", "implement", "end-to-end", "chatbot", "rag", "(", "retrieval", "augmented", "generation", ")", ".", "*", "*", "'llama-recipes", "'", "repository", "companion", "[", "llama", "2", "model", "]", "(", "http", ":", "//github.com/facebookresearch/llama", ")", ".", "goal", "repository", "provide", "example", "quickly", "get", "started", "fine-tuning", "domain", "adaptation", "run", "inference", "fine-tuned", "model", ".", "ease", "use", ",", "example", "use", "hugging", "face", "converted", "version", "model", ".", "see", "step", "conversion", "model", "[", "]", "(", "#", "model-conversion-to-hugging-face", ")", ".", "addition", ",", "also", "provide", "number", "demo", "apps", ",", "showcase", "llama", "2", "usage", "along", "ecosystem", "solution", "run", "llama", "2", "locally", ",", "cloud", ",", "on-prem", ".", "llama", "2", "new", "technology", "carry", "potential", "risk", "use", ".", "testing", "conducted", "date", "—", "could", "—", "cover", "scenario", ".", "order", "help", "developer", "address", "risk", ",", "created", "[", "responsible", "use", "guide", "]", "(", "http", ":", "//github.com/facebookresearch/llama/blob/main/responsible-use-guide.pdf", ")", ".", "detail", "found", "research", "paper", "well", ".", "downloading", "model", ",", "follow", "instruction", "[", "llama", "2", "repo", "]", "(", "http", ":", "//github.com/facebookresearch/llama", ")", "." ], [ "llama 2 fine-tuning / inference recipe , example , benchmark demo apps * * [ update feb. 5 , 2024 ] added support code llama 70b instruct example [ inference script ] ( ./examples/code_llama/code_instruct_example.py ) .", "detail formatting prompt code llama 70b instruct model please refer [ document ] ( ./docs/inference.md ) * * .", "* * [ update dec. 
28 , 2023 ] added support llama guard safety checker example inference script also standalone inference example script prompt formatting .", "detail [ ] ( ./examples/llama_guard/readme.md ) .", "detail formatting data fine tuning llama guard , provide script sample usage [ ] ( ./src/llama_recipes/data/llama_guard/readme.md ) .", "* * * * [ update dec 14 , 2023 ] recently released series llama 2 demo apps [ ] ( ./demo_apps ) .", "apps show run llama ( locally , cloud , on-prem ) , use azure llama 2 api ( model-as-a-service ) , ask llama question general custom data ( pdf , db , live ) , integrate llama whatsapp messenger , implement end-to-end chatbot rag ( retrieval augmented generation ) .", "* * 'llama-recipes ' repository companion [ llama 2 model ] ( http : //github.com/facebookresearch/llama ) .", "goal repository provide example quickly get started fine-tuning domain adaptation run inference fine-tuned model .", "ease use , example use hugging face converted version model .", "see step conversion model [ ] ( # model-conversion-to-hugging-face ) .", "addition , also provide number demo apps , showcase llama 2 usage along ecosystem solution run llama 2 locally , cloud , on-prem .", "llama 2 new technology carry potential risk use .", "testing conducted date — could — cover scenario .", "order help developer address risk , created [ responsible use guide ] ( http : //github.com/facebookresearch/llama/blob/main/responsible-use-guide.pdf ) .", "detail found research paper well .", "downloading model , follow instruction [ llama 2 repo ] ( http : //github.com/facebookresearch/llama ) ." ] ]
Llama 2 Fine-tuning / Inference Recipes, Examples, Benchmarks and Demo Apps **[Update Feb. 5, 2024] We added support for Code Llama 70B instruct in our example [inference script](./examples/code_llama/code_instruct_example.py). For details on formatting the prompt for the Code Llama 70B instruct model, please refer to [this document](./docs/inference.md)**. **[Update Dec. 28, 2023] We added support for Llama Guard as a safety checker for our example inference script, and also for standalone inference with an example script and prompt formatting. More details [here](./examples/llama_guard/README.md). For details on formatting data for fine tuning Llama Guard, we provide a script and sample usage [here](./src/llama_recipes/data/llama_guard/README.md).** **[Update Dec 14, 2023] We recently released a series of Llama 2 demo apps [here](./demo_apps). These apps show how to run Llama (locally, in the cloud, or on-prem), how to use Azure Llama 2 API (Model-as-a-Service), how to ask Llama questions in general or about custom data (PDF, DB, or live), how to integrate Llama with WhatsApp and Messenger, and how to implement an end-to-end chatbot with RAG (Retrieval Augmented Generation).** The 'llama-recipes' repository is a companion to the [Llama 2 model](https://github.com/facebookresearch/llama). The goal of this repository is to provide examples that help you quickly get started with fine-tuning for domain adaptation and with running inference on the fine-tuned models. For ease of use, the examples use Hugging Face converted versions of the models. See steps for conversion of the model [here](#model-conversion-to-hugging-face). In addition, we also provide a number of demo apps to showcase Llama 2 usage along with other ecosystem solutions to run Llama 2 locally, in the cloud, and on-prem. Llama 2 is a new technology that carries potential risks with use. Testing conducted to date has not — and could not — cover all scenarios. In order to help developers address these risks, we have created the [Responsible Use Guide](https://github.com/facebookresearch/llama/blob/main/Responsible-Use-Guide.pdf). More details can be found in our research paper as well. For downloading the models, follow the instructions on the [Llama 2 repo](https://github.com/facebookresearch/llama).
https://github.com/facebookresearch/llama-recipes
-1
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "table", "content", "1", ".", "[", "quick", "start", "]", "(", "#", "quick-start", ")", "2", ".", "[", "model", "conversion", "]", "(", "#", "model-conversion-to-hugging-face", ")", "3", ".", "[", "fine-tuning", "]", "(", "#", "fine-tuning", ")", "-", "[", "single", "gpu", "]", "(", "#", "single-gpu", ")", "-", "[", "multi", "gpu", "one", "node", "]", "(", "#", "multiple-gpus-one-node", ")", "-", "[", "multi", "gpu", "multi", "node", "]", "(", "#", "multi-gpu-multi-node", ")", "4", ".", "[", "inference", "]", "(", "./docs/inference.md", ")", "5", ".", "[", "demo", "apps", "]", "(", "#", "demo-apps", ")", "6", ".", "[", "repository", "organization", "]", "(", "#", "repository-organization", ")", "7", ".", "[", "license", "acceptable", "use", "policy", "]", "(", "#", "license", ")" ], [ "table content 1 .", "[ quick start ] ( # quick-start ) 2 .", "[ model conversion ] ( # model-conversion-to-hugging-face ) 3 .", "[ fine-tuning ] ( # fine-tuning ) - [ single gpu ] ( # single-gpu ) - [ multi gpu one node ] ( # multiple-gpus-one-node ) - [ multi gpu multi node ] ( # multi-gpu-multi-node ) 4 .", "[ inference ] ( ./docs/inference.md ) 5 .", "[ demo apps ] ( # demo-apps ) 6 .", "[ repository organization ] ( # repository-organization ) 7 .", "[ license acceptable use policy ] ( # license )" ] ]
[ [ "table", "content", "1", ".", "[", "quick", "start", "]", "(", "#", "quick-start", ")", "2", ".", "[", "model", "conversion", "]", "(", "#", "model-conversion-to-hugging-face", ")", "3", ".", "[", "fine-tuning", "]", "(", "#", "fine-tuning", ")", "-", "[", "single", "gpu", "]", "(", "#", "single-gpu", ")", "-", "[", "multi", "gpu", "one", "node", "]", "(", "#", "multiple-gpus-one-node", ")", "-", "[", "multi", "gpu", "multi", "node", "]", "(", "#", "multi-gpu-multi-node", ")", "4", ".", "[", "inference", "]", "(", "./docs/inference.md", ")", "5", ".", "[", "demo", "apps", "]", "(", "#", "demo-apps", ")", "6", ".", "[", "repository", "organization", "]", "(", "#", "repository-organization", ")", "7", ".", "[", "license", "acceptable", "use", "policy", "]", "(", "#", "license", ")" ], [ "table content 1 .", "[ quick start ] ( # quick-start ) 2 .", "[ model conversion ] ( # model-conversion-to-hugging-face ) 3 .", "[ fine-tuning ] ( # fine-tuning ) - [ single gpu ] ( # single-gpu ) - [ multi gpu one node ] ( # multiple-gpus-one-node ) - [ multi gpu multi node ] ( # multi-gpu-multi-node ) 4 .", "[ inference ] ( ./docs/inference.md ) 5 .", "[ demo apps ] ( # demo-apps ) 6 .", "[ repository organization ] ( # repository-organization ) 7 .", "[ license acceptable use policy ] ( # license )" ] ]
Table of Contents 1. [Quick start](#quick-start) 2. [Model Conversion](#model-conversion-to-hugging-face) 3. [Fine-tuning](#fine-tuning) - [Single GPU](#single-gpu) - [Multi GPU One Node](#multiple-gpus-one-node) - [Multi GPU Multi Node](#multi-gpu-multi-node) 4. [Inference](./docs/inference.md) 5. [Demo Apps](#demo-apps) 6. [Repository Organization](#repository-organization) 7. [License and Acceptable Use Policy](#license)
https://github.com/facebookresearch/llama-recipes
-1
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "quick", "start", "[", "llama", "2", "jupyter", "notebook", "]", "(", "./examples/quickstart.ipynb", ")", ":", "jupyter", "notebook", "step", "finetune", "llama", "2", "model", "text", "summarization", "task", "using", "[", "samsum", "]", "(", "http", ":", "//huggingface.co/datasets/samsum", ")", ".", "notebook", "us", "parameter", "efficient", "finetuning", "(", "peft", ")", "int8", "quantization", "finetune", "7b", "single", "gpu", "like", "a10", "24gb", "gpu", "memory", "." ], [ "quick start [ llama 2 jupyter notebook ] ( ./examples/quickstart.ipynb ) : jupyter notebook step finetune llama 2 model text summarization task using [ samsum ] ( http : //huggingface.co/datasets/samsum ) .", "notebook us parameter efficient finetuning ( peft ) int8 quantization finetune 7b single gpu like a10 24gb gpu memory ." ] ]
[ [ "quick", "start", "[", "llama", "2", "jupyter", "notebook", "]", "(", "./examples/quickstart.ipynb", ")", ":", "jupyter", "notebook", "step", "finetune", "llama", "2", "model", "text", "summarization", "task", "using", "[", "samsum", "]", "(", "http", ":", "//huggingface.co/datasets/samsum", ")", ".", "notebook", "us", "parameter", "efficient", "finetuning", "(", "peft", ")", "int8", "quantization", "finetune", "7b", "single", "gpu", "like", "a10", "24gb", "gpu", "memory", "." ], [ "quick start [ llama 2 jupyter notebook ] ( ./examples/quickstart.ipynb ) : jupyter notebook step finetune llama 2 model text summarization task using [ samsum ] ( http : //huggingface.co/datasets/samsum ) .", "notebook us parameter efficient finetuning ( peft ) int8 quantization finetune 7b single gpu like a10 24gb gpu memory ." ] ]
Quick Start [Llama 2 Jupyter Notebook](./examples/quickstart.ipynb): This Jupyter notebook steps you through how to finetune a Llama 2 model on the text summarization task using the [samsum](https://huggingface.co/datasets/samsum) dataset. The notebook uses parameter efficient finetuning (PEFT) and int8 quantization to finetune a 7B model on a single GPU like an A10 with 24GB of GPU memory.
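The notebook contains the full walkthrough; the following is a rough, hedged sketch of the kind of PEFT + int8 setup it describes, using standard `transformers`/`peft` APIs (the model path, LoRA hyperparameters, and target modules are illustrative only, and loading in 8-bit requires `bitsandbytes` plus access to the HF-converted weights):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

model_id = "meta-llama/Llama-2-7b-hf"  # assumed path to HF-converted Llama 2 7B weights

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, load_in_8bit=True, device_map="auto")

# Freeze the int8 base model and attach small trainable LoRA adapters.
model = prepare_model_for_kbit_training(model)
lora_config = LoraConfig(
    r=8, lora_alpha=32, lora_dropout=0.05, bias="none",
    target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the adapter weights are trainable
```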
https://github.com/facebookresearch/llama-recipes
-1
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "installation", "llama-recipes", "provides", "pip", "distribution", "easy", "install", "usage", "project", ".", "alternatively", ",", "installed", "source", "." ], [ "installation llama-recipes provides pip distribution easy install usage project .", "alternatively , installed source ." ] ]
[ [ "installation", "llama-recipes", "provides", "pip", "distribution", "easy", "install", "usage", "project", ".", "alternatively", ",", "installed", "source", "." ], [ "installation llama-recipes provides pip distribution easy install usage project .", "alternatively , installed source ." ] ]
Installation Llama-recipes provides a pip distribution for easy install and usage in other projects. Alternatively, it can be installed from source.
https://github.com/facebookresearch/llama-recipes
2
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "install", "pip", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "``", "`" ], [ "install pip `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes `` `" ] ]
[ [ "install", "pip", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "``", "`" ], [ "install pip `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes `` `" ] ]
Install with pip ``` pip install --extra-index-url https://download.pytorch.org/whl/test/cu118 llama-recipes ```
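A quick way to confirm the installation (a minimal check; `llama_recipes` is assumed to be the import name matching the repository's `src/llama_recipes` layout):

```python
# Verify that the llama-recipes package is installed and importable.
from importlib.metadata import version

import llama_recipes  # assumed import name for the llama-recipes distribution

print("llama-recipes version:", version("llama-recipes"))
```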
https://github.com/facebookresearch/llama-recipes
0
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "install", "optional", "dependency", "llama-recipes", "offer", "installation", "optional", "package", ".", "three", "optional", "dependency", "group", ".", "run", "unit", "test", "install", "required", "dependency", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "test", "]", "``", "`", "vllm", "example", "need", "additional", "requirement", "installed", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "vllm", "]", "``", "`", "use", "sensitive", "topic", "safety", "checker", "install", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "auditnlg", "]", "``", "`", "optional", "dependency", "also", "combine", "[", "option1", ",", "option2", "]", "." ], [ "install optional dependency llama-recipes offer installation optional package .", "three optional dependency group .", "run unit test install required dependency : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ test ] `` ` vllm example need additional requirement installed : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ vllm ] `` ` use sensitive topic safety checker install : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ auditnlg ] `` ` optional dependency also combine [ option1 , option2 ] ." ] ]
[ [ "install", "optional", "dependency", "llama-recipes", "offer", "installation", "optional", "package", ".", "three", "optional", "dependency", "group", ".", "run", "unit", "test", "install", "required", "dependency", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "test", "]", "``", "`", "vllm", "example", "need", "additional", "requirement", "installed", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "vllm", "]", "``", "`", "use", "sensitive", "topic", "safety", "checker", "install", ":", "``", "`", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "llama-recipes", "[", "auditnlg", "]", "``", "`", "optional", "dependency", "also", "combine", "[", "option1", ",", "option2", "]", "." ], [ "install optional dependency llama-recipes offer installation optional package .", "three optional dependency group .", "run unit test install required dependency : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ test ] `` ` vllm example need additional requirement installed : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ vllm ] `` ` use sensitive topic safety checker install : `` ` pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 llama-recipes [ auditnlg ] `` ` optional dependency also combine [ option1 , option2 ] ." ] ]
Install with optional dependencies Llama-recipes offers the installation of optional packages. There are three optional dependency groups. To run the unit tests, install the required dependencies with: ``` pip install --extra-index-url https://download.pytorch.org/whl/test/cu118 llama-recipes[tests] ``` For the vLLM example we need additional requirements that can be installed with: ``` pip install --extra-index-url https://download.pytorch.org/whl/test/cu118 llama-recipes[vllm] ``` To use the sensitive topics safety checker, install with: ``` pip install --extra-index-url https://download.pytorch.org/whl/test/cu118 llama-recipes[auditnlg] ``` Optional dependencies can also be combined, as in [option1,option2].
https://github.com/facebookresearch/llama-recipes
0
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "install", "source", "install", "source", "e.g", ".", "development", "use", "command", ".", "'re", "using", "hatchling", "build", "backend", "requires", "up-to-date", "pip", "well", "setuptools", "package", ".", "``", "`", "git", "clone", "git", "@", "github.com", ":", "facebookresearch/llama-recipes.git", "cd", "llama-recipes", "pip", "install", "-u", "pip", "setuptools", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "-e", ".", "``", "`", "development", "contributing", "llama-recipes", "please", "install", "optional", "dependency", ":", "``", "`", "git", "clone", "git", "@", "github.com", ":", "facebookresearch/llama-recipes.git", "cd", "llama-recipes", "pip", "install", "-u", "pip", "setuptools", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "-e", ".", "[", "test", ",", "auditnlg", ",", "vllm", "]", "``", "`", "⚠️", "*", "*", "note", "*", "*", "⚠️", "feature", "(", "especially", "fine-tuning", "fsdp", "+", "peft", ")", "currently", "require", "pytorch", "nightlies", "installed", ".", "please", "make", "sure", "install", "nightlies", "'re", "using", "feature", "following", "[", "guide", "]", "(", "http", ":", "//pytorch.org/get-started/locally/", ")", ".", "*", "*", "note", "*", "*", "setting", "defined", "[", "config", "file", "]", "(", "src/llama_recipes/configs/", ")", "passed", "args", "cli", "running", "script", ",", "need", "change", "config", "file", "directly", ".", "*", "*", "depth", "information", "checkout", "following", ":", "*", "*", "*", "[", "single", "gpu", "fine-tuning", "]", "(", "./docs/single_gpu.md", ")", "*", "[", "multi-gpu", "fine-tuning", "]", "(", "./docs/multi_gpu.md", ")", "*", "[", "llm", "fine-tuning", "]", "(", "./docs/llm_finetuning.md", ")", "*", "[", "adding", "custom", "datasets", "]", "(", "./docs/dataset.md", ")", "*", "[", "inference", "]", "(", "./docs/inference.md", ")", "*", "[", "evaluation", "harness", "]", "(", "./eval/readme.md", ")", "*", "[", "faq", "]", "(", "./docs/faq.md", ")" ], [ "install source install source e.g .", "development use command .", "'re using hatchling build backend requires up-to-date pip well setuptools package .", "`` ` git clone git @ github.com : facebookresearch/llama-recipes.git cd llama-recipes pip install -u pip setuptools pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 -e .", "`` ` development contributing llama-recipes please install optional dependency : `` ` git clone git @ github.com : facebookresearch/llama-recipes.git cd llama-recipes pip install -u pip setuptools pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 -e .", "[ test , auditnlg , vllm ] `` ` ⚠️ * * note * * ⚠️ feature ( especially fine-tuning fsdp + peft ) currently require pytorch nightlies installed .", "please make sure install nightlies 're using feature following [ guide ] ( http : //pytorch.org/get-started/locally/ ) .", "* * note * * setting defined [ config file ] ( src/llama_recipes/configs/ ) passed args cli running script , need change config file directly .", "* * depth information checkout following : * * * [ single gpu fine-tuning ] ( ./docs/single_gpu.md ) * [ multi-gpu fine-tuning ] ( ./docs/multi_gpu.md ) * [ llm fine-tuning ] ( ./docs/llm_finetuning.md ) * [ adding custom datasets ] ( ./docs/dataset.md ) * [ inference ] ( ./docs/inference.md ) * [ evaluation harness ] ( ./eval/readme.md ) * [ faq ] ( ./docs/faq.md )" ] ]
[ [ "install", "source", "install", "source", "e.g", ".", "development", "use", "command", ".", "'re", "using", "hatchling", "build", "backend", "requires", "up-to-date", "pip", "well", "setuptools", "package", ".", "``", "`", "git", "clone", "git", "@", "github.com", ":", "facebookresearch/llama-recipes.git", "cd", "llama-recipes", "pip", "install", "-u", "pip", "setuptools", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "-e", ".", "``", "`", "development", "contributing", "llama-recipes", "please", "install", "optional", "dependency", ":", "``", "`", "git", "clone", "git", "@", "github.com", ":", "facebookresearch/llama-recipes.git", "cd", "llama-recipes", "pip", "install", "-u", "pip", "setuptools", "pip", "install", "--", "extra-index-url", "http", ":", "//download.pytorch.org/whl/test/cu118", "-e", ".", "[", "test", ",", "auditnlg", ",", "vllm", "]", "``", "`", "⚠️", "*", "*", "note", "*", "*", "⚠️", "feature", "(", "especially", "fine-tuning", "fsdp", "+", "peft", ")", "currently", "require", "pytorch", "nightlies", "installed", ".", "please", "make", "sure", "install", "nightlies", "'re", "using", "feature", "following", "[", "guide", "]", "(", "http", ":", "//pytorch.org/get-started/locally/", ")", ".", "*", "*", "note", "*", "*", "setting", "defined", "[", "config", "file", "]", "(", "src/llama_recipes/configs/", ")", "passed", "args", "cli", "running", "script", ",", "need", "change", "config", "file", "directly", ".", "*", "*", "depth", "information", "checkout", "following", ":", "*", "*", "*", "[", "single", "gpu", "fine-tuning", "]", "(", "./docs/single_gpu.md", ")", "*", "[", "multi-gpu", "fine-tuning", "]", "(", "./docs/multi_gpu.md", ")", "*", "[", "llm", "fine-tuning", "]", "(", "./docs/llm_finetuning.md", ")", "*", "[", "adding", "custom", "datasets", "]", "(", "./docs/dataset.md", ")", "*", "[", "inference", "]", "(", "./docs/inference.md", ")", "*", "[", "evaluation", "harness", "]", "(", "./eval/readme.md", ")", "*", "[", "faq", "]", "(", "./docs/faq.md", ")" ], [ "install source install source e.g .", "development use command .", "'re using hatchling build backend requires up-to-date pip well setuptools package .", "`` ` git clone git @ github.com : facebookresearch/llama-recipes.git cd llama-recipes pip install -u pip setuptools pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 -e .", "`` ` development contributing llama-recipes please install optional dependency : `` ` git clone git @ github.com : facebookresearch/llama-recipes.git cd llama-recipes pip install -u pip setuptools pip install -- extra-index-url http : //download.pytorch.org/whl/test/cu118 -e .", "[ test , auditnlg , vllm ] `` ` ⚠️ * * note * * ⚠️ feature ( especially fine-tuning fsdp + peft ) currently require pytorch nightlies installed .", "please make sure install nightlies 're using feature following [ guide ] ( http : //pytorch.org/get-started/locally/ ) .", "* * note * * setting defined [ config file ] ( src/llama_recipes/configs/ ) passed args cli running script , need change config file directly .", "* * depth information checkout following : * * * [ single gpu fine-tuning ] ( ./docs/single_gpu.md ) * [ multi-gpu fine-tuning ] ( ./docs/multi_gpu.md ) * [ llm fine-tuning ] ( ./docs/llm_finetuning.md ) * [ adding custom datasets ] ( ./docs/dataset.md ) * [ inference ] ( ./docs/inference.md ) * [ evaluation harness ] ( ./eval/readme.md ) * [ faq ] ( ./docs/faq.md )" ] ]
Install from source To install from source, e.g. for development, use these commands. We're using hatchling as our build backend, which requires an up-to-date pip as well as the setuptools package. ``` git clone git@github.com:facebookresearch/llama-recipes.git cd llama-recipes pip install -U pip setuptools pip install --extra-index-url https://download.pytorch.org/whl/test/cu118 -e . ``` For development and contributing to llama-recipes, please install all optional dependencies: ``` git clone git@github.com:facebookresearch/llama-recipes.git cd llama-recipes pip install -U pip setuptools pip install --extra-index-url https://download.pytorch.org/whl/test/cu118 -e .[tests,auditnlg,vllm] ``` ⚠️ **Note** ⚠️ Some features (especially fine-tuning with FSDP + PEFT) currently require PyTorch nightlies to be installed. Please make sure to install the nightlies if you're using these features, following [this guide](https://pytorch.org/get-started/locally/). **Note** All the settings defined in the [config files](src/llama_recipes/configs/) can be passed as args through the CLI when running the script; there is no need to change the config files directly. **For more in-depth information check out the following:** * [Single GPU Fine-tuning](./docs/single_gpu.md) * [Multi-GPU Fine-tuning](./docs/multi_gpu.md) * [LLM Fine-tuning](./docs/LLM_finetuning.md) * [Adding custom datasets](./docs/Dataset.md) * [Inference](./docs/inference.md) * [Evaluation Harness](./eval/README.md) * [FAQs](./docs/FAQ.md)
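After either install, a quick sanity check that the editable package is importable can save a debugging round later. This is a minimal sketch, assuming the import name is `llama_recipes` (as the `src/llama_recipes/` layout above suggests) and that the distribution is published as `llama-recipes`:

```python
# Sanity-check an editable install of llama-recipes.
# Assumptions: the import name is `llama_recipes` and the distribution name is `llama-recipes`.
import importlib
from importlib.metadata import PackageNotFoundError, version

try:
    importlib.import_module("llama_recipes")  # fails if the editable install is broken
    print("llama-recipes version:", version("llama-recipes"))
except (ImportError, PackageNotFoundError) as exc:
    print("install check failed:", exc)
```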
https://github.com/facebookresearch/llama-recipes
0
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "model", "conversion", "hugging", "face", "recipe", "notebook", "folder", "using", "llama", "2", "model", "definition", "provided", "hugging", "face", "'s", "transformer", "library", ".", "given", "original", "checkpoint", "resides", "models/7b", "install", "requirement", "convert", "checkpoint", ":", "``", "`", "bash" ], [ "model conversion hugging face recipe notebook folder using llama 2 model definition provided hugging face 's transformer library .", "given original checkpoint resides models/7b install requirement convert checkpoint : `` ` bash" ] ]
[ [ "model", "conversion", "hugging", "face", "recipe", "notebook", "folder", "using", "llama", "2", "model", "definition", "provided", "hugging", "face", "'s", "transformer", "library", ".", "given", "original", "checkpoint", "resides", "models/7b", "install", "requirement", "convert", "checkpoint", ":", "``", "`", "bash" ], [ "model conversion hugging face recipe notebook folder using llama 2 model definition provided hugging face 's transformer library .", "given original checkpoint resides models/7b install requirement convert checkpoint : `` ` bash" ] ]
Model conversion to Hugging Face The recipes and notebooks in this folder use the Llama 2 model definition provided by Hugging Face's transformers library. Given that the original checkpoint resides under models/7B, you can install all requirements and convert the checkpoint with: ```bash
https://github.com/facebookresearch/llama-recipes
-1
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "install", "hugging", "face", "transformer", "source", "pip", "freeze", "|", "grep", "transformer" ], [ "install hugging face transformer source pip freeze | grep transformer" ] ]
[ [ "install", "hugging", "face", "transformer", "source", "pip", "freeze", "|", "grep", "transformer" ], [ "install hugging face transformer source pip freeze | grep transformer" ] ]
# Install Hugging Face Transformers from source
pip freeze | grep transformers
https://github.com/facebookresearch/llama-recipes
2
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "verify", "version", "4.31.0", "higher", "git", "clone", "git", "@", "github.com", ":", "huggingface/transformers.git", "cd", "transformer", "pip", "install", "protobuf", "python", "src/transformers/models/llama/convert_llama_weights_to_hf.py", "\\", "--", "input_dir", "/path/to/downloaded/llama/weights", "--", "model_size", "7b", "--", "output_dir", "/output/path", "``", "`" ], [ "verify version 4.31.0 higher git clone git @ github.com : huggingface/transformers.git cd transformer pip install protobuf python src/transformers/models/llama/convert_llama_weights_to_hf.py \\ -- input_dir /path/to/downloaded/llama/weights -- model_size 7b -- output_dir /output/path `` `" ] ]
[ [ "verify", "version", "4.31.0", "higher", "git", "clone", "git", "@", "github.com", ":", "huggingface/transformers.git", "cd", "transformer", "pip", "install", "protobuf", "python", "src/transformers/models/llama/convert_llama_weights_to_hf.py", "\\", "--", "input_dir", "/path/to/downloaded/llama/weights", "--", "model_size", "7b", "--", "output_dir", "/output/path", "``", "`" ], [ "verify version 4.31.0 higher git clone git @ github.com : huggingface/transformers.git cd transformer pip install protobuf python src/transformers/models/llama/convert_llama_weights_to_hf.py \\ -- input_dir /path/to/downloaded/llama/weights -- model_size 7b -- output_dir /output/path `` `" ] ]
# verify it is version 4.31.0 or higher
git clone git@github.com:huggingface/transformers.git
cd transformers
pip install protobuf
python src/transformers/models/llama/convert_llama_weights_to_hf.py \
    --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
```
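Once the conversion script finishes, the converted directory can be loaded with the standard transformers API. A minimal sketch, assuming the checkpoint was written to `/output/path` as in the command above:

```python
# Load the converted Llama 2 checkpoint with Hugging Face transformers.
# `/output/path` is the --output_dir used in the conversion command above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "/output/path"
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(
    ckpt, torch_dtype=torch.float16 if device == "cuda" else torch.float32
).to(device)

inputs = tokenizer("Hello, Llama!", return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```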
https://github.com/facebookresearch/llama-recipes
0
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "single", "multi", "gpu", "finetune", "want", "dive", "right", "single", "multi", "gpu", "fine-tuning", ",", "run", "example", "single", "gpu", "like", "a10", ",", "t4", ",", "v100", ",", "a100", "etc", ".", "parameter", "example", "recipe", "need", "tuned", "desired", "result", "based", "model", ",", "method", ",", "data", "task", "hand", ".", "*", "*", "note", ":", "*", "*", "*", "change", "dataset", "command", "pas", "`", "dataset", "`", "arg", ".", "current", "option", "integrated", "dataset", "`", "grammar_dataset", "`", ",", "`", "alpaca_dataset", "`", "`", "samsum_dataset", "`", ".", "additionally", ",", "integrate", "openassistant/oasst1", "dataset", "[", "example", "custom", "dataset", "]", "(", "./examples/custom_dataset.py", ")", ".", "description", "use", "dataset", "add", "custom", "datasets", "found", "[", "dataset.md", "]", "(", "./docs/dataset.md", "#", "using-custom-datasets", ")", ".", "`", "grammar_dataset", "`", ",", "`", "alpaca_dataset", "`", "please", "make", "sure", "use", "suggested", "instruction", "[", "]", "(", "./docs/single_gpu.md", "#", "how-to-run-with-different-datasets", ")", "set", ".", "*", "default", "dataset", "lora", "config", "set", "`", "samsum_dataset", "`", ".", "*", "make", "sure", "set", "right", "path", "model", "[", "training", "config", "]", "(", "src/llama_recipes/configs/training.py", ")", ".", "*", "save", "loss", "perplexity", "metric", "evaluation", ",", "enable", "passing", "`", "--", "save_metrics", "`", "finetuning", "script", ".", "file", "plotted", "using", "[", "plot_metrics.py", "]", "(", "./examples/plot_metrics.py", ")", "script", ",", "`", "python", "examples/plot_metrics.py", "--", "file_path", "path/to/metrics.json", "`" ], [ "single multi gpu finetune want dive right single multi gpu fine-tuning , run example single gpu like a10 , t4 , v100 , a100 etc .", "parameter example recipe need tuned desired result based model , method , data task hand .", "* * note : * * * change dataset command pas ` dataset ` arg .", "current option integrated dataset ` grammar_dataset ` , ` alpaca_dataset ` ` samsum_dataset ` .", "additionally , integrate openassistant/oasst1 dataset [ example custom dataset ] ( ./examples/custom_dataset.py ) .", "description use dataset add custom datasets found [ dataset.md ] ( ./docs/dataset.md # using-custom-datasets ) .", "` grammar_dataset ` , ` alpaca_dataset ` please make sure use suggested instruction [ ] ( ./docs/single_gpu.md # how-to-run-with-different-datasets ) set .", "* default dataset lora config set ` samsum_dataset ` .", "* make sure set right path model [ training config ] ( src/llama_recipes/configs/training.py ) .", "* save loss perplexity metric evaluation , enable passing ` -- save_metrics ` finetuning script .", "file plotted using [ plot_metrics.py ] ( ./examples/plot_metrics.py ) script , ` python examples/plot_metrics.py -- file_path path/to/metrics.json `" ] ]
[ [ "single", "multi", "gpu", "finetune", "want", "dive", "right", "single", "multi", "gpu", "fine-tuning", ",", "run", "example", "single", "gpu", "like", "a10", ",", "t4", ",", "v100", ",", "a100", "etc", ".", "parameter", "example", "recipe", "need", "tuned", "desired", "result", "based", "model", ",", "method", ",", "data", "task", "hand", ".", "*", "*", "note", ":", "*", "*", "*", "change", "dataset", "command", "pas", "`", "dataset", "`", "arg", ".", "current", "option", "integrated", "dataset", "`", "grammar_dataset", "`", ",", "`", "alpaca_dataset", "`", "`", "samsum_dataset", "`", ".", "additionally", ",", "integrate", "openassistant/oasst1", "dataset", "[", "example", "custom", "dataset", "]", "(", "./examples/custom_dataset.py", ")", ".", "description", "use", "dataset", "add", "custom", "datasets", "found", "[", "dataset.md", "]", "(", "./docs/dataset.md", "#", "using-custom-datasets", ")", ".", "`", "grammar_dataset", "`", ",", "`", "alpaca_dataset", "`", "please", "make", "sure", "use", "suggested", "instruction", "[", "]", "(", "./docs/single_gpu.md", "#", "how-to-run-with-different-datasets", ")", "set", ".", "*", "default", "dataset", "lora", "config", "set", "`", "samsum_dataset", "`", ".", "*", "make", "sure", "set", "right", "path", "model", "[", "training", "config", "]", "(", "src/llama_recipes/configs/training.py", ")", ".", "*", "save", "loss", "perplexity", "metric", "evaluation", ",", "enable", "passing", "`", "--", "save_metrics", "`", "finetuning", "script", ".", "file", "plotted", "using", "[", "plot_metrics.py", "]", "(", "./examples/plot_metrics.py", ")", "script", ",", "`", "python", "examples/plot_metrics.py", "--", "file_path", "path/to/metrics.json", "`" ], [ "single multi gpu finetune want dive right single multi gpu fine-tuning , run example single gpu like a10 , t4 , v100 , a100 etc .", "parameter example recipe need tuned desired result based model , method , data task hand .", "* * note : * * * change dataset command pas ` dataset ` arg .", "current option integrated dataset ` grammar_dataset ` , ` alpaca_dataset ` ` samsum_dataset ` .", "additionally , integrate openassistant/oasst1 dataset [ example custom dataset ] ( ./examples/custom_dataset.py ) .", "description use dataset add custom datasets found [ dataset.md ] ( ./docs/dataset.md # using-custom-datasets ) .", "` grammar_dataset ` , ` alpaca_dataset ` please make sure use suggested instruction [ ] ( ./docs/single_gpu.md # how-to-run-with-different-datasets ) set .", "* default dataset lora config set ` samsum_dataset ` .", "* make sure set right path model [ training config ] ( src/llama_recipes/configs/training.py ) .", "* save loss perplexity metric evaluation , enable passing ` -- save_metrics ` finetuning script .", "file plotted using [ plot_metrics.py ] ( ./examples/plot_metrics.py ) script , ` python examples/plot_metrics.py -- file_path path/to/metrics.json `" ] ]
Single and Multi GPU Finetune If you want to dive right into single or multi GPU fine-tuning, run the examples below on a single GPU like an A10, T4, V100, A100, etc. All the parameters in the examples and recipes below need to be further tuned to achieve the desired results based on the model, method, data and task at hand. **Note:** * To change the dataset in the commands below, pass the `dataset` arg. Current options for integrated datasets are `grammar_dataset`, `alpaca_dataset` and `samsum_dataset`. Additionally, we integrate the OpenAssistant/oasst1 dataset as an [example for a custom dataset](./examples/custom_dataset.py). A description of how to use your own dataset and how to add custom datasets can be found in [Dataset.md](./docs/Dataset.md#using-custom-datasets). For `grammar_dataset` and `alpaca_dataset`, please make sure you use the suggested instructions from [here](./docs/single_gpu.md#how-to-run-with-different-datasets) to set them up. * The default dataset and other LoRA config have been set to `samsum_dataset`. * Make sure to set the right path to the model in the [training config](src/llama_recipes/configs/training.py). * To save the loss and perplexity metrics for evaluation, pass `--save_metrics` to the finetuning script. The resulting file can be plotted using the [plot_metrics.py](./examples/plot_metrics.py) script: `python examples/plot_metrics.py --file_path path/to/metrics.json`
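For orientation, here is a sketch of what a single-GPU run with the options discussed above might look like, launched from Python. The `llama_recipes.finetuning` entry point, the PEFT/quantization flags and the model path are assumptions taken from the linked single-GPU docs rather than from this excerpt; `--dataset` and `--save_metrics` are the args described in the notes, and the plotting command is the one quoted above:

```python
# Hypothetical single-GPU LoRA finetuning run followed by metric plotting.
# The llama_recipes.finetuning module path, PEFT/quantization flags and model path are
# assumptions based on the linked single-GPU docs; adjust them to your setup.
import subprocess

subprocess.run(
    [
        "python", "-m", "llama_recipes.finetuning",
        "--use_peft", "--peft_method", "lora", "--quantization",
        "--model_name", "/path/to/Llama-2-7b-hf",   # assumed local HF checkpoint path
        "--dataset", "alpaca_dataset",              # one of the integrated datasets
        "--save_metrics",                           # writes loss/perplexity metrics
        "--output_dir", "path/to/save/PEFT/model",
    ],
    check=True,
)

# Plot the metrics file produced by the run above.
subprocess.run(
    ["python", "examples/plot_metrics.py", "--file_path", "path/to/metrics.json"],
    check=True,
)
```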
https://github.com/facebookresearch/llama-recipes
-1
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/facebookresearch/llama-recipes/main/README.md
[ [ "repository", "organization", "repository", "organized", "following", "way", ":", "[", "benchmark", "]", "(", "./benchmarks", ")", ":", "contains", "series", "benchmark", "script", "llama", "2", "model", "inference", "various", "backends", ".", "[", "configs", "]", "(", "src/llama_recipes/configs/", ")", ":", "contains", "configuration", "file", "peft", "method", ",", "fsdp", ",", "datasets", ".", "[", "doc", "]", "(", "docs/", ")", ":", "example", "recipe", "single", "multi-gpu", "fine-tuning", "recipe", ".", "[", "datasets", "]", "(", "src/llama_recipes/datasets/", ")", ":", "contains", "individual", "script", "dataset", "download", "process", ".", "note", ":", "use", "datasets", "compliance", "dataset", "'s", "underlying", "license", "(", "including", "limited", "non-commercial", "us", ")", "[", "demo_apps", "]", "(", "./demo_apps", ")", ":", "contains", "series", "llama2-powered", "apps", ",", "quickstart", "deployment", "ask", "llama", "question", "unstructured", "data", ",", "structured", "data", ",", "live", "data", ",", "video", "summary", ".", "[", "example", "]", "(", "./examples/", ")", ":", "contains", "example", "script", "finetuning", "inference", "llama", "2", "model", "well", "use", "safely", ".", "[", "inference", "]", "(", "src/llama_recipes/inference/", ")", ":", "includes", "module", "inference", "fine-tuned", "model", ".", "[", "model_checkpointing", "]", "(", "src/llama_recipes/model_checkpointing/", ")", ":", "contains", "fsdp", "checkpoint", "handler", ".", "[", "policy", "]", "(", "src/llama_recipes/policies/", ")", ":", "contains", "fsdp", "script", "provide", "different", "policy", ",", "mixed", "precision", ",", "transformer", "wrapping", "policy", "activation", "checkpointing", "along", "precision", "optimizer", "(", "used", "running", "fsdp", "pure", "bf16", "mode", ")", ".", "[", "utils", "]", "(", "src/llama_recipes/utils/", ")", ":", "utility", "file", ":", "-", "`", "train_utils.py", "`", "provides", "training/eval", "loop", "train", "utils", ".", "-", "`", "dataset_utils.py", "`", "get", "preprocessed", "datasets", ".", "-", "`", "config_utils.py", "`", "override", "configs", "received", "cli", ".", "-", "`", "fsdp_utils.py", "`", "provides", "fsdp", "wrapping", "policy", "peft", "method", ".", "-", "`", "memory_utils.py", "`", "context", "manager", "track", "different", "memory", "stats", "train", "loop", "." 
], [ "repository organization repository organized following way : [ benchmark ] ( ./benchmarks ) : contains series benchmark script llama 2 model inference various backends .", "[ configs ] ( src/llama_recipes/configs/ ) : contains configuration file peft method , fsdp , datasets .", "[ doc ] ( docs/ ) : example recipe single multi-gpu fine-tuning recipe .", "[ datasets ] ( src/llama_recipes/datasets/ ) : contains individual script dataset download process .", "note : use datasets compliance dataset 's underlying license ( including limited non-commercial us ) [ demo_apps ] ( ./demo_apps ) : contains series llama2-powered apps , quickstart deployment ask llama question unstructured data , structured data , live data , video summary .", "[ example ] ( ./examples/ ) : contains example script finetuning inference llama 2 model well use safely .", "[ inference ] ( src/llama_recipes/inference/ ) : includes module inference fine-tuned model .", "[ model_checkpointing ] ( src/llama_recipes/model_checkpointing/ ) : contains fsdp checkpoint handler .", "[ policy ] ( src/llama_recipes/policies/ ) : contains fsdp script provide different policy , mixed precision , transformer wrapping policy activation checkpointing along precision optimizer ( used running fsdp pure bf16 mode ) .", "[ utils ] ( src/llama_recipes/utils/ ) : utility file : - ` train_utils.py ` provides training/eval loop train utils .", "- ` dataset_utils.py ` get preprocessed datasets .", "- ` config_utils.py ` override configs received cli .", "- ` fsdp_utils.py ` provides fsdp wrapping policy peft method .", "- ` memory_utils.py ` context manager track different memory stats train loop ." ] ]
[ [ "repository", "organization", "repository", "organized", "following", "way", ":", "[", "benchmark", "]", "(", "./benchmarks", ")", ":", "contains", "series", "benchmark", "script", "llama", "2", "model", "inference", "various", "backends", ".", "[", "configs", "]", "(", "src/llama_recipes/configs/", ")", ":", "contains", "configuration", "file", "peft", "method", ",", "fsdp", ",", "datasets", ".", "[", "doc", "]", "(", "docs/", ")", ":", "example", "recipe", "single", "multi-gpu", "fine-tuning", "recipe", ".", "[", "datasets", "]", "(", "src/llama_recipes/datasets/", ")", ":", "contains", "individual", "script", "dataset", "download", "process", ".", "note", ":", "use", "datasets", "compliance", "dataset", "'s", "underlying", "license", "(", "including", "limited", "non-commercial", "us", ")", "[", "demo_apps", "]", "(", "./demo_apps", ")", ":", "contains", "series", "llama2-powered", "apps", ",", "quickstart", "deployment", "ask", "llama", "question", "unstructured", "data", ",", "structured", "data", ",", "live", "data", ",", "video", "summary", ".", "[", "example", "]", "(", "./examples/", ")", ":", "contains", "example", "script", "finetuning", "inference", "llama", "2", "model", "well", "use", "safely", ".", "[", "inference", "]", "(", "src/llama_recipes/inference/", ")", ":", "includes", "module", "inference", "fine-tuned", "model", ".", "[", "model_checkpointing", "]", "(", "src/llama_recipes/model_checkpointing/", ")", ":", "contains", "fsdp", "checkpoint", "handler", ".", "[", "policy", "]", "(", "src/llama_recipes/policies/", ")", ":", "contains", "fsdp", "script", "provide", "different", "policy", ",", "mixed", "precision", ",", "transformer", "wrapping", "policy", "activation", "checkpointing", "along", "precision", "optimizer", "(", "used", "running", "fsdp", "pure", "bf16", "mode", ")", ".", "[", "utils", "]", "(", "src/llama_recipes/utils/", ")", ":", "utility", "file", ":", "-", "`", "train_utils.py", "`", "provides", "training/eval", "loop", "train", "utils", ".", "-", "`", "dataset_utils.py", "`", "get", "preprocessed", "datasets", ".", "-", "`", "config_utils.py", "`", "override", "configs", "received", "cli", ".", "-", "`", "fsdp_utils.py", "`", "provides", "fsdp", "wrapping", "policy", "peft", "method", ".", "-", "`", "memory_utils.py", "`", "context", "manager", "track", "different", "memory", "stats", "train", "loop", "." 
], [ "repository organization repository organized following way : [ benchmark ] ( ./benchmarks ) : contains series benchmark script llama 2 model inference various backends .", "[ configs ] ( src/llama_recipes/configs/ ) : contains configuration file peft method , fsdp , datasets .", "[ doc ] ( docs/ ) : example recipe single multi-gpu fine-tuning recipe .", "[ datasets ] ( src/llama_recipes/datasets/ ) : contains individual script dataset download process .", "note : use datasets compliance dataset 's underlying license ( including limited non-commercial us ) [ demo_apps ] ( ./demo_apps ) : contains series llama2-powered apps , quickstart deployment ask llama question unstructured data , structured data , live data , video summary .", "[ example ] ( ./examples/ ) : contains example script finetuning inference llama 2 model well use safely .", "[ inference ] ( src/llama_recipes/inference/ ) : includes module inference fine-tuned model .", "[ model_checkpointing ] ( src/llama_recipes/model_checkpointing/ ) : contains fsdp checkpoint handler .", "[ policy ] ( src/llama_recipes/policies/ ) : contains fsdp script provide different policy , mixed precision , transformer wrapping policy activation checkpointing along precision optimizer ( used running fsdp pure bf16 mode ) .", "[ utils ] ( src/llama_recipes/utils/ ) : utility file : - ` train_utils.py ` provides training/eval loop train utils .", "- ` dataset_utils.py ` get preprocessed datasets .", "- ` config_utils.py ` override configs received cli .", "- ` fsdp_utils.py ` provides fsdp wrapping policy peft method .", "- ` memory_utils.py ` context manager track different memory stats train loop ." ] ]
Repository Organization This repository is organized in the following way: [benchmarks](./benchmarks): Contains a series of benchmark scripts for Llama 2 model inference on various backends. [configs](src/llama_recipes/configs/): Contains the configuration files for PEFT methods, FSDP, and datasets. [docs](docs/): Documentation for the single and multi-GPU fine-tuning recipes. [datasets](src/llama_recipes/datasets/): Contains individual scripts for each dataset to download and process. Note: Use of any of the datasets should be in compliance with the dataset's underlying licenses (including but not limited to non-commercial uses). [demo_apps](./demo_apps): Contains a series of Llama2-powered apps, from quickstart deployments to how to ask Llama questions about unstructured data, structured data, live data, and video summaries. [examples](./examples/): Contains example scripts for finetuning and inference of the Llama 2 model, as well as how to use them safely. [inference](src/llama_recipes/inference/): Includes modules for inference with the fine-tuned models. [model_checkpointing](src/llama_recipes/model_checkpointing/): Contains FSDP checkpoint handlers. [policies](src/llama_recipes/policies/): Contains FSDP scripts that provide different policies, such as mixed precision, transformer wrapping policy and activation checkpointing, along with the AnyPrecision optimizer (used for running FSDP in pure bf16 mode). [utils](src/llama_recipes/utils/): Utility files: - `train_utils.py` provides the training/eval loop and more train utils. - `dataset_utils.py` to get preprocessed datasets. - `config_utils.py` to override the configs received from the CLI. - `fsdp_utils.py` provides the FSDP wrapping policy for PEFT methods. - `memory_utils.py` context manager to track different memory stats in the train loop.
https://github.com/facebookresearch/llama-recipes
-1
[ "ai", "finetuning", "langchain", "llama", "llama2", "llm", "machine-learning", "python", "pytorch", "vllm" ]
https://raw.githubusercontent.com/microsoft/promptflow/main/README.md
[ [ "installation", "get", "started", "quickly", ",", "use", "pre-built", "development", "environment", ".", "*", "*", "click", "button", "*", "*", "open", "repo", "github", "codespaces", ",", "continue", "readme", "!", "[", "!", "[", "open", "github", "codespaces", "]", "(", "http", ":", "//github.com/codespaces/badge.svg", ")", "]", "(", "http", ":", "//codespaces.new/microsoft/promptflow", "?", "quickstart=1", ")", "want", "get", "started", "local", "environment", ",", "first", "install", "package", ":", "ensure", "python", "environment", ",", "`", "python=3.9", "`", "recommended", ".", "``", "`", "sh", "pip", "install", "promptflow", "promptflow-tools", "``", "`" ], [ "installation get started quickly , use pre-built development environment .", "* * click button * * open repo github codespaces , continue readme !", "[ !", "[ open github codespaces ] ( http : //github.com/codespaces/badge.svg ) ] ( http : //codespaces.new/microsoft/promptflow ? quickstart=1 ) want get started local environment , first install package : ensure python environment , ` python=3.9 ` recommended .", "`` ` sh pip install promptflow promptflow-tools `` `" ] ]
[ [ "installation", "get", "started", "quickly", ",", "use", "pre-built", "development", "environment", ".", "*", "*", "click", "button", "*", "*", "open", "repo", "github", "codespaces", ",", "continue", "readme", "!", "[", "!", "[", "open", "github", "codespaces", "]", "(", "http", ":", "//github.com/codespaces/badge.svg", ")", "]", "(", "http", ":", "//codespaces.new/microsoft/promptflow", "?", "quickstart=1", ")", "want", "get", "started", "local", "environment", ",", "first", "install", "package", ":", "ensure", "python", "environment", ",", "`", "python=3.9", "`", "recommended", ".", "``", "`", "sh", "pip", "install", "promptflow", "promptflow-tools", "``", "`" ], [ "installation get started quickly , use pre-built development environment .", "* * click button * * open repo github codespaces , continue readme !", "[ !", "[ open github codespaces ] ( http : //github.com/codespaces/badge.svg ) ] ( http : //codespaces.new/microsoft/promptflow ? quickstart=1 ) want get started local environment , first install package : ensure python environment , ` python=3.9 ` recommended .", "`` ` sh pip install promptflow promptflow-tools `` `" ] ]
Installation To get started quickly, you can use a pre-built development environment. **Click the button below** to open the repo in GitHub Codespaces, and then continue with the readme! [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/microsoft/promptflow?quickstart=1) If you want to get started in your local environment, first install the packages. Ensure you have a Python environment; `python=3.9` is recommended. ```sh pip install promptflow promptflow-tools ```
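Before moving on, it can help to confirm that both packages actually landed in the active environment. This sketch uses only the Python standard library, so it does not assume anything about promptflow's own API:

```python
# Verify that promptflow and promptflow-tools are installed in the current environment.
from importlib.metadata import PackageNotFoundError, version

for dist in ("promptflow", "promptflow-tools"):
    try:
        print(f"{dist}: {version(dist)}")
    except PackageNotFoundError:
        print(f"{dist}: not installed")
```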
https://github.com/microsoft/promptflow
0
[ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ]
https://raw.githubusercontent.com/microsoft/promptflow/main/README.md
[ [ "quick", "start", "⚡", "*", "*", "create", "chatbot", "prompt", "flow", "*", "*", "run", "command", "initiate", "prompt", "flow", "chat", "template", ",", "creates", "folder", "named", "`", "my_chatbot", "`", "generates", "required", "file", "within", ":", "``", "`", "sh", "pf", "flow", "init", "--", "flow", "./my_chatbot", "--", "type", "chat", "``", "`", "*", "*", "setup", "connection", "api", "key", "*", "*", "openai", "key", ",", "establish", "connection", "running", "command", ",", "using", "`", "openai.yaml", "`", "file", "`", "my_chatbot", "`", "folder", ",", "store", "openai", "key", "(", "override", "key", "name", "--", "set", "avoid", "yaml", "file", "change", ")", ":", "``", "`", "sh", "pf", "connection", "create", "--", "file", "./my_chatbot/openai.yaml", "--", "set", "api_key=", "<", "your_api_key", ">", "--", "name", "open_ai_connection", "``", "`", "azure", "openai", "key", ",", "establish", "connection", "running", "command", ",", "using", "`", "azure_openai.yaml", "`", "file", ":", "``", "`", "sh", "pf", "connection", "create", "--", "file", "./my_chatbot/azure_openai.yaml", "--", "set", "api_key=", "<", "your_api_key", ">", "api_base=", "<", "your_api_base", ">", "--", "name", "open_ai_connection", "``", "`", "*", "*", "chat", "flow", "*", "*", "`", "my_chatbot", "`", "folder", ",", "'s", "`", "flow.dag.yaml", "`", "file", "outline", "flow", ",", "including", "inputs/outputs", ",", "node", ",", "connection", ",", "llm", "model", ",", "etc", ">", "note", "`", "chat", "`", "node", ",", "'re", "using", "connection", "named", "`", "open_ai_connection", "`", "(", "specified", "`", "connection", "`", "field", ")", "`", "gpt-35-turbo", "`", "model", "(", "specified", "`", "deployment_name", "`", "field", ")", ".", "deployment_name", "filed", "specify", "openai", "model", ",", "azure", "openai", "deployment", "resource", ".", "interact", "chatbot", "running", ":", "(", "press", "`", "ctrl", "+", "c", "`", "end", "session", ")", "``", "`", "sh", "pf", "flow", "test", "--", "flow", "./my_chatbot", "--", "interactive", "``", "`", "*", "*", "core", "value", ":", "ensuring", "``", "high", "quality", "”", "prototype", "production", "*", "*", "explore", "[", "*", "*", "15-minute", "tutorial", "*", "*", "]", "(", "examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md", ")", "guide", "prompt", "tuning", "➡", "batch", "testing", "➡", "evaluation", ",", "designed", "ensure", "high", "quality", "ready", "production", ".", "next", "step", "!", "continue", "*", "*", "tutorial", "*", "*", "👇", "section", "delve", "deeper", "prompt", "flow", "." 
], [ "quick start ⚡ * * create chatbot prompt flow * * run command initiate prompt flow chat template , creates folder named ` my_chatbot ` generates required file within : `` ` sh pf flow init -- flow ./my_chatbot -- type chat `` ` * * setup connection api key * * openai key , establish connection running command , using ` openai.yaml ` file ` my_chatbot ` folder , store openai key ( override key name -- set avoid yaml file change ) : `` ` sh pf connection create -- file ./my_chatbot/openai.yaml -- set api_key= < your_api_key > -- name open_ai_connection `` ` azure openai key , establish connection running command , using ` azure_openai.yaml ` file : `` ` sh pf connection create -- file ./my_chatbot/azure_openai.yaml -- set api_key= < your_api_key > api_base= < your_api_base > -- name open_ai_connection `` ` * * chat flow * * ` my_chatbot ` folder , 's ` flow.dag.yaml ` file outline flow , including inputs/outputs , node , connection , llm model , etc > note ` chat ` node , 're using connection named ` open_ai_connection ` ( specified ` connection ` field ) ` gpt-35-turbo ` model ( specified ` deployment_name ` field ) .", "deployment_name filed specify openai model , azure openai deployment resource .", "interact chatbot running : ( press ` ctrl + c ` end session ) `` ` sh pf flow test -- flow ./my_chatbot -- interactive `` ` * * core value : ensuring `` high quality ” prototype production * * explore [ * * 15-minute tutorial * * ] ( examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md ) guide prompt tuning ➡ batch testing ➡ evaluation , designed ensure high quality ready production .", "next step !", "continue * * tutorial * * 👇 section delve deeper prompt flow ." ] ]
[ [ "quick", "start", "⚡", "*", "*", "create", "chatbot", "prompt", "flow", "*", "*", "run", "command", "initiate", "prompt", "flow", "chat", "template", ",", "creates", "folder", "named", "`", "my_chatbot", "`", "generates", "required", "file", "within", ":", "``", "`", "sh", "pf", "flow", "init", "--", "flow", "./my_chatbot", "--", "type", "chat", "``", "`", "*", "*", "setup", "connection", "api", "key", "*", "*", "openai", "key", ",", "establish", "connection", "running", "command", ",", "using", "`", "openai.yaml", "`", "file", "`", "my_chatbot", "`", "folder", ",", "store", "openai", "key", "(", "override", "key", "name", "--", "set", "avoid", "yaml", "file", "change", ")", ":", "``", "`", "sh", "pf", "connection", "create", "--", "file", "./my_chatbot/openai.yaml", "--", "set", "api_key=", "<", "your_api_key", ">", "--", "name", "open_ai_connection", "``", "`", "azure", "openai", "key", ",", "establish", "connection", "running", "command", ",", "using", "`", "azure_openai.yaml", "`", "file", ":", "``", "`", "sh", "pf", "connection", "create", "--", "file", "./my_chatbot/azure_openai.yaml", "--", "set", "api_key=", "<", "your_api_key", ">", "api_base=", "<", "your_api_base", ">", "--", "name", "open_ai_connection", "``", "`", "*", "*", "chat", "flow", "*", "*", "`", "my_chatbot", "`", "folder", ",", "'s", "`", "flow.dag.yaml", "`", "file", "outline", "flow", ",", "including", "inputs/outputs", ",", "node", ",", "connection", ",", "llm", "model", ",", "etc", ">", "note", "`", "chat", "`", "node", ",", "'re", "using", "connection", "named", "`", "open_ai_connection", "`", "(", "specified", "`", "connection", "`", "field", ")", "`", "gpt-35-turbo", "`", "model", "(", "specified", "`", "deployment_name", "`", "field", ")", ".", "deployment_name", "filed", "specify", "openai", "model", ",", "azure", "openai", "deployment", "resource", ".", "interact", "chatbot", "running", ":", "(", "press", "`", "ctrl", "+", "c", "`", "end", "session", ")", "``", "`", "sh", "pf", "flow", "test", "--", "flow", "./my_chatbot", "--", "interactive", "``", "`", "*", "*", "core", "value", ":", "ensuring", "``", "high", "quality", "”", "prototype", "production", "*", "*", "explore", "[", "*", "*", "15-minute", "tutorial", "*", "*", "]", "(", "examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md", ")", "guide", "prompt", "tuning", "➡", "batch", "testing", "➡", "evaluation", ",", "designed", "ensure", "high", "quality", "ready", "production", ".", "next", "step", "!", "continue", "*", "*", "tutorial", "*", "*", "👇", "section", "delve", "deeper", "prompt", "flow", "." 
], [ "quick start ⚡ * * create chatbot prompt flow * * run command initiate prompt flow chat template , creates folder named ` my_chatbot ` generates required file within : `` ` sh pf flow init -- flow ./my_chatbot -- type chat `` ` * * setup connection api key * * openai key , establish connection running command , using ` openai.yaml ` file ` my_chatbot ` folder , store openai key ( override key name -- set avoid yaml file change ) : `` ` sh pf connection create -- file ./my_chatbot/openai.yaml -- set api_key= < your_api_key > -- name open_ai_connection `` ` azure openai key , establish connection running command , using ` azure_openai.yaml ` file : `` ` sh pf connection create -- file ./my_chatbot/azure_openai.yaml -- set api_key= < your_api_key > api_base= < your_api_base > -- name open_ai_connection `` ` * * chat flow * * ` my_chatbot ` folder , 's ` flow.dag.yaml ` file outline flow , including inputs/outputs , node , connection , llm model , etc > note ` chat ` node , 're using connection named ` open_ai_connection ` ( specified ` connection ` field ) ` gpt-35-turbo ` model ( specified ` deployment_name ` field ) .", "deployment_name filed specify openai model , azure openai deployment resource .", "interact chatbot running : ( press ` ctrl + c ` end session ) `` ` sh pf flow test -- flow ./my_chatbot -- interactive `` ` * * core value : ensuring `` high quality ” prototype production * * explore [ * * 15-minute tutorial * * ] ( examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md ) guide prompt tuning ➡ batch testing ➡ evaluation , designed ensure high quality ready production .", "next step !", "continue * * tutorial * * 👇 section delve deeper prompt flow ." ] ]
Quick Start ⚡ **Create a chatbot with prompt flow** Run the following command to initiate a prompt flow from a chat template; it creates a folder named `my_chatbot` and generates the required files within it: ```sh pf flow init --flow ./my_chatbot --type chat ``` **Setup a connection for your API key** For an OpenAI key, establish a connection by running the following command, using the `openai.yaml` file in the `my_chatbot` folder, which stores your OpenAI key (override keys and name with `--set` to avoid YAML file changes): ```sh pf connection create --file ./my_chatbot/openai.yaml --set api_key=<your_api_key> --name open_ai_connection ``` For an Azure OpenAI key, establish the connection by running the following command, using the `azure_openai.yaml` file: ```sh pf connection create --file ./my_chatbot/azure_openai.yaml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection ``` **Chat with your flow** In the `my_chatbot` folder, there's a `flow.dag.yaml` file that outlines the flow, including inputs/outputs, nodes, connections, the LLM model, etc. > Note that in the `chat` node, we're using a connection named `open_ai_connection` (specified in the `connection` field) and the `gpt-35-turbo` model (specified in the `deployment_name` field). The `deployment_name` field specifies the OpenAI model, or the Azure OpenAI deployment resource. Interact with your chatbot by running (press `Ctrl + C` to end the session): ```sh pf flow test --flow ./my_chatbot --interactive ``` **Core value: ensuring "High Quality" from prototype to production** Explore our [**15-minute tutorial**](examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md) that guides you through prompt tuning ➡ batch testing ➡ evaluation, all designed to ensure high quality, ready for production. Next Step! Continue with the **Tutorial** 👇 section to delve deeper into prompt flow.
https://github.com/microsoft/promptflow
-1
[ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ]
https://raw.githubusercontent.com/microsoft/promptflow/main/README.md
[ [ "v", "code", "extension", "also", "offer", "v", "code", "extension", "(", "flow", "designer", ")", "interactive", "flow", "development", "experience", "ui", ".", "<", "img", "src=", "''", "examples/tutorials/quick-start/media/vsc.png", "''", "alt=", "''", "vsc", "''", "width=", "''", "1000", "''", "/", ">", "install", "<", "href=", "''", "http", ":", "//marketplace.visualstudio.com/items", "?", "itemname=prompt-flow.prompt-flow", "''", ">", "visualstudio", "marketplace", "<", "/a", ">", "." ], [ "v code extension also offer v code extension ( flow designer ) interactive flow development experience ui .", "< img src= '' examples/tutorials/quick-start/media/vsc.png '' alt= '' vsc '' width= '' 1000 '' / > install < href= '' http : //marketplace.visualstudio.com/items ? itemname=prompt-flow.prompt-flow '' > visualstudio marketplace < /a > ." ] ]
[ [ "v", "code", "extension", "also", "offer", "v", "code", "extension", "(", "flow", "designer", ")", "interactive", "flow", "development", "experience", "ui", ".", "<", "img", "src=", "''", "examples/tutorials/quick-start/media/vsc.png", "''", "alt=", "''", "vsc", "''", "width=", "''", "1000", "''", "/", ">", "install", "<", "href=", "''", "http", ":", "//marketplace.visualstudio.com/items", "?", "itemname=prompt-flow.prompt-flow", "''", ">", "visualstudio", "marketplace", "<", "/a", ">", "." ], [ "v code extension also offer v code extension ( flow designer ) interactive flow development experience ui .", "< img src= '' examples/tutorials/quick-start/media/vsc.png '' alt= '' vsc '' width= '' 1000 '' / > install < href= '' http : //marketplace.visualstudio.com/items ? itemname=prompt-flow.prompt-flow '' > visualstudio marketplace < /a > ." ] ]
VS Code Extension We also offer a VS Code extension (a flow designer) for an interactive flow development experience with a UI. <img src="examples/tutorials/quick-start/media/vsc.png" alt="vsc" width="1000"/> You can install it from the <a href="https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow">Visual Studio Marketplace</a>.
https://github.com/microsoft/promptflow
-1
[ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ]
https://raw.githubusercontent.com/microsoft/promptflow/main/README.md
[ [ "deep", "delve", "flow", "development", "[", "getting", "started", "prompt", "flow", "]", "(", "./docs/cloud/azureai/quick-start/index.md", ")", ":", "step", "step", "guidance", "invoke", "first", "flow", "run", "." ], [ "deep delve flow development [ getting started prompt flow ] ( ./docs/cloud/azureai/quick-start/index.md ) : step step guidance invoke first flow run ." ] ]
[ [ "deep", "delve", "flow", "development", "[", "getting", "started", "prompt", "flow", "]", "(", "./docs/cloud/azureai/quick-start/index.md", ")", ":", "step", "step", "guidance", "invoke", "first", "flow", "run", "." ], [ "deep delve flow development [ getting started prompt flow ] ( ./docs/cloud/azureai/quick-start/index.md ) : step step guidance invoke first flow run ." ] ]
Deep delve into flow development [Getting started with prompt flow](./docs/cloud/azureai/quick-start/index.md): Step-by-step guidance to invoke your first flow run.
https://github.com/microsoft/promptflow
-1
[ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ]
https://raw.githubusercontent.com/microsoft/promptflow/main/README.md
[ [ "learn", "use", "case", "[", "tutorial", ":", "chat", "pdf", "]", "(", "http", ":", "//github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md", ")", ":", "end-to-end", "tutorial", "build", "high", "quality", "chat", "application", "prompt", "flow", ",", "including", "flow", "development", "evaluation", "metric", ".", ">", "example", "found", "[", "]", "(", "http", ":", "//microsoft.github.io/promptflow/tutorials/index.html", "#", "sample", ")", ".", "welcome", "contribution", "new", "use", "case", "!" ], [ "learn use case [ tutorial : chat pdf ] ( http : //github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md ) : end-to-end tutorial build high quality chat application prompt flow , including flow development evaluation metric .", "> example found [ ] ( http : //microsoft.github.io/promptflow/tutorials/index.html # sample ) .", "welcome contribution new use case !" ] ]
[ [ "learn", "use", "case", "[", "tutorial", ":", "chat", "pdf", "]", "(", "http", ":", "//github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md", ")", ":", "end-to-end", "tutorial", "build", "high", "quality", "chat", "application", "prompt", "flow", ",", "including", "flow", "development", "evaluation", "metric", ".", ">", "example", "found", "[", "]", "(", "http", ":", "//microsoft.github.io/promptflow/tutorials/index.html", "#", "sample", ")", ".", "welcome", "contribution", "new", "use", "case", "!" ], [ "learn use case [ tutorial : chat pdf ] ( http : //github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md ) : end-to-end tutorial build high quality chat application prompt flow , including flow development evaluation metric .", "> example found [ ] ( http : //microsoft.github.io/promptflow/tutorials/index.html # sample ) .", "welcome contribution new use case !" ] ]
Learn from use cases [Tutorial: Chat with PDF](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md): An end-to-end tutorial on how to build a high quality chat application with prompt flow, including flow development and evaluation with metrics. > More examples can be found [here](https://microsoft.github.io/promptflow/tutorials/index.html#samples). We welcome contributions of new use cases!
https://github.com/microsoft/promptflow
2
[ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ]
https://raw.githubusercontent.com/microsoft/promptflow/main/README.md
[ [ "setup", "contributor", "'re", "interested", "contributing", ",", "please", "start", "dev", "setup", "guide", ":", "[", "dev_setup.md", "]", "(", "./docs/dev/dev_setup.md", ")", ".", "next", "step", "!", "continue", "*", "*", "contributing", "*", "*", "👇", "section", "contribute", "prompt", "flow", "." ], [ "setup contributor 're interested contributing , please start dev setup guide : [ dev_setup.md ] ( ./docs/dev/dev_setup.md ) .", "next step !", "continue * * contributing * * 👇 section contribute prompt flow ." ] ]
[ [ "setup", "contributor", "'re", "interested", "contributing", ",", "please", "start", "dev", "setup", "guide", ":", "[", "dev_setup.md", "]", "(", "./docs/dev/dev_setup.md", ")", ".", "next", "step", "!", "continue", "*", "*", "contributing", "*", "*", "👇", "section", "contribute", "prompt", "flow", "." ], [ "setup contributor 're interested contributing , please start dev setup guide : [ dev_setup.md ] ( ./docs/dev/dev_setup.md ) .", "next step !", "continue * * contributing * * 👇 section contribute prompt flow ." ] ]
Setup for contributors If you're interested in contributing, please start with our dev setup guide: [dev_setup.md](./docs/dev/dev_setup.md). Next Step! Continue with the **Contributing** 👇 section to contribute to prompt flow.
https://github.com/microsoft/promptflow
-1
[ "ai", "ai-application-development", "ai-applications", "chatgpt", "gpt", "llm", "prompt", "prompt-engineering" ]
https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md
[ [ "installation", "``", "`", "shell", "pip", "install", "shell-gpt", "``", "`", "'ll", "need", "openai", "api", "key", ",", "generate", "one", "[", "]", "(", "http", ":", "//beta.openai.com/account/api-keys", ")", ".", "prompted", "key", "stored", "`", "~/.config/shell_gpt/.sgptrc", "`", "." ], [ "installation `` ` shell pip install shell-gpt `` ` 'll need openai api key , generate one [ ] ( http : //beta.openai.com/account/api-keys ) .", "prompted key stored ` ~/.config/shell_gpt/.sgptrc ` ." ] ]
[ [ "installation", "``", "`", "shell", "pip", "install", "shell-gpt", "``", "`", "'ll", "need", "openai", "api", "key", ",", "generate", "one", "[", "]", "(", "http", ":", "//beta.openai.com/account/api-keys", ")", ".", "prompted", "key", "stored", "`", "~/.config/shell_gpt/.sgptrc", "`", "." ], [ "installation `` ` shell pip install shell-gpt `` ` 'll need openai api key , generate one [ ] ( http : //beta.openai.com/account/api-keys ) .", "prompted key stored ` ~/.config/shell_gpt/.sgptrc ` ." ] ]
Installation ```shell pip install shell-gpt ``` You'll need an OpenAI API key; you can generate one [here](https://beta.openai.com/account/api-keys). You will be prompted for your key, which will then be stored in `~/.config/shell_gpt/.sgptrc`.
https://github.com/TheR1D/shell_gpt
0
[ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ]
https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md
[ [ "shell", "integration", "*", "*", "handy", "feature", "*", "*", ",", "allows", "use", "`", "sgpt", "`", "shell", "completion", "directly", "terminal", ",", "without", "need", "type", "`", "sgpt", "`", "prompt", "argument", ".", "shell", "integration", "enables", "use", "shellgpt", "hotkeys", "terminal", ",", "supported", "bash", "zsh", "shell", ".", "feature", "put", "`", "sgpt", "`", "completion", "directly", "terminal", "buffer", "(", "input", "line", ")", ",", "allowing", "immediate", "editing", "suggested", "command", ".", "http", ":", "//github.com/ther1d/shell_gpt/assets/16740832/bead0dab-0dd9-436d-88b7-6abfb2c556c1", "install", "shell", "integration", ",", "run", "`", "sgpt", "--", "install-integration", "`", "restart", "terminal", "apply", "change", ".", "add", "line", "`", ".bashrc", "`", "`", ".zshrc", "`", "file", ".", ",", "use", "`", "ctrl+l", "`", "(", "default", ")", "invoke", "shellgpt", ".", "press", "`", "ctrl+l", "`", "replace", "current", "input", "line", "(", "buffer", ")", "suggested", "command", ".", "edit", "press", "`", "enter", "`", "execute", "." ], [ "shell integration * * handy feature * * , allows use ` sgpt ` shell completion directly terminal , without need type ` sgpt ` prompt argument .", "shell integration enables use shellgpt hotkeys terminal , supported bash zsh shell .", "feature put ` sgpt ` completion directly terminal buffer ( input line ) , allowing immediate editing suggested command .", "http : //github.com/ther1d/shell_gpt/assets/16740832/bead0dab-0dd9-436d-88b7-6abfb2c556c1 install shell integration , run ` sgpt -- install-integration ` restart terminal apply change .", "add line ` .bashrc ` ` .zshrc ` file .", ", use ` ctrl+l ` ( default ) invoke shellgpt .", "press ` ctrl+l ` replace current input line ( buffer ) suggested command .", "edit press ` enter ` execute ." ] ]
[ [ "shell", "integration", "*", "*", "handy", "feature", "*", "*", ",", "allows", "use", "`", "sgpt", "`", "shell", "completion", "directly", "terminal", ",", "without", "need", "type", "`", "sgpt", "`", "prompt", "argument", ".", "shell", "integration", "enables", "use", "shellgpt", "hotkeys", "terminal", ",", "supported", "bash", "zsh", "shell", ".", "feature", "put", "`", "sgpt", "`", "completion", "directly", "terminal", "buffer", "(", "input", "line", ")", ",", "allowing", "immediate", "editing", "suggested", "command", ".", "http", ":", "//github.com/ther1d/shell_gpt/assets/16740832/bead0dab-0dd9-436d-88b7-6abfb2c556c1", "install", "shell", "integration", ",", "run", "`", "sgpt", "--", "install-integration", "`", "restart", "terminal", "apply", "change", ".", "add", "line", "`", ".bashrc", "`", "`", ".zshrc", "`", "file", ".", ",", "use", "`", "ctrl+l", "`", "(", "default", ")", "invoke", "shellgpt", ".", "press", "`", "ctrl+l", "`", "replace", "current", "input", "line", "(", "buffer", ")", "suggested", "command", ".", "edit", "press", "`", "enter", "`", "execute", "." ], [ "shell integration * * handy feature * * , allows use ` sgpt ` shell completion directly terminal , without need type ` sgpt ` prompt argument .", "shell integration enables use shellgpt hotkeys terminal , supported bash zsh shell .", "feature put ` sgpt ` completion directly terminal buffer ( input line ) , allowing immediate editing suggested command .", "http : //github.com/ther1d/shell_gpt/assets/16740832/bead0dab-0dd9-436d-88b7-6abfb2c556c1 install shell integration , run ` sgpt -- install-integration ` restart terminal apply change .", "add line ` .bashrc ` ` .zshrc ` file .", ", use ` ctrl+l ` ( default ) invoke shellgpt .", "press ` ctrl+l ` replace current input line ( buffer ) suggested command .", "edit press ` enter ` execute ." ] ]
Shell integration This is a **very handy feature**, which allows you to use `sgpt` shell completions directly in your terminal, without the need to type `sgpt` with a prompt and arguments. Shell integration enables the use of ShellGPT with hotkeys in your terminal, and is supported by both Bash and ZSH shells. This feature puts `sgpt` completions directly into the terminal buffer (input line), allowing for immediate editing of suggested commands. https://github.com/TheR1D/shell_gpt/assets/16740832/bead0dab-0dd9-436d-88b7-6abfb2c556c1 To install shell integration, run `sgpt --install-integration` and restart your terminal to apply the changes. This will add a few lines to your `.bashrc` or `.zshrc` file. After that, you can use `Ctrl+l` (by default) to invoke ShellGPT. When you press `Ctrl+l`, it will replace your current input line (buffer) with the suggested command. You can then edit it and just press `Enter` to execute it.
https://github.com/TheR1D/shell_gpt
-1
[ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ]
https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md
[ [ "function", "calling", "[", "function", "call", "]", "(", "http", ":", "//platform.openai.com/docs/guides/function-calling", ")", "powerful", "feature", "openai", "provides", ".", "allows", "llm", "execute", "function", "system", ",", "used", "accomplish", "variety", "task", ".", "install", "[", "default", "function", "]", "(", "http", ":", "//github.com/ther1d/shell_gpt/tree/main/sgpt/default_functions/", ")", "run", ":", "``", "`", "shell", "sgpt", "--", "install-functions", "``", "`", "shellgpt", "convenient", "way", "define", "function", "use", ".", "order", "create", "custom", "function", ",", "navigate", "`", "~/.config/shell_gpt/functions", "`", "create", "new", ".py", "file", "function", "name", ".", "inside", "file", ",", "define", "function", "using", "following", "syntax", ":", "``", "`", "python" ], [ "function calling [ function call ] ( http : //platform.openai.com/docs/guides/function-calling ) powerful feature openai provides .", "allows llm execute function system , used accomplish variety task .", "install [ default function ] ( http : //github.com/ther1d/shell_gpt/tree/main/sgpt/default_functions/ ) run : `` ` shell sgpt -- install-functions `` ` shellgpt convenient way define function use .", "order create custom function , navigate ` ~/.config/shell_gpt/functions ` create new .py file function name .", "inside file , define function using following syntax : `` ` python" ] ]
[ [ "function", "calling", "[", "function", "call", "]", "(", "http", ":", "//platform.openai.com/docs/guides/function-calling", ")", "powerful", "feature", "openai", "provides", ".", "allows", "llm", "execute", "function", "system", ",", "used", "accomplish", "variety", "task", ".", "install", "[", "default", "function", "]", "(", "http", ":", "//github.com/ther1d/shell_gpt/tree/main/sgpt/default_functions/", ")", "run", ":", "``", "`", "shell", "sgpt", "--", "install-functions", "``", "`", "shellgpt", "convenient", "way", "define", "function", "use", ".", "order", "create", "custom", "function", ",", "navigate", "`", "~/.config/shell_gpt/functions", "`", "create", "new", ".py", "file", "function", "name", ".", "inside", "file", ",", "define", "function", "using", "following", "syntax", ":", "``", "`", "python" ], [ "function calling [ function call ] ( http : //platform.openai.com/docs/guides/function-calling ) powerful feature openai provides .", "allows llm execute function system , used accomplish variety task .", "install [ default function ] ( http : //github.com/ther1d/shell_gpt/tree/main/sgpt/default_functions/ ) run : `` ` shell sgpt -- install-functions `` ` shellgpt convenient way define function use .", "order create custom function , navigate ` ~/.config/shell_gpt/functions ` create new .py file function name .", "inside file , define function using following syntax : `` ` python" ] ]
Function calling [Function calls](https://platform.openai.com/docs/guides/function-calling) are a powerful feature OpenAI provides. They allow the LLM to execute functions on your system, which can be used to accomplish a variety of tasks. To install the [default functions](https://github.com/TheR1D/shell_gpt/tree/main/sgpt/default_functions/), run: ```shell sgpt --install-functions ``` ShellGPT has a convenient way to define functions and use them. To create your own custom function, navigate to `~/.config/shell_gpt/functions` and create a new `.py` file named after the function. Inside this file, you can define your function using the following syntax: ```python
https://github.com/TheR1D/shell_gpt
-1
[ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ]
https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md
[ [ "-", ">", "test.json", "``", "`", "note", "reason", "function", "(", "execute_shell_command", ")", "return", "error", ",", "llm", "might", "try", "accomplish", "task", "based", "output", ".", "let", "'s", "say", "n't", "installed", "`", "jq", "`", "system", ",", "ask", "llm", "parse", "json", "file", ":", "``", "`", "shell", "sgpt", "``", "parse", "/tmp/test.json", "file", "using", "jq", "return", "email", "value", "''" ], [ "- > test.json `` ` note reason function ( execute_shell_command ) return error , llm might try accomplish task based output .", "let 's say n't installed ` jq ` system , ask llm parse json file : `` ` shell sgpt `` parse /tmp/test.json file using jq return email value ''" ] ]
[ [ "-", ">", "test.json", "``", "`", "note", "reason", "function", "(", "execute_shell_command", ")", "return", "error", ",", "llm", "might", "try", "accomplish", "task", "based", "output", ".", "let", "'s", "say", "n't", "installed", "`", "jq", "`", "system", ",", "ask", "llm", "parse", "json", "file", ":", "``", "`", "shell", "sgpt", "``", "parse", "/tmp/test.json", "file", "using", "jq", "return", "email", "value", "''" ], [ "- > test.json `` ` note reason function ( execute_shell_command ) return error , llm might try accomplish task based output .", "let 's say n't installed ` jq ` system , ask llm parse json file : `` ` shell sgpt `` parse /tmp/test.json file using jq return email value ''" ] ]
-> test.json ``` Note that if, for some reason, the function (execute_shell_command) returns an error, the LLM might still try to accomplish the task based on the output. Let's say we don't have `jq` installed on our system, and we ask the LLM to parse the JSON file: ```shell sgpt "parse /tmp/test.json file using jq and return only email value"
https://github.com/TheR1D/shell_gpt
-1
[ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ]
https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md
[ [ "-", ">", "appears", "jq", "installed", "system", ".", "let", "try", "install", "using", "brew", "." ], [ "- > appears jq installed system .", "let try install using brew ." ] ]
[ [ "-", ">", "appears", "jq", "installed", "system", ".", "let", "try", "install", "using", "brew", "." ], [ "- > appears jq installed system .", "let try install using brew ." ] ]
-> It appears that jq is not installed on the system. Let me try to install it using brew.
https://github.com/TheR1D/shell_gpt
-1
[ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ]
https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md
[ [ "-", ">", "@", "functioncall", "execute_shell_command", "(", "shell_command=", "''", "brew", "install", "jq", "''", ")" ], [ "- > @ functioncall execute_shell_command ( shell_command= '' brew install jq '' )" ] ]
[ [ "-", ">", "@", "functioncall", "execute_shell_command", "(", "shell_command=", "''", "brew", "install", "jq", "''", ")" ], [ "- > @ functioncall execute_shell_command ( shell_command= '' brew install jq '' )" ] ]
-> @FunctionCall execute_shell_command(shell_command="brew install jq")
https://github.com/TheR1D/shell_gpt
-1
[ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ]
https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md
[ [ "-", ">", "jq", "successfully", "installed", ".", "let", "try", "parse", "file", "." ], [ "- > jq successfully installed .", "let try parse file ." ] ]
[ [ "-", ">", "jq", "successfully", "installed", ".", "let", "try", "parse", "file", "." ], [ "- > jq successfully installed .", "let try parse file ." ] ]
-> jq has been successfully installed. Let me try to parse the file again.
https://github.com/TheR1D/shell_gpt
-1
[ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ]
https://raw.githubusercontent.com/TheR1D/shell_gpt/main/README.md
[ [ "runtime", "configuration", "file", "setup", "parameter", "runtime", "configuration", "file", "`", "~/.config/shell_gpt/.sgptrc", "`", ":", "``", "`", "text" ], [ "runtime configuration file setup parameter runtime configuration file ` ~/.config/shell_gpt/.sgptrc ` : `` ` text" ] ]
[ [ "runtime", "configuration", "file", "setup", "parameter", "runtime", "configuration", "file", "`", "~/.config/shell_gpt/.sgptrc", "`", ":", "``", "`", "text" ], [ "runtime configuration file setup parameter runtime configuration file ` ~/.config/shell_gpt/.sgptrc ` : `` ` text" ] ]
Runtime configuration file You can set up some parameters in the runtime configuration file `~/.config/shell_gpt/.sgptrc`: ```text
https://github.com/TheR1D/shell_gpt
-1
[ "chatgpt", "cheat-sheet", "cli", "commands", "gpt-3", "gpt-4", "linux", "llm", "openai", "productivity", "python", "shell", "terminal" ]
https://raw.githubusercontent.com/continuedev/continue/main/README.md
[ [ "getting", "started" ], [ "getting started" ] ]
[ [ "getting", "started" ], [ "getting started" ] ]
Getting Started
https://github.com/continuedev/continue
-1
[ "ai", "chatgpt", "copilot", "developer-tools", "intellij", "jetbrains", "llm", "open-source", "openai", "pycharm", "software-development", "visual-studio-code", "vscode" ]
https://raw.githubusercontent.com/continuedev/continue/main/README.md
[ [ "download", "[", "v", "code", "]", "(", "http", ":", "//marketplace.visualstudio.com/items", "?", "itemname=continue.continue", ")", "[", "jetbrains", "]", "(", "http", ":", "//plugins.jetbrains.com/plugin/22707-continue-extension", ")", "try", "continue", "free", "using", "proxy", "server", "securely", "make", "call", "api", "key", "model", "like", "gpt-4", ",", "gemini", "pro", ",", "phind", "codellama", "via", "openai", ",", "google", ",", "together", "respectively", ".", "'re", "ready", "use", "api", "key", "different", "model", "/", "provider", ",", "press", "`", "+", "`", "button", "bottom", "left", "add", "new", "model", "`", "config.json", "`", ".", "learn", "model", "provider", "[", "]", "(", "http", ":", "//continue.dev/docs/model-setup/overview", ")", "." ], [ "download [ v code ] ( http : //marketplace.visualstudio.com/items ? itemname=continue.continue ) [ jetbrains ] ( http : //plugins.jetbrains.com/plugin/22707-continue-extension ) try continue free using proxy server securely make call api key model like gpt-4 , gemini pro , phind codellama via openai , google , together respectively .", "'re ready use api key different model / provider , press ` + ` button bottom left add new model ` config.json ` .", "learn model provider [ ] ( http : //continue.dev/docs/model-setup/overview ) ." ] ]
[ [ "download", "[", "v", "code", "]", "(", "http", ":", "//marketplace.visualstudio.com/items", "?", "itemname=continue.continue", ")", "[", "jetbrains", "]", "(", "http", ":", "//plugins.jetbrains.com/plugin/22707-continue-extension", ")", "try", "continue", "free", "using", "proxy", "server", "securely", "make", "call", "api", "key", "model", "like", "gpt-4", ",", "gemini", "pro", ",", "phind", "codellama", "via", "openai", ",", "google", ",", "together", "respectively", ".", "'re", "ready", "use", "api", "key", "different", "model", "/", "provider", ",", "press", "`", "+", "`", "button", "bottom", "left", "add", "new", "model", "`", "config.json", "`", ".", "learn", "model", "provider", "[", "]", "(", "http", ":", "//continue.dev/docs/model-setup/overview", ")", "." ], [ "download [ v code ] ( http : //marketplace.visualstudio.com/items ? itemname=continue.continue ) [ jetbrains ] ( http : //plugins.jetbrains.com/plugin/22707-continue-extension ) try continue free using proxy server securely make call api key model like gpt-4 , gemini pro , phind codellama via openai , google , together respectively .", "'re ready use api key different model / provider , press ` + ` button bottom left add new model ` config.json ` .", "learn model provider [ ] ( http : //continue.dev/docs/model-setup/overview ) ." ] ]
Download for [VS Code](https://marketplace.visualstudio.com/items?itemName=Continue.continue) and [JetBrains](https://plugins.jetbrains.com/plugin/22707-continue-extension) You can try out Continue for free using a proxy server that securely makes calls with our API key to models like GPT-4, Gemini Pro, and Phind CodeLlama via OpenAI, Google, and Together, respectively. Once you're ready to use your own API key or a different model/provider, press the `+` button in the bottom left to add a new model to your `config.json`. Learn more about the models and providers [here](https://continue.dev/docs/model-setup/overview).
https://github.com/continuedev/continue
-1
[ "ai", "chatgpt", "copilot", "developer-tools", "intellij", "jetbrains", "llm", "open-source", "openai", "pycharm", "software-development", "visual-studio-code", "vscode" ]