{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b9583f02",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/Users/xiongjiedai/llama.cpp\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/xiongjiedai/Library/Python/3.9/lib/python/site-packages/IPython/core/magics/osm.py:393: UserWarning: using bookmarks requires you to install the `pickleshare` library.\n",
      "  bkms = self.shell.db.get('bookmarks', {})\n",
      "/Users/xiongjiedai/Library/Python/3.9/lib/python/site-packages/IPython/core/magics/osm.py:417: UserWarning: using dhist requires you to install the `pickleshare` library.\n",
      "  self.shell.db['dhist'] = compress_dhist(dhist)[-100:]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Already up to date.\n"
     ]
    }
   ],
   "source": [
    "!git clone https://github.com/ggerganov/llama.cpp # Get the code for the first time use\n",
    "%cd ~/llama.cpp\n",
    "!git pull"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "4f651103-a6c8-4b7d-9f0f-be0553b22acf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ggml-vocab-aquila.gguf             ggml-vocab-gpt2.gguf\n",
      "ggml-vocab-baichuan.gguf           ggml-vocab-llama-bpe.gguf\n",
      "ggml-vocab-bert-bge.gguf           ggml-vocab-llama-bpe.gguf.inp\n",
      "ggml-vocab-bert-bge.gguf.inp       ggml-vocab-llama-bpe.gguf.out\n",
      "ggml-vocab-bert-bge.gguf.out       ggml-vocab-llama-spm.gguf\n",
      "ggml-vocab-command-r.gguf          ggml-vocab-llama-spm.gguf.inp\n",
      "ggml-vocab-command-r.gguf.inp      ggml-vocab-llama-spm.gguf.out\n",
      "ggml-vocab-command-r.gguf.out      ggml-vocab-mpt.gguf\n",
      "ggml-vocab-deepseek-coder.gguf     ggml-vocab-mpt.gguf.inp\n",
      "ggml-vocab-deepseek-coder.gguf.inp ggml-vocab-mpt.gguf.out\n",
      "ggml-vocab-deepseek-coder.gguf.out ggml-vocab-phi-3.gguf\n",
      "ggml-vocab-deepseek-llm.gguf       ggml-vocab-phi-3.gguf.inp\n",
      "ggml-vocab-deepseek-llm.gguf.inp   ggml-vocab-phi-3.gguf.out\n",
      "ggml-vocab-deepseek-llm.gguf.out   ggml-vocab-refact.gguf\n",
      "ggml-vocab-falcon.gguf             ggml-vocab-refact.gguf.inp\n",
      "ggml-vocab-falcon.gguf.inp         ggml-vocab-refact.gguf.out\n",
      "ggml-vocab-falcon.gguf.out         ggml-vocab-stablelm.gguf\n",
      "ggml-vocab-gpt-2.gguf              ggml-vocab-starcoder.gguf\n",
      "ggml-vocab-gpt-2.gguf.inp          ggml-vocab-starcoder.gguf.inp\n",
      "ggml-vocab-gpt-2.gguf.out          ggml-vocab-starcoder.gguf.out\n",
      "ggml-vocab-gpt-neox.gguf\n"
     ]
    }
   ],
   "source": [
    "# Obtain the original LLaMA model weights and place them in ./models\n",
    "!ls ./models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "cbaefb88",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/Users/xiongjiedai/llama.cpp/models\n",
      "Updated Git hooks.\n",
      "Git LFS initialized.\n",
      "Cloning into 'Llama-3-8B-GGUF'...\n",
      "remote: Enumerating objects: 44, done.\u001b[K\n",
      "remote: Counting objects: 100% (41/41), done.\u001b[K\n",
      "remote: Compressing objects: 100% (41/41), done.\u001b[K\n",
      "remote: Total 44 (delta 9), reused 0 (delta 0), pack-reused 3 (from 1)\u001b[K\n",
      "Unpacking objects: 100% (44/44), 2.25 MiB | 17.34 MiB/s, done.\n",
      "Filtering content: 100% (8/8), 9.46 GiB | 12.37 MiB/s, done.\n",
      "Cloning into 'Llama-3-70B-GGUF'...\n",
      "remote: Enumerating objects: 67, done.\u001b[K\n",
      "remote: Counting objects: 100% (64/64), done.\u001b[K\n",
      "remote: Compressing objects: 100% (64/64), done.\u001b[K\n",
      "remote: Total 67 (delta 4), reused 0 (delta 0), pack-reused 3 (from 1)\u001b[K\n",
      "Unpacking objects: 100% (67/67), 2.26 MiB | 16.38 MiB/s, done.\n",
      "Filtering content: 100% (40/40), 22.45 GiB | 937.00 KiB/s, done.\n"
     ]
    }
   ],
   "source": [
    "# Clone/Download the model files from Meta HF repo: https://huggingface.co/meta-llama. Or feel free to clone from my HF repo:\n",
    "%cd models\n",
    "# !brew install git-lfs # Get git-lfs to clone large files\n",
    "!git lfs install\n",
    "!git clone https://huggingface.co/JaaackXD/Llama-3-8B-GGUF\n",
    "!git clone https://huggingface.co/JaaackXD/Llama-3-70B-GGUF"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "12991b83",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/Users/xiongjiedai/llama.cpp\n"
     ]
    }
   ],
   "source": [
    "!mkdir 8B-v3\n",
    "!mv Llama-3-8B-GGUF/*.gguf 8B-v3\n",
    "!mkdir 70B-v3\n",
    "!mv Llama-3-70B-GGUF/*.gguf 70B-v3\n",
    "%cd .."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5443c11a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build\n",
    "!make clean && make -j"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "a57dd4e6",
   "metadata": {},
   "outputs": [],
   "source": [
     "# # Run these commands if you would like to convert and quantize the models yourself\n",
    "# # Install Python dependencies\n",
    "# !python3 -m pip install -r requirements.txt\n",
    "\n",
    "# # Convert the HF models to ggml FP16 format (High RAM requirement!)\n",
    "# !python3 convert-hf-to-gguf.py models/Llama-3-8B-GGUF/Meta-Llama-3-8B/ --outfile models/8B-v3/ggml-model-f16.gguf --outtype f16\n",
    "# !python3 convert-hf-to-gguf.py models/Llama-3-70B-GGUF/Meta-Llama-3-70B/ --outfile models/70B-v3/ggml-model-f16.gguf --outtype f16\n",
    "\n",
    "# # Quantize the model to 4-bits (using Q4_K_M method)\n",
    "# !./quantize models/8B-v3/ggml-model-f16.gguf models/8B-v3/ggml-model-Q4_K_M.gguf Q4_K_M\n",
    "# !./quantize models/70B-v3/ggml-model-f16.gguf models/70B-v3/ggml-model-Q4_K_M.gguf Q4_K_M"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "bd5c3a00",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Log start\n",
      "main: build = 2795 (bcdee0da)\n",
      "main: built with Apple clang version 15.0.0 (clang-1500.3.9.4) for arm64-apple-darwin23.4.0\n",
      "main: seed  = 0\n",
      "llama_model_loader: loaded meta data with 21 key-value pairs and 291 tensors from ./models/8B-v3/ggml-model-Q4_K_M.gguf (version GGUF V3 (latest))\n",
      "llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
      "llama_model_loader: - kv   0:                       general.architecture str              = llama\n",
      "llama_model_loader: - kv   1:                               general.name str              = Meta-Llama-3-8B\n",
      "llama_model_loader: - kv   2:                          llama.block_count u32              = 32\n",
      "llama_model_loader: - kv   3:                       llama.context_length u32              = 8192\n",
      "llama_model_loader: - kv   4:                     llama.embedding_length u32              = 4096\n",
      "llama_model_loader: - kv   5:                  llama.feed_forward_length u32              = 14336\n",
      "llama_model_loader: - kv   6:                 llama.attention.head_count u32              = 32\n",
      "llama_model_loader: - kv   7:              llama.attention.head_count_kv u32              = 8\n",
      "llama_model_loader: - kv   8:                       llama.rope.freq_base f32              = 500000.000000\n",
      "llama_model_loader: - kv   9:     llama.attention.layer_norm_rms_epsilon f32              = 0.000010\n",
      "llama_model_loader: - kv  10:                          general.file_type u32              = 15\n",
      "llama_model_loader: - kv  11:                           llama.vocab_size u32              = 128256\n",
      "llama_model_loader: - kv  12:                 llama.rope.dimension_count u32              = 128\n",
      "llama_model_loader: - kv  13:                       tokenizer.ggml.model str              = gpt2\n",
      "llama_model_loader: - kv  14:                         tokenizer.ggml.pre str              = llama-bpe\n",
      "llama_model_loader: - kv  15:                      tokenizer.ggml.tokens arr[str,128256]  = [\"!\", \"\\\"\", \"#\", \"$\", \"%\", \"&\", \"'\", ...\n",
      "llama_model_loader: - kv  16:                  tokenizer.ggml.token_type arr[i32,128256]  = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...\n",
      "llama_model_loader: - kv  17:                      tokenizer.ggml.merges arr[str,280147]  = [\"Ġ Ġ\", \"Ġ ĠĠĠ\", \"ĠĠ ĠĠ\", \"...\n",
      "llama_model_loader: - kv  18:                tokenizer.ggml.bos_token_id u32              = 128000\n",
      "llama_model_loader: - kv  19:                tokenizer.ggml.eos_token_id u32              = 128001\n",
      "llama_model_loader: - kv  20:               general.quantization_version u32              = 2\n",
      "llama_model_loader: - type  f32:   65 tensors\n",
      "llama_model_loader: - type q4_K:  193 tensors\n",
      "llama_model_loader: - type q6_K:   33 tensors\n",
      "llm_load_vocab: special tokens definition check successful ( 256/128256 ).\n",
      "llm_load_print_meta: format           = GGUF V3 (latest)\n",
      "llm_load_print_meta: arch             = llama\n",
      "llm_load_print_meta: vocab type       = BPE\n",
      "llm_load_print_meta: n_vocab          = 128256\n",
      "llm_load_print_meta: n_merges         = 280147\n",
      "llm_load_print_meta: n_ctx_train      = 8192\n",
      "llm_load_print_meta: n_embd           = 4096\n",
      "llm_load_print_meta: n_head           = 32\n",
      "llm_load_print_meta: n_head_kv        = 8\n",
      "llm_load_print_meta: n_layer          = 32\n",
      "llm_load_print_meta: n_rot            = 128\n",
      "llm_load_print_meta: n_embd_head_k    = 128\n",
      "llm_load_print_meta: n_embd_head_v    = 128\n",
      "llm_load_print_meta: n_gqa            = 4\n",
      "llm_load_print_meta: n_embd_k_gqa     = 1024\n",
      "llm_load_print_meta: n_embd_v_gqa     = 1024\n",
      "llm_load_print_meta: f_norm_eps       = 0.0e+00\n",
      "llm_load_print_meta: f_norm_rms_eps   = 1.0e-05\n",
      "llm_load_print_meta: f_clamp_kqv      = 0.0e+00\n",
      "llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
      "llm_load_print_meta: f_logit_scale    = 0.0e+00\n",
      "llm_load_print_meta: n_ff             = 14336\n",
      "llm_load_print_meta: n_expert         = 0\n",
      "llm_load_print_meta: n_expert_used    = 0\n",
      "llm_load_print_meta: causal attn      = 1\n",
      "llm_load_print_meta: pooling type     = 0\n",
      "llm_load_print_meta: rope type        = 0\n",
      "llm_load_print_meta: rope scaling     = linear\n",
      "llm_load_print_meta: freq_base_train  = 500000.0\n",
      "llm_load_print_meta: freq_scale_train = 1\n",
      "llm_load_print_meta: n_yarn_orig_ctx  = 8192\n",
      "llm_load_print_meta: rope_finetuned   = unknown\n",
      "llm_load_print_meta: ssm_d_conv       = 0\n",
      "llm_load_print_meta: ssm_d_inner      = 0\n",
      "llm_load_print_meta: ssm_d_state      = 0\n",
      "llm_load_print_meta: ssm_dt_rank      = 0\n",
      "llm_load_print_meta: model type       = 8B\n",
      "llm_load_print_meta: model ftype      = Q4_K - Medium\n",
      "llm_load_print_meta: model params     = 8.03 B\n",
      "llm_load_print_meta: model size       = 4.58 GiB (4.89 BPW) \n",
      "llm_load_print_meta: general.name     = Meta-Llama-3-8B\n",
      "llm_load_print_meta: BOS token        = 128000 '<|begin_of_text|>'\n",
      "llm_load_print_meta: EOS token        = 128001 '<|end_of_text|>'\n",
      "llm_load_print_meta: LF token         = 128 'Ä'\n",
      "llm_load_print_meta: EOT token        = 128009 '<|eot_id|>'\n",
      "llm_load_tensors: ggml ctx size =    0.30 MiB\n",
      "ggml_backend_metal_log_allocated_size: allocated buffer, size =  4403.50 MiB, ( 4403.58 / 49152.00)\n",
      "llm_load_tensors: offloading 32 repeating layers to GPU\n",
      "llm_load_tensors: offloading non-repeating layers to GPU\n",
      "llm_load_tensors: offloaded 33/33 layers to GPU\n",
      "llm_load_tensors:      Metal buffer size =  4403.50 MiB\n",
      "llm_load_tensors:        CPU buffer size =   281.81 MiB\n",
      ".......................................................................................\n",
      "llama_new_context_with_model: n_ctx      = 8192\n",
      "llama_new_context_with_model: n_batch    = 2048\n",
      "llama_new_context_with_model: n_ubatch   = 512\n",
      "llama_new_context_with_model: flash_attn = 0\n",
      "llama_new_context_with_model: freq_base  = 500000.0\n",
      "llama_new_context_with_model: freq_scale = 1\n",
      "ggml_metal_init: allocating\n",
      "ggml_metal_init: found device: Apple M3 Max\n",
      "ggml_metal_init: picking default device: Apple M3 Max\n",
      "ggml_metal_init: default.metallib not found, loading from source\n",
      "ggml_metal_init: GGML_METAL_PATH_RESOURCES = nil\n",
      "ggml_metal_init: loading '/Users/xiongjiedai/llama.cpp/ggml-metal.metal'\n",
      "ggml_metal_init: GPU name:   Apple M3 Max\n",
      "ggml_metal_init: GPU family: MTLGPUFamilyApple9  (1009)\n",
      "ggml_metal_init: GPU family: MTLGPUFamilyCommon3 (3003)\n",
      "ggml_metal_init: GPU family: MTLGPUFamilyMetal3  (5001)\n",
      "ggml_metal_init: simdgroup reduction support   = true\n",
      "ggml_metal_init: simdgroup matrix mul. support = true\n",
      "ggml_metal_init: hasUnifiedMemory              = true\n",
      "ggml_metal_init: recommendedMaxWorkingSetSize  = 51539.61 MB\n",
      "llama_kv_cache_init:      Metal KV buffer size =  1024.00 MiB\n",
      "llama_new_context_with_model: KV self size  = 1024.00 MiB, K (f16):  512.00 MiB, V (f16):  512.00 MiB\n",
      "llama_new_context_with_model:        CPU  output buffer size =     0.49 MiB\n",
      "llama_new_context_with_model:      Metal compute buffer size =   560.00 MiB\n",
      "llama_new_context_with_model:        CPU compute buffer size =    24.01 MiB\n",
      "llama_new_context_with_model: graph nodes  = 1030\n",
      "llama_new_context_with_model: graph splits = 2\n",
      "\n",
      "system_info: n_threads = 12 / 16 | AVX = 0 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | \n",
      "sampling: \n",
      "\trepeat_last_n = 64, repeat_penalty = 1.100, frequency_penalty = 0.000, presence_penalty = 0.000\n",
      "\ttop_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 1.100\n",
      "\tmirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000\n",
      "sampling order: \n",
      "CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temperature \n",
      "generate: n_ctx = 8192, n_batch = 2048, n_predict = 1024, n_keep = 0\n",
      "\n",
      "\n",
      "\u001b[33m<|begin_of_text|> First Citizen:\n",
      "\n",
      " Before we proceed any further, hear me speak.\n",
      "\n",
      " \n",
      "\n",
      " All:\n",
      "\n",
      " Speak, speak.\n",
      "\n",
      " \n",
      "\n",
      " First Citizen:\n",
      "\n",
      " You are all resolved rather to die than to famish?\n",
      "\n",
      " \n",
      "\n",
      " All:\n",
      "\n",
      " Resolved. resolved.\n",
      "\n",
      " \n",
      "\n",
      " First Citizen:\n",
      "\n",
      " First, you know Caius Marcius is chief enemy to the people.\n",
      "\n",
      " \n",
      "\n",
      " All:\n",
      "\n",
      " We know't, we know't.\n",
      "\n",
      " \n",
      "\n",
      " First Citizen:\n",
      "\n",
      " Let us kill him, and we'll have corn at our own price. Is't a verdict?\n",
      "\n",
      " \n",
      "\n",
      " All:\n",
      "\n",
      " No more talking on't; let it be done: away, away!\n",
      "\n",
      " \n",
      "\n",
      " Second Citizen:\n",
      "\n",
      " One word, good citizens.\n",
      "\n",
      " \n",
      "\n",
      " First Citizen:\n",
      "\n",
      " We are accounted poor citizens, the patricians good. What authority surfeits on would relieve us: if they would yield us but the superfluity,  while it were wholesome, we might guess they relieved us humanely; but they think we are too dear: the leanness that afflicts us, the object of  our misery, is as an inventory to particularise their abundance; our sufferance is a gain to them Let us revenge this with our pikes,  ere we become rakes: for the gods know I speak this in hunger for bread, not in thirst for revenge.\n",
      "\n",
      " \n",
      "\n",
      " \u001b[0m First Citizen:\n",
      "\n",
      " Do't when you will: 'tis like they'll never lift it.\n",
      "\n",
      " \n",
      "\n",
      " Second Citizen:\n",
      "\n",
      " Why should they then pretend to justice? Come, I trifle thus with them in hews; lay hands upon them: come, come. No words.\n",
      "\n",
      " [They rush forward.]\n",
      "\n",
      " \n",
      "\n",
      " All:\n",
      "\n",
      " A  noise within, as the rising of a woman's voice. Enter a WOMAN upstairs with a  pitcher, and a basket.\n",
      "\n",
      " First Citizen (within):\n",
      "\n",
      " What noise is this?\n",
      "\n",
      " Woman:\n",
      "\n",
      " O dear good fellow!  let me look on thee once more before I die.  I have served you eight years now without paying for any of it but you promised to pay well.\n",
      "\n",
      " \n",
      "\n",
      " [She offers money.]\n",
      "\n",
      " First Citizen:\n",
      "\n",
      " No, no; therefore go out of my door.\n",
      "\n",
      " \n",
      "\n",
      " Woman:\n",
      "\n",
      " Well, and there's the press of business: let it pass.\n",
      "\n",
      " She puts him down.\n",
      "\n",
      " Will't ever be ours, fellow?\n",
      "\n",
      " \n",
      "\n",
      " First Citizen:\n",
      "\n",
      " Make you full sure. This your horse men have intrapped  With wondrous physical achievement since they  lost their heads about her! They brought horses for their carriages  And mewed up their wives like rats in a trunk,  That the 'uicers might not stamp them out so precisely: being darkened from the searching sun,  Presuming that their wives were as they were themselves.\n",
      "\n",
      " \n",
      "\n",
      " Woman:\n",
      "\n",
      " So we did see the time with lusty men. But mark you now, this savageness is copied,  Nourish'd, bred and burned in, by my poor daughter; I would have men of such mettle, that they should turn to pathos,  Come these two month together; which confined, shall  moonstruck the world with beasts. As I,  God pardon it! have practised for four-score years,  Heaven and his pity give us  patience!  This is a bitter bawd to come so near with your fury unapplied: Was my office ever happy? Now thy latter end is come to look unto.\n",
      "\n",
      " \n",
      "\n",
      " [She strikes him on the face.]\n",
      "\n",
      " First Citizen:\n",
      "\n",
      " What mean you, Madam? Did you mean your service or your words should   spare? I'm nothing if I be not as Horace says, Well hallow'd, and true.\n",
      "\n",
      " \n",
      "\n",
      " Woman:\n",
      "\n",
      " Well, you are women; and therefore subject to all  incidents of climes adjusted. Yet from this serai to the south pole 'tis all one degree in chiding. Well--you'll forgive me:   graze up to th' mark with your eye; and then drop down into my neck: and you shall see how I'll pardon an entire riot.  [She pulls his beard.]\n",
      "\n",
      " My husband best of man, that is, if riches  could rive out his heart, were prized in her we have now for short.  When I said 'mark', you did not mark me;  And when I spoke truly then I lied: well; first, you see she's a harlot: therefore,   take her back: go to my woman that should this very hour be groaning hawling with a child incertain what will prove. We must follow him solemnly. Remember, these   are to flow straight through our hands, to the contrary, men needn't stop upon more game, for his  forks rise deep; but here's no combat; we on the muckhill are free.\n",
      "\n",
      " \n",
      "\n",
      " First Citizen:\n",
      "\n",
      " Ay, as the glass lookes in the finger of time; there is no chaos but an angrie man: therefore look you, she must have a name to her madness, he that brands their unthrifted youth with  'sanctity of heaven' will be somewhat more familiar why'n I break into three ourselves: let our crook arms hold asunder the general   part of women. Those that follow the wiudow do it because her husband's death; had more cause them to do it. But yet they are not down, for the wax must have entrenchment.\n",
      "\n",
      " \n",
      "\n",
      " Woman:\n",
      "\n",
      "  Nay, come, I pray you: we shall see who will excuse us,   that so deserves it. O lord! lord! 'tis a cruel wrong when women strike tops and men grow mutinous.\n",
      "\n",
      " [She pulls out his beard.]\n",
      "\n",
      " First Citizen:\n",
      "\n",
      " There's holaday to honest pillage when the stalling of your horse may be as profitable to  you as the riding of him: this shall be done; whether it like you or no. \n",
      "\n",
      " Woman (pulling off his cap):\n",
      "\n",
      " Will it be done? I would not lose the company of these plants on any condition. Villains, I will e'en custom you out of your dens.\n",
      "\n",
      " [She pulls up his mantle.]\n",
      "\n",
      " First Citizen:\n",
      "\n",
      " The mutines of Italy have done their spate; and  I do fear'd, you'll give us all great chapmen. But, say, the witlessness is beyond sugaration if the women have not the upper hand of the\n",
      "llama_print_timings:        load time =    6487.24 ms\n",
      "llama_print_timings:      sample time =     234.10 ms /  1024 runs   (    0.23 ms per token,  4374.27 tokens per second)\n",
      "llama_print_timings: prompt eval time =     441.81 ms /   258 tokens (    1.71 ms per token,   583.96 tokens per second)\n",
      "llama_print_timings:        eval time =   20201.08 ms /  1023 runs   (   19.75 ms per token,    50.64 tokens per second)\n",
      "llama_print_timings:       total time =   21212.60 ms /  1281 tokens\n",
      "ggml_metal_free: deallocating\n",
      "Log end\n"
     ]
    }
   ],
   "source": [
    "# Start inference on a gguf model (-h to show all options)\n",
    "!./main -ngl 10000 -m ./models/8B-v3/ggml-model-Q4_K_M.gguf --color --temp 1.1 --repeat_penalty 1.1 -c 0 -n 1024 -e -s 0 -p \"\"\"\\\n",
    "First Citizen:\\n\\n\\\n",
    "Before we proceed any further, hear me speak.\\n\\n\\\n",
    "\\n\\n\\\n",
    "All:\\n\\n\\\n",
    "Speak, speak.\\n\\n\\\n",
    "\\n\\n\\\n",
    "First Citizen:\\n\\n\\\n",
    "You are all resolved rather to die than to famish?\\n\\n\\\n",
    "\\n\\n\\\n",
    "All:\\n\\n\\\n",
    "Resolved. resolved.\\n\\n\\\n",
    "\\n\\n\\\n",
    "First Citizen:\\n\\n\\\n",
    "First, you know Caius Marcius is chief enemy to the people.\\n\\n\\\n",
    "\\n\\n\\\n",
    "All:\\n\\n\\\n",
    "We know't, we know't.\\n\\n\\\n",
    "\\n\\n\\\n",
    "First Citizen:\\n\\n\\\n",
    "Let us kill him, and we'll have corn at our own price. Is't a verdict?\\n\\n\\\n",
    "\\n\\n\\\n",
    "All:\\n\\n\\\n",
    "No more talking on't; let it be done: away, away!\\n\\n\\\n",
    "\\n\\n\\\n",
    "Second Citizen:\\n\\n\\\n",
    "One word, good citizens.\\n\\n\\\n",
    "\\n\\n\\\n",
    "First Citizen:\\n\\n\\\n",
    "We are accounted poor citizens, the patricians good. What authority surfeits on would relieve us: if they would yield us but the superfluity, \\\n",
    "while it were wholesome, we might guess they relieved us humanely; but they think we are too dear: the leanness that afflicts us, the object of \\\n",
    "our misery, is as an inventory to particularise their abundance; our sufferance is a gain to them Let us revenge this with our pikes, \\\n",
    "ere we become rakes: for the gods know I speak this in hunger for bread, not in thirst for revenge.\\n\\n\\\n",
    "\\n\\n\\\n",
    "\"\"\"\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "17c21608",
   "metadata": {},
   "source": [
    "## Benchmarks"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b6aee37",
   "metadata": {},
   "source": [
    "### 8B Q4_K_M"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "25e6834b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "| model                          |       size |     params | backend    | ngl | test       |              t/s |\n",
      "| ------------------------------ | ---------: | ---------: | ---------- | --: | ---------- | ---------------: |\n",
      "| llama 8B Q4_K - Medium         |   4.58 GiB |     8.03 B | Metal      |  99 | pp 512     |    693.32 ± 0.42 |\n",
      "| llama 8B Q4_K - Medium         |   4.58 GiB |     8.03 B | Metal      |  99 | pp 1024    |    678.04 ± 0.46 |\n",
      "| llama 8B Q4_K - Medium         |   4.58 GiB |     8.03 B | Metal      |  99 | pp 2048    |    652.33 ± 9.50 |\n",
      "| llama 8B Q4_K - Medium         |   4.58 GiB |     8.03 B | Metal      |  99 | pp 4096    |   573.09 ± 14.08 |\n",
      "| llama 8B Q4_K - Medium         |   4.58 GiB |     8.03 B | Metal      |  99 | pp 8192    |    505.32 ± 0.77 |\n",
      "| llama 8B Q4_K - Medium         |   4.58 GiB |     8.03 B | Metal      |  99 | tg 512     |     48.97 ± 0.17 |\n",
      "| llama 8B Q4_K - Medium         |   4.58 GiB |     8.03 B | Metal      |  99 | tg 1024    |     50.74 ± 2.02 |\n",
      "| llama 8B Q4_K - Medium         |   4.58 GiB |     8.03 B | Metal      |  99 | tg 2048    |     49.78 ± 0.01 |\n",
      "| llama 8B Q4_K - Medium         |   4.58 GiB |     8.03 B | Metal      |  99 | tg 4096    |     44.21 ± 0.01 |\n",
      "| llama 8B Q4_K - Medium         |   4.58 GiB |     8.03 B | Metal      |  99 | tg 8192    |     36.12 ± 0.02 |\n",
      "\n",
      "build: bcdee0da (2795)\n"
     ]
    }
   ],
   "source": [
    "!./llama-bench -p 512,1024,2048,4096,8192 -n 512,1024,2048,4096,8192 -m ./models/8B-v3/ggml-model-Q4_K_M.gguf"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0f07d6cd",
   "metadata": {},
   "source": [
    "### 8B F16"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "e8d4d00c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "| model                          |       size |     params | backend    | ngl | test       |              t/s |\n",
      "| ------------------------------ | ---------: | ---------: | ---------- | --: | ---------- | ---------------: |\n",
      "| llama 8B F16                   |  14.96 GiB |     8.03 B | Metal      |  99 | pp 512     |    769.84 ± 1.23 |\n",
      "| llama 8B F16                   |  14.96 GiB |     8.03 B | Metal      |  99 | pp 1024    |    751.49 ± 0.52 |\n",
      "| llama 8B F16                   |  14.96 GiB |     8.03 B | Metal      |  99 | pp 2048    |   695.29 ± 20.23 |\n",
      "| llama 8B F16                   |  14.96 GiB |     8.03 B | Metal      |  99 | pp 4096    |   609.97 ± 15.87 |\n",
      "| llama 8B F16                   |  14.96 GiB |     8.03 B | Metal      |  99 | pp 8192    |    515.15 ± 6.83 |\n",
      "| llama 8B F16                   |  14.96 GiB |     8.03 B | Metal      |  99 | tg 512     |     22.04 ± 0.24 |\n",
      "| llama 8B F16                   |  14.96 GiB |     8.03 B | Metal      |  99 | tg 1024    |     22.39 ± 0.09 |\n",
      "| llama 8B F16                   |  14.96 GiB |     8.03 B | Metal      |  99 | tg 2048    |     21.86 ± 0.00 |\n",
      "| llama 8B F16                   |  14.96 GiB |     8.03 B | Metal      |  99 | tg 4096    |     20.72 ± 0.00 |\n",
      "| llama 8B F16                   |  14.96 GiB |     8.03 B | Metal      |  99 | tg 8192    |     18.74 ± 0.00 |\n",
      "\n",
      "build: bcdee0da (2795)\n"
     ]
    }
   ],
   "source": [
    "!./llama-bench -p 512,1024,2048,4096,8192 -n 512,1024,2048,4096,8192 -m ./models/8B-v3/ggml-model-f16.gguf"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72d34884",
   "metadata": {},
   "source": [
    "### 70B Q4_K_M"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "b704364b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "| model                          |       size |     params | backend    | ngl | test       |              t/s |\n",
      "| ------------------------------ | ---------: | ---------: | ---------- | --: | ---------- | ---------------: |\n",
      "| llama 70B Q4_K - Medium        |  39.59 GiB |    70.55 B | Metal      |  99 | pp 512     |     70.19 ± 2.91 |\n",
      "| llama 70B Q4_K - Medium        |  39.59 GiB |    70.55 B | Metal      |  99 | pp 1024    |     62.88 ± 0.13 |\n",
      "| llama 70B Q4_K - Medium        |  39.59 GiB |    70.55 B | Metal      |  99 | pp 2048    |     64.09 ± 1.46 |\n",
      "| llama 70B Q4_K - Medium        |  39.59 GiB |    70.55 B | Metal      |  99 | pp 4096    |     64.90 ± 0.09 |\n",
      "| llama 70B Q4_K - Medium        |  39.59 GiB |    70.55 B | Metal      |  99 | pp 8192    |     61.96 ± 0.11 |\n",
      "| llama 70B Q4_K - Medium        |  39.59 GiB |    70.55 B | Metal      |  99 | tg 512     |      7.65 ± 0.07 |\n",
      "| llama 70B Q4_K - Medium        |  39.59 GiB |    70.55 B | Metal      |  99 | tg 1024    |      7.53 ± 0.00 |\n",
      "| llama 70B Q4_K - Medium        |  39.59 GiB |    70.55 B | Metal      |  99 | tg 2048    |      7.20 ± 0.00 |\n",
      "| llama 70B Q4_K - Medium        |  39.59 GiB |    70.55 B | Metal      |  99 | tg 4096    |      6.58 ± 0.00 |\n",
      "| llama 70B Q4_K - Medium        |  39.59 GiB |    70.55 B | Metal      |  99 | tg 8192    |      5.60 ± 0.00 |\n",
      "\n",
      "build: bcdee0da (2795)\n"
     ]
    }
   ],
   "source": [
    "!./llama-bench -p 512,1024,2048,4096,8192 -n 512,1024,2048,4096,8192 -m ./models/70B-v3/ggml-model-Q4_K_M.gguf"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
