Text Generation
Transformers
GGUF
code
granite
Eval Results
Mayank Mishra
upload model
571f780
metadata
pipeline_tag: text-generation
inference: false
license: apache-2.0
datasets:
  - codeparrot/github-code-clean
  - bigcode/starcoderdata
  - open-web-math/open-web-math
  - math-ai/StackMathQA
metrics:
  - code_eval
library_name: transformers
tags:
  - code
  - granite
model-index:
  - name: granite-8b-code-base
    results:
      - task:
          type: text-generation
        dataset:
          type: mbpp
          name: MBPP
        metrics:
          - name: pass@1
            type: pass@1
            value: 42.2
            # fixed typo: "veriefied" -> "verified" (HF model-index schema field)
            verified: false
      - task:
          type: text-generation
        dataset:
          type: evalplus/mbppplus
          name: MBPP+
        metrics:
          - name: pass@1
            type: pass@1
            value: 49.6
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 43.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 52.4
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 56.1
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 31.7
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 43.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 32.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 23.5
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 32.3
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 25
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 23.2
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 28
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 19.5
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 22.6
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 35.4
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 38.4
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 37.2
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 28.7
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 15.2
            verified: false

image/png

ibm-granite/granite-8b-code-base-GGUF

This is the Q4_K_M quantized GGUF version of the original ibm-granite/granite-8b-code-base. Refer to the original model card for more details.

Use with llama.cpp

# Fetch the llama.cpp source tree and enter it.
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp

# install — build the default targets (requires a C/C++ toolchain)
make

# run generation
# -m: path to the GGUF model file
# -n 128: generate up to 128 tokens
# -p: prompt string; --color: colorize generated output
# NOTE(review): newer llama.cpp releases renamed ./main to llama-cli — confirm against the pinned revision
./main -m granite-8b-code-base-GGUF/granite-8b-code-base.Q4_K_M.gguf -n 128 -p "def generate_random(x: int):" --color