rpand002's picture
Update README.md
189b3d8 verified
|
raw
history blame
5.14 kB
metadata
pipeline_tag: text-generation
base_model: ibm-granite/granite-8b-code-base-4k
inference: false
license: apache-2.0
datasets:
  - bigcode/commitpackft
  - TIGER-Lab/MathInstruct
  - meta-math/MetaMathQA
  - glaiveai/glaive-code-assistant-v3
  - glaiveai/glaive-function-calling-v2
  - bugdaryan/sql-create-context-instruction
  - garage-bAInd/Open-Platypus
  - nvidia/HelpSteer
metrics:
  - code_eval
library_name: transformers
tags:
  - code
  - granite
model-index:
  - name: granite-8b-code-instruct-4k
    results:
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 57.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 52.4
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 58.5
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 43.3
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 48.2
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 37.2
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 53
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 42.7
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 52.4
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 36.6
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 43.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 16.5
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 39.6
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 40.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 48.2
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 41.5
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 39
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 32.9
            verified: false

image/png

ibm-granite/granite-8b-code-instruct-4k-GGUF

This is the Q4_K_M converted version of the original ibm-granite/granite-8b-code-instruct-4k. Refer to the original model card for more details.

Use with llama.cpp

git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp

# install
make

# run generation
./main -m granite-8b-code-instruct-4k-GGUF/granite-8b-code-instruct.Q4_K_M.gguf -n 128 -p "def generate_random(x: int):" --color