BERT-VI / README.md
RenatoBarreira's picture
Update README.md
2860b5e verified
|
raw
history blame
6.74 kB
metadata
license: cc
language:
  - pt
base_model:
  - neuralmind/bert-large-portuguese-cased
tags:
  - bert
  - pytorch
  - bertimbau
datasets:
  - RenatoBarreira/BERT-Vi_Trainning_data
model-index:
  - name: Bert-Vi
    results:
      - task:
          type: text-classification
        dataset:
          type: RenatoBarreira/BERT-Vi_Trainning_data
          name: 6000augaug
        metrics:
          - name: Accuracy
            type: accuracy
            value: 94.07
            verified: false
      - task:
          type: text-classification
        dataset:
          type: RenatoBarreira/BERT-Vi_Trainning_data
          name: 6000augaug
        metrics:
          - name: F1 score
            type: f1
            value: 94.07
            verified: false
      - task:
          type: text-classification
        dataset:
          type: RenatoBarreira/BERT-Vi_Trainning_data
          name: 6000augaug
        metrics:
          - name: Precision
            type: precision
            value: 0.527
            verified: false
      - task:
          type: text-generation
        dataset:
          type: ds1000
          name: DS-1000 (Overall Completion)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.26
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.3155
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (C#)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.2101
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (D)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.1357
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.1761
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.3022
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Julia)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.2302
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.3079
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Lua)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.2389
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (PHP)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.2608
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Perl)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.1734
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.3357
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (R)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.155
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Ruby)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.0124
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Racket)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.0007
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.2184
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Scala)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.2761
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Bash)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.1046
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (Swift)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.2274
            verified: false
      - task:
          type: text-generation
        dataset:
          type: nuprl/MultiPL-E
          name: MultiPL-HumanEval (TypeScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 0.3229
            verified: false
pipeline_tag: text-classification
library_name: bertopic
Texto alternativo

PROJETO BERT-VI
Modelos Bertimbau treinados para análise de texto político.

GitHub

Texto alternativo Texto alternativo