Uploading tokenizer_robustness_completion_stem_spelled_out subset

README.md CHANGED
@@ -1620,6 +1620,130 @@ dataset_info:
     num_examples: 41
   download_size: 40316
   dataset_size: 22025
+- config_name: tokenizer_robustness_completion_stem_spelled_out
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: question_general_category
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 13749
+    num_examples: 25
+  download_size: 35840
+  dataset_size: 13749
 configs:
 - config_name: tokenizer_robustness_completion_stem_canonical
   data_files:
@@ -1673,6 +1797,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_stem_space_removal/test-*
+- config_name: tokenizer_robustness_completion_stem_spelled_out
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_stem_spelled_out/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
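Once this commit lands, the new subset is loadable like any other config of the dataset via 🤗 Datasets. A minimal sketch follows; the repository id is a placeholder, since the diff does not name it, and the struct columns (`vanilla_cos_sim_to_canonical`, `trimmed_cos_sim_to_canonical`, `token_counts`) surface as plain dicts keyed by tokenizer name:

```python
from datasets import load_dataset

# Hypothetical repo id; substitute the actual dataset repository this diff belongs to.
REPO_ID = "org/tokenizer-robustness"

ds = load_dataset(
    REPO_ID,
    "tokenizer_robustness_completion_stem_spelled_out",
    split="test",
)
print(ds)  # expected: 25 examples with the features declared above

ex = ds[0]
print(ex["question"], ex["choices"], ex["answer_label"])

# Per-tokenizer metrics live in struct columns keyed by tokenizer name,
# e.g. cosine similarity of this variant to its canonical form, and token counts:
print(ex["vanilla_cos_sim_to_canonical"]["gpt2"])
print(ex["token_counts"]["google/byt5-small"])
```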
tokenizer_robustness_completion_stem_spelled_out/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31194013b776adc3ad4cd2c27d2939b8f2a39667b55e24dcd27c6898c204863e
+size 35840
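Note that what is committed here is a Git LFS pointer (the `oid`/`size` lines), not the parquet bytes themselves; the 35840-byte payload lives in LFS storage and matches the `download_size` declared in the card. Assuming a local clone where `git lfs pull` has materialized the object, the shard can be inspected directly, e.g.:

```python
import pandas as pd  # requires pyarrow or fastparquet for parquet support

# Path exactly as committed in this diff; the LFS object must be fetched first.
df = pd.read_parquet(
    "tokenizer_robustness_completion_stem_spelled_out/test-00000-of-00001.parquet"
)
print(len(df))                 # should match num_examples: 25
print(df.columns.tolist())     # should match the feature names in the card
```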