Uploading tokenizer_robustness_completion_chinese_transliteration_vs_translation subset
README.md CHANGED

```diff
@@ -756,6 +756,40 @@ dataset_info:
     num_examples: 2
   download_size: 5636
   dataset_size: 374
+- config_name: tokenizer_robustness_completion_chinese_transliteration_vs_translation
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 187
+    num_examples: 1
+  download_size: 5564
+  dataset_size: 187
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -845,6 +879,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_transliteration_variations/test-*
+- config_name: tokenizer_robustness_completion_chinese_transliteration_vs_translation
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_transliteration_vs_translation/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
```
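Once this commit lands, the new subset is addressed by the `config_name` registered above. A minimal loading sketch with the `datasets` library, assuming a hypothetical repo id `your-org/tokenizer-robustness` (the actual repository id is not part of this diff):

```python
from datasets import load_dataset

# Hypothetical repo id -- substitute the actual dataset repository.
REPO_ID = "your-org/tokenizer-robustness"

# The config name matches the `config_name` entry added in the diff above.
ds = load_dataset(
    REPO_ID,
    "tokenizer_robustness_completion_chinese_transliteration_vs_translation",
    split="test",
)

# The metadata declares a single test example (num_examples: 1),
# so the row count is easy to sanity-check after loading.
print(len(ds))      # expected: 1
print(ds.features)  # question, choices, answer, answer_label, lang, ...
```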
tokenizer_robustness_completion_chinese_transliteration_vs_translation/test-00000-of-00001.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6591956f199bbf57c8300499b602ed7171e97594517f59debee4f110aae4154d
+size 5564
```
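Note that the parquet shard is checked in as a Git LFS pointer rather than raw bytes: the pointer records only the blob's SHA-256 (`oid`) and its size. A quick sketch for confirming that a locally materialized shard matches this pointer, assuming the file has been pulled via `git lfs pull` or downloaded from the Hub:

```python
import hashlib
from pathlib import Path

# Path of the shard added in this commit.
shard = Path(
    "tokenizer_robustness_completion_chinese_transliteration_vs_translation/"
    "test-00000-of-00001.parquet"
)

data = shard.read_bytes()

# Both expected values come from the LFS pointer above.
assert len(data) == 5564, f"unexpected size: {len(data)}"
expected_oid = "6591956f199bbf57c8300499b602ed7171e97594517f59debee4f110aae4154d"
assert hashlib.sha256(data).hexdigest() == expected_oid, "SHA-256 mismatch"
print("shard matches its LFS pointer")
```

If either assertion fails, the working copy most likely still contains the pointer text itself rather than the resolved blob.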