| """macocu_parallel""" |
|
|
|
|
import os
import csv

import datasets
|
|
|
|
_CITATION = r"""
@inproceedings{banon2022macocu,
  title={MaCoCu: Massive collection and curation of monolingual and bilingual data: focus on under-resourced languages},
  author={Ban{\'o}n, Marta and Espla-Gomis, Miquel and Forcada, Mikel L and Garc{\'\i}a-Romero, Cristian and Kuzman, Taja and Ljube{\v{s}}i{\'c}, Nikola and van Noord, Rik and Sempere, Leopoldo Pla and Ram{\'\i}rez-S{\'a}nchez, Gema and Rupnik, Peter and others},
  booktitle={23rd Annual Conference of the European Association for Machine Translation, EAMT 2022},
  pages={303--304},
  year={2022},
  organization={European Association for Machine Translation}
}
"""
|
|
_DESCRIPTION = """\
The MaCoCu parallel dataset is an English-centric collection of 11
parallel corpora covering the following languages: Albanian,
Bulgarian, Bosnian, Croatian, Icelandic, Macedonian, Maltese,
Montenegrin, Serbian, Slovenian, and Turkish. These corpora have
been automatically crawled from national and generic top-level
domains (for example, ".hr" for Croatian, or ".is" for Icelandic);
a parallel curation pipeline was then applied to produce
the final data (see https://github.com/bitextor/bitextor).
"""
|
|
# Language pairs exposed as configurations. The full MaCoCu release covers
# all eleven English-X pairs described above; only English-Icelandic is
# wired up here.
_LanguagePairs = ["en-is"]
| |
|
|
_LICENSE = "cc0"
_HOMEPAGE = "https://macocu.eu"
|
|
class macocuConfig(datasets.BuilderConfig):
    """BuilderConfig for macocu_parallel."""
|
|
    def __init__(self, language_pair, **kwargs):
        """BuilderConfig for a single MaCoCu language pair.

        Args:
            language_pair: language pair to be loaded.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.language_pair = language_pair
|
|
|
|
class MaCoCu_parallel(datasets.GeneratorBasedBuilder):
    """Dataset builder for the MaCoCu parallel corpora."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = macocuConfig
    BUILDER_CONFIGS = [
        macocuConfig(name=pair, description=_DESCRIPTION, language_pair=pair)
        for pair in _LanguagePairs
    ]
|
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "src_url": datasets.Value("string"),
                "trg_url": datasets.Value("string"),
                "src_text": datasets.Value("string"),
                "trg_text": datasets.Value("string"),
                "bleualign_score": datasets.Value("string"),
                "src_deferred_hash": datasets.Value("string"),
                "trg_deferred_hash": datasets.Value("string"),
                "src_paragraph_id": datasets.Value("string"),
                "trg_paragraph_id": datasets.Value("string"),
                "src_doc_title": datasets.Value("string"),
                "trg_doc_title": datasets.Value("string"),
                "src_crawl_date": datasets.Value("string"),
                "trg_crawl_date": datasets.Value("string"),
                "src_file_type": datasets.Value("string"),
                "trg_file_type": datasets.Value("string"),
                "src_boilerplate": datasets.Value("string"),
                "trg_boilerplate": datasets.Value("string"),
                "src_heading_html_tag": datasets.Value("string"),
                "trg_heading_html_tag": datasets.Value("string"),
                "bifixer_hash": datasets.Value("string"),
                "bifixer_score": datasets.Value("string"),
                "bicleaner_ai_score": datasets.Value("string"),
                "biroamer_entities_detected": datasets.Value("string"),
                "dsi": datasets.Value("string"),
                "translation_direction": datasets.Value("string"),
                "en_document_level_variant": datasets.Value("string"),
                "domain_en": datasets.Value("string"),
                "en_domain_level_variant": datasets.Value("string"),
            }),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )
|
|
    def _split_generators(self, dl_manager):
        lang_pair = self.config.language_pair

        # The TSV for each pair is expected under data/<pair>.tsv in the repository.
        path = os.path.join("data", f"{lang_pair}.tsv")

        data_file = dl_manager.download_and_extract({"data_file": path})
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=data_file)]
|
|
    def _generate_examples(self, data_file):
        """Yields examples."""
        with open(data_file, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quotechar='"')
            for id_, row in enumerate(reader):
                # The first row of the TSV is a header; skip it.
                if id_ == 0:
                    continue
                yield id_, {
                    "src_url": row[0],
                    "trg_url": row[1],
                    "src_text": row[2],
                    "trg_text": row[3],
                    "bleualign_score": row[4],
                    "src_deferred_hash": row[5],
                    "trg_deferred_hash": row[6],
                    "src_paragraph_id": row[7],
                    "trg_paragraph_id": row[8],
                    "src_doc_title": row[9],
                    "trg_doc_title": row[10],
                    "src_crawl_date": row[11],
                    "trg_crawl_date": row[12],
                    "src_file_type": row[13],
                    "trg_file_type": row[14],
                    "src_boilerplate": row[15],
                    "trg_boilerplate": row[16],
                    "src_heading_html_tag": row[17],
                    "trg_heading_html_tag": row[18],
                    "bifixer_hash": row[19],
                    "bifixer_score": row[20],
                    "bicleaner_ai_score": row[21],
                    "biroamer_entities_detected": row[22],
                    "dsi": row[23],
                    "translation_direction": row[24],
                    "en_document_level_variant": row[25],
                    "domain_en": row[26],
                    "en_domain_level_variant": row[27],
                }
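
# A quick way to sanity-check a raw TSV before loading it through `datasets`
# (a sketch; "data/en-is.tsv" is assumed to exist locally and to follow the
# 28-column layout read in _generate_examples above, header row included):
#
#     import csv
#     with open("data/en-is.tsv", encoding="utf-8") as f:
#         reader = csv.reader(f, delimiter="\t", quotechar='"')
#         header = next(reader)                  # column names, skipped by the builder
#         first = next(reader)                   # first sentence pair
#         print(len(first), first[2], first[3])  # field count, src_text, trg_text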
|
|
|
|