| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
"""Hugging Face `datasets` loading script for the Completeformer Java code-completion dataset (long/medium/short/mix configurations)."""
|
|
|
|
| import csv |
| import glob |
| import os |
|
|
| import datasets |
|
|
| import numpy as np |
|
|
| |
| |
# BibTeX citation rendered on the dataset card.
# NOTE(review): this is still the unmodified dataset-script template
# placeholder — replace with the real Completeformer / semeru citation.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
|
|
| |
| |
# Free-text description rendered on the dataset card.
# NOTE(review): still the template placeholder text — replace with a real
# description of the Completeformer Java code-completion data.
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""
|
|
| |
# Homepage URL shown on the dataset card (left empty by the author —
# presumably https://huggingface.co/datasets/semeru/completeformer_java_data; confirm).
_HOMEPAGE = ""


# License identifier shown on the dataset card (intentionally left empty).
_LICENSE = ""
|
|
| |
| |
| |
# All data files live under one tree on the Hub; the four configurations
# ("long", "medium", "short", "mix") differ only in the directory and file
# suffix, and the split files differ only in their name prefix.
_URL_ROOT = "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main"

# Split key used by the loader -> file-name prefix used on the Hub.
_SPLIT_PREFIXES = {"train": "training", "valid": "validation", "test": "test"}

_DATA_URLs = {
    length: {
        split: f"{_URL_ROOT}/{length}/{prefix}_{length}.csv"
        for split, prefix in _SPLIT_PREFIXES.items()
    }
    for length in ("long", "medium", "short", "mix")
}
|
|
|
|
| |
class CSNCHumanJudgementDataset(datasets.GeneratorBasedBuilder):
    """Completeformer Java code-completion dataset.

    Each configuration ("long", "medium", "short", "mix") selects one set of
    train/valid/test CSV files from ``_DATA_URLs``. Every example has an
    integer index, an ``input`` string, and a ``target`` completion string.
    """

    VERSION = datasets.Version("1.1.0")

    # One configuration per target-length bucket; all share the same schema.
    # (Kept as an explicit list: a class-level comprehension could not see
    # VERSION, since class scopes are invisible to comprehension scopes.)
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="long", version=VERSION, description=""),
        datasets.BuilderConfig(name="medium", version=VERSION, description=""),
        datasets.BuilderConfig(name="short", version=VERSION, description=""),
        datasets.BuilderConfig(name="mix", version=VERSION, description=""),
    ]

    DEFAULT_CONFIG_NAME = "long"

    def _info(self):
        """Return dataset metadata, including the feature schema."""
        features = datasets.Features(
            {
                "idx": datasets.Value("int32"),
                "input": datasets.Value("string"),
                "target": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split CSV files and declare the three splits."""
        my_urls = _DATA_URLs[self.config.name]
        data_dirs = {split: dl_manager.download_and_extract(url) for split, url in my_urls.items()}
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": data_dirs["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": data_dirs["valid"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": data_dirs["test"]},
            ),
        ]

    def _generate_examples(self, file_path):
        """Yield ``(key, example)`` tuples from one split's CSV file.

        The CSV has a header row and four columns: a throwaway row number,
        the example index, the model input, and the target completion.
        """
        with open(file_path, encoding="utf-8") as f:
            csv_reader = csv.reader(
                f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            next(csv_reader, None)  # skip the header row (tolerates empty file)

            for row_id, row in enumerate(csv_reader):
                # Renamed from `input` to avoid shadowing the builtin.
                _, idx, example_input, target = row
                yield row_id, {
                    # The schema declares int32, so cast the CSV string here
                    # rather than relying on downstream coercion.
                    "idx": int(idx),
                    "input": example_input,
                    "target": target,
                }