-
Notifications
You must be signed in to change notification settings - Fork 812
Added new SST2 dataset class #1410
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
fa565ee
3e9551b
1d6d289
38d3d58
0f8968e
d2bcf2f
62e6fb2
846ee21
6d21049
7a82ba0
cdb5bac
fc41492
535c050
80b83e5
5e0f1e9
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,7 @@ | ||
import unittest | ||
from torchtext._internal.module_utils import is_module_available | ||
|
||
|
||
def skipIfNoModule(module, display_name=None):
    """Return a unittest decorator that skips the test when *module* is absent.

    Args:
        module: top-level module name to probe (never imported, only located).
        display_name: optional human-readable name used in the skip message;
            falls back to ``module`` when empty or ``None``.
    """
    # Fall back to the raw module name for the skip message.
    if not display_name:
        display_name = module
    missing = not is_module_available(module)
    return unittest.skipIf(missing, f'"{display_name}" is not available')
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,34 @@ | ||
import hashlib | ||
import json | ||
|
||
from torchtext.experimental.datasets import sst2 | ||
|
||
from ..common.case_utils import skipIfNoModule | ||
from ..common.torchtext_test_case import TorchtextTestCase | ||
|
||
|
||
class TestDataset(TorchtextTestCase):
    @skipIfNoModule("torchdata")
    def test_sst2_dataset(self):
        """Smoke-test SST2: the first sample of every split must hash to the
        known MD5 recorded in ``sst2._FIRST_LINE_MD5``."""
        splits = ("train", "dev", "test")
        datapipes = sst2.SST2(split=splits)

        # Verify the hash of the first line of each split, in order.
        for split_name, datapipe in zip(splits, datapipes):
            first_sample = next(iter(datapipe))
            digest = hashlib.md5(
                json.dumps(first_sample, sort_keys=True).encode("utf-8")
            ).hexdigest()
            self.assertEqual(digest, sst2._FIRST_LINE_MD5[split_name])
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
import importlib.util | ||
|
||
|
||
def is_module_available(*modules: str) -> bool:
    r"""Return ``True`` when every named top-level module exists, *without*
    importing any of them.

    This is generally safer than a try/except block around ``import X``: it
    avoids third-party libraries breaking assumptions of some of our tests,
    e.g. setting the multiprocessing start method when imported
    (see librosa/#747, torchvision/#544).
    """
    # find_spec locates a module on sys.path without executing it.
    for name in modules:
        if importlib.util.find_spec(name) is None:
            return False
    return True
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,3 +1,4 @@ | ||
from . import raw | ||
from . import sst2 | ||
|
||
__all__ = ['raw'] | ||
__all__ = ["raw", "sst2"] |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,90 @@ | ||
# Copyright (c) Facebook, Inc. and its affiliates. | ||
import logging | ||
import os | ||
|
||
from torchtext._internal.module_utils import is_module_available | ||
from torchtext.data.datasets_utils import ( | ||
_add_docstring_header, | ||
_create_dataset_directory, | ||
_wrap_split_argument, | ||
) | ||
|
||
# Module-level logger for import-time diagnostics.
logger = logging.getLogger(__name__)

# `torchdata` is an optional dependency: pull in its datapipes when present,
# otherwise emit a warning at import time instead of failing hard (SST2()
# will then fail only when actually called).
if is_module_available("torchdata"):
    from torchdata.datapipes.iter import (
        HttpReader,
        IterableWrapper,
    )
else:
    # NOTE(review): adjacent string literals concatenate without a space,
    # so this renders as "...dataset.Please refer..." — worth fixing.
    logger.warning(
        "Package `torchdata` is required to be installed to use this dataset."
        "Please refer to https://github.com/pytorch/data for instructions on "
        "how to install the package."
    )


# Sample counts per split; passed to _add_docstring_header for the
# generated dataset docstring.
NUM_LINES = {
    "train": 67349,
    "dev": 872,
    "test": 1821,
}

# MD5 checksum of the full SST-2.zip archive and its download URL.
MD5 = "9f81648d4199384278b86e315dac217c"
URL = "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip"

# Path of each split's TSV file inside the extracted archive, built with the
# platform path separator so it matches paths produced during extraction.
_EXTRACTED_FILES = {
    "train": f"{os.sep}".join(["SST-2", "train.tsv"]),
    "dev": f"{os.sep}".join(["SST-2", "dev.tsv"]),
    "test": f"{os.sep}".join(["SST-2", "test.tsv"]),
}

# MD5 checksum of each extracted split file.
_EXTRACTED_FILES_MD5 = {
    "train": "da409a0a939379ed32a470bc0f7fe99a",
    "dev": "268856b487b2a31a28c0a93daaff7288",
    "test": "3230e4efec76488b87877a56ae49675a",
}

# MD5 of the sorted-key JSON serialization of the first sample of each split;
# consumed by the unit tests as a cheap integrity check.
_FIRST_LINE_MD5 = {
    "train": "2552b8cecd57b2e022ef23411c688fa8",
    "dev": "1b0ffd6aa5f2bf0fd9840a5f6f1a9f07",
    "test": "f838c81fe40bfcd7e42e9ffc4dd004f7",
}

# Name of the directory created for this dataset under `root`.
DATASET_NAME = "SST2"
|
||
|
||
@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev", "test"))
def SST2(root, split):
    # Functional entry point: delegates to SST2Dataset for the given split.
    # NOTE(review): `_wrap_split_argument` presumably expands a tuple of
    # splits into one call (and one returned datapipe) per split — the tests
    # unpack three datapipes from a single call; confirm against the helper.
    return SST2Dataset(root, split).get_datapipe()
|
||
|
||
class SST2Dataset:
    """The SST2 dataset built end-to-end with torchdata datapipes.

    The downloaded archive is cached on disk so it is not re-downloaded at
    every epoch, and it is sanity-checked against its known MD5 before being
    extracted and parsed.
    """

    def __init__(self, root, split):
        # root: directory where the archive is cached and extracted.
        # split: one of "train"/"dev"/"test" (selects the TSV inside the zip).
        self.root = root
        self.split = split

    def get_datapipe(self):
        # Cache the downloaded archive on disk so later runs skip the download.
        cache_dp = IterableWrapper([URL]).on_disk_cache(
            HttpReader,
            op_map=lambda x: (x[0], x[1].read()),
            filepath_fn=lambda x: os.path.join(self.root, os.path.basename(x)),
        )

        # Sanity-check the cached archive against its known MD5 (fix applied
        # per review: the MD5 constant was previously defined but never used).
        check_cache_dp = cache_dp.check_hash(
            {os.path.join(self.root, "SST-2.zip"): MD5}, "md5"
        )

        # Extract from the zip and keep only the requested split's file.
        extracted_files = check_cache_dp.read_from_zip().filter(
            lambda x: self.split in x[0]
        )

        # Parse the TSV (skipping the header row) and yield
        # (sentence, label) pairs.
        return extracted_files.parse_csv(skip_lines=1, delimiter="\t").map(
            lambda x: (x[0], x[1])
        )
Uh oh!
There was an error while loading. Please reload this page.