diff --git a/torchtext/data/batch.py b/torchtext/data/batch.py
index af29f175d4..f345a82e19 100644
--- a/torchtext/data/batch.py
+++ b/torchtext/data/batch.py
@@ -1,4 +1,5 @@
 import torch
+import warnings
 
 
 class Batch(object):
@@ -19,6 +20,7 @@ class Batch(object):
 
     def __init__(self, data=None, dataset=None, device=None):
         """Create a Batch from a list of examples."""
+        warnings.warn('{} class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.'.format(self.__class__.__name__), UserWarning)
         if data is not None:
             self.batch_size = len(data)
             self.dataset = dataset
diff --git a/torchtext/data/example.py b/torchtext/data/example.py
index d9f96aeda3..a5d29ef42a 100644
--- a/torchtext/data/example.py
+++ b/torchtext/data/example.py
@@ -1,5 +1,6 @@
 import json
 from functools import reduce
+import warnings
 
 
 class Example(object):
@@ -9,6 +10,7 @@ class Example(object):
     """
     @classmethod
     def fromJSON(cls, data, fields):
+        warnings.warn('Example class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.', UserWarning)
         ex = cls()
         obj = json.loads(data)
 
@@ -47,6 +49,7 @@ def reducer(obj, key):
 
     @classmethod
     def fromdict(cls, data, fields):
+        warnings.warn('Example class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.', UserWarning)
         ex = cls()
         for key, vals in fields.items():
             if key not in data:
@@ -62,6 +65,7 @@ def fromdict(cls, data, fields):
 
     @classmethod
     def fromCSV(cls, data, fields, field_to_index=None):
+        warnings.warn('Example class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.', UserWarning)
         if field_to_index is None:
             return cls.fromlist(data, fields)
         else:
@@ -71,6 +75,7 @@ def fromCSV(cls, data, fields, field_to_index=None):
 
     @classmethod
     def fromlist(cls, data, fields):
+        warnings.warn('Example class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.', UserWarning)
         ex = cls()
         for (name, field), val in zip(fields, data):
             if field is not None:
@@ -86,6 +91,7 @@ def fromlist(cls, data, fields):
 
     @classmethod
     def fromtree(cls, data, fields, subtrees=False):
+        warnings.warn('Example class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.', UserWarning)
         try:
             from nltk.tree import Tree
         except ImportError:
diff --git a/torchtext/data/field.py b/torchtext/data/field.py
index 189331c1be..6054f1a510 100644
--- a/torchtext/data/field.py
+++ b/torchtext/data/field.py
@@ -3,7 +3,7 @@
 from itertools import chain
 import torch
 from tqdm import tqdm
-
+import warnings
 from .dataset import Dataset
 from .pipeline import Pipeline
 from .utils import get_tokenizer, dtype_to_attr, is_tokenizer_serializable
@@ -33,6 +33,7 @@ class RawField(object):
     """
 
     def __init__(self, preprocessing=None, postprocessing=None, is_target=False):
+        warnings.warn('{} class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.'.format(self.__class__.__name__), UserWarning)
         self.preprocessing = preprocessing
         self.postprocessing = postprocessing
         self.is_target = is_target
@@ -146,6 +147,7 @@ def __init__(self, sequential=True, use_vocab=True, init_token=None,
                  batch_first=False, pad_token="<pad>", unk_token="<unk>",
                  pad_first=False, truncate_first=False, stop_words=None,
                  is_target=False):
+        warnings.warn('{} class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.'.format(self.__class__.__name__), UserWarning)
         self.sequential = sequential
         self.use_vocab = use_vocab
         self.init_token = init_token
@@ -365,6 +367,7 @@ def numericalize(self, arr, device=None):
 
 class ReversibleField(Field):
     def __init__(self, **kwargs):
+        warnings.warn('{} class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.'.format(self.__class__.__name__), UserWarning)
         if kwargs.get('tokenize') is list:
             self.use_revtok = False
         else:
@@ -411,6 +414,7 @@ class SubwordField(ReversibleField):
     vocab_cls = SubwordVocab
 
     def __init__(self, **kwargs):
+        warnings.warn('{} class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.'.format(self.__class__.__name__), UserWarning)
         kwargs['tokenize'] = 'subword'
         if 'unk_token' not in kwargs:
             kwargs['unk_token'] = '�'
@@ -491,6 +495,7 @@ def __init__(self, nesting_field, use_vocab=True, init_token=None, eos_token=None,
                  postprocessing=None, tokenize=None, tokenizer_language='en',
                  include_lengths=False, pad_token='<pad>',
                  pad_first=False, truncate_first=False):
+        warnings.warn('{} class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.'.format(self.__class__.__name__), UserWarning)
         if isinstance(nesting_field, NestedField):
             raise ValueError('nesting field must not be another NestedField')
         if nesting_field.include_lengths:
diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py
index c76a7335ff..bc4d410464 100644
--- a/torchtext/data/iterator.py
+++ b/torchtext/data/iterator.py
@@ -2,7 +2,7 @@
 import random
 import logging
-
+import warnings
 import torch
 
 from .utils import RandomShuffler
 from .batch import Batch
@@ -45,6 +45,7 @@ def __init__(self, dataset, batch_size, sort_key=None, device=None,
                  batch_size_fn=None, train=True,
                  repeat=False, shuffle=None, sort=None,
                  sort_within_batch=None):
+        warnings.warn('{} class will be retired in the 0.8.0 release and moved to torchtext.legacy. Please see 0.7.0 release notes for further information.'.format(self.__class__.__name__), UserWarning)
        self.batch_size, self.train, self.dataset = batch_size, train, dataset
         self.batch_size_fn = batch_size_fn
         self.iterations = 0
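
The net effect of the patch is that constructing any of these legacy classes now emits a UserWarning. The sketch below (not part of the diff) shows how downstream code can observe the warning or silence it while migrating to torchtext.legacy; the Field(lower=True) call and the filter pattern are illustrative assumptions, not part of this change.

import warnings

from torchtext.data import Field  # any of the patched classes behaves the same way

# Instantiating a patched class now emits, e.g.:
#   UserWarning: Field class will be retired in the 0.8.0 release and moved to
#   torchtext.legacy. Please see 0.7.0 release notes for further information.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    TEXT = Field(lower=True)  # hypothetical usage, chosen for illustration
    assert any(issubclass(w.category, UserWarning) for w in caught)

# Projects that have already planned their migration can mute the message:
warnings.filterwarnings(
    "ignore",
    message=r".*will be retired in the 0\.8\.0 release.*",
    category=UserWarning,
)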