Commit 00fe9a1

Update to latest black (#708)
There is a new version of black, which is causing our format script to fail. These are the changes produced by re-running the format script with the updated black.
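
Three rules from the updated black account for every hunk below: redundant parentheses around tuple targets in `for` statements are stripped, a parenthesized assignment target ending in a trailing comma is exploded one name per line, and a blank line sitting directly after a `def ...):` block opener is removed. (The commit does not pin a black version, so tying each hunk to a specific release is an inference from the diff itself.) Short illustrative sketches follow the first hunk of each kind.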
1 parent e8be57c commit 00fe9a1

15 files changed, +8 −16 lines changed

examples/bert_pretraining/bert_create_pretraining_data.py

Lines changed: 2 additions & 2 deletions
@@ -124,7 +124,7 @@ def write_instance_to_example_files(
     writer = tf.io.TFRecordWriter(output_filename)
     total_written = 0
     lookup = dict(zip(vocab, range(len(vocab))))
-    for (inst_index, instance) in enumerate(instances):
+    for inst_index, instance in enumerate(instances):
         token_ids = [lookup[x] for x in instance.tokens]
         padding_mask = [1] * len(token_ids)
         segment_ids = list(instance.segment_ids)
@@ -379,7 +379,7 @@ def create_masked_lm_predictions(
     # (Issue #166)

     cand_indexes = []
-    for (i, token) in enumerate(tokens):
+    for i, token in enumerate(tokens):
         if token == "[CLS]" or token == "[SEP]":
             continue
         cand_indexes.append([i])
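
As a standalone illustration of the `for`-target rule, here is a minimal sketch (a hypothetical file, not part of this repo); both forms are valid Python:

pairs = [("a", 1), ("b", 2)]

# Before: the parenthesized tuple target that older black left alone.
for (name, value) in pairs:
    print(name, value)

# After: the updated black strips the redundant parentheses.
for name, value in pairs:
    print(name, value)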

keras_nlp/layers/masked_lm_mask_generator.py

Lines changed: 5 additions & 1 deletion
@@ -147,7 +147,11 @@ def call(self, inputs):
             # convert dense to ragged.
             inputs = tf.RaggedTensor.from_tensor(inputs)

-        (token_ids, mask_positions, mask_ids,) = tf_text.mask_language_model(
+        (
+            token_ids,
+            mask_positions,
+            mask_ids,
+        ) = tf_text.mask_language_model(
             inputs,
             item_selector=self._random_selector,
             mask_values_chooser=self._mask_values_chooser,
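
This is the only hunk that adds lines. The trailing comma inside the parenthesized assignment target acts as a "magic trailing comma", which the updated black treats as a request to keep the target exploded, one name per line. A minimal sketch, with a hypothetical stand-in in place of tf_text.mask_language_model:

def mask_language_model_stub():
    # Hypothetical stand-in returning three values, like the real call.
    return [1, 2], [0], [103]

# The trailing comma after mask_ids tells black to keep the
# target exploded rather than collapsing it onto one line.
(
    token_ids,
    mask_positions,
    mask_ids,
) = mask_language_model_stub()
print(token_ids, mask_positions, mask_ids)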

keras_nlp/metrics/bleu.py

Lines changed: 1 addition & 1 deletion
@@ -223,7 +223,7 @@ def _corpus_bleu(
             smooth: boolean. Whether or not to apply Lin et al. 2004
                 smoothing.
         """
-        for (references, translation) in zip(
+        for references, translation in zip(
             reference_corpus, translation_corpus
         ):
             reference_length += min(len(r) for r in references)

keras_nlp/models/albert/albert_backbone.py

Lines changed: 0 additions & 1 deletion
@@ -117,7 +117,6 @@ def __init__(
         num_segments=2,
         **kwargs,
     ):
-
         if num_layers % num_groups != 0:
             raise ValueError(
                 "`num_layers` must be divisible by `num_groups`. Received: "

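The hunk above and every remaining hunk in this commit apply the same rule: the updated black deletes a blank line that sits directly after a block opener, here the `):` closing a `def` signature. A minimal sketch (hypothetical names); both forms are valid Python:

# Before: a blank line immediately inside the function body.
def old_style(
    vocab_size,
    **kwargs,
):

    return {"vocab_size": vocab_size, **kwargs}

# After: the updated black removes that leading blank line.
def new_style(
    vocab_size,
    **kwargs,
):
    return {"vocab_size": vocab_size, **kwargs}
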
keras_nlp/models/bart/bart_backbone.py

Lines changed: 0 additions & 1 deletion
@@ -96,7 +96,6 @@ def __init__(
         max_sequence_length=1024,
         **kwargs,
     ):
-
         # Encoder inputs
         encoder_token_id_input = keras.Input(
             shape=(None,), dtype="int32", name="encoder_token_ids"

keras_nlp/models/bert/bert_backbone.py

Lines changed: 0 additions & 1 deletion
@@ -104,7 +104,6 @@ def __init__(
         num_segments=2,
         **kwargs,
     ):
-
         # Index of classification token in the vocabulary
         cls_token_index = 0
         # Inputs

keras_nlp/models/deberta_v3/deberta_v3_backbone.py

Lines changed: 0 additions & 1 deletion
@@ -108,7 +108,6 @@ def __init__(
         bucket_size=256,
         **kwargs,
     ):
-
         # Inputs
         token_id_input = keras.Input(
             shape=(None,), dtype="int32", name="token_ids"

keras_nlp/models/distil_bert/distil_bert_backbone.py

Lines changed: 0 additions & 1 deletion
@@ -105,7 +105,6 @@ def __init__(
         max_sequence_length=512,
         **kwargs,
     ):
-
         # Inputs
         token_id_input = keras.Input(
             shape=(None,), dtype="int32", name="token_ids"

keras_nlp/models/f_net/f_net_backbone.py

Lines changed: 0 additions & 1 deletion
@@ -100,7 +100,6 @@ def __init__(
         num_segments=4,
         **kwargs,
     ):
-
         # Index of classification token in the vocabulary
         cls_token_index = 0
         # Inputs

keras_nlp/models/gpt2/gpt2_backbone.py

Lines changed: 0 additions & 1 deletion
@@ -102,7 +102,6 @@ def __init__(
         max_sequence_length=1024,
         **kwargs,
     ):
-
         # Inputs
         token_ids = keras.Input(shape=(None,), dtype="int32", name="token_ids")
         padding_mask = keras.Input(

keras_nlp/models/gpt2/gpt2_preprocessor.py

Lines changed: 0 additions & 1 deletion
@@ -121,7 +121,6 @@ def __init__(
         sequence_length,
         **kwargs,
     ):
-
         super().__init__(**kwargs)

         self.tokenizer = tokenizer

keras_nlp/models/roberta/roberta_backbone.py

Lines changed: 0 additions & 1 deletion
@@ -98,7 +98,6 @@ def __init__(
         max_sequence_length=512,
         **kwargs,
     ):
-
         # Inputs
         token_id_input = keras.Input(
             shape=(None,), dtype=tf.int32, name="token_ids"

keras_nlp/samplers/beam_sampler.py

Lines changed: 0 additions & 1 deletion
@@ -111,7 +111,6 @@ def sample(
         )

         def one_step(beams, beams_prob, length, mask):
-
             flattened_beams = tf.reshape(
                 beams, shape=[batch_size * num_beams, -1]
             )

keras_nlp/tokenizers/byte_tokenizer.py

Lines changed: 0 additions & 1 deletion
@@ -207,7 +207,6 @@ def vocabulary_size(self) -> int:
         return 256

     def tokenize(self, inputs):
-
         if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)):
             inputs = tf.convert_to_tensor(inputs)

keras_nlp/tokenizers/byte_tokenizer_test.py

Lines changed: 0 additions & 1 deletion
@@ -223,7 +223,6 @@ def test_load_model_with_config(self):
         )

     def test_config(self):
-
         tokenizer = ByteTokenizer(
             name="byte_tokenizer_config_test",
             lowercase=False,
