Commit 6bd41e8

Update IntelTensorFlow_AMX_BF16_Inference.ipynb
1 parent dd8feb1 commit 6bd41e8

1 file changed: +2 -2 lines changed


AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_AMX_BF16_Inference/IntelTensorFlow_AMX_BF16_Inference.ipynb

Lines changed: 2 additions & 2 deletions
@@ -226,7 +226,7 @@
     "outputs": [],
     "source": [
     "# Reload the model as the bf16 model with AVX512 to compare inference time\n",
-    "os.environ[\"ONEDNN_MAX_CPU_ISA\"] = \"AVX512_BF16\"\n",
+    "os.environ[\"ONEDNN_MAX_CPU_ISA\"] = \"AVX512_CORE_BF16\"\n",
     "tf.config.optimizer.set_experimental_options({'auto_mixed_precision_onednn_bfloat16':True})\n",
     "bf16_model_noAmx = tf.keras.models.load_model('models/my_saved_model_fp32')\n",
     "\n",
@@ -262,7 +262,7 @@
     "outputs": [],
     "source": [
     "# Reload the model as the bf16 model with AMX to compare inference time\n",
-    "os.environ[\"ONEDNN_MAX_CPU_ISA\"] = \"AMX_BF16\"\n",
+    "os.environ[\"ONEDNN_MAX_CPU_ISA\"] = \"AVX512_CORE_AMX\"\n",
     "tf.config.optimizer.set_experimental_options({'auto_mixed_precision_onednn_bfloat16':True})\n",
     "bf16_model_withAmx = tf.keras.models.load_model('models/my_saved_model_fp32')\n",
     "\n",
