From 6ef0f9727857aec2ba7ad18fc7e1e2e03002f8bd Mon Sep 17 00:00:00 2001
From: Fabrizio
Date: Sat, 8 Jun 2024 20:15:16 +0200
Subject: [PATCH] Removed TFLite optimization flag from quantize.py

Removed the TFLite optimization flag for the FP32 TFLite version. With
converter.optimizations = [tf.lite.Optimize.DEFAULT] set but no
representative dataset, the converter applies dynamic-range quantization,
producing a hybrid model (both INT8 and FP32 operations), which is not
supported by TFLM.
---
 benchmark/training/keyword_spotting/quantize.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/benchmark/training/keyword_spotting/quantize.py b/benchmark/training/keyword_spotting/quantize.py
index 2bae4422..eeb15120 100644
--- a/benchmark/training/keyword_spotting/quantize.py
+++ b/benchmark/training/keyword_spotting/quantize.py
@@ -12,7 +12,6 @@
   print(f"Converting trained model {Flags.saved_model_path} to TFL model at {Flags.tfl_file_name}")
   model = tf.keras.models.load_model(Flags.saved_model_path)
   converter = tf.lite.TFLiteConverter.from_keras_model(model)
-  converter.optimizations = [tf.lite.Optimize.DEFAULT]
 
   fp32_tfl_file_name = Flags.tfl_file_name[:Flags.tfl_file_name.rfind('.')] + '_float32.tflite'
   tflite_float_model = converter.convert()
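
Note: below is a minimal sketch of the two conversion paths this patch
separates, assuming TensorFlow 2.x. The toy model, its (49, 10, 1) input
shape, and the representative_dataset generator are illustrative
stand-ins for the model loaded from Flags.saved_model_path and the KWS
calibration data; they are not part of the patch.

    import numpy as np
    import tensorflow as tf

    # Toy stand-in model (the real script loads Flags.saved_model_path instead).
    model = tf.keras.Sequential([
        tf.keras.Input(shape=(49, 10, 1)),
        tf.keras.layers.Conv2D(8, 3, activation="relu"),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(12),
    ])

    def representative_dataset():
        # Calibration samples; the real script would draw these from the
        # KWS training set rather than random noise.
        for _ in range(10):
            yield [np.random.rand(1, 49, 10, 1).astype(np.float32)]

    # FP32 export: no optimizations, so every op stays float32
    # (this is what the patch keeps for the _float32.tflite file).
    fp32_converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_float_model = fp32_converter.convert()

    # Full-INT8 export: Optimize.DEFAULT plus a representative dataset
    # quantizes activations as well as weights. Optimize.DEFAULT alone
    # (the removed line) only dynamic-range-quantizes the weights,
    # yielding the hybrid INT8/FP32 model that TFLM does not support.
    int8_converter = tf.lite.TFLiteConverter.from_keras_model(model)
    int8_converter.optimizations = [tf.lite.Optimize.DEFAULT]
    int8_converter.representative_dataset = representative_dataset
    int8_converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    int8_converter.inference_input_type = tf.int8
    int8_converter.inference_output_type = tf.int8
    tflite_quant_model = int8_converter.convert()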