@@ -80,8 +80,16 @@ unique_void_ptr make_type_erased_array(size_t size) {
                         [](void* data) { delete[] static_cast<T*>(data); });
 }
 
-bool IsQuantized(const TfLiteTensor& tensor) {
-  if (tensor.type != kTfLiteInt8 && tensor.type != kTfLiteInt16) return false;
+bool InterpretAsQuantized(const TfLiteTensor& tensor) {
+  if (tensor.quantization.type == kTfLiteNoQuantization) return false;
+
+  // Quantized single-op models with uint8 input/output type are only used for
+  // EdgeTPU tests.
+  // EdgeTPU tests need to read the quantized values as-is to check for
+  // bit-exactness. As a result we don't interpret the tensor as quantized.
+  // TODO(b/176121243): Add an option to interpret uint8 buffers as
+  // non-quantized type and set it from the child class.
+  if (tensor.type == kTfLiteUInt8) return false;
 
   if (tensor.quantization.params != nullptr) {
     auto* quantization =
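
The hunk ends on the unchanged context line "auto* quantization =", so the rest of the
helper is not visible in this diff. A minimal sketch of the full function, under the
assumption that the pre-existing tail accepts only tensors with a single scale and
zero-point pair (only the "+" lines above are confirmed by the diff; the tail and the
include are inferred from the shown context and TfLite's TfLiteAffineQuantization
struct):

    #include "tensorflow/lite/c/common.h"  // TfLiteTensor, TfLiteAffineQuantization

    // Sketch, not the verbatim file contents: the tail after "auto* quantization ="
    // is assumed to accept only per-tensor (not per-channel) affine quantization.
    bool InterpretAsQuantized(const TfLiteTensor& tensor) {
      if (tensor.quantization.type == kTfLiteNoQuantization) return false;

      // uint8 single-op models are EdgeTPU bit-exactness tests; read them as-is.
      if (tensor.type == kTfLiteUInt8) return false;

      if (tensor.quantization.params != nullptr) {
        auto* quantization = reinterpret_cast<TfLiteAffineQuantization*>(
            tensor.quantization.params);
        if (quantization->scale != nullptr && quantization->scale->size == 1 &&
            quantization->zero_point != nullptr &&
            quantization->zero_point->size == 1) {
          return true;
        }
      }
      return false;
    }
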
@@ -316,7 +324,7 @@ bool TfLiteDriver::DataExpectation::QuantizedCheck(bool verbose,
 
 bool TfLiteDriver::DataExpectation::Check(bool verbose,
                                           const TfLiteTensor& tensor) {
-  if (IsQuantized(tensor)) {
+  if (InterpretAsQuantized(tensor)) {
     return QuantizedCheck(verbose, tensor);
   }
 
@@ -549,7 +557,7 @@ void TfLiteDriver::SetExpectation(int id, const string& csv_values) {
       new DataExpectation(relative_threshold_, absolute_threshold_,
                           quantization_error_multiplier_));
 
-  if (IsQuantized(*tensor)) {
+  if (InterpretAsQuantized(*tensor)) {
     expected_output_[id]->SetData<float>(csv_values);
     return;
   }
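
With this change SetExpectation() keeps float CSV expectations for any tensor the new
predicate accepts, and Check() routes those tensors to QuantizedCheck(). A hedged
sketch of the comparison that implies, assuming the usual affine dequantization
real = scale * (q - zero_point) and a tolerance of quantization_error_multiplier
quantization steps (CheckDequantized and all of its parameter names are hypothetical,
not identifiers from this file):

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Hypothetical helper mirroring what QuantizedCheck is expected to do for an
    // int8 output: dequantize each element and compare it to the float expectation.
    bool CheckDequantized(const std::vector<int8_t>& quantized, float scale,
                          int32_t zero_point, const std::vector<float>& expected,
                          float quantization_error_multiplier) {
      if (quantized.size() != expected.size()) return false;
      for (size_t i = 0; i < quantized.size(); ++i) {
        // Affine dequantization: real_value = scale * (q - zero_point).
        const float dequantized = scale * (quantized[i] - zero_point);
        // Allow up to quantization_error_multiplier quantization steps of error.
        if (std::abs(dequantized - expected[i]) >
            quantization_error_multiplier * scale) {
          return false;
        }
      }
      return true;
    }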