now with float output

Winz 2025-07-31 02:07:47 +02:00
parent c1ad03aad6
commit ae28de9b90
3 changed files with 6 additions and 3 deletions

Binary file not shown.


@@ -30,7 +30,7 @@ converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
 # Depending on the target hardware (e.g. NAO): INT8 in/out or UINT8
 converter.inference_input_type = tf.uint8
-converter.inference_output_type = tf.uint8
+converter.inference_output_type = tf.float32
 # --- Convert ---
 quant_model = converter.convert()

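For context, a minimal sketch of how a full-integer quantization script around this hunk is typically structured; the saved-model path, the input shape, and representative_dataset() are assumptions for illustration, not taken from the repository:

import numpy as np
import tensorflow as tf

def representative_dataset():
    # Yield a handful of calibration samples in the model's input shape (assumed 1x640x640x3).
    for _ in range(100):
        yield [np.random.rand(1, 640, 640, 3).astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_saved_model("best_saved_model")  # assumed path
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8      # quantized input, unchanged
converter.inference_output_type = tf.float32   # float output, the change in this commit
quant_model = converter.convert()
with open("best_int8.tflite", "wb") as f:
    f.write(quant_model)

Keeping the output as tf.float32 means downstream code can read detections directly, without dequantizing the output tensor by hand.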

@@ -1,4 +1,7 @@
 import tensorflow as tf
-interpreter = tf.lite.Interpreter(model_path="yolo_training/NAO_detector/weights/best_saved_model/best_float16.tflite")
-interpreter.allocate_tensors()
+interpreter = tf.lite.Interpreter(model_path="best_int8.tflite")
+interpreter.allocate_tensors()
+input_details = interpreter.get_input_details()
+output_details = interpreter.get_output_details()
+print(input_details[0]['dtype'], output_details[0]['dtype'])
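The check above should now report uint8 for the input and float32 for the output. A minimal inference sketch under that assumption (the dummy frame and its preprocessing are placeholders, not code from the repository):

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="best_int8.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# uint8 input: map the float image into the quantized range using the input
# tensor's quantization parameters.
scale, zero_point = input_details[0]['quantization']
shape = input_details[0]['shape']                       # e.g. [1, H, W, 3]
image = np.random.rand(*shape).astype(np.float32)       # placeholder for a real preprocessed frame
quantized = np.clip(image / scale + zero_point, 0, 255).astype(np.uint8)

interpreter.set_tensor(input_details[0]['index'], quantized)
interpreter.invoke()

# Because the output type is float32, no dequantization step is needed here.
detections = interpreter.get_tensor(output_details[0]['index'])
print(detections.shape, detections.dtype)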