diff --git a/best_int8.tflite b/best_int8.tflite
index fbd9672..f1f9f3d 100644
Binary files a/best_int8.tflite and b/best_int8.tflite differ
diff --git a/quantisierung.py b/quantisierung.py
index eb3f6b0..338b238 100644
--- a/quantisierung.py
+++ b/quantisierung.py
@@ -30,7 +30,7 @@
 converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
 # Depending on the target hardware (e.g. NAO): INT8 in/out or UINT8
 converter.inference_input_type = tf.uint8
-converter.inference_output_type = tf.uint8
+converter.inference_output_type = tf.float32
 
 # --- Convert ---
 quant_model = converter.convert()
diff --git a/test_tflite.py b/test_tflite.py
index 4045642..83a642e 100644
--- a/test_tflite.py
+++ b/test_tflite.py
@@ -1,4 +1,7 @@
 import tensorflow as tf
 
-interpreter = tf.lite.Interpreter(model_path="yolo_training/NAO_detector/weights/best_saved_model/best_float16.tflite")
-interpreter.allocate_tensors()
\ No newline at end of file
+interpreter = tf.lite.Interpreter(model_path="best_int8.tflite")
+interpreter.allocate_tensors()
+input_details = interpreter.get_input_details()
+output_details = interpreter.get_output_details()
+print(input_details[0]['dtype'], output_details[0]['dtype'])
\ No newline at end of file
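
Taken together, these changes mean the re-exported best_int8.tflite now takes uint8 input but returns float32 output. Below is a minimal sketch of how the model could be exercised end to end under that contract; the random dummy input and the variable names are illustrative assumptions, not part of the repository.

import numpy as np
import tensorflow as tf

# Load the fully quantized model produced by quantisierung.py.
interpreter = tf.lite.Interpreter(model_path="best_int8.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# The input tensor is uint8, so real-valued pixels must be quantized with
# the scale/zero-point stored in the model (assumed nonzero scale here).
scale, zero_point = input_details[0]['quantization']
dummy = np.random.rand(*input_details[0]['shape']).astype(np.float32)  # stand-in image
quantized = np.clip(dummy / scale + zero_point, 0, 255).astype(np.uint8)

interpreter.set_tensor(input_details[0]['index'], quantized)
interpreter.invoke()

# With inference_output_type = tf.float32 the runtime dequantizes for us;
# no manual scale/zero-point arithmetic is needed on the output side.
detections = interpreter.get_tensor(output_details[0]['index'])
print(detections.shape, detections.dtype)  # dtype should be float32

Presumably the point of switching the output to float32 is exactly this: the consumer (e.g. the NAO-side code) no longer has to dequantize the detection tensor by hand, at the cost of a slightly larger output buffer.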