now with float output
This commit is contained in:
parent c1ad03aad6
commit ae28de9b90
BIN best_int8.tflite
Binary file not shown.
@@ -30,7 +30,7 @@ converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
 
 # Depending on the target hardware (e.g. NAO): INT8 in/out or UINT8
 converter.inference_input_type = tf.uint8
-converter.inference_output_type = tf.uint8
+converter.inference_output_type = tf.float32
 
 # --- Convert ---
 quant_model = converter.convert()
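For context, the hunk above only switches the output dtype from uint8 to float32. Below is a minimal sketch of how the surrounding converter script typically looks; the SavedModel path, the 640x640 input resolution, and the random calibration data are assumptions for illustration, not taken from this repository.

# Sketch of a full-integer TFLite conversion with uint8 input and float32 output.
# Assumptions: SavedModel path "best_saved_model", 1x640x640x3 input, random calibration samples.
import numpy as np
import tensorflow as tf

def representative_dataset():
    # Hypothetical calibration samples; real preprocessed images should be used here.
    for _ in range(100):
        yield [np.random.rand(1, 640, 640, 3).astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_saved_model("best_saved_model")  # assumed path
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8     # camera frames arrive as uint8
converter.inference_output_type = tf.float32  # float output, no manual dequantization needed
quant_model = converter.convert()
with open("best_int8.tflite", "wb") as f:
    f.write(quant_model)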
@@ -1,4 +1,7 @@
 import tensorflow as tf
 
-interpreter = tf.lite.Interpreter(model_path="yolo_training/NAO_detector/weights/best_saved_model/best_float16.tflite")
+interpreter = tf.lite.Interpreter(model_path="best_int8.tflite")
 interpreter.allocate_tensors()
+input_details = interpreter.get_input_details()
+output_details = interpreter.get_output_details()
+print(input_details[0]['dtype'], output_details[0]['dtype'])
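The added lines only print the interpreter's input and output dtypes, which after this commit should be uint8 and float32. As a hedged follow-up, a single inference pass against the converted model could look like the sketch below; the dummy uint8 frame is an assumption standing in for a real preprocessed camera image.

# Sketch: one inference pass with the uint8-in / float32-out model.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="best_int8.tflite")
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]

frame = np.random.randint(0, 256, size=inp['shape'], dtype=np.uint8)  # placeholder input
interpreter.set_tensor(inp['index'], frame)
interpreter.invoke()
pred = interpreter.get_tensor(out['index'])  # already float32, no dequantization step
print(pred.dtype, pred.shape)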