"""Quantize a YOLO SavedModel to a fully-integer (INT8) TFLite model.

Calibrates the quantizer with a folder of representative images, converts
the SavedModel with uint8 input/output types, and writes ``best_int8.tflite``
to the current working directory.
"""

import os

import numpy as np
import tensorflow as tf
from PIL import Image

# Anchor all input paths to this script's directory, not the CWD.
# (Renamed from `dir`, which shadowed the builtin.)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

# --- Path to the SavedModel ---
saved_model_dir = os.path.join(
    SCRIPT_DIR, "yolo_training", "NAO_detector9", "weights", "best_saved_model"
)

# --- Path to example images used for calibration ---
# e.g. 50-100 JPGs from your dataset (e.g. NAO robot images)
image_dir = os.path.join(SCRIPT_DIR, "calib_images")

input_size = (320, 320)  # or (640, 640), depending on your model's input


def representative_data_gen():
    """Yield calibration batches of shape (1, H, W, 3), float32 in [0, 1].

    The converter samples these batches to determine quantization ranges.
    """
    for filename in os.listdir(image_dir):
        # Case-insensitive tuple match: also picks up .JPG/.PNG/.jpeg,
        # which the original or-chain of endswith() calls silently skipped.
        if filename.lower().endswith((".jpg", ".jpeg", ".png")):
            img = Image.open(os.path.join(image_dir, filename)).convert("RGB")
            img = img.resize(input_size)
            batch = np.asarray(img, dtype=np.float32) / 255.0  # normalize if needed
            yield [np.expand_dims(batch, axis=0)]


# --- Configure the converter for full-integer quantization ---
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]

# Depending on the target hardware (e.g. NAO): INT8 or UINT8 in/out
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8

# --- Convert ---
quant_model = converter.convert()

# --- Save (to the current working directory, as before) ---
with open("best_int8.tflite", "wb") as f:
    f.write(quant_model)

print("✅ INT8-Quantisierung abgeschlossen.")