import cv2
import numpy as np
import tflite_runtime.interpreter as tflite
from picamera2 import Picamera2, Preview
import RPi.GPIO as GPIO
from time import sleep
from gpiozero import DistanceSensor
# GPIO setup
GPIO.setmode(GPIO.BCM)
# Motor pins for the L298N motor driver
motor_in1 = 17
motor_in2 = 18
motor_in3 = 22
motor_in4 = 23
enA = 25
enB = 24
# Motor setup
GPIO.setup(motor_in1, GPIO.OUT)
GPIO.setup(motor_in2, GPIO.OUT)
GPIO.setup(motor_in3, GPIO.OUT)
GPIO.setup(motor_in4, GPIO.OUT)
GPIO.setup(enA, GPIO.OUT)
GPIO.setup(enB, GPIO.OUT)
# Set PWM for speed control
pwmA = GPIO.PWM(enA, 100)
pwmB = GPIO.PWM(enB, 100)
pwmA.start(50)
pwmB.start(50)
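# 100 Hz PWM on the L298N enable pins; a 50% duty cycle gives roughly half speed.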
# Ultrasonic sensor setup
sensor = DistanceSensor(echo=6, trigger=5)
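# HC-SR04-style sensor on BCM pins 5 (trigger) and 6 (echo); gpiozero reports the distance in metres.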
# Load TFLite model and allocate tensors
interpreter = tflite.Interpreter(model_path="pikachu_detector.tflite")
interpreter.allocate_tensors()
# Get input and output tensors
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
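# input_details/output_details hold the tensor shapes, dtypes and indices used in the loop below.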
# Initialize camera
picam2 = Picamera2()
# Request 3-channel frames so captures match the model's expected input depth
# (the default preview format carries an extra padding/alpha channel).
picam2.configure(picam2.create_preview_configuration(main={"size": (640, 480), "format": "RGB888"}))
picam2.start_preview(Preview.QTGL)
picam2.start()
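# Motor control helpers. Assumed wiring: IN1/IN2 drive one side of the chassis
# and IN3/IN4 the other; swap the HIGH/LOW pairs if your motors run the wrong way.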
def forward():
    GPIO.output(motor_in1, GPIO.HIGH)
    GPIO.output(motor_in2, GPIO.LOW)
    GPIO.output(motor_in3, GPIO.HIGH)
    GPIO.output(motor_in4, GPIO.LOW)

def backward():
    GPIO.output(motor_in1, GPIO.LOW)
    GPIO.output(motor_in2, GPIO.HIGH)
    GPIO.output(motor_in3, GPIO.LOW)
    GPIO.output(motor_in4, GPIO.HIGH)

def stop():
    GPIO.output(motor_in1, GPIO.LOW)
    GPIO.output(motor_in2, GPIO.LOW)
    GPIO.output(motor_in3, GPIO.LOW)
    GPIO.output(motor_in4, GPIO.LOW)

def turn_left():
    GPIO.output(motor_in1, GPIO.LOW)
    GPIO.output(motor_in2, GPIO.HIGH)
    GPIO.output(motor_in3, GPIO.HIGH)
    GPIO.output(motor_in4, GPIO.LOW)

def turn_right():
    GPIO.output(motor_in1, GPIO.HIGH)
    GPIO.output(motor_in2, GPIO.LOW)
    GPIO.output(motor_in3, GPIO.LOW)
    GPIO.output(motor_in4, GPIO.HIGH)
try:
    while True:
        # Capture image
        image = picam2.capture_array()

        # Preprocess image for model input
        # (cv2.resize expects (width, height); the tensor shape is (1, height, width, channels))
        input_shape = input_details[0]['shape']
        image_resized = cv2.resize(image, (int(input_shape[2]), int(input_shape[1])))
        input_data = np.expand_dims(image_resized, axis=0)
        input_data = np.float32(input_data)
        # NOTE: if the model was trained on pixel values scaled to [0, 1], also divide by 255.0 here.

        # Perform the detection
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]['index'])

        # Get detection results (assumes the model outputs a single Pikachu-probability score)
        detection_threshold = 0.5
        pikachu_detected = output_data[0][0] > detection_threshold

        # Check distance from ultrasonic sensor
        distance = sensor.distance * 100  # Convert to cm

        # Decision logic: obstacle avoidance takes priority over following Pikachu
        if distance < 10:
            stop()
            print("Obstacle detected, stopping.")
        elif pikachu_detected:
            forward()
            print("Pikachu detected, moving forward.")
        else:
            stop()
            print("No Pikachu detected, stopping.")

        # Display the image (use cv2.cvtColor if the colours look swapped for your camera format)
        if pikachu_detected:
            cv2.putText(image, 'Pikachu!', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imshow("Pikachu Detector", image)

        # Press 'q' to exit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        sleep(0.1)
except KeyboardInterrupt:
    pass
finally:
    # Halt the motors and release hardware resources
    stop()
    pwmA.stop()
    pwmB.stop()
    picam2.stop()
    cv2.destroyAllWindows()
    GPIO.cleanup()