This is my first post on Qiita. I would like to write about how a computer science student uses Python, OpenCV, and a drone to generate and detect AR markers and estimate their pose.
I referred to this article: https://qiita.com/ReoNagai/items/a8fdee89b1686ec31d10
This time I use ArUco, an AR marker library included as one of OpenCV's contrib modules. An ArUco marker can be generated with the following code.
generate.py
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2

aruco = cv2.aruco  # ArUco module bundled with OpenCV
dir(aruco)  # list the available ArUco functions (optional)

# Predefined dictionary of 50 4x4 markers
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)

# Draw marker ID 1 as a 300x300 px image and save it
marker = aruco.drawMarker(dictionary, 1, 300)
cv2.imwrite('id-001.png', marker)
```
aruco.DICT_4X4_50 is a predefined dictionary of 50 different 4x4 markers. Generating one of them produces an image like this.
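If you need more than one marker, one option is to loop over every ID in the dictionary. A minimal sketch, assuming the same dictionary as above (the output filenames are my own choice):

```python
# A minimal sketch: generate all 50 markers in DICT_4X4_50
# (the output filenames are my own choice)
import cv2

aruco = cv2.aruco
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)

for marker_id in range(50):  # DICT_4X4_50 contains IDs 0-49
    marker = aruco.drawMarker(dictionary, marker_id, 300)  # 300x300 px image
    cv2.imwrite('id-{:03d}.png'.format(marker_id), marker)
```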
Print the marker out and stick it wherever you want to detect it. I used it to track the drone, so I stuck it on the back of the drone like this.
I use Parrot's AR.Drone 2.0.
Parrot AR.Drone 2.0, https://www.parrot.com/jp/doron/parrot-ardrone-20-power-edition (accessed 2020-04-22)
detect_drone.py
```python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import sys
from pyardrone import ARDrone
import logging

logging.basicConfig(level=logging.DEBUG)

client = ARDrone()
client.video_ready.wait()  # wait until the drone's video stream is available

cnt = 0
aruco = cv2.aruco  # ArUco library
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)

try:
    while True:
        frame = client.frame
        corners, ids, rejectedImgPoints = aruco.detectMarkers(frame, dictionary)  # detect markers
        aruco.drawDetectedMarkers(frame, corners, ids, (0, 255, 0))  # draw borders and IDs on the frame
        cv2.imwrite('drone' + str(cnt) + '.png', frame)  # save the annotated frame
        cnt += 1
        if cv2.waitKey(10) == ord(' '):  # press space to stop
            break
finally:
    client.close()
```
detect_camera.py
```python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import sys

aruco = cv2.aruco  # ArUco library
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)

def arReader():
    print(cv2.getBuildInformation())
    cap = cv2.VideoCapture(0)  # open the default camera
    cnt = 0
    while True:
        ret, frame = cap.read()
        if frame is None:
            break
        corners, ids, rejectedImgPoints = aruco.detectMarkers(frame, dictionary)  # detect markers
        print(corners, ids)
        aruco.drawDetectedMarkers(frame, corners, ids, (0, 255, 0))  # draw borders and IDs on the frame
        cv2.imwrite('result' + str(cnt) + '.png', frame)
        cnt += 1
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    arReader()
```
In this way, the marker's border, ID, and corners are detected and drawn on the image.
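For reference, detectMarkers returns corners as a list of (1, 4, 2) arrays holding the four corner pixel coordinates of each marker, and ids as an N x 1 array (or None when nothing is found). A minimal sketch that prints the pixel center of each marker detected in a still image (the input filename is just an example):

```python
# A minimal sketch: print the pixel-coordinate center of each detected marker
# (the input filename is just an example)
import cv2

aruco = cv2.aruco
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)

frame = cv2.imread('result0.png')
if frame is None:
    raise SystemExit('image not found')

corners, ids, rejected = aruco.detectMarkers(frame, dictionary)
if ids is not None:
    for corner, marker_id in zip(corners, ids.ravel()):
        center = corner.reshape(4, 2).mean(axis=0)  # average of the four corner points
        print('id {}: center at ({:.1f}, {:.1f})'.format(marker_id, center[0], center[1]))
```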
To find the distance and tilt between the marker and the camera you would normally reach for something like an infrared sensor, but this time they are computed from the camera's intrinsic parameters and distortion coefficients obtained by camera calibration, together with a rotation matrix, a rotation vector, and a translation vector.
First, calibrate the camera to obtain its intrinsic parameters and distortion coefficients.
calib.py
```python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import glob

# Termination criteria for the sub-pixel corner refinement
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# 3D coordinates of the 7x7 inner chessboard corners (z = 0 plane)
objp = np.zeros((7*7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:7].T.reshape(-1, 2)

objpoints = []  # 3D points in real-world space
imgpoints = []  # 2D points in the image plane

gray_images = glob.glob('chess*.png')
cnt = 0

for fname in gray_images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (7, 7), None)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)
        img = cv2.drawChessboardCorners(img, (7, 7), corners2, ret)
        cv2.imwrite('calib' + str(cnt) + '.png', img)
        cnt += 1

# Compute the intrinsic matrix and distortion coefficients
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print(mtx, dist)
cv2.destroyAllWindows()
```
Execution result

Camera intrinsic parameters

```
[[9.31357583e+03 0.00000000e+00 1.61931898e+03]
 [0.00000000e+00 9.64867367e+03 1.92100899e+03]
 [0.00000000e+00 0.00000000e+00 1.00000000e+00]]
```

Distortion coefficients

```
[[ 0.22229833 -6.34741982  0.01145082  0.01934784 -8.43093571]]
```
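In the intrinsic matrix, the diagonal entries are the focal lengths in pixels and the last column is the optical center; the five distortion values are the usual (k1, k2, p1, p2, k3) coefficients. Rather than copying these numbers into later scripts by hand, one option is to save them from calib.py and load them elsewhere. A minimal sketch (the .npy filenames are my own choice):

```python
# A minimal sketch: save the calibration results from calib.py and reuse them later
# (the .npy filenames are my own choice)
import numpy as np

# At the end of calib.py:
# np.save('camera_matrix.npy', mtx)
# np.save('distortion_coeff.npy', dist)

# In a later script such as distance.py:
camera_matrix = np.load('camera_matrix.npy')
distortion_coeff = np.load('distortion_coeff.npy')
print(camera_matrix, distortion_coeff)
```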
The pose is then calculated using a rotation matrix, a rotation vector, and a translation vector. Replace camera_matrix in the code below with the parameters obtained by your own calibration.
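As background for the code below: estimatePoseSingleMarkers returns a rotation vector rvec and a translation vector tvec for each marker, cv2.Rodrigues converts the rotation vector into a 3x3 rotation matrix, and stacking it with the translation into a 3x4 matrix [R|t] lets cv2.decomposeProjectionMatrix recover roll/pitch/yaw. A minimal sketch with made-up rvec/tvec values, just to show the chain of calls:

```python
# A minimal sketch of the rvec/tvec -> Euler angle chain, using made-up values
import cv2
import numpy as np

rvec = np.array([0.1, 0.2, 0.3])    # example rotation vector [rad]
tvec = np.array([0.05, 0.02, 0.5])  # example translation vector [m]

rot_matrix, _ = cv2.Rodrigues(rvec)                        # 3x3 rotation matrix
proj_matrix = np.hstack((rot_matrix, tvec.reshape(3, 1)))  # 3x4 matrix [R|t]
euler_angles = cv2.decomposeProjectionMatrix(proj_matrix)[6]  # roll, pitch, yaw [deg]
print(euler_angles.ravel())
```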
distance.py
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import sys
from pyardrone import ARDrone
import logging

logging.basicConfig(level=logging.DEBUG)

aruco = cv2.aruco  # ArUco library
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)

client = ARDrone()
client.video_ready.wait()

parameters = aruco.DetectorParameters_create()
parameters.cornerRefinementMethod = aruco.CORNER_REFINE_CONTOUR

def main():
    cnt = 0
    marker_length = 0.1  # side length of the printed marker [m]

    # Intrinsic parameters obtained by the camera calibration above
    # (replace these with the values for your own camera)
    camera_matrix = np.array([[9.31357583e+03, 0.00000000e+00, 1.61931898e+03],
                              [0.00000000e+00, 9.64867367e+03, 1.92100899e+03],
                              [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
    distortion_coeff = np.array([[0.22229833, -6.34741982, 0.01145082, 0.01934784, -8.43093571]])

    try:
        while True:
            corners, ids, rejectedImgPoints = aruco.detectMarkers(client.frame, dictionary, parameters=parameters)
            aruco.drawDetectedMarkers(client.frame, corners, ids, (0, 255, 255))

            if len(corners) > 0:
                for i, corner in enumerate(corners):
                    # rvec -> rotation vector, tvec -> translation vector
                    rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corner, marker_length, camera_matrix, distortion_coeff)
                    tvec = np.squeeze(tvec)
                    rvec = np.squeeze(rvec)

                    # Rotation vector -> 3x3 rotation matrix
                    rvec_matrix = cv2.Rodrigues(rvec)[0]
                    # 3x4 projection matrix [R|t]
                    transpose_tvec = tvec[np.newaxis, :].T
                    proj_matrix = np.hstack((rvec_matrix, transpose_tvec))
                    # Euler angles (roll, pitch, yaw) [deg]
                    euler_angle = cv2.decomposeProjectionMatrix(proj_matrix)[6]

                    print("x : " + str(tvec[0]))
                    print("y : " + str(tvec[1]))
                    print("z : " + str(tvec[2]))
                    print("roll : " + str(euler_angle[0]))
                    print("pitch: " + str(euler_angle[1]))
                    print("yaw : " + str(euler_angle[2]))

                    # Draw the marker's XYZ axes; the drawn axis length is in metres
                    draw_pole_length = marker_length / 2
                    aruco.drawAxis(client.frame, camera_matrix, distortion_coeff, rvec, tvec, draw_pole_length)

            cv2.imwrite('distance' + str(cnt) + '.png', client.frame)
            cnt += 1

            key = cv2.waitKey(50)
            if key == 27:  # ESC to stop
                break
    finally:
        client.close()

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
```
To confirm that the distance is measured correctly, I placed the marker 25 cm and then 50 cm from the camera (drone) and ran the program.
Execution result (25 cm)

```
x : -0.5410338091817037
y : -0.713790809892646
z : 3.828983632577233
roll : [173.43049198]
pitch: [-20.42010647]
yaw : [1.58947772]
```

Execution result (50 cm)

```
x : -1.0633298519994792
y : -1.3476793013004273
z : 7.311742619516305
roll : [-150.91884043]
pitch: [2.45488175]
yaw : [3.61554034]
```
The measured values roughly double between the two runs, as expected. In this way, the translation along each of the XYZ axes and the roll, pitch, and yaw angles can all be obtained.
Based on these values, the drone was made to track the marker with the conditional branching shown in the table below (a sketch of the branching follows the table).
| Distance between marker and camera | Drone behavior |
|---|---|
| up to 0.5 m | Hover |
| 0.5 m to 2.0 m | Move forward |
| over 2.0 m | Land |
| otherwise | Hover |
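The original control code is not included in this post, but a minimal sketch of this branching might look like the following. It uses the z component of tvec as the marker-to-camera distance, and the pyardrone move()/land() calls are illustrative; check the library's API before relying on them.

```python
# A minimal sketch of the distance-based branching in the table above (assumptions:
# tvec comes from estimatePoseSingleMarkers each frame, and pyardrone's
# move()/land() helpers behave as named; treat the exact calls as illustrative).
def control_from_distance(client, tvec):
    distance = tvec[2]  # z component: distance from camera to marker [m]
    if distance < 0.5:
        pass                      # close enough: do nothing and keep hovering
    elif distance < 2.0:
        client.move(forward=0.1)  # move toward the marker
    else:
        client.land()             # too far away: land
```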
The configuration diagram looks like this.
The setup relies on multi-hop communication (old-fashioned, I know), so if a Raspberry Pi could also be mounted on the drone, it could act as a survey terminal in places people cannot enter. To do that I would have to turn a large number of Raspberry Pis into wireless access points and configure dnsmasq and hostapd, but that is too much trouble to cover here, so I will write about it another time.
This time, I used Python and OpenCV to generate and detect AR markers and to estimate their distance and tilt. I think ArUco markers are excellent: they make it easy and accurate to obtain the distance between the marker and the camera, the orientation of the marker, and the ID assigned to each one, yet they do not seem to be very well known...? I will write a drone-control edition someday.