improvements in color calibration and tracking

2018-06-02 12:10:42 +02:00
parent 51d2a139f9
commit a9029061a0
7 changed files with 104 additions and 59 deletions

.gitignore

@@ -1,2 +1,3 @@
 *.sw*
 *.pyc
+*.jpg

scripts/ball_hsv.json

@@ -0,0 +1,8 @@
+{
+    "low_s": 175,
+    "low_v": 100,
+    "high_h": 6,
+    "high_v": 255,
+    "low_h": 0,
+    "high_s": 255
+}
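Not part of the commit, but for orientation: a minimal sketch of how these calibrated bounds can be applied with OpenCV. The test image name is made up; the narrow low-hue band (0-6) picks out red at the bottom of the hue wheel.

    import json
    import cv2

    # Load the calibrated HSV bounds written by the colorpicker.
    with open('scripts/ball_hsv.json') as f:
        cfg = json.load(f)

    lower = (cfg['low_h'], cfg['low_s'], cfg['low_v'])
    upper = (cfg['high_h'], cfg['high_s'], cfg['high_v'])

    # Threshold a BGR test image ('ball.jpg' is a placeholder name).
    frame = cv2.imread('ball.jpg')
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    cv2.imwrite('ball_mask.jpg', mask)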

@@ -57,6 +57,7 @@ class Colorpicker(object):
             tuple(map(self.settings.get, ('low_h', 'low_s', 'low_v'))),
             tuple(map(self.settings.get, ('high_h', 'high_s', 'high_v')))
         )
+        frame_threshold = cv2.resize(frame_threshold, (640, 480))
         cv2.imshow(self.WINDOW_CAPTURE_NAME, frame)
         cv2.imshow(self.WINDOW_DETECTION_NAME, frame_threshold)
         return cv2.waitKey(1)
@@ -107,9 +108,19 @@ if __name__ == '__main__':
         choices=['upper', 'lower'],
         help='choose a camera from nao'
     )
+    parser.add_argument(
+        '--nao-res',
+        choices=[1, 2, 3],
+        type=int,
+        default=1
+    )
 
     args = parser.parse_args()
     cp = Colorpicker()
+    camera_ids = {
+        'upper': 0,
+        'lower': 1
+    }
     if args.input_config:
         cp.load(args.input_config)
     if args.video_file:
@@ -119,7 +130,8 @@ if __name__ == '__main__':
     elif args.nao_ip:
         rdr = NaoImageReader(
             args.nao_ip,
-            cam_id=args.nao_cam if args.nao_cam else 0
+            cam_id=camera_ids[args.nao_cam] if args.nao_cam else 0,
+            res=args.nao_res
         )
     else:
         rdr = VideoReader(0)

@@ -1,3 +1,5 @@
+from __future__ import division
 import numpy as np
 import cv2
 try:
@@ -21,6 +23,14 @@ class NaoImageReader(object):
"video_streamer", cam_id, res, 13, fps "video_streamer", cam_id, res, 13, fps
) )
def to_angles(self, x, y):
return self.vd.getAngularPositionFromImagePosition(
self.cam_id, x / self.res[1], y / self.res[0]
)
def to_relative(self, x, y):
return x / self.res[1], y / self.res[0]
def get_frame(self): def get_frame(self):
result = self.vd.getImageRemote(self.sub) result = self.vd.getImageRemote(self.sub)
self.vd.releaseImage(self.sub) self.vd.releaseImage(self.sub)
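The two new helpers convert pixel coordinates into the relative [0, 1] coordinates (and, via ALVideoDevice, angular positions) that the tracking script below relies on. A standalone sketch of the same normalization, assuming the reader stores its resolution as (height, width), which is what the res[1]/res[0] indexing above suggests:

    # Illustrative re-implementation of to_relative(); the res order is an assumption.
    def to_relative(x, y, res=(480, 640)):
        height, width = res
        return x / width, y / height

    # A ball at pixel (320, 240) of a 640x480 frame maps to the image center.
    print(to_relative(320, 240))  # -> (0.5, 0.5)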

@@ -4,7 +4,7 @@ from __future__ import division
 import json
 import cv2
 import numpy as np
-import imutils
+# import imutils
 from imagereaders import NaoImageReader, VideoReader
 from collections import deque
@@ -15,7 +15,8 @@ red_upper = (2, 255, 255)
 class BallFinder(object):
 
-    def __init__(self, hsv_lower, hsv_upper, min_radius, width):
+    def __init__(self, hsv_lower, hsv_upper, min_radius, width,
+                 viz=False):
         self.hsv_lower = hsv_lower
         self.hsv_upper = hsv_upper
@@ -24,9 +25,11 @@ class BallFinder(object):
         self.history = deque(maxlen=64)
         self.last_center = None
         self.last_radius = None
+        self.viz = viz
 
-        cv2.namedWindow('ball_mask')
-        cv2.namedWindow('Frame')
+        if self.viz:
+            cv2.namedWindow('ball_mask')
+            cv2.namedWindow('Frame')
 
     def find_colored_ball(self, frame):
         hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
@@ -36,7 +39,8 @@ class BallFinder(object):
         mask = cv2.inRange(hsv, self.hsv_lower, self.hsv_upper)
         mask = cv2.erode(mask, None, iterations=2)
         mask = cv2.dilate(mask, None, iterations=2)
-        cv2.imshow('ball_mask', mask)
+        if self.viz:
+            cv2.imshow('ball_mask', mask)
 
         # find contours in the mask and initialize the current
         # (x, y) center of the ball
@@ -45,6 +49,7 @@ class BallFinder(object):
         # only proceed if at least one contour was found
         if len(cnts) == 0:
+            print('Nothin there')
             return None
 
         # find the largest contour in the mask, then use it to compute
@@ -61,8 +66,8 @@ class BallFinder(object):
     def next_frame(self, frame):
         # maybe resize the frame, maybe blur it
-        if self.width is not None:
-            frame = imutils.resize(frame, width=self.width)
+        # if self.width is not None:
+        #     frame = imutils.resize(frame, width=self.width)
         try:
             self.last_center, self.last_radius = self.find_colored_ball(frame)
         except TypeError:  # No red ball found and function returned None
@@ -72,8 +77,9 @@ class BallFinder(object):
         self.draw_ball_markers(frame)
 
         # show the frame to screen
-        cv2.imshow("Frame", frame)
-        return cv2.waitKey(2)
+        if self.viz:
+            cv2.imshow("Frame", frame)
+            return cv2.waitKey(2)
 
     def draw_ball_markers(self, frame):
         # draw the enclosing circle and ball's centroid on the frame,
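Not part of the diff: a usage sketch for the new viz flag, assuming BallFinder lives in live_recognition.py (as the imports elsewhere in this commit suggest) and using the HSV bounds from scripts/ball_hsv.json.

    from live_recognition import BallFinder

    # On the robot: run headless, with no cv2.namedWindow / cv2.imshow calls.
    finder = BallFinder((0, 175, 100), (6, 255, 255), min_radius=5, width=None)

    # On a desktop: viz=True opens the 'Frame' and 'ball_mask' debug windows.
    debug_finder = BallFinder((0, 175, 100), (6, 255, 255), 5, None, viz=True)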

@@ -1,13 +1,11 @@
 from __future__ import print_function
 from __future__ import division
-import cv2
-import numpy as np
 #import imutils
 from naoqi import ALProxy
-from collections import deque
 from imagereaders import NaoImageReader
 from live_recognition import BallFinder
+from live_recognition_with_head_with_body import move_to
 
 
 # Nao configuration
@@ -28,8 +26,7 @@ min_radius = 5
 resized_width = None  # Maybe we need it maybe don't (None if don't)
 current_value = 0
-global counter
-counter=0
+counter = 0
 
 
 def get_angle():
     robotIP="192.168.0.11"
@@ -50,41 +47,48 @@ def set_angle_new(x,y):
     # activates the joints
     motionProxy.setStiffnesses("Head", 0.5)
     names = ["HeadYaw", "HeadPitch"]
-    fractionMaxSpeed = 0.025
-    x_mid=320/2
-    y_mid=240/2
-    #y_mid=120
-    x_diff=x-x_mid
-    y_diff=y-y_mid
+    fractionMaxSpeed = 0.3
+    x_diff=x-0.5
+    y_diff=y-0.5
     print("x_diff="+str(x_diff))
     print("y_diff="+str(y_diff))
 
-    #videoProxy=ALProxy('ALVideoDevice', robotIP, PORT)
-    #ball_angles=videoProxy.getAngularPositionFromImagePosition(cam_id,[x/320,y/240])
-    #print(ball_angles)
-    #ball_angle_x=ball_angles[0]
-    #ball_angle_y=ball_angles[1]
-    #print("ball_angle_x="+str(ball_angle_x))
-    #print("ball_angle_y="+str(ball_angle_y))
-    #ball_angle_x_diff=ball_angle_x+
-    #ball_angle_y_diff=ball_angle_y-99
-    #print(ball_angle_x_diff*3.14/180)
-    #print(ball_angle_y_diff)
+    # videoProxy=ALProxy('ALVideoDevice', robotIP, PORT)
+    # ball_angles=videoProxy.getAngularPositionFromImagePosition(cam_id,[x/320,y/240])
+    # print(ball_angles)
+    # ball_angle_x=ball_angles[0]
+    # ball_angle_y=ball_angles[1]
+    # print("ball_angle_x="+str(ball_angle_x))
+    # print("ball_angle_y="+str(ball_angle_y))
+    # ball_angle_x_diff=ball_angle_x+
+    # ball_angle_y_diff=ball_angle_y-99
+    # print(ball_angle_x_diff*3.14/180)
+    # print(ball_angle_y_diff)
     # print(ball_angles-[-169,99])
-    #[-169.53343200683594, 99.27782440185547] (x_mid,y_mid)
-    #if abs(ball_angle_x)>0.2 and abs(ball_angle_y)>0.01:
-    #angles=[ball_angle_x,0]
-    #angles=[0.25*ball_angle_x,0.25*ball_angle_y]
-    #angles=[0.25*ball_angle_x,0.25*ball_angle_y]
-    #angles=[2*(-1 if x_diff > 0 else 1),2*(-1 if y_diff > 0 else 1)]
-    angles=[2*(-1 if x_diff > 0 else 1),0]
+    # [-169.53343200683594, 99.27782440185547] (x_mid,y_mid)
+    # if abs(ball_angle_x)>0.2 and abs(ball_angle_y)>0.01:
+    # angles=[ball_angle_x,0]
+    # angles=[0.25*ball_angle_x,0.25*ball_angle_y]
+    # angles=[0.25*ball_angle_x,0.25*ball_angle_y]
+    # angles=[2*(-1 if x_diff > 0 else 1),2*(-1 if y_diff > 0 else 1)]
+    angles=[-x_diff / 2, 0]
     #if abs(ball_angle_x)>0.1 or abs(ball_angle_y)>0.1:
     #if abs(x_diff)>50 or abs(y_diff)>50:
-    if abs(x_diff)>50:
-        motionProxy.changeAngles(names, angles, fractionMaxSpeed)
+    global counter
+    if abs(x_diff) > 0.1:
+        motionProxy.changeAngles(names, angles, fractionMaxSpeed)
+        counter = 0
+    else:
+        counter += 1
+        print(counter)
+        if counter == 10:
+            print('Going to rotate')
+            angle = get_angle()
+            if abs(angle[1]) > 0.1:
+                move_to(0, angle[1])
     #motionProxy.setAngles(names,angles,fractionMaxSpeed)
     #else:
     #    a=get_angle()
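In short, the new logic steers the head proportionally to the ball's horizontal offset and only rotates the body once the ball has stayed centered for ten consecutive frames. A sketch of the head-correction rule, using the same 0.1 deadband and -x_diff / 2 gain as above (the function name is illustrative, not from the code):

    def head_yaw_correction(x_rel, deadband=0.1, gain=0.5):
        # x_rel is the ball's relative horizontal position in [0, 1];
        # 0.5 means the ball sits in the middle of the image.
        x_diff = x_rel - 0.5
        if abs(x_diff) <= deadband:
            return 0.0             # close enough: leave the head still
        return -gain * x_diff      # turn the head towards the ball

    print(head_yaw_correction(0.8))   # ball far right -> negative yaw change
    print(head_yaw_correction(0.52))  # inside the deadband -> 0.0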
@@ -160,12 +164,16 @@ if __name__ == '__main__':
     video = NaoImageReader(nao_ip, port=nao_port, cam_id=cam_id, res=res,
                            fps=fps)
     finder = BallFinder(red_lower, red_upper, 5, None)
+    finder.load_hsv_config('ball_hsv.json')
     try:
         while True:
             try:
                 (x, y), radius = finder.find_colored_ball(video.get_frame())
             except TypeError:
                 continue
+            print(x, y)
+            x, y = video.to_relative(x, y)
+            print(x, y)
             set_angle_new(x,y)
             '''
             if 0<y<100:

@@ -89,40 +89,40 @@ def move_to(old,y):
     # First call of move API
     # with post prefix to not be blocking here.
-    motionProxy.post.moveTo(0, 0.0, 0)
+    # motionProxy.post.moveTo(0, 0.0, 0)
 
     # wait for the move process to start running
-    time.sleep(0.1)
+    # time.sleep(0.1)
 
     # get robotPosition and nextRobotPosition
-    useSensors = False
-    robotPosition = almath.Pose2D(motionProxy.getRobotPosition(useSensors))
-    nextRobotPosition = almath.Pose2D(motionProxy.getNextRobotPosition())
+    # useSensors = False
+    # robotPosition = almath.Pose2D(motionProxy.getRobotPosition(useSensors))
+    # nextRobotPosition = almath.Pose2D(motionProxy.getNextRobotPosition())
 
     # get the first foot steps vector
     # (footPosition, unChangeable and changeable steps)
-    footSteps1 = []
+    # footSteps1 = []
 
     #try:
-    footSteps1 = motionProxy.getFootSteps()
+    # footSteps1 = motionProxy.getFootSteps()
     #except Exception, errorMsg:
     #    print str(errorMsg)
     #    PLOT_ALLOW = False
 
     # Second call of move API
-    motionProxy.post.moveTo(0, 0.0, 1.2*y)
+    motionProxy.post.moveTo(0, 0.0, y)
 
     # get the second foot steps vector
-    footSteps2 = []
+    # footSteps2 = []
     #try:
-    footSteps2 = motionProxy.getFootSteps()
+    # footSteps2 = motionProxy.getFootSteps()
     #except Exception, errorMsg:
     #print str(errorMsg)
     #PLOT_ALLOW = False
 
-    motionProxy.setStiffnesses("Head", 0.5)
+    motionProxy.setStiffnesses("Head", 0.7)
     names = ["HeadYaw", "HeadPitch"]
-    fractionMaxSpeed = 0.5
-    angles=[0,old]
+    fractionMaxSpeed = 0.05
+    angles=[0, 0]
     motionProxy.setAngles(names,angles,fractionMaxSpeed)