Major refactoring for better usability

1. Created interface classes for reading video streams
   (in imagereaders.py)
2. Created a class for ball detection (for reusability)
3. Reworked colorpicker; it is now possible to choose the mode of
   operation from the command line (available sources are live from a
   Nao robot, a video file, or a webcam). It is also possible to
   capture only the first image of a stream and work on it. The
   colorpicker can now save the settings on exit and load settings
   on startup.
This commit is contained in:
2018-05-31 20:23:49 +02:00
parent 57cf0b2206
commit 69b1b137a2
4 changed files with 260 additions and 341 deletions

View File

@@ -1,131 +1,133 @@
# usage: python colorpicker.py
# or
# python colorpicker.py 640 480 -> set the display resolution
from __future__ import print_function
from live_recognition import get_frame_nao
import cv2 as cv
import imutils
from naoqi import ALProxy
import sys
# OpenCV HSV bounds: S and V range over 0..255; hue is stored halved,
# so its maximum is 360 // 2 = 180.
max_value = 255
max_value_H = 360 // 2
# Previous wide-open defaults, kept commented out for reference:
#low_H = 0
#low_S = 0
#low_V = 0
#high_H = max_value_H
#high_S = max_value
#high_V = max_value
# Current thresholds: a narrow low-hue band with high saturation and
# value.  NOTE(review): presumably tuned for a red-ish target — confirm.
low_H=0
low_S=185
low_V=170
high_H=2
high_S=255
high_V=255
# Window titles and trackbar labels used by the OpenCV GUI below.
window_capture_name = 'Video Capture'
window_detection_name = 'Object Detection'
low_H_name = 'Low H'
low_S_name = 'Low S'
low_V_name = 'Low V'
high_H_name = 'High H'
high_S_name = 'High S'
high_V_name = 'High V'
import json
import argparse
import cv2
from imagereaders import VideoReader, NaoImageReader
# import imutils
def do_print():
    """Dump the current global HSV thresholds to stdout as '(lows): (highs)'."""
    lows = (low_H, low_S, low_V)
    highs = (high_H, high_S, high_V)
    print('(%s %s %s): (%s %s %s)' % (lows + highs))
class Colorpicker(object):
def on_low_H_thresh_trackbar(val):
    """Store the low-H slider value, clamped strictly below high_H, and sync the GUI."""
    global low_H
    # Keep low_H < high_H at all times.
    low_H = val if val < high_H else high_H - 1
    cv.setTrackbarPos(low_H_name, window_detection_name, low_H)
    do_print()
# Titles of the two OpenCV display windows (class-level constants of
# the new Colorpicker class).
WINDOW_CAPTURE_NAME = 'Video Capture'
WINDOW_DETECTION_NAME = 'Object Detection'
def on_high_H_thresh_trackbar(val):
    """Store the high-H slider value, clamped strictly above low_H, and sync the GUI."""
    global high_H
    # Keep high_H > low_H at all times.
    high_H = val if val > low_H else low_H + 1
    cv.setTrackbarPos(high_H_name, window_detection_name, high_H)
    do_print()
def __init__(self):
    """Set up HSV threshold state and the OpenCV windows/trackbars.

    Creates six trackbars (low/high x H/S/V) on the detection window.
    Each trackbar callback clamps its value so every ``low_*`` stays
    strictly below the matching ``high_*``.
    """
    # Thresholds start wide open (the whole HSV space); OpenCV uses
    # H in [0, 180) and S, V in [0, 255].  Defined first so the clamp
    # lambdas below never dereference a missing attribute.
    self.settings = {
        'low_h': 0,
        'low_s': 0,
        'low_v': 0,
        'high_h': 180,
        'high_s': 255,
        'high_v': 255
    }
    parameters = ['low_h', 'low_s', 'low_v', 'high_h', 'high_s', 'high_v']
    maxes = [180, 255, 255, 180, 255, 255]
    # Clamp rules, evaluated lazily at callback time so they always see
    # the current settings.
    checkers = [
        lambda x: min(x, self.settings['high_h'] - 1),  # LOW H
        lambda x: min(x, self.settings['high_s'] - 1),  # LOW S
        lambda x: min(x, self.settings['high_v'] - 1),  # LOW V (comment fixed: was mislabelled "LOW H")
        lambda x: max(x, self.settings['low_h'] + 1),   # HIGH H
        lambda x: max(x, self.settings['low_s'] + 1),   # HIGH S
        lambda x: max(x, self.settings['low_v'] + 1),   # HIGH V
    ]
    cv2.namedWindow(self.WINDOW_CAPTURE_NAME)
    cv2.namedWindow(self.WINDOW_DETECTION_NAME)
    # NOTE(review): cv2.createTrackbar returns None, so self.trackers
    # holds no useful handles; the attribute is kept for compatibility.
    self.trackers = [
        cv2.createTrackbar(
            name, self.WINDOW_DETECTION_NAME, self.settings[name], max_v,
            # Default-argument binding avoids the late-binding closure
            # pitfall: each callback captures its own name/checker.
            lambda val, name=name, checker=checker: self._on_trackbar(
                val, name, checker
            )
        )
        for name, max_v, checker in zip(parameters, maxes, checkers)
    ]
def on_low_S_thresh_trackbar(val):
    """Store the low-S slider value, clamped strictly below high_S, and sync the GUI."""
    global low_S
    # Keep low_S < high_S at all times.
    low_S = val if val < high_S else high_S - 1
    cv.setTrackbarPos(low_S_name, window_detection_name, low_S)
    do_print()
def do_print(self):
    """Print the current threshold settings dict to stdout."""
    current = self.settings
    print(current)
def on_high_S_thresh_trackbar(val):
    """Store the high-S slider value, clamped strictly above low_S, and sync the GUI."""
    global high_S
    # Keep high_S > low_S at all times.
    high_S = val if val > low_S else low_S + 1
    cv.setTrackbarPos(high_S_name, window_detection_name, high_S)
    do_print()
def _on_trackbar(self, val, name, checker):
    """Store a clamped slider value and push it back to the GUI trackbar."""
    clamped = checker(val)
    self.settings[name] = clamped
    # Reflect the clamped value on the trackbar itself, so the GUI never
    # shows an out-of-range position.
    cv2.setTrackbarPos(name, self.WINDOW_DETECTION_NAME, clamped)
def on_low_V_thresh_trackbar(val):
    """Store the low-V slider value, clamped strictly below high_V, and sync the GUI."""
    global low_V
    # Keep low_V < high_V at all times.
    low_V = val if val < high_V else high_V - 1
    cv.setTrackbarPos(low_V_name, window_detection_name, low_V)
    do_print()
def on_high_V_thresh_trackbar(val):
    """Store the high-V slider value, clamped strictly above low_V, and sync the GUI."""
    global high_V
    # Keep high_V > low_V at all times.
    high_V = val if val > low_V else low_V + 1
    cv.setTrackbarPos(high_V_name, window_detection_name, high_V)
    do_print()
cap = cv.VideoCapture(0)
cv.namedWindow(window_capture_name)
cv.namedWindow(window_detection_name)
cv.createTrackbar(
low_H_name, window_detection_name, low_H,
max_value_H, on_low_H_thresh_trackbar
)
cv.createTrackbar(
high_H_name, window_detection_name , high_H, max_value_H,
on_high_H_thresh_trackbar
)
cv.createTrackbar(
low_S_name, window_detection_name , low_S, max_value,
on_low_S_thresh_trackbar
)
cv.createTrackbar(
high_S_name, window_detection_name , high_S, max_value,
on_high_S_thresh_trackbar
)
cv.createTrackbar(
low_V_name, window_detection_name , low_V, max_value,
on_low_V_thresh_trackbar
)
cv.createTrackbar(
high_V_name, window_detection_name , high_V, max_value,
on_high_V_thresh_trackbar
)
vd_proxy = ALProxy('ALVideoDevice', '192.168.0.11', 9559)
cam_subscriber = vd_proxy.subscribeCamera(
"ball_finder", 0, 3, 13, 1
)
frame = get_frame_nao(vd_proxy, cam_subscriber, 1280, 960)
try:
while True:
frame_HSV = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
frame_threshold = cv.inRange(
frame_HSV, (low_H, low_S, low_V), (high_H, high_S, high_V)
def show_frame(self, frame):
frame_HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
frame_threshold = cv2.inRange(
frame_HSV,
tuple(map(self.settings.get, ('low_h', 'low_s', 'low_v'))),
tuple(map(self.settings.get, ('high_h', 'high_s', 'high_v')))
)
if len(sys.argv) > 1:
frame_threshold = cv.resize(frame_threshold, (int(sys.argv[1]), int(sys.argv[2])))
cv.imshow(window_capture_name, frame)
cv.imshow(window_detection_name, frame_threshold)
cv2.imshow(self.WINDOW_CAPTURE_NAME, frame)
cv2.imshow(self.WINDOW_DETECTION_NAME, frame_threshold)
return cv2.waitKey(1)
key = cv.waitKey(1)
if key == ord('q') or key == 27:
break
finally:
vd_proxy.unsubscribe(cam_subscriber)
def save(self, filename):
    """Write the current settings to *filename* as pretty-printed JSON."""
    with open(filename, 'w') as out:
        json.dump(self.settings, out, indent=4)
def load(self, filename):
    """Read settings from a JSON file and push every value to its trackbar."""
    with open(filename) as src:
        self.settings = json.load(src)
    # Sync the GUI so the sliders reflect the loaded values.
    for key in self.settings:
        cv2.setTrackbarPos(key, self.WINDOW_DETECTION_NAME,
                           self.settings[key])
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        epilog='When called without arguments specifying the video source, ' +
        'will try to use the webcam')
    parser.add_argument(
        '-o', '--output-config',
        help='file, to which the settings will be saved (if given)'
    )
    parser.add_argument(
        '-i', '--input-config',
        help='file, from which to read the initial values'
    )
    parser.add_argument(
        '--video-file',
        help='video file to use'
    )
    parser.add_argument(
        '--still',
        help='only take one image from video stream',
        action='store_true'
    )
    parser.add_argument(
        '--nao-ip',
        help='ip address of the nao robot, from which to capture'
    )
    parser.add_argument(
        '--nao-cam',
        choices=['upper', 'lower'],
        help='choose a camera from nao'
    )
    args = parser.parse_args()

    cp = Colorpicker()
    if args.input_config:
        cp.load(args.input_config)

    # Pick the video source: file > Nao camera > local webcam.
    if args.video_file:
        rdr = VideoReader(args.video_file, loop=True)
    elif args.nao_ip:
        # Map the symbolic camera name to the NAOqi camera index
        # (0 = top/upper, 1 = bottom/lower).  Previously the raw string
        # 'upper'/'lower' was passed in one branch but the int 0 in the
        # other; now cam_id is always an int, defaulting to the upper cam.
        rdr = NaoImageReader(
            args.nao_ip,
            cam_id={'upper': 0, 'lower': 1}[args.nao_cam]
            if args.nao_cam else 0
        )
    else:
        rdr = VideoReader(0)

    try:
        # With --still, grab a single frame and keep re-displaying it;
        # otherwise pull a fresh frame on every iteration.
        if args.still:
            frame = rdr.get_frame()
        while True:
            if not args.still:
                frame = rdr.get_frame()
            key = cp.show_frame(frame)
            if key == ord('q') or key == 27:  # 'q' or Esc quits
                break
    finally:
        # Always report the final thresholds, optionally persist them,
        # and release the video source.
        cp.do_print()
        if args.output_config:
            cp.save(args.output_config)
        rdr.close()