diff --git a/pf-ball-demo/.gitignore b/pf-ball-demo/.gitignore
new file mode 100644
index 0000000..f0e87d2
--- /dev/null
+++ b/pf-ball-demo/.gitignore
@@ -0,0 +1,5 @@
+cmake-build-debug
+cmake-build-release
+build
+.vscode
+.idea
\ No newline at end of file
diff --git a/pf-ball-demo/CMakeLists.txt b/pf-ball-demo/CMakeLists.txt
new file mode 100644
index 0000000..f223efe
--- /dev/null
+++ b/pf-ball-demo/CMakeLists.txt
@@ -0,0 +1,45 @@
+# requires minimum cmake version
+cmake_minimum_required(VERSION 3.5)
+
+project(vGazeDemo)
+
+find_package(event-driven REQUIRED)
+find_package(YARP REQUIRED COMPONENTS os sig dev math cv)
+
+set(module_name atp)
+add_executable(${module_name} attend_track_predict.cpp attend_track_predict.h)
+target_link_libraries(${module_name} PRIVATE YARP::YARP_os
+ YARP::YARP_init
+ YARP::YARP_dev
+ YARP::YARP_math
+ ev::event-driven)
+
+install(TARGETS ${module_name} DESTINATION ${CMAKE_INSTALL_BINDIR})
+
+set(module_name hough)
+add_executable(${module_name} module.cpp houghAlgorithm.cpp module.h houghAlgorithm.h)
+target_link_libraries(${module_name} PRIVATE YARP::YARP_os
+ YARP::YARP_init
+ YARP::YARP_dev
+ YARP::YARP_math
+ ev::event-driven)
+
+install(TARGETS ${module_name} DESTINATION ${CMAKE_INSTALL_BINDIR})
+
+set(module_name pf)
+add_executable(${module_name} vControlLoopDelay.cpp vParticle.cpp vControlLoopDelay.h vParticle.h)
+target_link_libraries(${module_name} PRIVATE YARP::YARP_os
+ YARP::YARP_init
+ YARP::YARP_dev
+ YARP::YARP_math
+ ev::event-driven)
+install(TARGETS ${module_name} DESTINATION ${CMAKE_INSTALL_BINDIR})
+
+set(module_name gazeControl)
+add_executable(${module_name} vTrackToRobot.cpp vTrackToRobot.h)
+target_link_libraries(${module_name} PRIVATE YARP::YARP_os
+ YARP::YARP_init
+ YARP::YARP_dev
+ YARP::YARP_math
+ ev::event-driven)
+install(TARGETS ${module_name} DESTINATION ${CMAKE_INSTALL_BINDIR})
diff --git a/pf-ball-demo/app_vGazeDemo2.xml b/pf-ball-demo/app_vGazeDemo2.xml
new file mode 100644
index 0000000..9bdddbe
--- /dev/null
+++ b/pf-ball-demo/app_vGazeDemo2.xml
@@ -0,0 +1,218 @@
+<application>
+<name>vPFBallDemo2</name>
+
+<module>
+    <name>zynqGrabber</name>
+    <node>icub-zynq</node>
+</module>
+
+<module>
+    <name>vPreProcess</name>
+    <parameters>--flipy --flipx --split --undistort false --filter_temporal --filter_spatial</parameters>
+    <node>iiticublap267</node>
+</module>
+
+<module>
+    <name>vATP</name>
+    <node>iiticublap267</node>
+</module>
+
+<module>
+    <name>vCircleDetection</name>
+    <parameters>--name /hough/left</parameters>
+    <node>iiticublap267</node>
+</module>
+
+<module>
+    <name>vCircleDetection</name>
+    <parameters>--name /hough/right</parameters>
+    <node>iiticublap267</node>
+</module>
+
+<module>
+    <name>vParticleFilterTracker</name>
+    <parameters>--name /vpfL --particles 20 --obsthresh 0.02 --truethresh 0.5 --threads 4 --bins 64 --gain 0.05 --variance 2.0</parameters>
+    <node>iiticublap267</node>
+</module>
+
+<module>
+    <name>vParticleFilterTracker</name>
+    <parameters>--name /vpfR --particles 20 --obsthresh 0.02 --truethresh 0.5 --threads 4 --bins 64 --gain 0.05 --variance 2.0</parameters>
+    <node>iiticublap267</node>
+</module>
+
+<module>
+    <name>vFramerLite</name>
+    <parameters>--name /vFramer --displays "(/left (CIRC AE ISO) /right (CIRC AE ISO))" --frameRate 30</parameters>
+    <node>iiticublap268</node>
+</module>
+
+<module>
+    <name>yarpview</name>
+    <parameters>--name /viewLeft --x 30 --y 30 --w 608 --h 480 --synch</parameters>
+    <node>iiticublap268</node>
+</module>
+
+<module>
+    <name>yarpview</name>
+    <parameters>--name /viewRight --x 700 --y 30 --w 608 --h 480 --synch</parameters>
+    <node>iiticublap268</node>
+</module>
+
+<module>
+    <name>yarpview</name>
+    <parameters>--name /viewDebugL --x 30 --y 658 --w 608 --h 480 --synch</parameters>
+    <node>iiticublap268</node>
+</module>
+
+<module>
+    <name>yarpview</name>
+    <parameters>--name /viewDebugR --x 700 --y 658 --w 608 --h 480 --synch</parameters>
+    <node>iiticublap268</node>
+</module>
+
+<module>
+    <name>vGazeDemo</name>
+    <parameters>--period 0.01 --start true --velocity true</parameters>
+    <node>iiticublap267</node>
+</module>
+
+<connection>
+    <from>/zynqGrabber/AE:o</from>
+    <to>/vPreProcess/AE:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vPreProcess/left:o</from>
+    <to>/vFramer/left/AE:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vPreProcess/right:o</from>
+    <to>/vFramer/right/AE:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vPreProcess/left:o</from>
+    <to>/vpfL/AE:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vPreProcess/right:o</from>
+    <to>/vpfR/AE:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vPreProcess/left:o</from>
+    <to>/hough/left/AE:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vPreProcess/right:o</from>
+    <to>/hough/right/AE:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vpfL/GAE:o</from>
+    <to>/vFramer/left/GAE:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vpfR/GAE:o</from>
+    <to>/vFramer/right/GAE:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vFramer/left/image:o</from>
+    <to>/viewLeft</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vFramer/right/image:o</from>
+    <to>/viewRight</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/hough/left/debug:o</from>
+    <to>/viewDebugL</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/hough/right/debug:o</from>
+    <to>/viewDebugR</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vATP/target:o</from>
+    <to>/vGazeDemo/vBottle:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/hough/left/detection:o</from>
+    <to>/vATP/left/message:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vpfL/state:o</from>
+    <to>/vATP/left/message:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vATP/left/detector_commands:o</from>
+    <to>/hough/left</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vATP/left/tracker_commands:o</from>
+    <to>/vpfL/cmd</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/hough/right/detection:o</from>
+    <to>/vATP/right/message:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vpfR/state:o</from>
+    <to>/vATP/right/message:i</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vATP/right/detector_commands:o</from>
+    <to>/hough/right</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+<connection>
+    <from>/vATP/right/tracker_commands:o</from>
+    <to>/vpfR/cmd</to>
+    <protocol>fast_tcp</protocol>
+</connection>
+
+</application>
diff --git a/pf-ball-demo/attend_track_predict.cpp b/pf-ball-demo/attend_track_predict.cpp
new file mode 100644
index 0000000..f4f13b0
--- /dev/null
+++ b/pf-ball-demo/attend_track_predict.cpp
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "attend_track_predict.h"
+using namespace yarp::os;
+
+int main(int argc, char * argv[])
+{
+ /* initialize yarp network */
+ yarp::os::Network yarp;
+ if(!yarp.checkNetwork()) {
+ yError() << "Could not connect to yarp";
+ return -1;
+ }
+
+ /* prepare and configure the resource finder */
+ yarp::os::ResourceFinder rf;
+ rf.setDefaultContext( "eventdriven" );
+ rf.setDefaultConfigFile( "vATP.ini" );
+ rf.configure( argc, argv );
+
+ /* create the module */
+ robot_control module;
+
+ /* run the module: runModule() calls configure first and, if successful, it then runs */
+ return module.runModule(rf);
+}
+
+robot_control::robot_control() {};
+
+bool robot_control::configure(yarp::os::ResourceFinder &rf)
+{
+
+ setName(rf.check("name", Value("/vATP")).asString().c_str());
+
+ if(!left_machine.open(getName() + "/left")) {
+ yError() << "Could not open left state machine";
+ return false;
+ }
+
+ if(!right_machine.open(getName() + "/right")) {
+ yError() << "Could not open right state machine";
+ return false;
+ }
+
+ return true;
+}
+
+bool robot_control::interruptModule()
+{
+ left_machine.interrupt();
+ right_machine.interrupt();
+ return true;
+}
+
+//double getPeriod();
+bool robot_control::updateModule()
+{
+ return true;
+}
+
+
+
+state_machine::state_machine()
+{
+ start_message.addVocab(createVocab('S', 'T', 'A', 'R'));
+ start_message.addInt(0);
+ start_message.addInt(0);
+ start_message.addInt(0);
+
+ stop_message.addVocab(createVocab('S', 'T', 'O', 'P'));
+
+ machine_state = searching;
+}
+
+bool state_machine::open(const std::string &name)
+{
+
+ useCallback();
+ if(!BufferedPort::open(name + "/message:i")) {
+ yError() << "Couldn't open message port";
+ return false;
+ }
+
+ if(!detector_rpc.open(name + "/detector_commands:o")) {
+ yError() << "Couldn't open left detector commands rpc";
+ return false;
+ }
+
+ if(!tracker_rpc.open(name + "/tracker_commands:o")) {
+ yError() << "Couldn't open left tracker commands rpc";
+ return false;
+ }
+
+ info_prefix = name;
+
+ return true;
+
+}
+
+#define R_ATTEND_POINT createVocab('A', 'T', 'T')
+#define R_TRACK_STATUS createVocab('T', '_', 'S', 'T')
+#define R_PRED_POINT createVocab('T', 'R', 'A', 'J')
+
+void state_machine::onRead(Bottle &message)
+{
+ int32_t m_type = message.get(0).asVocab();
+
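+ //two-state protocol: an attention point (ATT) received while searching stops
+ //the detector and starts the tracker; a lost-track status (T_ST 0) received
+ //while tracking stops the tracker and restarts the detector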
+ switch(machine_state)
+ {
+
+ case(searching):
+
+ if(m_type == R_ATTEND_POINT)
+ {
+ detector_rpc.write(stop_message);
+
+ start_message.clear();
+ start_message.addVocab(createVocab('S', 'T', 'A', 'R'));
+ start_message.addInt((int)message.get(1).asDouble());
+ start_message.addInt((int)message.get(2).asDouble());
+ start_message.addInt((int)message.get(3).asDouble());
+ tracker_rpc.write(start_message);
+
+ machine_state = tracking;
+ yInfo() << info_prefix << "Attention point found, beginning to track";
+ yInfo() << info_prefix << start_message.toString();
+ } else {
+ yWarning() << info_prefix << "[searching]" << message.toString();
+ }
+
+ break;
+
+
+ case(tracking):
+
+ if(m_type == R_TRACK_STATUS) {
+ if(message.get(1).asInt() == 0) {
+ //tracking lost
+ tracker_rpc.write(stop_message);
+ detector_rpc.write(start_message);
+ machine_state = searching;
+ yInfo() << info_prefix << "Tracking lost, performing detection";
+
+ } else if(message.get(1).asInt() == 1) {
+ //tracking started
+ //yInfo() << "Tracking Started";
+ }
+ } else if(m_type == R_PRED_POINT) {
+ yInfo() << info_prefix << "Received prediction point";
+ } else {
+ yWarning() << info_prefix << "[tracking]" << message.toString();
+ }
+
+ break;
+
+
+
+ default:
+ yWarning() << info_prefix << "Machine in unconfigured state";
+ break;
+
+
+ }
+
+}
+
+
+
diff --git a/pf-ball-demo/attend_track_predict.h b/pf-ball-demo/attend_track_predict.h
new file mode 100644
index 0000000..68bd43d
--- /dev/null
+++ b/pf-ball-demo/attend_track_predict.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2018 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ICUBATPSTATEMACHINE_
+#define _ICUBATPSTATEMACHINE_
+
+#include "yarp/os/all.h"
+
+
+class state_machine : public yarp::os::BufferedPort<yarp::os::Bottle> {
+
+protected:
+
+ enum states {
+ initialise,
+ searching,
+ tracking
+ };
+
+ std::string info_prefix;
+ yarp::os::Bottle start_message;
+ yarp::os::Bottle stop_message;
+
+ states machine_state;
+
+ yarp::os::RpcClient detector_rpc;
+ yarp::os::RpcClient tracker_rpc;
+
+public:
+
+ state_machine();
+ void onRead(yarp::os::Bottle &message);
+ bool open(const std::string &name);
+
+};
+
+class robot_control : public yarp::os::RFModule
+{
+
+protected:
+
+ state_machine left_machine;
+ state_machine right_machine;
+
+public:
+
+ robot_control();
+ bool configure(yarp::os::ResourceFinder &rf);
+ bool interruptModule();
+ bool updateModule();
+
+};
+
+#endif
diff --git a/pf-ball-demo/houghAlgorithm.cpp b/pf-ball-demo/houghAlgorithm.cpp
new file mode 100644
index 0000000..5001f20
--- /dev/null
+++ b/pf-ball-demo/houghAlgorithm.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "houghAlgorithm.h"
+
+circularHoughDetector::circularHoughDetector()
+{
+ radius = 0;
+ perimeter = 0;
+ inv_perimeter = 1.0;
+ max_response = pixel{0, 0};
+}
+
+bool circularHoughDetector::initialise(double radius, double thickness, int height, int width)
+{
+
+ H.resize(width, height);
+ H.zero();
+ duplicate_check.resize(width, height);
+ duplicate_check.zero();
+
+ xlim = width-1;
+ ylim = height-1;
+
+ this->radius = radius;
+ perimeter = radius * 2.0 * M_PI + 0.5;
+ inv_perimeter = 1.0 / (radius * 2 * M_PI);
+
+ pixel_offsets.clear();
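+ //precompute the offsets of all pixels lying on an annulus of the given
+ //radius and thickness; each event later votes at these candidate centres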
+ for(int x = -radius; x <= radius; x++) {
+ for(int y = -radius; y <= radius; y++) {
+
+ if(std::fabs(sqrt(x*x + y*y) - radius) < thickness) {
+ pixel_offsets.push_back(pixel{x, y});
+ }
+
+ }
+ }
+
+ processing.lock();
+ done.lock();
+
+ return this->start();
+
+}
+
+void circularHoughDetector::process(std::vector<ev::AE> &q)
+{
+ this->q = &q;
+ processing.unlock();
+}
+
+double circularHoughDetector::waittilldone()
+{
+ done.lock();
+ double score = (double)(H(max_response.x, max_response.y)) * inv_perimeter;
+ if(score > 1.0) score = 1.0;
+ return score;
+}
+
+int circularHoughDetector::x()
+{
+ return max_response.x;
+}
+
+int circularHoughDetector::y()
+{
+ return max_response.y;
+}
+
+int circularHoughDetector::r()
+{
+ return radius;
+}
+
+
+void circularHoughDetector::run()
+{
+
+ while(!isStopping()) {
+
+ processing.lock();
+ if(isStopping()) return;
+
+ H.zero();
+ duplicate_check.zero();
+
+ unsigned int events_to_proc = std::min(2*perimeter, (unsigned int)q->size());
+
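+ //circular Hough voting: each unique event pixel increments the accumulator
+ //at every candidate centre one radius away, tracking the running maximum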
+ for(size_t i = 0; i < events_to_proc; i++) {
+
+ if(duplicate_check((*q)[i].x, (*q)[i].y))
+ continue;
+
+ duplicate_check((*q)[i].x, (*q)[i].y) = 1;
+
+ for(size_t j = 0; j < pixel_offsets.size(); j++) {
+
+ int x = (*q)[i].x + pixel_offsets[j].x;
+ int y = (*q)[i].y + pixel_offsets[j].y;
+ if(y > ylim || y < 0 || x > xlim || x < 0) continue;
+
+
+ H(x, y)++;
+ if(H(x, y) > H(max_response.x, max_response.y)) {
+ max_response.x = x; max_response.y = y;
+ }
+
+ }
+ }
+
+ done.unlock();
+
+ }
+}
+
+void circularHoughDetector::onStop()
+{
+ processing.unlock();
+}
+
+yarp::sig::ImageOf<yarp::sig::PixelBgr> circularHoughDetector::makeDebugImage()
+{
+
+ yarp::sig::ImageOf<yarp::sig::PixelBgr> image; image.resize(H.width(), H.height());
+ image.zero();
+
+ for(int y = 0; y < ylim; y++) {
+ for(int x = 0; x < xlim; x++) {
+
+ int value = (double)(H(x, y)) * 255.0 * inv_perimeter * 2.0;
+ if(value > 255) value = 255;
+
+ yarp::sig::PixelBgr &pixel = image(x, y);
+ pixel.g = value;
+
+ }
+ }
+
+ unsigned int events_to_proc = std::min(2*perimeter, (unsigned int)q->size());
+ for(size_t i = 0; i < events_to_proc; i++) {
+
+ yarp::sig::PixelBgr &pixel = image((*q)[i].x, (*q)[i].y);
+ pixel.b = 255; pixel.g = 255; pixel.r = 255;
+
+ }
+
+ image(max_response.x, max_response.y) = yarp::sig::PixelBgr(0, 0, 255);
+
+
+ return image;
+}
+
diff --git a/pf-ball-demo/houghAlgorithm.h b/pf-ball-demo/houghAlgorithm.h
new file mode 100644
index 0000000..32e810a
--- /dev/null
+++ b/pf-ball-demo/houghAlgorithm.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __VHOUGH__
+#define __VHOUGH__
+
+#include <yarp/os/all.h>
+#include <yarp/sig/Image.h>
+#include <event-driven/core.h>
+#include <mutex>
+#include <vector>
+
+class circularHoughDetector : public yarp::os::Thread
+{
+
+private:
+
+ //data
+ yarp::sig::ImageOf<yarp::sig::PixelInt> H;
+ yarp::sig::ImageOf<yarp::sig::PixelInt> duplicate_check;
+ int xlim;
+ int ylim;
+ typedef struct { int x; int y;} pixel;
+ std::vector<pixel> pixel_offsets;
+ pixel max_response;
+ int radius;
+ unsigned int perimeter;
+ double inv_perimeter;
+
+ std::mutex processing; /// for thread safety when starting computation
+ std::mutex done;
+
+ //current data
+ std::vector<ev::AE> *q; /// pointer to list of events to add to Hough space
+
+ /// update the Hough space given adds and subs
+ void performHough();
+
+ /// calls performHough in separate thread
+ void run();
+ void onStop();
+
+public:
+
+ circularHoughDetector();
+
+ bool initialise(double radius, double thickness, int height, int width);
+
+ void process(std::vector<ev::AE> &q);
+ double waittilldone();
+ int x();
+ int y();
+ int r();
+
+
+ yarp::sig::ImageOf<yarp::sig::PixelBgr> makeDebugImage();
+
+};
+
+
+#endif
diff --git a/pf-ball-demo/module.cpp b/pf-ball-demo/module.cpp
new file mode 100644
index 0000000..26df88b
--- /dev/null
+++ b/pf-ball-demo/module.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "module.h"
+#include <vector>
+using std::vector;
+
+int main(int argc, char * argv[])
+{
+ /* initialize yarp network */
+ yarp::os::Network yarp;
+ if(!yarp.checkNetwork()) {
+ yError() << "Could not find YARP network";
+ return -1;
+ }
+
+ /* prepare and configure the resource finder */
+ yarp::os::ResourceFinder rf;
+ rf.setDefaultContext( "eventdriven" );
+ rf.setDefaultConfigFile( "vCircleDetector.ini" );
+ rf.configure( argc, argv );
+
+ /* create the module */
+ detectionManager detector_module;
+ return detector_module.runModule(rf);
+}
+
+bool detectionManager::configure(yarp::os::ResourceFinder &rf)
+{
+ setName((rf.check("name", yarp::os::Value("/vCircleDetect")).asString()).c_str());
+ if(!rpc_port.open(getName()))
+ return false;
+ attach(rpc_port);
+
+ //sensory size
+ int width = rf.check("width", yarp::os::Value(304)).asInt();
+ int height = rf.check("height", yarp::os::Value(240)).asInt();
+
+ int radmin = rf.check("radmin", yarp::os::Value(25)).asInt();
+ int radmax = rf.check("radmax", yarp::os::Value(45)).asInt();
+
+ send_response_threshold = rf.check("threshold", yarp::os::Value(0.25)).asDouble();
+ active = rf.check("start", yarp::os::Value(true)).asBool();
+
+ for(int r = radmin; r <= radmax; r+=2) {
+
+ detectors.push_back(new circularHoughDetector);
+ if(!detectors.back()->initialise(r, 0.42, height, width))
+ return false;
+
+ }
+
+ fifoSize = 2 * 2 * radmax * M_PI;
+
+ if(!input_port.open(getName() + "/AE:i"))
+ return false;
+ if(!debug_port.open(getName() + "/debug:o"))
+ return false;
+ if(!output_port.open(getName() + "/detection:o"))
+ return false;
+
+ return start();
+}
+
+bool detectionManager::interruptModule()
+{
+ input_port.close();
+ debug_port.interrupt();
+ debug_port.close();
+ output_port.interrupt();
+ output_port.close();
+ vector<circularHoughDetector *>::iterator i;
+ for(i = detectors.begin(); i < detectors.end(); i++) {
+ (*i)->stop();
+ }
+
+ return this->stop();
+}
+
+bool detectionManager::updateModule()
+{
+ return !Thread::isStopping();
+}
+
+void detectionManager::run()
+{
+ vector<ev::AE> q; q.resize(fifoSize);
+ yarp::os::Stamp yarp_stamp;
+ unsigned int input_q_i = 0, q_i = 0;
+ yInfo() << "Waiting for connection";
+ ev::packet<ev::AE> *input_q = input_port.read();
+ if(!input_q) return;
+ yInfo() << "Running!";
+
+ while(!Thread::isStopping()) {
+
+ //get a set of events equal to fifoSize
+ while(q_i < fifoSize) {
+
+ if(input_q_i >= input_q->size()) {
+
+ unsigned int upqs = input_port.getPendingReads();
+ if(upqs < 1) upqs = 1;
+ for(unsigned int i = 0; i < upqs; i++) {
+ input_q = input_port.read();
+ if(!input_q) return;
+ }
+ input_q_i = 0;
+
+ }
+
+ q[q_i++] = (*input_q)[input_q_i++];
+
+ }
+
+ if(!active) continue;
+
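+ //run one Hough detector per candidate radius over the same event FIFO and
+ //keep the radius with the strongest normalised response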
+ vector<circularHoughDetector *>::iterator i;
+ for(i = detectors.begin(); i < detectors.end(); i++) {
+ (*i)->process(q);
+ }
+
+ double max_response = 0;
+ vector<circularHoughDetector *>::iterator max_detector = detectors.begin();
+ for(i = detectors.begin(); i < detectors.end(); i++) {
+
+ double response = (*i)->waittilldone();
+ if(response >= max_response) {
+ max_response = response;
+ max_detector = i;
+ }
+ }
+
+ if(max_response > send_response_threshold) {
+ yarp::os::Bottle &output_data = output_port.prepare();
+ output_data.clear();
+ output_data.addVocab(yarp::os::createVocab32('A', 'T', 'T'));
+ output_data.addDouble((*max_detector)->x());
+ output_data.addDouble((*max_detector)->y());
+ output_data.addDouble((*max_detector)->r());
+ output_data.addDouble(max_response);
+ output_port.write();
+ }
+
+ static double prev_time = yarp::os::Time::now();
+ double dt = yarp::os::Time::now() - prev_time;
+ if( dt > 0.033 && debug_port.getOutputCount()) {
+
+ debug_port.prepare() = (*max_detector)->makeDebugImage();
+ debug_port.write();
+ prev_time += dt;
+
+ }
+
+ q_i = 0;
+
+ }
+
+
+}
+
+#define CMD_START yarp::os::createVocab('S', 'T', 'A', 'R')
+#define CMD_STOP yarp::os::createVocab('S', 'T', 'O', 'P')
+
+bool detectionManager::respond(const yarp::os::Bottle& command, yarp::os::Bottle& reply)
+{
+
+ reply.clear();
+ switch(command.get(0).asVocab()) {
+
+ case CMD_START:
+ input_port.resume();
+ reply.addString("starting detection");
+ break;
+ case CMD_STOP:
+ input_port.interrupt();
+ reply.addString("stopping detection");
+ break;
+
+ default:
+ reply.addString("error. try START or STOP");
+ return false;
+ }
+
+ return true;
+
+}
diff --git a/pf-ball-demo/module.h b/pf-ball-demo/module.h
new file mode 100644
index 0000000..b84bc45
--- /dev/null
+++ b/pf-ball-demo/module.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// \defgroup Modules Modules
+// \defgroup vCircle vCircle
+// \ingroup Modules
+// \brief detects circles using the Hough transform
+
+#ifndef __VCIRCLEDETECTION__
+#define __VCIRCLEDETECTION__
+
+#include <yarp/os/all.h>
+#include <yarp/sig/Image.h>
+#include <event-driven/core.h>
+#include <vector>
+
+#include "houghAlgorithm.h"
+
+class detectionManager : public yarp::os::Thread, public yarp::os::RFModule
+{
+private:
+
+ ev::BufferedPort< ev::AE > input_port;
+ yarp::os::BufferedPort< yarp::sig::ImageOf<yarp::sig::PixelBgr> > debug_port;
+ yarp::os::BufferedPort<yarp::os::Bottle> output_port;
+ yarp::os::RpcServer rpc_port;
+
+ std::vector<circularHoughDetector *> detectors;
+ unsigned int fifoSize;
+ double send_response_threshold;
+ bool active;
+
+ void run();
+
+public:
+
+ detectionManager() {}
+
+ bool configure(yarp::os::ResourceFinder &rf);
+ bool updateModule();
+ bool interruptModule();
+ bool respond(const yarp::os::Bottle& command, yarp::os::Bottle& reply);
+
+
+};
+
+
+
+#endif
diff --git a/pf-ball-demo/vControlLoopDelay.cpp b/pf-ball-demo/vControlLoopDelay.cpp
new file mode 100644
index 0000000..ed76e1b
--- /dev/null
+++ b/pf-ball-demo/vControlLoopDelay.cpp
@@ -0,0 +1,715 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "vControlLoopDelay.h"
+#include <cmath>
+#include <algorithm>
+#include <iostream>
+#include <vector>
+
+using std::vector;
+using std::deque;
+using ev::AE;
+using ev::packet;
+using yarp::sig::ImageOf;
+using yarp::sig::PixelFloat;
+using yarp::sig::PixelBgr;
+using yarp::os::Value;
+using yarp::os::Bottle;
+using yarp::os::createVocab;
+
+int main(int argc, char * argv[])
+{
+ /* initialize yarp network */
+
+
+ /* create the module */
+ delayControl instance;
+
+ /* prepare and configure the resource finder */
+ yarp::os::ResourceFinder rf;
+ //rf.setVerbose( false );
+ //rf.setDefaultContext( "event-driven" );
+ //rf.setDefaultConfigFile( "vParticleFilterTracker.ini" );
+ rf.configure( argc, argv );
+
+ return instance.runModule(rf);
+}
+
+void drawEvents(yarp::sig::ImageOf< yarp::sig::PixelBgr> &image, deque<AE> &q,
+ int offsetx = 0) {
+
+ if(q.empty()) return;
+
+ //draw oldest first
+ for(int i = (int)q.size()-1; i >= 0; i--) {
+ double p = (double)i / (double)q.size();
+ //image(q[i].x + offsetx, q[i].y).b = 255 * (1-p);
+ image(q[i].x + offsetx, q[i].y).b = 255;
+ }
+}
+
+void drawcircle(yarp::sig::ImageOf<yarp::sig::PixelBgr> &image, int cx, int cy,
+ int cr, int id)
+{
+
+ for(int y = -cr; y <= cr; y++) {
+ for(int x = -cr; x <= cr; x++) {
+ if(fabs(sqrt(pow(x, 2.0) + pow(y, 2.0)) - (double)cr) > 0.8)
+ continue;
+ int px = cx + x; int py = cy + y;
+ if(py<0 || py>(int)image.height()-1 || px<0 || px>(int)image.width()-1)
+ continue;
+ switch(id) {
+ case(0): //green
+ image(px, py) = yarp::sig::PixelBgr(0, 255, 0);
+ break;
+ case(1): //blue
+ image(px, py) = yarp::sig::PixelBgr(0, 0, 255);
+ break;
+ case(2): //red
+ image(px, py) = yarp::sig::PixelBgr(255, 0, 0);
+ break;
+ default:
+ image(px, py) = yarp::sig::PixelBgr(255, 255, 0);
+ break;
+
+ }
+
+ }
+ }
+
+}
+
+void drawDistribution(yarp::sig::ImageOf<yarp::sig::PixelBgr> &image, std::vector<templatedParticle> &indexedlist)
+{
+
+ double sum = 0;
+ std::vector weights;
+ for(unsigned int i = 0; i < indexedlist.size(); i++) {
+ weights.push_back(indexedlist[i].weight);
+ sum += weights.back();
+ }
+
+ std::sort(weights.begin(), weights.end());
+
+
+ image.resize(indexedlist.size(), 100);
+ image.zero();
+ for(unsigned int i = 0; i < weights.size(); i++) {
+ image(weights.size() - 1 - i, 99 - weights[i]*100) = yarp::sig::PixelBgr(255, 255, 255);
+ }
+}
+
+void drawTemplate(ImageOf<PixelBgr> &image, ImageOf<PixelFloat> &appearance,
+ double t_x, double t_y, double t_r)
+{
+ int mid_x = appearance.width() / 2;
+ int mid_y = appearance.height() / 2;
+ double t_s = t_r / mid_x;
+
+ for(auto x = 0; x < appearance.width(); x++) {
+ for(auto y = 0; y < appearance.height(); y++) {
+
+ int i_x = t_x + ((x - mid_x) * t_s);
+ i_x = std::max(std::min(i_x, (int)(image.width() - 1)), 0);
+ int i_y = t_y + ((y - mid_y) * t_s);
+ i_y = std::max(std::min(i_y, (int)(image.height() - 1)), 0);
+
+ PixelBgr &p = image(i_x, i_y);
+ if(appearance(x, y) > 0) {
+ p.g = appearance(x, y) * 125;
+ } else if(appearance(x, y) < 0) {
+ p.r = -appearance(x, y) * 125;
+ }
+
+
+ }
+ }
+
+
+
+
+
+}
+
+/*////////////////////////////////////////////////////////////////////////////*/
+// ROIQ
+/*////////////////////////////////////////////////////////////////////////////*/
+
+
+roiq::roiq()
+{
+ roi.resize(4);
+ n = 1000;
+ roi[0] = 0; roi[1] = 1000;
+ roi[2] = 0; roi[3] = 1000;
+ use_TW = false;
+}
+
+void roiq::setSize(unsigned int value)
+{
+ //if TW n is in clock-ticks
+ //otherwise n is in # events.
+ n = value;
+ while(q.size() > n)
+ q.pop_back();
+}
+
+void roiq::setROI(int xl, int xh, int yl, int yh)
+{
+ roi[0] = xl; roi[1] = xh;
+ roi[2] = yl; roi[3] = yh;
+}
+
+int roiq::add(const AE &v)
+{
+
+ if(v.x < roi[0] || v.x > roi[1] || v.y < roi[2] || v.y > roi[3])
+ return 0;
+ q.push_front(v);
+ return 1;
+}
+
+/*////////////////////////////////////////////////////////////////////////////*/
+// DELAYCONTROL
+/*////////////////////////////////////////////////////////////////////////////*/
+
+bool delayControl::configure(yarp::os::ResourceFinder &rf)
+{
+
+ if(rf.check("h") || rf.check("H"))
+ {
+ yInfo() << "--name";
+ yInfo() << "--height, --width";
+ yInfo() << "--gain, --adaptive, --variance";
+ yInfo() << "--particles (20), --threads (1)";
+ yInfo() << "--seed \"(x, y, r)\", --start_time <seconds>, --file <filename>";
+ return false;
+ }
+
+ yarp::os::Network yarp;
+ if(!yarp.checkNetwork(2.0)) {
+ std::cout << "Could not connect to YARP" << std::endl;
+ return false;
+ }
+
+ //module name and control
+ setName((rf.check("name", Value("/vpf")).asString()).c_str());
+ if(!rpcPort.open(getName() + "/cmd")) {
+ yError() << "Could not open rpc port for" << getName();
+ return false;
+ }
+ attach(rpcPort);
+
+ //options and parameters
+ px = py = pr = 0;
+ res.height = rf.check("height", Value(480)).asInt();
+ res.width = rf.check("width", Value(640)).asInt();
+ gain = rf.check("gain", Value(0.01)).asDouble();
+ batch_size = rf.check("batch", Value(0)).asInt();
+ bool adaptivesampling = rf.check("adaptive") &&
+ rf.check("adaptive", yarp::os::Value(true)).asBool();
+ motionVariance = rf.check("variance", yarp::os::Value(3.0)).asDouble();
+ output_sample_delta = rf.check("output_sample", Value(0)).asDouble();
+ bool start = rf.check("start") &&
+ rf.check("start", yarp::os::Value(true)).asBool();
+
+ maxRawLikelihood = vpf.initialise(res.width, res.height,
+ rf.check("particles", yarp::os::Value(100)).asInt(),
+ adaptivesampling,
+ rf.check("threads", Value(1)).asInt());
+
+ //maxRawLikelihood must be set before TrueThreshold
+ setTrueThreshold(rf.check("truethresh", yarp::os::Value(0.35)).asDouble());
+
+ yarp::os::Bottle * seed = rf.find("seed").asList();
+ if(seed && seed->size() == 3) {
+ yInfo() << "Setting initial seed state:" << seed->toString();
+ vpf.setSeed(seed->get(0).asDouble(),
+ seed->get(1).asDouble(),
+ seed->get(2).asDouble());
+ vpf.resetToSeed();
+ }
+
+ start_time = rf.check("start_time", Value(0.0)).asDouble();
+
+ if(rf.check("file")) {
+ std::string file_name = rf.find("file").asString();
+ fs.open(file_name, std::ios_base::out | std::ios_base::trunc);
+ if(!fs.is_open()) {
+ yError() << "Could not open output file" << file_name;
+ return false;
+ } else {
+ yInfo() << "Saving data to file" << file_name;
+ }
+ }
+
+
+ // if(!scopePort.open(getName() + "/scope:o"))
+ // return false;
+
+ //event_output_port.setWriteType(GaussianAE::tag);
+ // if(!event_output_port.open(getName() + "/GAE:o"))
+ // return false;
+
+ // if(!raw_output_port.open(getName() + "/state:o"))
+ // return false;
+
+ if(!debug_port.open(getName("/debug:o")))
+ return false;
+
+ //input_port.setQLimit(rf.check("qlimit", Value(0)).asInt());
+ if(!input_port.open(getName("/AE:i")))
+ return false;
+
+ yarp::os::Network::connect("/atis3/AE:o", getName("/AE:i"), "fast_tcp");
+ yarp::os::Network::connect(getName("/debug:o"), "/ptView", "fast_tcp");
+
+ //if(!start)
+ // pause();
+
+ return Thread::start();
+
+}
+
+bool delayControl::interruptModule()
+{
+ return Thread::stop();
+}
+
+bool delayControl::updateModule()
+{
+ //output the scope if connected
+ // if(scopePort.getOutputCount()) {
+ // scopePort.prepare() = getTrackingStats();
+ // scopePort.write();
+ // }
+
+ //output the debug image if connected
+ if(debug_port.getOutputCount()) {
+ ImageOf &image = debug_port.prepare();
+
+
+ image.resize(res.width, res.height);
+ image.zero();
+ m.lock();
+ drawTemplate(image, vpf.appearance, avgx, avgy, avgr);
+ drawEvents(image, qROI.q);
+ m.unlock();
+
+ debug_port.write();
+ }
+
+ return Thread::isRunning();
+}
+
+double delayControl::getPeriod()
+{
+ return 0.05;
+}
+
+void delayControl::setTrueThreshold(double value)
+{
+ detectionThreshold = value * maxRawLikelihood;
+}
+
+void delayControl::performReset(int x, int y, int r)
+{
+ if(x > 0)
+ vpf.setSeed(x, y, r);
+ vpf.resetToSeed();
+ input_port.resume();
+}
+
+yarp::sig::Vector delayControl::getTrackingStats()
+{
+ yarp::sig::Vector stats(10);
+
+ stats[0] = 0;//1000*input_port.queryDelayT();
+ stats[1] = 1.0/filterPeriod;
+ stats[2] = targetproc;
+ stats[3] = 0;//input_port.queryRate() / 1000.0;
+ stats[4] = dx;
+ stats[5] = dy;
+ stats[6] = dr;
+ stats[7] = vpf.maxlikelihood / (double)maxRawLikelihood;
+ stats[8] = cpuusage.getProcessorUsage();
+ stats[9] = qROI.n;
+
+ return stats;
+}
+
+void delayControl::onStop()
+{
+ input_port.stop();
+ debug_port.close();
+
+ if(fs.is_open())
+ {
+ yInfo() << "Writing data";
+ for(auto i : data_to_save)
+ fs << i[0] << " " << i[1] << " " << i[2] << " " << i[3] << std::endl;
+ fs.close();
+ yInfo() << "Finished Writing data";
+ }
+
+}
+
+void delayControl::pause()
+{
+ input_port.interrupt();
+}
+
+void delayControl::resume()
+{
+ input_port.resume();
+}
+
+void delayControl::run()
+{
+
+ //initialise the position
+ vpf.extractTargetPosition(avgx, avgy, avgr);
+ double roisize = avgr + 20;
+ qROI.setROI(avgx - roisize, avgx + roisize, avgy - roisize, avgy + roisize);
+
+ //read some data to extract the channel
+ if (fs.is_open())
+ data_to_save.push_back({start_time, avgx, avgy, 0.0});
+
+ double time_offset = -1.0;
+ double time_now = -1.0;
+ while(time_now-time_offset <= start_time) {
+ ev::info read_stats = input_port.readChunkN(1);
+ if(input_port.isStopping()) return;
+ time_now = input_port.begin().packetTime();
+ if(time_offset < 0) {
+ time_offset = input_port.begin().packetTime();
+ }
+
+ m.lock();
+ for(auto a = input_port.begin(); a != input_port.end(); a++) {
+ qROI.add(*a);
+ }
+ m.unlock();
+ }
+
+ m.lock();
+ if (batch_size)
+ qROI.setSize(batch_size);
+ else
+ qROI.setSize(300);
+ m.unlock();
+
+ double nw = 0;
+ unsigned int addEvents = 0;
+ unsigned int testedEvents = 0;
+
+ targetproc = M_PI * avgr;
+
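+ //main loop: accumulate events falling inside the ROI and run one
+ //observe/resample/predict iteration of the particle filter whenever enough
+ //new events (proportional to the current target radius) have arrived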
+ while(true) {
+
+ if(input_port.isStopping())
+ break;
+
+ input_port.readChunkN(1);
+
+ m.lock();
+ for(auto a = input_port.begin(); a != input_port.end(); a++) {
+ addEvents += qROI.add(*a);
+ if(addEvents > targetproc)
+ {
+ addEvents = 0;
+ if (batch_size) qROI.setSize(batch_size);
+ m.unlock();
+ vpf.performObservation(qROI.q);
+ vpf.extractTargetWindow(nw);
+ vpf.extractTargetPosition(avgx, avgy, avgr);
+ vpf.performResample();
+ vpf.performPrediction(motionVariance);
+
+ roisize = avgr + 20;
+ qROI.setROI(avgx - roisize, avgx + roisize, avgy - roisize, avgy + roisize);
+
+ // set our new window #events
+ m.lock();
+ if (!batch_size) {
+ if (qROI.q.size() - nw > 30)
+ qROI.setSize(std::max(nw, 300.0));
+ if (qROI.q.size() > 2000)
+ qROI.setSize(2000);
+ }
+
+ if (fs.is_open())
+ data_to_save.push_back({a.packetTime() - time_offset, avgx, avgy, input_port.getUnprocessedDelay()});
+ }
+ }
+ m.unlock();
+
+
+
+
+
+
+ // while(addEvents < targetproc) {
+
+ // //if we ran out of events get a new queue
+ // if(i >= q->size()) {
+ // i = 0;
+ // q = input_port.read();
+ // if(!q || Thread::isStopping()) {
+ // m.unlock();
+ // return;
+ // }
+ // input_port.getEnvelope(ystamp);
+ // }
+
+ // addEvents += qROI.add((*q)[i]);
+ // testedEvents++;
+ // i++;
+ // }
+ // Tgetwindow = yarp::os::Time::now() - Tgetwindow;
+
+ // //if using a batch fix the size to the batch size ALWAYS
+
+
+ // m.unlock();
+
+ // //update the particle weights
+ // Tlikelihood = yarp::os::Time::now();
+ // vpf.performObservation(qROI.q);
+ // Tlikelihood = yarp::os::Time::now() - Tlikelihood;
+
+ // //extract the state
+ // vpf.extractTargetWindow(nw);
+ // dx = avgx, dy = avgy, dr = avgr;
+ // vpf.extractTargetPosition(avgx, avgy, avgr);
+ // dx = avgx - dx; dy = avgy - dy; dr = avgr - dr;
+
+ // // if (debug_port.getOutputCount()) {
+ // // ImageOf &image = debug_port.prepare();
+ // // image.resize(res.width, res.height);
+ // // image.zero();
+ // // drawTemplate(image, vpf.appearance, avgx, avgy, avgr);
+ // // drawEvents(image, qROI.q);
+ // // debug_port.write();
+ // // }
+ // //yarp::os::Time::delay(0.05);
+
+ // Tresample = yarp::os::Time::now();
+ // vpf.performResample();
+ // Tresample = yarp::os::Time::now() - Tresample;
+
+ // Tpredict = yarp::os::Time::now();
+ // vpf.performPrediction(motionVariance);
+ // Tpredict = yarp::os::Time::now() - Tpredict;
+
+ // int is_tracking = 1;
+ // if(vpf.maxlikelihood < detectionThreshold)
+ // is_tracking = 0;
+
+ // //to compute the delay here, we should probably have the
+ // double data_time_passed = ystamp.getTime() - time_offset;
+ // if(fs.is_open())
+ // data_to_save.push_back({data_time_passed, avgx, avgy, input_port.getPendingReads() * 0.004});
+
+ // double delta_x = avgx - px;
+ // double delta_y = avgy - py;
+ // double dpos = sqrt(delta_x * delta_x + delta_y * delta_y);
+ // if(dpos > output_sample_delta) {
+ // px = avgx;
+ // py = avgy;
+ // pr = avgr;
+
+ // double tw = qROI.q.front().ts - qROI.q.back().ts;
+ // if(tw < 0) tw += ev::max_stamp;
+
+ //output our event
+ // if(event_output_port.getOutputCount()) {
+ // auto ceg = make_event();
+ // ceg->stamp = qROI.q.front().stamp;
+ // ceg->setChannel(channel);
+ // ceg->x = avgx;
+ // ceg->y = avgy;
+ // ceg->sigx = avgr;
+ // ceg->sigy = tw;
+ // ceg->sigxy = 1.0;
+ // if(is_tracking)
+ // ceg->polarity = 1.0;
+ // else
+ // ceg->polarity = 0.0;
+
+ // vQueue outq; outq.push_back(ceg);
+ // event_output_port.write(outq, ystamp);
+
+ // }
+ //output the raw data
+ // if(raw_output_port.getOutputCount()) {
+ // Bottle &next_sample = raw_output_port.prepare();
+ // next_sample.clear();
+ // next_sample.addVocab(createVocab('T', '_', 'S', 'T'));
+ // next_sample.addInt(is_tracking);
+ // next_sample.addDouble(qROI.q.front().stamp);
+ // next_sample.addDouble(Time::now());
+ // next_sample.addDouble(avgx);
+ // next_sample.addDouble(avgy);
+ // next_sample.addDouble(avgr);
+ // next_sample.addDouble(channel);
+ // next_sample.addDouble(tw);
+ // next_sample.addDouble(vpf.maxlikelihood);
+
+ // raw_output_port.setEnvelope(ystamp);
+ // raw_output_port.write();
+ // }
+
+
+ //}
+
+ }
+
+}
+
+
+#define CMD_HELP createVocab('h', 'e', 'l', 'p')
+#define CMD_SET createVocab('s', 'e', 't')
+#define CMD_START createVocab('S', 'T', 'A', 'R')
+#define CMD_STOP createVocab('S', 'T', 'O', 'P')
+
+bool delayControl::respond(const yarp::os::Bottle& command,
+ yarp::os::Bottle& reply) {
+
+ //initialise for default response
+ bool error = false;
+ yInfo() << command.size();
+ reply.clear();
+
+ //switch on the command word
+ switch(command.get(0).asVocab()) {
+
+ case CMD_HELP:
+ {
+ reply.addString("<>");
+ reply.addString("Set the following parameters with | set <param> "
+ "<value> |");
+ reply.addString("trackThresh [0-1]");
+ reply.addString("trueThresh [0-1]");
+ reply.addString("gain [0-1]");
+ reply.addString("resetTimeout [0 inf]");
+ reply.addString("negativeBias [0 inf]");
+ reply.addString("motionVar [0 inf]");
+ reply.addString("inlierParam [0 inf]");
+ reply.addString("adaptive [true false]");
+ break;
+ }
+ case CMD_SET:
+ {
+
+ std::string param = command.get(1).asString();
+ double value = command.get(2).asDouble();
+
+ if(param == "trackThresh") {
+ reply.addString("setting tracking parameter to ");
+ reply.addDouble(value);
+ vpf.setMinLikelihood(value);
+ }
+ else if(param == "gain") {
+ reply.addString("gain changed from ");
+ reply.addDouble(gain);
+ gain = value;
+ reply.addString(" to ");
+ reply.addDouble(gain);
+ }
+ else if(param == "trueThresh") {
+ reply.addString("setting true classification parameter to ");
+ reply.addDouble(value);
+ setTrueThreshold(value);
+ }
+ else if(param == "resetTimeout") {
+ reply.addString("resetTimeout changed from ");
+ reply.addDouble(resetTimeout);
+ resetTimeout = value;
+ reply.addString(" to ");
+ reply.addDouble(resetTimeout);
+
+ }
+ else if(param == "negativeBias") {
+ reply.addString("setting the observation negative bias to ");
+ reply.addDouble(value);
+ vpf.setNegativeBias(value);
+ }
+ else if(param == "motionVar") {
+ reply.addString("motionVar changed from ");
+ reply.addDouble(motionVariance);
+ motionVariance = value;
+ reply.addString(" to ");
+ reply.addDouble(motionVariance);
+ }
+ else if(param == "inlierParam") {
+ reply.addString("setting the inlier width to ");
+ reply.addDouble(value);
+ vpf.setInlierParameter(value);
+ }
+ else if(param == "adaptive") {
+ if(value) {
+ reply.addString("setting the resample method = adaptive");
+ vpf.setAdaptive(true);
+ }
+ else {
+ reply.addString("setting the resample method = every update");
+ vpf.setAdaptive(false);
+ }
+ }
+ else {
+ error = true;
+ reply.addString("incorrect parameter");
+ }
+ break;
+ }
+ case CMD_STOP:
+ {
+ reply.addString("tracking paused");
+ pause();
+ break;
+ }
+ case CMD_START:
+ {
+ if(command.size() == 4) {
+ int x = command.get(1).asInt();
+ int y = command.get(2).asInt();
+ int r = command.get(3).asInt();
+ reply.addString("resetting particle positions to custom positions");
+ performReset(x, y, r);
+ } else {
+ reply.addString("resetting particle positions to seed");
+ performReset();
+ }
+ break;
+ }
+ default:
+ {
+ error = true;
+ break;
+ }
+
+ } //switch
+
+ //return the error - the reply is automatically sent
+ return !error;
+
+}
diff --git a/pf-ball-demo/vControlLoopDelay.h b/pf-ball-demo/vControlLoopDelay.h
new file mode 100644
index 0000000..4d76358
--- /dev/null
+++ b/pf-ball-demo/vControlLoopDelay.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+
+#include <yarp/os/all.h>
+#include <yarp/sig/Image.h>
+#include <event-driven/core.h>
+#include "vParticle.h"
+#include <mutex>
+#include <fstream>
+#include <array>
+#include <deque>
+
+
+/*////////////////////////////////////////////////////////////////////////////*/
+// ROIQ
+/*////////////////////////////////////////////////////////////////////////////*/
+class roiq
+{
+public:
+
+ std::deque<ev::AE> q;
+ unsigned int n;
+ yarp::sig::Vector roi;
+ bool use_TW;
+
+ roiq();
+ void setSize(unsigned int value);
+ void setROI(int xl, int xh, int yl, int yh);
+ int add(const ev::AE &v);
+
+};
+
+/*////////////////////////////////////////////////////////////////////////////*/
+// DELAYCONTROL
+/*////////////////////////////////////////////////////////////////////////////*/
+
+class delayControl : public yarp::os::RFModule, public yarp::os::Thread
+{
+private:
+
+ //data structures and ports
+ yarp::os::Port rpcPort;
+ ev::window<ev::AE> input_port;
+ yarp::os::BufferedPort< yarp::sig::ImageOf<yarp::sig::PixelBgr> > debug_port;
+ roiq qROI;
+ vParticlefilter vpf;
+ std::mutex m;
+
+ //variables
+ ev::resolution res;
+ double avgx, avgy, avgr;
+ int maxRawLikelihood;
+ double gain;
+ double detectionThreshold;
+ double resetTimeout;
+ double motionVariance;
+ int batch_size;
+ double output_sample_delta;
+ double start_time;
+
+ //diagnostics
+ double filterPeriod;
+ unsigned int targetproc;
+ double dx;
+ double dy;
+ double dr;
+ double px, py, pr;
+ ev::benchmark cpuusage;
+ std::deque< std::array<double, 4> > data_to_save;
+ std::ofstream fs;
+
+public:
+
+ delayControl() {}
+
+ virtual bool configure(yarp::os::ResourceFinder &rf);
+ virtual bool interruptModule();
+ virtual double getPeriod();
+ virtual bool updateModule();
+ virtual bool respond(const yarp::os::Bottle& command, yarp::os::Bottle& reply);
+
+ bool open(std::string name, unsigned int qlimit = 0);
+ void performReset(int x = -1, int y = -1, int r = -1);
+ void setFilterInitialState(int x, int y, int r);
+
+ void setMinRawLikelihood(double value);
+ void setMaxRawLikelihood(int value);
+ void setNegativeBias(int value);
+ void setInlierParameter(int value);
+ void setMotionVariance(double value);
+ void setTrueThreshold(double value);
+ void setAdaptive(double value = true);
+ void setOutputSampleDelta(double value);
+
+ void setGain(double value);
+ void setMinToProc(int value);
+ void setResetTimeout(double value);
+
+ yarp::sig::Vector getTrackingStats();
+
+ //bool threadInit();
+ void pause();
+ void resume();
+ void onStop();
+ void run();
+ //void threadRelease();
+
+
+};
diff --git a/pf-ball-demo/vParticle.cpp b/pf-ball-demo/vParticle.cpp
new file mode 100644
index 0000000..654a367
--- /dev/null
+++ b/pf-ball-demo/vParticle.cpp
@@ -0,0 +1,521 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "vParticle.h"
+#include <cmath>
+#include <limits>
+#include <algorithm>
+#include <vector>
+
+using std::vector;
+using std::deque;
+using ev::AE;
+using yarp::sig::ImageOf;
+using yarp::sig::PixelFloat;
+using yarp::sig::PixelBgr;
+
+
+double generateGaussianNoise(double mu, double sigma)
+{
+ const double epsilon = std::numeric_limits<double>::min();
+ const double two_pi = 2.0*3.14159265358979323846;
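+ //Box-Muller transform: each full pass draws a pair of independent normal
+ //samples; the second sample is cached and returned on the following call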
+
+ static double z0, z1;
+ static bool generate;
+ generate = !generate;
+
+ if (!generate)
+ return z1 * sigma + mu;
+
+ double u1, u2;
+ do
+ {
+ u1 = rand() * (1.0 / RAND_MAX);
+ u2 = rand() * (1.0 / RAND_MAX);
+ }
+ while ( u1 <= epsilon );
+
+ z0 = sqrt(-2.0 * log(u1)) * cos(two_pi * u2);
+ z1 = sqrt(-2.0 * log(u1)) * sin(two_pi * u2);
+ return z0 * sigma + mu;
+}
+
+double generateUniformNoise(double mu, double sigma)
+{
+ return mu + rand() * (2.0 * sigma / RAND_MAX) - sigma;
+}
+
+
+
+/*////////////////////////////////////////////////////////////////////////////*/
+//VPARTICLEFILTER
+/*////////////////////////////////////////////////////////////////////////////*/
+
+
+double vParticlefilter::initialise(int width, int height, int nparticles,
+ bool adaptive, int nthreads)
+{
+ res.width = width;
+ res.height = height;
+ this->nparticles = nparticles;
+ this->adaptive = adaptive;
+ this->nthreads = nthreads;
+
+ setSeed(res.width/2.0, res.height/2.0);
+
+ ps.clear();
+ ps_snap.clear();
+ accum_dist.resize(this->nparticles);
+
+ if(this->nthreads > 1) {
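+ //split the particle set into contiguous index ranges and assign one
+ //observation thread to each range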
+ for(int i = 0; i < this->nthreads; i++) {
+ int pStart = i * (this->nparticles / this->nthreads);
+ int pEnd = (i+1) * (this->nparticles / this->nthreads);
+ if(i == this->nthreads - 1)
+ pEnd = this->nparticles;
+
+ yInfo() << "Thread" << i << "=" << pStart << "->" << pEnd-1;
+ computeThreads.push_back(new vPartObsThread(pStart, pEnd));
+ computeThreads[i]->start();
+ }
+ }
+
+
+ templatedParticle p;
+ p.weight = 1.0 / (double)nparticles;
+
+ const static double circ_base_radius = 15.0;
+ vector<double> mins = {0.0, 0.0, circ_base_radius/25.5}; //largest diameter (51)
+ vector<double> maxs = {(double)res.width, (double)res.height, circ_base_radius/6}; //smallest diameter (9)
+ p.setConstraints(mins, maxs);
+
+ double max_likelihood = initialiseAsEllipse(circ_base_radius);
+ p.setAppearance(&appearance, max_likelihood);
+
+ for(int i = 0; i < this->nparticles; i++) {
+ p.id = i;
+ ps.push_back(p);
+ ps_snap.push_back(p);
+ }
+
+ resetToSeed();
+
+ return max_likelihood;
+}
+
+void vParticlefilter::setAppearance(const ImageOf<PixelFloat> &new_appearance,
+ double max_likelihood)
+{
+ this->appearance.copy(new_appearance);
+ yInfo() << &new_appearance << " " << &(this->appearance);
+ for(auto &p : ps) {
+ p.setAppearance(&(this->appearance), max_likelihood);
+ yInfo() << p.appearance;
+ }
+
+
+}
+
+void vParticlefilter::setSeed(double x, double y, double r)
+{
+ seedx = x; seedy = y; seedr = r;
+}
+
+void vParticlefilter::resetToSeed()
+{
+ if(seedr) {
+ for(int i = 0; i < nparticles; i++) {
+ ps[i].state = {(double)seedx, (double)seedy, (double)seedr};
+ }
+ } else {
+ for(int i = 0; i < nparticles; i++) {
+ ps[i].state = {(double)seedx, (double)seedy, 1.0};
+ }
+ }
+}
+
+void vParticlefilter::setMinLikelihood(double value)
+{
+ yWarning() << "Deprecated";
+ //for(int i = 0; i < nparticles; i++)
+ //ps[i].updateMinLikelihood(value);
+}
+
+void vParticlefilter::setInlierParameter(double value)
+{
+ yWarning() << "Deprecated";
+ // for(int i = 0; i < nparticles; i++)
+ // ps[i].setInlierParameter(value);
+}
+
+void vParticlefilter::setNegativeBias(double value)
+{
+ yWarning() << "Deprecated";
+ // for(int i = 0; i < nparticles; i++)
+ // ps[i].setNegativeBias(value);
+}
+
+void vParticlefilter::setAdaptive(bool value)
+{
+ adaptive = value;
+}
+
+void vParticlefilter::performObservation(const deque<AE> &q)
+{
+ double normval = 0.0;
+ if(nthreads == 1) {
+
+ //START WITHOUT THREAD
+ for(int i = 0; i < nparticles; i++) {
+ ps[i].initLikelihood(q.size());
+ }
+
+ for(int i = 0; i < nparticles; i++) {
+ for(int j = 0; j < (int)q.size(); j++) {
+ ps[i].incrementalLikelihood(q[j].x, q[j].y, j);
+ }
+ }
+
+ for(int i = 0; i < nparticles; i++) {
+ ps[i].concludeLikelihood();
+ normval += ps[i].weight;
+ }
+
+ } else {
+
+ //START MULTI-THREAD
+ for(int k = 0; k < nthreads; k++) {
+ computeThreads[k]->setDataSources(&ps, &q);
+ computeThreads[k]->process();
+ }
+
+ for(int k = 0; k < nthreads; k++) {
+ normval += computeThreads[k]->waittilldone();
+ }
+ }
+
+ normval = 1.0 / normval;
+
+ pwsumsq = 0;
+ maxlikelihood = 0;
+ for(int i = 0; i < nparticles; i ++) {
+ ps[i].normaliseWithinPopulation(normval);
+ pwsumsq += pow(ps[i].weight, 2.0);
+ maxlikelihood = std::max(maxlikelihood, ps[i].getl());
+ }
+
+}
+
+void vParticlefilter::extractTargetPosition(double &x, double &y, double &r)
+{
+ x = 0; y = 0; r = 0;
+
+ for(auto &p : ps) {
+ x += p.state[templatedParticle::x] * p.weight;
+ y += p.state[templatedParticle::y] * p.weight;
+ r += p.getAbsoluteSize() * p.weight;
+ }
+
+}
+
+void vParticlefilter::extractTargetWindow(double &tw)
+{
+ tw = 0;
+ for(auto &p : ps)
+ tw += p.getn() * p.weight;
+
+}
+
+void vParticlefilter::performResample()
+{
+ if(!adaptive || pwsumsq * nparticles > 2.0) {
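+ //with adaptive resampling enabled, resample only when the effective sample
+ //size (1 / sum of squared weights) falls below half the particle count;
+ //with it disabled, resample on every update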
+ //initialise for the resample
+ double accum = 0;
+ for(int i = 0; i < nparticles; i++) {
+ ps_snap[i] = ps[i];
+ accum += ps[i].weight;
+ accum_dist[i] = accum;
+ }
+
+ //perform the resample
+ for(int i = 0; i < nparticles; i++) {
+ double rn = (double)rand() / RAND_MAX;
+ int j = 0;
+ for(j = 0; j < nparticles; j++)
+ if(accum_dist[j] > rn) break;
+ ps[i] = ps_snap[j];
+ }
+ }
+}
+
+
+void vParticlefilter::performPrediction(double sigma)
+{
+ for(int i = 0; i < nparticles; i++)
+ ps[i].predict(sigma);
+}
+
+std::vector<templatedParticle> vParticlefilter::getps()
+{
+ return ps;
+}
+
+// cv::Mat createCustom(int width, int height){
+
+// cv::Point2d origin((width+4)/2, (height)/2);
+// cv::Mat ell_filter = cv::Mat::zeros(height, width+4, CV_32F);
+// for(int x=0; x< ell_filter.cols; x++) {
+// for(int y=0; y< ell_filter.rows; y++) {
+// double dx = (pow(x,2) -2*origin.x*x + pow(origin.x,2))/pow((width)/2,2);
+// double dy = (pow(y,2) -2*origin.y*y + pow(origin.y,2))/pow((height)/2,2);
+// double value = dx+ dy;
+// if(value > 0.9)
+// ell_filter.at(y, x) = -0.2;
+// else if (value > 0.4 && value<=0.9)
+// ell_filter.at(y, x) = 1;
+// else
+// ell_filter.at(y, x) = 0;
+// }
+// }
+
+// return ell_filter;
+// }
+
+double vParticlefilter::initialiseAsEllipse(double r)
+{
+ appearance.resize(r*2+1, r*2+1);
+ appearance.zero();
+
+ double max_likelihood = 0;
+
+ double a = r;
+ double b = 1.9*r;
+
+ for(double y = 0; y < appearance.height(); y++) {
+ for(double x = 0; x < appearance.width(); x++) {
+
+ double dx = (pow(x, 2) - 2 * r * x + pow(r, 2)) / pow((a) / 2, 2);
+ double dy = (pow(y, 2) - 2 * r * y + pow(r, 2)) / pow((b) / 2, 2);
+ double value = sqrt(dx*dx + dy*dy);
+ if (value > 0.5 && value < 1.5) {
+ appearance(y, x) = 1.0;
+ max_likelihood += appearance(y, x);
+ } else if (value <= 0.5) {
+ appearance(y, x) = -2;
+
+ }
+ // else if(value > 1.5)
+ // appearance(y, x) = -0.5;
+
+ // else
+ // appearance = 0;
+
+ //double d = sqrt((y - r) * (y - r) + (x - r) * (x - r)) - r;
+ // double d = (x-r)*(x-r)/(r*r) + (y-r)*(y-r)/(r*r) - 1;
+
+ // if(d > 2.0)
+ // appearance(y, x) = 0.0;
+
+ // else if(d < -2.0)
+ // appearance(y, x) = -1.0;
+
+ // else if(d < 1.0 && d > -1.0)
+ // {
+ // appearance(y, x) = 1.0;
+ // max_likelihood += appearance(y, x);
+ // }
+ // else
+ // {
+ // appearance(y, x) = (2.0 - fabs(d));
+ // max_likelihood += appearance(y, x);
+ // }
+ }
+ }
+
+// for(size_t y = 0; y < appearance.height(); y++) {
+// for(size_t x = 0; x < appearance.width(); x++) {
+// std::cout << appearance(y, x) << " ";
+// }
+// std::cout << std::endl;
+// }
+
+ return max_likelihood;
+
+}
+
+double vParticlefilter::initialiseAsCircle(int r)
+{
+ appearance.resize(r*2+1, r*2+1);
+ appearance.zero();
+
+ double max_likelihood = 0;
+
+ for(size_t y = 0; y < appearance.height(); y++) {
+ for(size_t x = 0; x < appearance.width(); x++) {
+
+ double d = sqrt((y - r) * (y - r) + (x - r) * (x - r)) - r;
+
+ if(d > 2.0)
+ appearance(y, x) = 0.0;
+
+ else if(d < -2.0)
+ appearance(y, x) = -1.0;
+
+ else if(d < 1.0 && d > -1.0)
+ {
+ appearance(y, x) = 1.0;
+ max_likelihood += appearance(y, x);
+ }
+ else
+ {
+ appearance(y, x) = (2.0 - fabs(d));
+ max_likelihood += appearance(y, x);
+ }
+ }
+ }
+
+// for(size_t y = 0; y < appearance.height(); y++) {
+// for(size_t x = 0; x < appearance.width(); x++) {
+// std::cout << appearance(y, x) << " ";
+// }
+// std::cout << std::endl;
+// }
+
+ return max_likelihood;
+
+}
+
+/*////////////////////////////////////////////////////////////////////////////*/
+//particleobserver (threaded observer)
+/*////////////////////////////////////////////////////////////////////////////*/
+
+vPartObsThread::vPartObsThread(int pStart, int pEnd)
+{
+ this->pStart = pStart;
+ this->pEnd = pEnd;
+ processing.lock();
+ done.lock();
+}
+
+void vPartObsThread::setDataSources(std::vector<templatedParticle> *particles,
+ const deque<AE> *stw)
+{
+ this->particles = particles;
+ this->stw = stw;
+}
+
+void vPartObsThread::process()
+{
+ processing.unlock();
+}
+
+double vPartObsThread::waittilldone()
+{
+ done.lock();
+ return normval;
+}
+
+void vPartObsThread::run()
+{
+ while(!isStopping()) {
+
+ processing.lock();
+ if(isStopping()) return;
+
+ int nw = (*stw).size();
+
+ for(int i = pStart; i < pEnd; i++) {
+ (*particles)[i].initLikelihood(nw);
+ }
+
+ for(int i = pStart; i < pEnd; i++) {
+ for(int j = 0; j < nw; j++) {
+ //AE* v = read_as((*stw)[j]);
+ (*particles)[i].incrementalLikelihood((*stw)[j].x, (*stw)[j].y, j);
+ }
+ }
+
+ normval = 0.0;
+ for(int i = pStart; i < pEnd; i++) {
+ (*particles)[i].concludeLikelihood();
+ normval += (*particles)[i].weight;
+ }
+
+ done.unlock();
+
+ }
+
+}
+
+
+/*////////////////////////////////////////////////////////////////////////////*/
+// templatedParticle
+/*////////////////////////////////////////////////////////////////////////////*/
+
+templatedParticle::templatedParticle()
+{
+ appearance = nullptr;
+}
+
+templatedParticle& templatedParticle::operator=(const templatedParticle &rhs)
+{
+ this->state = rhs.state;
+ this->weight = rhs.weight;
+ return *this;
+}
+
+void templatedParticle::setAppearance(ImageOf<PixelFloat> *appearance, double max_likelihood)
+{
+ this->appearance = appearance;
+ this->max_likelihood = max_likelihood;
+ this->min_likelihood = 0.03 * max_likelihood;
+ this->offset_x = appearance->width() / 2;
+ this->offset_y = appearance->height() / 2;
+}
+
+bool templatedParticle::setConstraints(vector<double> mins, vector<double> maxs)
+{
+ //if(mins.size() != state.size()) return false;
+ //if(maxs.size() != state.size()) return false;
+
+ min_state = mins;
+ max_state = maxs;
+ constrain = true;
+
+ return true;
+}
+
+void templatedParticle::predict(double sigma)
+{
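+ //random-walk motion model: perturb the position by uniform noise of width
+ //sigma and the scale factor by a fixed small amount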
+ state[x] = generateUniformNoise(state[x], sigma);
+ state[y] = generateUniformNoise(state[y], sigma);
+ state[s] = generateUniformNoise(state[s], 0.1);
+
+ if(constrain) checkConstraints();
+
+}
+
+void templatedParticle::checkConstraints()
+{
+
+ for(size_t i = 0; i < state.size(); i++) {
+ if(state[i] < min_state[i]) state[i] = min_state[i];
+ if(state[i] > max_state[i]) state[i] = max_state[i];
+ }
+
+}
diff --git a/pf-ball-demo/vParticle.h b/pf-ball-demo/vParticle.h
new file mode 100644
index 0000000..437d138
--- /dev/null
+++ b/pf-ball-demo/vParticle.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __VPARTICLE__
+#define __VPARTICLE__
+
+#include <yarp/os/all.h>
+#include <yarp/sig/all.h>
+#include <event-driven/all.h>
+#include <vector>
+#include <deque>
+#include <mutex>
+
+class vParticle;
+
+/*////////////////////////////////////////////////////////////////////////////*/
+// templatedParticle
+/*////////////////////////////////////////////////////////////////////////////*/
+class templatedParticle
+{
+protected:
+
+ int offset_x, offset_y;
+
+ bool constrain;
+    std::vector<double> min_state;
+    std::vector<double> max_state;
+
+ //incremental counters
+ double likelihood, score, min_likelihood, max_likelihood;
+ int n;
+
+ void checkConstraints();
+
+public:
+
+ enum { x = 0, y = 1, s = 2};
+
+ int id;
+    std::vector<double> state;
+ double weight;
+    yarp::sig::ImageOf<yarp::sig::PixelFloat> *appearance;
+
+ templatedParticle();
+ //templatedParticle(const templatedParticle &from);
+ templatedParticle& operator=(const templatedParticle &rhs);
+
+    void setAppearance(yarp::sig::ImageOf<yarp::sig::PixelFloat> *appearance, double max_likelihood);
+    bool setConstraints(std::vector<double> mins, std::vector<double> maxs);
+ void predict(double sigma);
+ double getl() { return likelihood; }
+ double getn() { return n; }
+ double getAbsoluteSize() { return offset_x / state[s]; }
+
+ void initLikelihood(int windowSize)
+ {
+ likelihood = min_likelihood;
+ score = 0;
+ n = 0;
+ }
+
+ inline void incrementalLikelihood(int vx, int vy, int n)
+ {
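+        // map the event into template coordinates (scaled by the particle's size
+        // state and centred on the template) and accumulate its template value;
+        // the likelihood is the running maximum of the cumulative score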
+ int index_x = (vx - state[x]) * state[s] + offset_x + 0.5;
+ int index_y = (vy - state[y]) * state[s] + offset_y + 0.5;
+ if(index_x < 0 ||
+ index_y < 0 ||
+ index_x >= (int)appearance->width() ||
+ index_y >= (int)appearance->height())
+ {
+ return;
+ }
+
+ score += (*appearance)(index_x, index_y);
+ if(score >= likelihood)
+ {
+ likelihood = score;
+ this->n = n;
+ }
+
+ }
+
+ void concludeLikelihood()
+ {
+ likelihood *= (state[s] * state[s]);
+ if(likelihood < min_likelihood) {
+ //yInfo() << "scaling up likelihood from minimum";
+ likelihood = min_likelihood;
+ }
+ if(likelihood > max_likelihood) {
+ // yInfo() << "scaling down likelihood from maximum";
+ likelihood = max_likelihood;
+ }
+ weight *= likelihood;
+ }
+
+ void normaliseWithinPopulation(double normval)
+ {
+ weight *= normval;
+ }
+
+
+};
+
+/*////////////////////////////////////////////////////////////////////////////*/
+// vParticleObserver
+/*////////////////////////////////////////////////////////////////////////////*/
+class vPartObsThread : public yarp::os::Thread
+{
+private:
+
+ std::mutex processing;
+ std::mutex done;
+ int pStart;
+ int pEnd;
+
+ double normval;
+
+    std::vector<templatedParticle> *particles;
+    const std::deque<ev::AE> *stw;
+
+public:
+
+ vPartObsThread(int pStart, int pEnd);
+    void setDataSources(std::vector<templatedParticle> *particles, const std::deque<ev::AE> *stw);
+ void process();
+ double waittilldone();
+
+ void run();
+};
+
+/*////////////////////////////////////////////////////////////////////////////*/
+//VPARTICLEFILTER
+/*////////////////////////////////////////////////////////////////////////////*/
+
+class vParticlefilter
+{
+private:
+
+ //parameters
+ int nparticles;
+ int nthreads;
+ ev::resolution res;
+ bool adaptive;
+ double seedx, seedy, seedr;
+
+ //data
+    std::vector<templatedParticle> ps;
+    std::vector<templatedParticle> ps_snap;
+    std::vector<double> accum_dist;
+    std::vector<vPartObsThread *> computeThreads;
+
+ //variables
+ double pwsumsq;
+ int rbound_min;
+ int rbound_max;
+
+ double initialiseAsCircle(int r);
+ double initialiseAsEllipse(double r);
+
+public:
+
+ double maxlikelihood;
+ yarp::sig::ImageOf< yarp::sig::PixelFloat > appearance;
+
+ vParticlefilter() {}
+
+ double initialise(int width, int height, int nparticles, bool adaptive,
+ int nthreads);
+
+ void setSeed(double x, double y, double r = 0.0);
+ void resetToSeed();
+ void setMinLikelihood(double value);
+ void setInlierParameter(double value);
+ void setNegativeBias(double value);
+ void setAdaptive(bool value = true);
+
+    void setAppearance(const yarp::sig::ImageOf<yarp::sig::PixelFloat> &new_appearance, double max_likelihood);
+    void performObservation(const std::deque<ev::AE> &q);
+ void extractTargetPosition(double &x, double &y, double &r);
+ void extractTargetWindow(double &tw);
+ void performResample();
+ void performPrediction(double sigma);
+
+    std::vector<templatedParticle> getps();
+
+};
+
+
+
+#endif
diff --git a/pf-ball-demo/vTrackToRobot.cpp b/pf-ball-demo/vTrackToRobot.cpp
new file mode 100644
index 0000000..c045c48
--- /dev/null
+++ b/pf-ball-demo/vTrackToRobot.cpp
@@ -0,0 +1,543 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "vTrackToRobot.h"
+#include "yarp/math/Math.h"
+#include <cmath>
+#include <algorithm>
+
+using namespace yarp::math;
+using yarp::sig::Vector;
+using namespace ev;
+
+int main(int argc, char * argv[])
+{
+ /* initialize yarp network */
+ yarp::os::Network yarp;
+ if(!yarp.checkNetwork()) {
+ yError() << "Could not connect to yarp";
+ return -1;
+ }
+
+ /* prepare and configure the resource finder */
+ yarp::os::ResourceFinder rf;
+ rf.setDefaultContext( "eventdriven" );
+ rf.setDefaultConfigFile( "vGazeDemo.ini" );
+ rf.configure( argc, argv );
+
+ /* create the module */
+ vTrackToRobotModule module;
+
+ /* run the module: runModule() calls configure first and, if successful, it then runs */
+ return module.runModule(rf);
+}
+
+
+/*//////////////////////////////////////////////////////////////////////////////
+ MODULE
+ ////////////////////////////////////////////////////////////////////////////*/
+
+bool vTrackToRobotModule::configure(yarp::os::ResourceFinder &rf)
+{
+ //set the name of the module
+ setName((rf.check("name", yarp::os::Value("/vGazeDemo")).asString()).c_str());
+ yThresh = rf.check("yThresh", yarp::os::Value(20)).asDouble();
+ rThresh = rf.check("rThresh", yarp::os::Value(5)).asDouble();
+ period = rf.check("period", yarp::os::Value(0.01)).asDouble();
+ gazingActive = rf.check("start", yarp::os::Value(false)).asBool();
+ useDemoRedBall = rf.check("grasp", yarp::os::Value(false)).asBool();
+ velocityControl = rf.check("velocity", yarp::os::Value(false)).asBool();
+ res.height = rf.check("height", yarp::os::Value(240)).asDouble();
+ res.width = rf.check("width", yarp::os::Value(304)).asDouble();
+ arm_traj_time = rf.check("arm_traj_time", yarp::os::Value(1.0)).asDouble();
+ usearm = rf.check("arm") && rf.check("arm", yarp::os::Value(true)).asBool();
+
+ arm_target_position = yarp::sig::Vector(3);
+ arm_target_position = 0.0;
+
+ if(!rpcPort.open(getName() + "/control"))
+ return false;
+ this->attach(rpcPort);
+
+ //inputPort.setDemo(rf.check("demo", yarp::os::Value("gaze")).asString());
+
+ if(!inputPort.open(getName() + "/vBottle:i"))
+ return false;
+
+ if(!cartOutPort.open(getName() + "/cart:o"))
+ return false;
+
+ if(!debugOutPort.open(getName() + "/debug:o"))
+ return false;
+
+ yarp::os::Property options;
+ options.put("device", "gazecontrollerclient");
+ options.put("local", getName());
+ options.put("remote", "/iKinGazeCtrl");
+ gazedriver.open(options);
+ if(gazedriver.isValid()) {
+ gazedriver.view(gazecontrol);
+ gazecontrol->getHeadPose(headhomepos, headhomerot);
+ } else {
+ yWarning() << "Gaze Driver not opened and will not be used";
+ }
+
+ options.put("device","cartesiancontrollerclient");
+ // left arm
+ options.put("remote","/icub/cartesianController/left_arm");
+ options.put("local","/cartesian_client/left_arm");
+ //right arm
+// options.put("remote","/icub/cartesianController/right_arm");
+// options.put("local","/cartesian_client/right_arm");
+
+ if(usearm)
+ armdriver.open(options);
+ if(armdriver.isValid()) {
+ armdriver.view(arm);
+ arm->storeContext(&startup_context_id);
+ arm->setTrajTime(arm_traj_time);
+ // get the torso dofs
+ yarp::sig::Vector newDof, curDof;
+ arm->getDOF(curDof);
+ newDof=curDof;
+
+ // enable the torso yaw and pitch
+ // disable the torso roll
+ newDof[0]=0;
+ newDof[1]=0;
+ newDof[2]=1;
+ arm->setDOF(newDof,curDof);
+
+ double min, max;
+ // we keep the lower limit
+ arm->getLimits(0,&min,&max);
+ arm->setLimits(0,min,30.0);
+
+ arm->getPose(armhomepos, armhomerot);
+
+ } else
+ yWarning() << "Arm driver not opened and will not be used";
+
+ if(velocityControl) {
+ yInfo() << "Velocity control in visual space";
+ if(!velocityController.initialise(res.height, res.width))
+ return false;
+ } else {
+ yInfo() << "Position-based cartesian control";
+ }
+
+ return true ;
+}
+
+bool vTrackToRobotModule::controlCartesian(yarp::sig::Vector ltarget,
+ yarp::sig::Vector rtarget)
+{
+ //check target is present
+ if(!rtarget[3] || !ltarget[3])
+ return false;
+
+ //do our stereo target check
+ if(std::abs(rtarget[1] - ltarget[1]) > yThresh) {
+ //yWarning() << "Y values not consistent for target";
+ return false;
+ }
+ if(std::abs(rtarget[2] - ltarget[2]) > rThresh) {
+ //yWarning() << "Radius not consistent for target";
+ return false;
+ }
+
+ if(!gazingActive || !gazedriver.isValid()) {
+ //yInfo() << "Gaze valid (gazing blocked)";
+ return false;
+ }
+
+ yarp::sig::Vector pleft = ltarget.subVector(0, 1);
+ yarp::sig::Vector pright = rtarget.subVector(0, 1);
+
+ yarp::sig::Vector tp;
+ gazecontrol->triangulate3DPoint(pleft, pright, tp);
+
+ //yarp::sig::Vector pos(3);
+ //yarp::sig::Vector ori(3);
+
+ //gazecontrol->getHeadPose(pos, ori);
+
+ //std::cout << "head pos = " << pos.toString() << std::endl;
+ //std::cout << "head ori = " << ori.toString() << std::endl;
+
+ //std::cout << tp.toString() << std::endl;
+
+ //target too close to body?
+ if(tp[0] > -0.10) {
+ return false;
+ }
+
+ gazecontrol->lookAtStereoPixels(pleft, pright);
+
+ //gazecontrol->lookAtMonoPixel(0, pleft, 0.5);
+ //yarp::sig::Vector mono_pos_left, mono_pos_right;
+ //gazecontrol->get3DPoint(0, pleft, 0.5, mono_pos_left);
+ //gazecontrol->get3DPoint(1, pright, 0.5, mono_pos_right);
+ //yInfo() << mono_pos_left.toString() << mono_pos_right.toString();
+
+ return true;
+
+}
+
+bool vTrackToRobotModule::controlArm(yarp::sig::Vector ltarget,
+ yarp::sig::Vector rtarget)
+{
+ //check target is present
+ if(!rtarget[3] || !ltarget[3])
+ return false;
+
+// //do our stereo target check
+// if(std::abs(rtarget[1] - ltarget[1]) > yThresh) {
+// //yWarning() << "Y values not consistent for target";
+// return false;
+// }
+// if(std::abs(rtarget[2] - ltarget[2]) > rThresh) {
+// //yWarning() << "Radius not consistent for target";
+// return false;
+// }
+
+ if(!gazingActive || !gazedriver.isValid()) {
+ //yInfo() << "Gaze valid (gazing blocked)";
+ return false;
+ }
+
+ if(!armdriver.isValid())
+ return false;
+
+ yarp::sig::Vector pleft = ltarget.subVector(0, 1);
+ yarp::sig::Vector pright = rtarget.subVector(0, 1);
+
+ yarp::sig::Vector tp;
+ //gazecontrol->triangulate3DPoint(pleft, pright, tp);
+ gazecontrol->getFixationPoint(tp);
+
+
+
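+    // project the fixation point onto a circle of radius DCONST around the torso
+    // in the x-y plane: keep the gaze bearing but pin the reach distance, then
+    // apply fixed offsets for the hand and clamp the height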
+ double DCONST = 0.3;
+ tp[0] = -DCONST;
+
+ if(fabs(tp[1]) < 0.1) {
+ tp[0] = -DCONST;
+ } else {
+ double sign = tp[1] < 0 ? -1 : 1;
+ double temp = sqrt(((tp[0] * tp[0]) / (tp[1] * tp[1])) + 1);
+ double yhat = sign * DCONST / temp;
+ double xhat = tp[0] * (yhat / tp[1]);
+ tp[0] = xhat;
+ tp[1] = yhat;
+ }
+
+ //tp[0] += 0.1;
+ tp[1] -= 0.1;
+ tp[2] += -0.15;
+ tp[2] = std::max(tp[2], 0.0);
+
+ //std::cout << tp.toString() << std::endl;
+
+
+ //target too close to body
+ //if(tp[0] > -0.10) {
+ // return false;
+ //}
+
+
+ //tp[2] += -0.10;
+
+ //tp[0] = std::max(tp[0], -0.15);
+ //tp[0] = std::min(tp[0], -0.15);
+ //tp[0] = std::max(tp[0], -0.30);
+ //tp[1] = std::max(tp[1], -0.6);
+ //tp[1] = std::min(tp[1], 0.10);
+ //tp[2] = std::max(tp[2], 0.0); tp[2] = std::min(tp[2], 0.6);
+
+ //tp[0] = -0.3;
+ yarp::sig::Vector od(4);
+ yarp::sig::Matrix handor(3, 3); handor.zero();
+ handor(0, 0) = -1.0; //hand x axis = - robot x axis (point forward)
+ handor(2, 1) = -1.0;
+ handor(1, 2) = 1.0;
+
+ od = dcm2axis(handor);
+
+ //arm->goToPose(tp,od);
+ arm->goToPosition(tp);
+ arm_target_position = tp;
+ //yInfo() << "Arm Controlled";
+
+ return true;
+
+}
+
+bool vTrackToRobotModule::controlVelocity(yarp::sig::Vector ltarget,
+ yarp::sig::Vector rtarget)
+{
+ static double trecord = yarp::os::Time::now();
+ double dt = yarp::os::Time::now() - trecord;
+ trecord += dt;
+
+// //check target is present
+// if(!rtarget[3] || !ltarget[3]) {
+// velocityController.controlReset();
+// return false;
+// }
+
+// bool sameY = std::abs(rtarget[1] - ltarget[1]) < yThresh;
+// bool sameR = std::abs(rtarget[2] - ltarget[2]) < rThresh;
+// if(!sameY || !sameR) {
+// velocityController.controlReset();
+// return false;
+// }
+
+// velocityController.controlStereo(ltarget[0], ltarget[1], rtarget[0],
+// rtarget[1], dt);
+
+
+
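+    // use stereo control when both eyes have a consistent target, fall back to
+    // mono control on whichever eye still sees it, and reset the controller when
+    // the target is lost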
+ if(ltarget[3]) {
+ bool sameY = std::abs(rtarget[1] - ltarget[1]) < yThresh;
+ bool sameR = std::abs(rtarget[2] - ltarget[2]) < rThresh;
+ if(rtarget[3] && sameY && sameR)
+ velocityController.controlStereo(ltarget[0], ltarget[1], rtarget[0],
+ rtarget[1], dt);
+ else
+ velocityController.controlMono(ltarget[0], ltarget[1], dt);
+ } else if(rtarget[3])
+ velocityController.controlMono(rtarget[0], rtarget[1], dt);
+ else {
+ velocityController.controlReset();
+ return false;
+ }
+
+ return true;
+}
+
+bool vTrackToRobotModule::controlExternal(yarp::sig::Vector ltarget,
+ yarp::sig::Vector rtarget)
+{
+
+ //check target is present
+ if(!rtarget[3] || !ltarget[3])
+ return false;
+
+ //do our stereo target check
+ if(std::abs(rtarget[1] - ltarget[1]) > yThresh) {
+ //yWarning() << "Y values not consistent for target";
+ return false;
+ }
+ if(std::abs(rtarget[2] - ltarget[2]) > rThresh) {
+ //yWarning() << "Radius not consistent for target";
+ return false;
+ }
+
+ if(!gazingActive || !gazedriver.isValid()) {
+ //yInfo() << "Gaze valid (gazing blocked)";
+ return false;
+ }
+
+ if(!armdriver.isValid())
+ return false;
+
+ yarp::sig::Vector pleft = ltarget.subVector(0, 1);
+ yarp::sig::Vector pright = rtarget.subVector(0, 1);
+
+ yarp::sig::Vector xrobref, xeye, oeye;
+ gazecontrol->triangulate3DPoint(pleft, pright, xrobref);
+
+ //target too close to body?
+ if(xrobref[0] > -0.10) {
+ return false;
+ }
+
+ gazecontrol->getLeftEyePose(xeye,oeye); //in robot ref frame
+
+ //from eye -> torso
+ yarp::sig::Matrix T=yarp::math::axis2dcm(oeye);
+ T(0,3)=xeye[0];
+ T(1,3)=xeye[1];
+ T(2,3)=xeye[2];
+
+ //from torso -> eye
+ yarp::sig::Matrix Ti = yarp::math::SE3inv(T);
+
+ //this was the target in robot reference frame
+ yarp::sig::Vector fp(4);
+ fp[0]=xrobref[0];
+ fp[1]=xrobref[1];
+ fp[2]=xrobref[2];
+ fp[3]=1.0;
+
+ //convert point in robrf -> eyerf
+ yarp::sig::Vector tp=Ti*fp;
+
+ //send eye ref frame coordinates
+ if(cartOutPort.getOutputCount()) {
+ yarp::os::Bottle &cartcoords = cartOutPort.prepare();
+ cartcoords.clear();
+ // //add the XYZ position
+ cartcoords.addDouble(tp[0]); cartcoords.addDouble(tp[1]); cartcoords.addDouble(tp[2]);
+ //cartcoords.add(-1.0); cartcoords.add(0.0); cartcoords.add(-0.3);
+ // //add some buffer ints
+ cartcoords.addDouble(0.5); cartcoords.addDouble(pleft[0]); cartcoords.addDouble(pleft[1]);
+ // //flag that the object is detected
+ cartcoords.addDouble(1.0);
+
+ cartOutPort.write();
+ }
+
+ return true;
+
+}
+
+bool vTrackToRobotModule::updateModule()
+{
+
+ //if we haven't gazed for some time reset robot position
+ static double htimeout = yarp::os::Time::now();
+ if(yarp::os::Time::now() - htimeout > 5.0) {
+ if(armdriver.isValid()) {
+ arm->goToPose(armhomepos, armhomerot);
+ }
+ yarp::sig::Vector homefix(3);
+ homefix[0] = -10; homefix[1] = 0; homefix[2] = 0.3;
+ //gazecontrol->lookAtFixationPoint(homefix);
+ htimeout = yarp::os::Time::now();
+ }
+
+ //get the targets from the input ports
+ yarp::sig::Vector leftTarget, rightTarget;
+    Vector *dataIn = inputPort.read();
+    if(!dataIn) return !isStopping();
+
+ //yInfo() << dataIn->toString();
+
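+    // the tracker output is expected as [xl yl rl xr yr rr]; an x value of -1 flags
+    // that the corresponding eye has no valid target, and a validity flag is
+    // appended as the 4th element of each target vector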
+ leftTarget = dataIn->subVector(0, 2);
+ leftTarget.push_back(!(leftTarget[0] == -1.0));
+ rightTarget = dataIn->subVector(3, 5);
+ rightTarget.push_back(!(rightTarget[0] == -1.0));
+
+ //perform the type of control as specified
+ bool gazePerformed = false;
+ if(useDemoRedBall) {
+ controlExternal(leftTarget, rightTarget);
+ gazePerformed = true; //let the external function do all control
+ } else {
+ if(velocityControl) {
+ if(velocityController.setVelocityControl())
+ gazePerformed = controlVelocity(leftTarget, rightTarget);
+ } else
+ gazePerformed = controlCartesian(leftTarget, rightTarget);
+ if(usearm)
+ controlArm(leftTarget, rightTarget);
+ }
+
+ //send out a debug if needed
+    if(debugOutPort.getOutputCount() && gazedriver.isValid()) {
+ yarp::sig::Vector values(6);
+ yarp::sig::Vector pleft = leftTarget.subVector(0, 1);
+ yarp::sig::Vector pright = rightTarget.subVector(0, 1);
+ yarp::sig::Vector xrobref;
+ yarp::sig::Vector fixation;
+ gazecontrol->triangulate3DPoint(pleft, pright, xrobref);
+ gazecontrol->getFixationPoint(fixation);
+ values[0] = xrobref[0]; values[1] = xrobref[1]; values[2] = xrobref[2];
+ values[3] = fixation[0]; values[4] = fixation[1]; values[5] = fixation[2];
+ //values[0] = arm_target_position[0]; values[1] = arm_target_position[1]; values[2] = arm_target_position[2];
+ if(armdriver.isValid()) {
+ yarp::sig::Vector x_arm, o_arm;
+ arm->getPose(x_arm, o_arm);
+ values[3] = x_arm[0];
+ values[4] = x_arm[1];
+ values[5] = x_arm[2];
+ }
+ debugOutPort.prepare() = values;
+ debugOutPort.write();
+ }
+
+ //reset the timeout if needed
+ if(gazePerformed)
+ htimeout = yarp::os::Time::now();
+
+
+// static double pyt = Time::now();
+// double cyt = Time::now();
+// yInfo() << "thread time = " << (cyt - pyt)*1000 << "ms";
+// pyt = cyt;
+
+ return !isStopping();
+}
+
+double vTrackToRobotModule::getPeriod()
+{
+ return period;
+}
+
+bool vTrackToRobotModule::respond(const yarp::os::Bottle &command,
+ yarp::os::Bottle &reply)
+{
+ reply.clear();
+
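+    // RPC interface: "start" enables gazing, "stop" disables it, e.g.
+    //   echo "start" | yarp rpc /vGazeDemo/control   (with the default module name)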
+ if(command.get(0).asString() == "start") {
+ reply.addString("starting");
+ gazingActive = true;
+ } else if(command.get(0).asString() == "stop") {
+ reply.addString("stopping");
+ gazingActive = false;
+ } else {
+ return false;
+ }
+
+ return true;
+
+
+}
+
+bool vTrackToRobotModule::interruptModule()
+{
+
+ if(armdriver.isValid()) {
+ arm->goToPoseSync(armhomepos, armhomerot);
+ arm->waitMotionDone(1.0, 4.0);
+ }
+
+ inputPort.interrupt();
+ return yarp::os::RFModule::interruptModule();
+}
+
+bool vTrackToRobotModule::close()
+{
+
+ if(gazedriver.isValid()) {
+ gazecontrol->stopControl();
+ gazedriver.close();
+ }
+
+ if(armdriver.isValid()) {
+ arm->stopControl();
+ arm->restoreContext(startup_context_id);
+ armdriver.close();
+ }
+
+
+ inputPort.close();
+ return yarp::os::RFModule::close();
+}
+
diff --git a/pf-ball-demo/vTrackToRobot.h b/pf-ball-demo/vTrackToRobot.h
new file mode 100644
index 0000000..ba5b98b
--- /dev/null
+++ b/pf-ball-demo/vTrackToRobot.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2017 Event-driven Perception for Robotics
+ * Author: arren.glover@iit.it
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// \defgroup RobotIO RobotIO
+// \defgroup vTrackToRobot vTrackToRobot
+// \ingroup RobotIO
+// \brief perform gaze control given cluster track events
+
+#ifndef __ICUB_VTRACKTOROBOT_H__
+#define __ICUB_VTRACKTOROBOT_H__
+
+#include <yarp/os/all.h>
+#include <yarp/sig/all.h>
+#include <yarp/dev/all.h>
+#include <yarp/math/Math.h>
+#include <event-driven/all.h>
+#include <vector>
+
+using namespace ev;
+
+/*//////////////////////////////////////////////////////////////////////////////
+ VELOCITY CONTROL (WITHOUT GAZECONTROLLER)
+ ////////////////////////////////////////////////////////////////////////////*/
+//PID by Ugo Pattacini
+class PID
+{
+ double Kp,Ki;
+ double integral;
+
+public:
+ // constructor
+ PID() : Kp(0.0), Ki(0.0), integral(0.0) { }
+
+ // helper function to set up sample time and gains
+ void set(const double Kp, const double Ki)
+ {
+ this->Kp=Kp;
+ this->Ki=Ki;
+ }
+
+ // compute the control command
+ double command(const double reference, const double feedback, const double Ts)
+ {
+ // the actual error between reference and feedback
+ double error=reference-feedback;
+
+ // accumulate the error
+ integral+=error*Ts;
+
+ // compute the PID output
+ return (Kp*error+Ki*integral);
+ }
+
+ void reset()
+ {
+ integral = 0;
+ }
+};
+
+class eyeControlPID
+{
+
+protected:
+
+ yarp::dev::PolyDriver driver;
+ yarp::dev::IEncoders *ienc;
+ yarp::dev::IVelocityControl *ivel;
+
+ int nAxes;
+    std::vector<PID*> controllers;
+    std::vector<double> velocity;
+    std::vector<double> encs;
+
+ int u_fixation;
+ int v_fixation;
+
+public:
+
+ eyeControlPID() : nAxes(6), u_fixation(0), v_fixation(0) {}
+
+ bool initialise(int height, int width)
+ {
+ u_fixation = width / 2;
+ v_fixation = height / 2;
+
+ yarp::os::Property option;
+ option.put("device","remote_controlboard");
+ option.put("remote","/icub/head");
+ option.put("local","/controller");
+
+ if (!driver.open(option))
+ {
+ yError()<<"Unable to open the device driver";
+ return false;
+ }
+
+ // open the views
+
+ if(!driver.view(ienc)) {
+ yError() << "Driver does not implement encoder mode";
+ return false;
+ }
+ if(!driver.view(ivel)) {
+ yError() << "Driver does not implement velocity mode";
+ return false;
+ }
+
+ // retrieve number of axes
+ int readAxes;
+ ienc->getAxes(&readAxes);
+ if(readAxes != nAxes) {
+ yError() << "Incorrect number of axes" << readAxes << nAxes;
+ return false;
+ }
+
+ velocity.resize(nAxes);
+ encs.resize(nAxes);
+ controllers.resize(nAxes);
+ for(int i = 0; i < nAxes; i++)
+ controllers[i] = new PID;
+
+ // set up our controllers
+ controllers[0]->set(2.0, 0.0);
+ controllers[1]->set(0.0, 0.0);
+ controllers[2]->set(2.0, 0.0);
+ controllers[3]->set(2.5, 0.0);
+ controllers[4]->set(2.5, 0.0);
+ controllers[5]->set(1.0, 0.0);
+
+ //set velocity control mode
+ return setVelocityControl();
+
+ }
+
+ bool setVelocityControl()
+ {
+ yarp::dev::IControlMode *imod;
+ if(!driver.view(imod)) {
+ yError() << "Driver does not implement control mode";
+ return false;
+ }
+
+        std::vector<int> modes(nAxes, VOCAB_CM_VELOCITY);
+ if(!imod->setControlModes(modes.data())) {
+ yError() << "Could not set velocity control mode";
+ return false;
+ }
+ return true;
+ }
+
+ bool resetToPositionControl()
+ {
+ yarp::dev::IControlMode *imod;
+ if(!driver.view(imod)) {
+ yError() << "Driver does not implement control mode";
+ return false;
+ }
+
+        std::vector<int> modes(nAxes, VOCAB_CM_POSITION);
+        if(!imod->setControlModes(modes.data())) {
+            yError() << "Could not set position control mode";
+ return false;
+ }
+ return true;
+ }
+
+ void controlMono(int u, int v, double dt)
+ {
+ ienc->getEncoders(encs.data());
+
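+        // cascaded control: the eyes are velocity-driven by the pixel error to the
+        // image centre (with tilt limited by the encoder bounds), while the neck is
+        // driven by the eye encoder positions so that the eyes are re-centred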
+ double eyes_pan=-controllers[4]->command(u_fixation, u, dt);
+ double eyes_tilt=controllers[3]->command(v_fixation, v, dt);
+ if(encs[3] < -6 && (v_fixation - v) < 0) {
+ eyes_tilt = 0.0;
+ controllers[3]->reset();
+ }
+ if(encs[3] > 20 && (v_fixation - v) > 0) {
+ eyes_tilt = 0.0;
+ controllers[3]->reset();
+ }
+ controllers[2]->reset();
+ double eyes_ver=-controllers[5]->command(0, 0, dt);
+
+ double neck_tilt=-controllers[0]->command(0.0,encs[3], dt);
+ double neck_pan=controllers[2]->command(0.0,encs[4], dt);
+
+ // send commands to the robot head
+ velocity[0]=neck_tilt; // neck pitch
+ velocity[1]=0.0; // neck roll
+ velocity[2]=neck_pan; // neck yaw
+ velocity[3]=eyes_tilt; // eyes tilt
+ velocity[4]=eyes_pan; // eyes pan
+ velocity[5]=eyes_ver; // eyes vergence
+ ivel->velocityMove(velocity.data());
+ }
+
+ void controlStereo(int ul, int vl, int ur, int vr, double dt)
+ {
+ ienc->getEncoders(encs.data());
+
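+        // as in controlMono, but vergence is driven by the horizontal disparity
+        // (ul - ur) between the left and right targets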
+ double eyes_pan=-controllers[4]->command(u_fixation, ul, dt);
+ double eyes_tilt=controllers[3]->command(v_fixation, vl, dt);
+ if(encs[3] < -6 && (v_fixation - vl) < 0) {
+ eyes_tilt = 0.0;
+ controllers[3]->reset();
+ }
+ if(encs[3] > 20 && (v_fixation - vl) > 0) {
+ eyes_tilt = 0.0;
+ controllers[3]->reset();
+ }
+ double eyes_ver=-controllers[5]->command(0,ul-ur, dt);
+
+ // feed-forward correction to reduce
+ // interplay between vergence and pan
+ eyes_pan-=eyes_ver/2.0;
+
+ double neck_tilt=-controllers[0]->command(0.0,encs[3], dt);
+ double neck_pan=controllers[2]->command(0.0,encs[4], dt);
+
+ // send commands to the robot head
+ velocity[0]=neck_tilt; // neck pitch
+ velocity[1]=0.0; // neck roll
+ velocity[2]=neck_pan; // neck yaw
+ velocity[3]=eyes_tilt; // eyes tilt
+ velocity[4]=eyes_pan; // eyes pan
+ velocity[5]=eyes_ver; // eyes vergence
+ ivel->velocityMove(velocity.data());
+ }
+
+ void controlReset()
+ {
+ for(int i = 0; i < nAxes; i++) {
+ controllers[i]->reset();
+ velocity[i] = 0;
+ }
+ ivel->velocityMove(velocity.data());
+ }
+
+};
+
+
+/*//////////////////////////////////////////////////////////////////////////////
+ MODULE
+ ////////////////////////////////////////////////////////////////////////////*/
+
+class vTrackToRobotModule : public yarp::os::RFModule
+{
+private:
+
+ ev::resolution res;
+ //the event bottle input and output handler
+ //positionReader inputPort;
+    yarp::os::BufferedPort<yarp::sig::Vector> inputPort;
+    yarp::os::BufferedPort<yarp::os::Bottle> cartOutPort;
+    yarp::os::BufferedPort<yarp::sig::Vector> debugOutPort;
+ yarp::sig::Vector arm_target_position;
+
+ //the remote procedure port
+ yarp::os::RpcServer rpcPort;
+
+ yarp::dev::PolyDriver gazedriver;
+ yarp::dev::IGazeControl *gazecontrol;
+
+ eyeControlPID velocityController;
+
+ yarp::sig::Vector armhomepos, armhomerot;
+ yarp::sig::Vector headhomepos, headhomerot;
+ bool usearm;
+ yarp::dev::PolyDriver armdriver;
+ yarp::dev::ICartesianControl *arm;
+ double arm_traj_time;
+ int startup_context_id;
+
+ double yThresh;
+ double rThresh;
+ bool gazingActive;
+ bool useDemoRedBall;
+ bool velocityControl;
+
+ double period;
+
+ bool controlCartesian(yarp::sig::Vector ltarget, yarp::sig::Vector rtarget);
+ bool controlArm(yarp::sig::Vector ltarget, yarp::sig::Vector rtarget);
+ bool controlVelocity(yarp::sig::Vector ltarget, yarp::sig::Vector rtarget);
+ bool controlExternal(yarp::sig::Vector ltarget, yarp::sig::Vector rtarget);
+
+public:
+
+ //the virtual functions that need to be overloaded
+ virtual bool configure(yarp::os::ResourceFinder &rf);
+ virtual bool interruptModule();
+ virtual bool close();
+ virtual double getPeriod();
+ virtual bool updateModule();
+
+ virtual bool respond(const yarp::os::Bottle &command,
+ yarp::os::Bottle &reply);
+
+};
+
+
+#endif
+//empty line to make gcc happy