Commit 9a0e26b6 authored by Ana Tanevska

speed trial 3 - gray img

parent 18a25ffd
@@ -77,7 +77,7 @@ std::vector<std::string> get_arguments() //(int argc, char **argv)
arguments.push_back("/root/openface/build/bin/FeatureExtraction");
arguments.push_back("-mloc");
//arguments.push_back("model/main_clm_general.txt");
arguments.push_back("/root/openface/build/bin/model/main_ceclm_general.txt");
arguments.push_back("/root/openface/build/bin/model/main_clm_general.txt");
/*
// First argument is reserved for the name of the executable
for (int i = 0; i < argc; ++i)
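
The model swap above is the heart of this speed trial: OpenFace's CE-CLM model (main_ceclm_general.txt) is its most accurate landmark detector but also its slowest, while the plain CLM model trades some accuracy for speed. A minimal sketch of how this argument vector is typically consumed downstream, assuming OpenFace's standard FaceModelParameters/CLNF API (the consuming code is not shown in this diff):

```cpp
// Hypothetical usage sketch, not part of this commit: the vector built by
// get_arguments() is handed to OpenFace's parameter parser, which picks up
// the model path that follows the "-mloc" flag.
#include <LandmarkCoreIncludes.h>

std::vector<std::string> arguments = get_arguments();
LandmarkDetector::FaceModelParameters det_parameters(arguments);
// The tracker is constructed from the parsed model location, so swapping
// "-mloc" between CE-CLM and CLM changes which detector runs.
LandmarkDetector::CLNF face_model(det_parameters.model_location);
```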
@@ -374,7 +374,7 @@ bool yarpOpenFaceThread::processing(FaceAnalysis::FaceAnalyser face_analyser, in
{
cv::Mat captured_image = yarp::cv::toCvMat(*inputImage); //already in rgb
if(!captured_image.empty())
{
@@ -385,7 +385,7 @@ bool yarpOpenFaceThread::processing(FaceAnalysis::FaceAnalyser face_analyser, in
// The actual facial landmark detection / tracking
-bool detection_success = LandmarkDetector::DetectLandmarksInVideo(captured_image, face_model, det_parameters, grayscale_image);
+bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_model, det_parameters, grayscale_image);
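
Note that grayscale_image now feeds the detector directly, but its construction sits outside the shown context. A minimal sketch of the likely conversion step, assuming it is done with OpenCV's cvtColor on the RGB frame obtained from toCvMat:

```cpp
// Hypothetical sketch, not visible in this hunk: deriving the single-channel
// frame from the RGB capture before landmark detection.
cv::Mat grayscale_image;
cv::cvtColor(captured_image, grayscale_image, cv::COLOR_RGB2GRAY);
```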
@@ -406,7 +406,7 @@ bool yarpOpenFaceThread::processing(FaceAnalysis::FaceAnalyser face_analyser, in
cv::Mat_<double> hog_descriptor; int num_hog_rows = 0, num_hog_cols = 0;
// Perform AU detection and HOG feature extraction; as this can be expensive, only compute it if needed by output or visualization
-face_analyser.AddNextFrame(captured_image, face_model.detected_landmarks, face_model.detection_success, time_stamp, true); //try both true and false, true is for webcam
+face_analyser.AddNextFrame(grayscale_image, face_model.detected_landmarks, face_model.detection_success, time_stamp, true); //try both true and false, true is for webcam
//face_analyser.GetLatestAlignedFace(sim_warped_img);
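
Since the commit message frames this change as a speed trial, a hedged sketch of how one might measure whether the grayscale input actually helps, wrapping the detection call with std::chrono (none of this instrumentation appears in the diff):

```cpp
// Hypothetical benchmark sketch, not part of this commit: timing the
// landmark detection to compare grayscale vs. RGB input frames.
#include <chrono>
#include <iostream>

auto t0 = std::chrono::steady_clock::now();
bool detection_success = LandmarkDetector::DetectLandmarksInVideo(
    grayscale_image, face_model, det_parameters, grayscale_image);
auto elapsed_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
    std::chrono::steady_clock::now() - t0).count();
std::cout << "DetectLandmarksInVideo took " << elapsed_ms << " ms" << std::endl;
```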
@@ -573,19 +573,19 @@ bool yarpOpenFaceThread::processing(FaceAnalysis::FaceAnalyser face_analyser, in
// Displaying the tracking visualizations
// visualizer.SetImage(captured_image, fx, fy, cx, cy);
visualizer.SetImage(captured_image, fx, fy, cx, cy);
// visualizer.SetObservationFaceAlign(sim_warped_img);
visualizer.SetObservationFaceAlign(sim_warped_img);
// visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
// visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, face_model.GetVisibilities());
visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, face_model.GetVisibilities());
// visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
// visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, fx, fy, cx, cy), face_model.detection_certainty);
visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, fx, fy, cx, cy), face_model.detection_certainty);
// visualizer.SetObservationActionUnits(face_analyser.GetCurrentAUsReg(), face_analyser.GetCurrentAUsClass());
visualizer.SetObservationActionUnits(face_analyser.GetCurrentAUsReg(), face_analyser.GetCurrentAUsClass());
cv::Mat new_image = visualizer.GetVisImage();
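
The visualization produced by GetVisImage() is a cv::Mat; in a YARP module it would typically be converted back to a yarp::sig image before being written to an output port. A hedged sketch, assuming yarp::cv::fromCvMat (the inverse of the toCvMat call used earlier) and a hypothetical outputPort member that is not part of this diff:

```cpp
// Hypothetical sketch, not part of this commit: pushing the visualizer's
// cv::Mat back out through a YARP port. "outputPort" is an assumed
// BufferedPort<ImageOf<PixelRgb>> member of the thread class.
#include <yarp/cv/Cv.h>

cv::Mat new_image = visualizer.GetVisImage();
yarp::sig::ImageOf<yarp::sig::PixelRgb>& outImage = outputPort.prepare();
outImage = yarp::cv::fromCvMat<yarp::sig::PixelRgb>(new_image);
outputPort.write();
```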