1
votes

I use librealsense2 library.

I refer to this site.. https://github.com/IntelRealSense/librealsense/blob/master/examples/align/rs-align.cpp

After mapping depth image to color image with realsense2 library, I want to display the image with opencv Mat(imshow) function.

So I coded it as follows:

#include "librealsense2/rs.hpp"
#include <opencv2/opencv.hpp>



#include <sstream>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <cstring>

using namespace std;
using namespace cv;

void remove_background(rs2::video_frame& other, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist);
float get_depth_scale(rs2::device dev);
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams);
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev);


int main(int args, char * argv[]) try
{
    // Create and initialize GUI related objects

    rs2::colorizer c; 
    rs2::config cfg;
    rs2::pipeline pipe;
    const int width = 1280;
    const int height = 720;

    c.set_option(RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED, 1.f);
    c.set_option(RS2_OPTION_COLOR_SCHEME, 2.f); // White to Black

    cfg.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_BGR8, 30);
    cfg.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, 30);


    rs2::pipeline_profile profile = pipe.start(cfg);

    float depth_scale = get_depth_scale(profile.get_device()); 


    rs2_stream align_to = find_stream_to_align(profile.get_streams());


    rs2::align align(align_to);

    float depth_clipping_distance = 3.f;

    while (true)
    {

        rs2::frameset frameset = pipe.wait_for_frames();

        if (profile_changed(pipe.get_active_profile().get_streams(), profile.get_streams()))
        {
            profile = pipe.get_active_profile();
            align_to = find_stream_to_align(profile.get_streams());
            align = rs2::align(align_to);
            depth_scale = get_depth_scale(profile.get_device());
        }

        auto processed = align.process(frameset);

        rs2::video_frame other_frame = processed.first(align_to);
        rs2::depth_frame aligned_depth_frame = c(processed.get_depth_frame());

        if (!aligned_depth_frame || !other_frame)
        {
            continue;
        }

        remove_background(other_frame, aligned_depth_frame, depth_scale, depth_clipping_distance);



        Mat other_frameaM(Size(width, height), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);
        Mat aligned_depthM(Size(width, height), CV_8UC3, (void*)aligned_depth_frame.get_data(), Mat::AUTO_STEP);


        namedWindow("other window", WINDOW_AUTOSIZE);
        namedWindow("depth window", WINDOW_AUTOSIZE);

        imshow("other window", other_frameaM);
        imshow("depth window", aligned_depthM);
    }

    return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n    " << e.what() << std::endl;
    return EXIT_FAILURE;
}
catch (const std::exception & e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}

float get_depth_scale(rs2::device dev)
{
    // Scan the device's sensors and return the depth scale (meters per raw
    // depth unit) of the first depth sensor found.
    for (rs2::sensor& s : dev.query_sensors())
    {
        if (rs2::depth_sensor depth = s.as<rs2::depth_sensor>())
            return depth.get_depth_scale();
    }
    // No depth-capable sensor on this device.
    throw std::runtime_error("Device does not have a depth sensor");
}


void remove_background(rs2::video_frame& other_frame, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist)
{
    // Paint a pixel of `other_frame` gray (0x99 in every byte) wherever the
    // corresponding depth pixel is invalid (0) or lies beyond `clipping_dist`
    // meters. Both frames are assumed to be aligned and the same size.
    const uint16_t* depth_data = reinterpret_cast<const uint16_t*>(depth_frame.get_data());
    uint8_t* other_data = reinterpret_cast<uint8_t*>(const_cast<void*>(other_frame.get_data()));

    const int w = other_frame.get_width();
    const int h = other_frame.get_height();
    const int bpp = other_frame.get_bytes_per_pixel();

#pragma omp parallel for schedule(dynamic) // rows are independent, so parallelize over y
    for (int row = 0; row < h; row++)
    {
        int idx = row * w; // linear pixel index of the row start
        for (int col = 0; col < w; col++, ++idx)
        {
            // Convert the raw depth value to meters.
            const float dist_m = depth_scale * depth_data[idx];

            // Zero means "no depth data"; anything past the clipping plane is background.
            if (dist_m <= 0.f || dist_m > clipping_dist)
            {
                // Overwrite the whole pixel (all bpp bytes) with 0x99 -> 0x999999.
                std::memset(other_data + idx * bpp, 0x99, bpp);
            }
        }
    }
}

rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams)
{
    // Choose the stream that depth should be aligned to. Color is preferred
    // (it makes the view look better); if no color stream is available, any
    // other non-depth stream is used instead.
    rs2_stream align_to = RS2_STREAM_ANY;
    bool have_depth = false;
    bool have_color = false;

    for (const rs2::stream_profile& sp : streams)
    {
        const rs2_stream type = sp.stream_type();

        if (type == RS2_STREAM_DEPTH)
        {
            have_depth = true;
            continue;
        }

        // Keep taking the latest non-depth candidate until color is locked in.
        if (!have_color)
            align_to = type;

        if (type == RS2_STREAM_COLOR)
            have_color = true;
    }

    if (!have_depth)
        throw std::runtime_error("No Depth stream available");

    if (align_to == RS2_STREAM_ANY)
        throw std::runtime_error("No stream found to align with Depth");

    return align_to;
}
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev)
{
    // Returns true when some stream from `prev` is no longer present in
    // `current` (matched by unique id). Streams that were merely added to
    // `current` do not count as a change.
    return std::any_of(prev.begin(), prev.end(), [&current](const rs2::stream_profile& old_sp) {
        const bool still_present = std::any_of(
            current.begin(), current.end(),
            [&old_sp](const rs2::stream_profile& cur_sp) { return cur_sp.unique_id() == old_sp.unique_id(); });
        return !still_present;
    });
}

There are only gray screens and nothing happens.

 Mat other_frameaM(Size(width, height), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);
Mat aligned_depthM(Size(width, height), CV_8UC3, (void*)aligned_depth_frame.get_data(), Mat::AUTO_STEP);

I guess there is no problem here, because the depth image and the RGB image were displayed fine in CV_8UC3 format.

However, when I align the frames and then display the result in OpenCV, the image appears as only a gray screen.

auto frames = pipe.wait_for_frames(); // Wait for next set of frames from the camera

rs2::video_frame color = frames.get_color_frame();
rs2::depth_frame depth = color_map(frames.get_depth_frame());

if (!color)
    color = frames.get_infrared_frame();

Mat colorM(Size(width, height), CV_8UC3, (void*)color.get_data(), Mat::AUTO_STEP);
Mat depthM(Size(width, height), CV_8UC3, (void*)depth.get_data(), Mat::AUTO_STEP);

It is a part of code that output color image and depth image. This works well.

so I guess..

rs2::video_frame other_frame = processed.first(align_to);
rs2::depth_frame aligned_depth_frame = c(processed.get_depth_frame());

Whatever the processing, I thought it would work because the data is still fetched as frames. I think I have made a big mistake somewhere in this part of the code.

Which part is wrong?

enter image description here

2

2 Answers

2
votes

There are several ways to store an image in memory. There is no guarantee that you can just pass the buffer and it will all work. Try copying pixel by pixel. You should know that OpenCV uses the BGR interleaved image format, while RealSense might use another format.

2
votes

1) Get aligned frames

frameset data = pipe.wait_for_frames();
frameset aligned_set = align_to.process(data);
auto color_mat = frame_to_mat(aligned_set.get_color_frame());
auto depth_mat = frame_to_mat(aligned_set.get_depth_frame());

2) frame_to_mat helper function

cv::Mat frame_to_mat(const rs2::frame& f)
{
    using namespace cv;
    using namespace rs2;

    auto vf = f.as<video_frame>();
    const int w = vf.get_width();
    const int h = vf.get_height();

    // NOTE: the Mats returned below wrap the frame's buffer WITHOUT copying,
    // so they are only valid while `f` is alive. Call .clone() on the result
    // if it must outlive the frame.
    if (f.get_profile().format() == RS2_FORMAT_BGR8)
    {
        return Mat(Size(w, h), CV_8UC3, (void*)f.get_data(), Mat::AUTO_STEP);
    }
    else if (f.get_profile().format() == RS2_FORMAT_RGB8)
    {
        auto r = Mat(Size(w, h), CV_8UC3, (void*)f.get_data(), Mat::AUTO_STEP);
        // FIX: CV_RGB2BGR is the legacy C-API name and was removed in OpenCV 4;
        // cv::COLOR_RGB2BGR works in both OpenCV 3 and 4. cvtColor allocates a
        // fresh buffer for the output, so the returned Mat owns its data here.
        cvtColor(r, r, COLOR_RGB2BGR);
        return r;
    }
    else if (f.get_profile().format() == RS2_FORMAT_Z16)
    {
        // Raw depth: 16-bit unsigned, single channel.
        return Mat(Size(w, h), CV_16UC1, (void*)f.get_data(), Mat::AUTO_STEP);
    }
    else if (f.get_profile().format() == RS2_FORMAT_Y8)
    {
        // Infrared: 8-bit grayscale.
        return Mat(Size(w, h), CV_8UC1, (void*)f.get_data(), Mat::AUTO_STEP);
    }

    throw std::runtime_error("Frame format is not supported yet!");
}