How can I use the gesture module with IplImage in OpenCV?

Hi.

I've made an IplImage from a PXCImage.

I'd like to feed my depth IplImage into the gesture module as a PXCImage. However, your examples, such as gesture_viewer.cpp, show an image captured from the camera going straight into the gesture module, with the two tied together through asynchronous calls.

I know that the input type of the gesture module has to be PXCImage.

1.

I'd like to use a PXCImage converted from an IplImage as the input of the gesture module, so I can't pass the capture stream to it directly.

I only use the capture stream to get an image from the camera, not as the input of any module.

I want to know how to put a PXCImage converted from an OpenCV IplImage into the gesture module.
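Roughly, this is what I have in mind (only a sketch: gesture is the PXCGesture instance from my code below, myDepthPXCImage stands for the PXCImage I created from my IplImage, and I am guessing that ProcessImageAsync will accept an image array that I fill in myself, with the depth image in slot 0):

// Sketch only: pass my own PXCImage into the gesture module.
// Assumption: the module reads the depth image from the slot that
// MapImages would normally fill; slot 0 is a guess on my part.
PXCCapture::VideoStream::Images gestureInputs;
memset(gestureInputs, 0, sizeof(gestureInputs));
gestureInputs[0] = myDepthPXCImage;   // PXCImage* converted from my IplImage

PXCSmartSPArray sps(1);
pxcStatus sts = gesture->ProcessImageAsync(gestureInputs, &sps[0]);
if (sts >= PXC_STATUS_NO_ERROR) sps.SynchronizeEx();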

2.

I want to know how to convert an IplImage to a PXCImage. I implemented it myself, but I'm not sure it is correct.

Specifically, I'd like to know which functions I need to call.
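Here is a minimal sketch of the conversion I tried, following the PXCAccelerator::CreateImage route (depth_ipl stands for my depth IplImage; the format and pitch values are my own assumptions and may be exactly the part that is wrong):

// Sketch: wrap an existing IplImage buffer in a PXCImage without copying.
// Assumption: the CPU accelerator accepts externally owned planes and
// COLOR_FORMAT_DEPTH matches the layout of my depth IplImage.
PXCAccelerator *accelerator = 0;
session->CreateAccelerator(PXCAccelerator::ACCEL_TYPE_CPU, &accelerator);

PXCImage::ImageInfo info;
memset(&info, 0, sizeof(info));
info.width  = depth_ipl->width;
info.height = depth_ipl->height;
info.format = PXCImage::COLOR_FORMAT_DEPTH;

PXCImage::ImageData data;
memset(&data, 0, sizeof(data));
data.format     = PXCImage::COLOR_FORMAT_DEPTH;
data.planes[0]  = (unsigned char*)depth_ipl->imageData;
data.pitches[0] = depth_ipl->widthStep;

PXCImage *myDepthPXCImage = 0;
pxcStatus sts = accelerator->CreateImage(&info, 0, &data, &myDepthPXCImage);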

Thank you for your time.

P.S. Could I also get an answer to the earlier questions I posted? I'm still waiting for a reply.

Below is the relevant part of my code. I'd appreciate it if you could look over my annotations.

int wmain(int argc, WCHAR* argv[]) {

...

PXCGesture *gesture=0;
session->CreateImpl(PXCGesture::CUID,(void**)&gesture);

PXCGesture::ProfileInfo pinfo;
gesture->QueryProfile(0,&pinfo);

//UtilCapture capture(&session);           //I don't want to use this!  it is already used for capture.
//capture.LocateStreams(&pinfo.inputs);    //I don't want to use this!   it is already used for capture.

gesture->SetProfile(&pinfo);

PXCSmartPtr<GestureRender> colorRender(new GestureRender(L"Skeleton Viewer"));

...

for (int f=0;f<(int)cmdl.m_nframes;f++) {

...

PXCSmartArray<PXCImage> images(2);
PXCSmartSPArray sp(2);

sts=capture.ReadStreamAsync(images, &sp[0]);
if (sts<PXC_STATUS_NO_ERROR) break;
sp[0]->Synchronize();

PXCImage::ImageData dcolor;
PXCImage::ImageData ddepth;
images[0]->AcquireAccess(PXCImage::ACCESS_READ_WRITE,PXCImage::COLOR_FORMAT_RGB32,&dcolor);
images[1]->AcquireAccess(PXCImage::ACCESS_READ,&ddepth); 

float *uvmap=(float*)ddepth.planes[2];

if (!uvmap) break;

...

PXCAccelerator *accelerator;
session->CreateAccelerator(PXCAccelerator::ACCEL_TYPE_CPU,&accelerator);

PXCImage::ImageInfo info;
memset(&info,0,sizeof(info));
info.height=rgb_d_img->height;
info.width=rgb_d_img->width;
info.format=PXCImage::COLOR_FORMAT_RGB24;
PXCImage::ImageData data;
memset(&data,0,sizeof(data));

data.format=PXCImage::COLOR_FORMAT_RGB24;
data.planes[0]=(unsigned char*)rgb_d_img->imageData;
data.pitches[0]=rgb_d_img->widthStep;
PXCImage *dst_img=0;
accelerator->CreateImage(&info,0,&data,&dst_img);

//////////////////////////////////////////////////////////IplImage to PXCImage. end

/////////////////////////////////////////////////////////gesture_viewer_simple. start

// gesture->ProcessImageAsync(images.ReleaseRefs(),sp.ReleaseRef(1));   // This doesn't work; the system hangs!
//sp.SynchronizeEx();                                                   // This doesn't work; the system hangs!

bool flag=colorRender->RenderFrame(images[1], gesture, &g_gdata, 0);


Between webcam queries you don't have to convert or copy any data. Just create the IplImage headers once and set their data pointers accordingly:

//Create the IplImage headers
IplImage* colorimg = cvCreateImageHeader(cvSize(640, 480), 8, 3);
IplImage* depthimg = cvCreateImageHeader(cvSize(320, 240), 16, 1);
IplImage* UVimg = cvCreateImageHeader(cvSize(320, 240), 32, 2);

CvSize colorsize = cvSize(640, 480);
CvSize depthsize = cvSize(320, 240);
PXCImage::ImageData colorImageData;
PXCImage::ImageData depthImageData;

//Query the webcam (mCapture and images come from the capture loop)
PXCImage *colorImage = mCapture->QueryImage(images, PXCImage::IMAGE_TYPE_COLOR);
PXCImage *depthImage = mCapture->QueryImage(images, PXCImage::IMAGE_TYPE_DEPTH);

//Point the IplImage to the correct data
colorImage->AcquireAccess(PXCImage::ACCESS_READ, &colorImageData);
cvSetData(colorimg, (uchar*)colorImageData.planes[0], colorsize.width*3*sizeof(uchar));
colorImage->ReleaseAccess(&colorImageData);

//Point the IplImage to the correct data
depthImage->AcquireAccess(PXCImage::ACCESS_READ, &depthImageData);
cvSetData(depthimg, (short*)depthImageData.planes[0], depthsize.width*sizeof(short)*3);
cvSetData(UVimg, (float*)depthImageData.planes[2], depthsize.width*2*sizeof(float));
depthImage->ReleaseAccess(&depthImageData);
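Since these are only headers pointing at memory owned by the SDK images, I assume they should be released with cvReleaseImageHeader rather than cvReleaseImage when you are done, so OpenCV does not try to free the SDK's buffers:

// Release only the headers; the pixel buffers still belong to the PXCImages.
cvReleaseImageHeader(&colorimg);
cvReleaseImageHeader(&depthimg);
cvReleaseImageHeader(&UVimg);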

I want to know how to put my PXCImage into the gesture module.

Below is my source code. Please look at the line colorRender->RenderFrame(..,..,..,..).

I've put my own PXCImage (hihi) into the first input of colorRender->RenderFrame.

But it doesn't work well.

I thought that the first input can only be a captured image because of the synchronization. Is that right?

Anyway, I want to replace the PXCImage that is passed as the input of colorRender->RenderFrame(..,..,..,..).

I just added my code lines to gesture_viewer.sln.

/*******************************************************************************

INTEL CORPORATION PROPRIETARY INFORMATION
This software is supplied under the terms of a license agreement or nondisclosure
agreement with Intel Corporation and may not be copied or disclosed except in
accordance with the terms of that agreement
Copyright(c) 2011-2012 Intel Corporation. All Rights Reserved.

*******************************************************************************/
#include "stdafx.h"
#include "Labeling.h"
#include "util_pipeline.h"

#include <gesture_render.h>

PXCGesture::Gesture g_gdata;
bool g_disconnected=false;
int g_nframes;

class GestureHandler:public PXCGesture::Gesture::Handler {
public:
virtual void PXCAPI OnGesture(PXCGesture::Gesture *data) {
if ((data->label&PXCGesture::Gesture::LABEL_MASK_SET)<PXCGesture::Gesture::LABEL_SET_CUSTOMIZED) {
switch (data->label) {
case PXCGesture::Gesture::LABEL_HAND_WAVE: wprintf_s(L"[Gesture] WAVE"); break;
case PXCGesture::Gesture::LABEL_NAV_SWIPE_LEFT: wprintf_s(L"[Gesture] SWIPE_LEFT"); break;
case PXCGesture::Gesture::LABEL_NAV_SWIPE_RIGHT: wprintf_s(L"[Gesture] SWIPE_RIGHT"); break;
case PXCGesture::Gesture::LABEL_POSE_PEACE: wprintf_s(L"[Gesture] PEACE %s", data->active?L"ON":L"OFF"); break;
case PXCGesture::Gesture::LABEL_POSE_THUMB_DOWN: wprintf_s(L"[Gesture] THUMB_DOWN %s", data->active?L"ON":L"OFF"); break;
case PXCGesture::Gesture::LABEL_POSE_THUMB_UP: wprintf_s(L"[Gesture] THUMB_UP %s", data->active?L"ON":L"OFF"); break;
}
wprintf_s(L" time=%I64d frame=%d\n", data->timeStamp, g_nframes);
}
if (data->active) g_gdata=(*data);
}
};

class AlertHandler:public PXCGesture::Alert::Handler {
public:
virtual void PXCAPI OnAlert(PXCGesture::Alert *alert) {
pxcCHAR fov[8]=L"";
if (alert->label&PXCGesture::Alert::LABEL_FOV_LEFT) wcscat_s<8>(fov,L"L");
if (alert->label&PXCGesture::Alert::LABEL_FOV_RIGHT) wcscat_s<8>(fov,L"R");
if (alert->label&PXCGesture::Alert::LABEL_FOV_TOP) wcscat_s<8>(fov,L"T");
if (alert->label&PXCGesture::Alert::LABEL_FOV_BOTTOM) wcscat_s<8>(fov,L"B");
if (alert->label&PXCGesture::Alert::LABEL_FOV_BLOCKED) wcscat_s<8>(fov,L"X");
if (alert->label&PXCGesture::Alert::LABEL_FOV_OK) wcscpy_s<8>(fov,L"OK");
if (fov[0]) wprintf_s(L"[Alert] FOV: %s, body=%d, time=%I64d frame=%d\n",fov,alert->body, alert->timeStamp,g_nframes);
if (alert->label&PXCGesture::Alert::LABEL_GEONODE_ACTIVE) wprintf_s(L"[Alert] GeoNode FOUND, body=%d, time=%I64d frame=%d\n",alert->body, alert->timeStamp,g_nframes);
if (alert->label&PXCGesture::Alert::LABEL_GEONODE_INACTIVE) wprintf_s(L"[Alert] GeoNode LOST, body=%d, time=%I64d frame=%d\n",alert->body, alert->timeStamp,g_nframes);
}
};

static bool IsCameraDisconnected(pxcStatus sts) {
if (sts==PXC_STATUS_DEVICE_LOST) {
if (!g_disconnected) wprintf_s(L"Camera disconnected\n");
g_disconnected=true;
Sleep(5);
return true;
}
if (sts>=PXC_STATUS_NO_ERROR) {
if (g_disconnected) wprintf_s(L"Camera reconnected\n");
g_disconnected=false;
}
return false;
}

void haH_Threshold(IplImage* t_image)
{
// t_image=cvCreateImage( cvGetSize(t_image), IPL_DEPTH_8U, 1);
unsigned int height=t_image->height;
unsigned int width=t_image->width;
unsigned int step=t_image->widthStep;
//printf("height=%d",height);
//printf("width=%d",width);
//printf("step=%d",step);
// unsigned int ccc=t_image->nChannels;
// printf("ccc=%d",ccc);

for(int y=0; y<height; y++)
{
for(int x=0; x<width; x++)
{

if ( (unsigned char)t_image->imageData[y*width+x] > 30 ) //&& (t_image->imageData[y*width+x] < 140 ) )
{
t_image->imageData[y*width+x] = 255;
// t_image->imageData[y*width+x+1] = 255;
// t_image->imageData[y*width+x+2] = 255;
}
else
{
t_image->imageData[y*width+x] = 0;
// t_image->imageData[y*width+x+1] = 0;
// t_image->imageData[y*width+x+2] = 0;
}

}
}
}

void haS_Threshold(IplImage* p_image)
{
// t_image=cvCreateImage( cvGetSize(t_image), IPL_DEPTH_8U, 1);
unsigned int height=p_image->height;
unsigned int width=p_image->width;
unsigned int step=p_image->widthStep;
//printf("height=%d",height);
//printf("width=%d",width);
//printf("step=%d",step);
// unsigned int ccc=t_image->nChannels;
// printf("ccc=%d",ccc);

for(int y=0; y<height; y++)
{
for(int x=0; x<width; x++)
{

if ( (unsigned char)p_image->imageData[y*width+x] < 40 ) //&& (t_image->imageData[y*width+x] < 140 ) )
{
p_image->imageData[y*width+x] = 255;
// t_image->imageData[y*width+x+1] = 255;
// t_image->imageData[y*width+x+2] = 255;
}
else
{
p_image->imageData[y*width+x] = 0;
// t_image->imageData[y*width+x+1] = 0;
// t_image->imageData[y*width+x+2] = 0;
}

}
}
}

void haAND(IplImage* t_image1, IplImage* t_image2, IplImage* t_image3)
{
for(int y=0; y<t_image3->height; y++)
{
for(int x=0; x<t_image3->width; x++)
{
if( (((uchar*)(t_image1->imageData + t_image1->widthStep*y))[x] == 255 ) &&
(((uchar*)(t_image2->imageData + t_image2->widthStep*y))[x] == 255 )
)

((uchar*)(t_image3->imageData + t_image3->widthStep*y))[x] = 255;
else
((uchar*)(t_image3->imageData + t_image3->widthStep*y))[x] = 0;
}
}
}

void uvmap_gy(IplImage *depth_img,IplImage *u_n_v,float * uvmap,IplImage *rgb_d_img)
{

CvSize size_rgb;
size_rgb.height=480;
size_rgb.width=640;

IplImage *u_n_v_3c=cvCreateImage(size_rgb,8,3);
cvCvtColor(u_n_v, u_n_v_3c, CV_GRAY2BGR);
cvCopy(u_n_v_3c,rgb_d_img);

/*
IplImage *depth_img_resize=cvCreateImage(size_rgb,8,1);
cvResize(depth_img,depth_img_resize,CV_INTER_CUBIC);

IplImage *depth_img_3c=cvCreateImage(size_rgb,8,3);
cvCvtColor(depth_img_resize,depth_img_3c,CV_GRAY2BGR);

cvMul(u_n_v_3c,depth_img_3c,rgb_d_img);
*/

for (int y=0;y<240;y++) {
for (int x=0;x<320;x++) {
int xx=(int)(uvmap[(y*320+x)*2+0]*640+0.5);
int yy=(int)(uvmap[(y*320+x)*2+1]*480+0.5);
if (xx>=0 && xx<(int)640 && yy>=0 && yy<(int)480)
{
if(depth_img->imageData[y*320+x]!=0)
{
rgb_d_img->imageData[yy*640*3+xx*3]=180 ; //((pxcU32 *)dcolor.planes[0])[yy*pcolor.imageInfo.width+xx]|=0x000000FF; // the order is (..)(R)(G)(B)
rgb_d_img->imageData[yy*640*3+xx*3+1]=180;
rgb_d_img->imageData[yy*640*3+xx*3+2]=0;
}
}
}
}

/*
for (int y=0; y<480; y++){
for (int x=0; x<640;x++){
if(rgb_d_img->imageData[y*640*3+x*3]!=255)
{
rgb_d_img->imageData[y*640*3+x*3+0]=0;
rgb_d_img->imageData[y*640*3+x*3+1]=0;
rgb_d_img->imageData[y*640*3+x*3+2]=0;

}

}
}
*/

}

int wmain(int argc, WCHAR* argv[]) {
// Create session
PXCSmartPtr<PXCSession> session;
pxcStatus sts=PXCSession_Create(&session);
if (sts<PXC_STATUS_NO_ERROR) {
wprintf_s(L"Failed to create the SDK session\n");
return 1;
}

UtilCmdLine cmdline(session);
if (!cmdline.Parse(L"-nframes-iuid-file-record",argc,argv)) return 3; // Up to here seems to be the common setup.

// Gesture Module
PXCSmartPtr<PXCGesture> gestureDetector;
sts = session->CreateImpl(cmdline.m_iuid, PXCGesture::CUID, (void**)&gestureDetector);
if (sts<PXC_STATUS_NO_ERROR) {
wprintf_s(L"Failed to load any gesture recognition module\n");
return 3;
}

UtilCaptureFile capture(session, cmdline.m_recordedFile, cmdline.m_bRecord); // This is also part of the common setup.
PXCGesture::ProfileInfo gesture_profile;

/* If the RGB display is a must, let's force RGB, just in case the gesture module does not request RGB */

PXCCapture::VideoStream::DataDesc desc;
memset(&desc,0,sizeof(desc));
desc.streams[0].format=PXCImage::COLOR_FORMAT_RGB32;
desc.streams[0].sizeMin.width=640;
desc.streams[0].sizeMin.height=480;

///mine start
desc.streams[1].format=PXCImage::COLOR_FORMAT_DEPTH;
desc.streams[1].sizeMin.width=320;
desc.streams[1].sizeMin.height=240;
//mine end

for (int i=0;;i++) {
sts=gestureDetector->QueryProfile(i,&gesture_profile);
if (sts<PXC_STATUS_NO_ERROR) break;
std::vector<PXCCapture::VideoStream::DataDesc*> minputs;
minputs.push_back(&gesture_profile.inputs);
minputs.push_back(&desc);
sts=capture.LocateStreams(minputs);
if (sts>=PXC_STATUS_NO_ERROR) break;
}

/* if the RGB display is optional */
/*
for (int i=0;;i++) {
sts=gestureDetector->QueryProfile(i,&gesture_profile);
if (sts<PXC_STATUS_NO_ERROR) break;
sts=capture.LocateStreams(&gesture_profile.inputs);
if (sts>=PXC_STATUS_NO_ERROR) break;
}

*/

if (sts<PXC_STATUS_NO_ERROR) {
wprintf_s(L"Failed to locate a capture module\n");
return 3;
}

gesture_profile.activationDistance = 75; // This does the configuration.
gestureDetector->SetProfile(&gesture_profile); // This is also configuration.

PXCSmartPtr<GestureHandler> ghandler(new GestureHandler);
sts=gestureDetector->SubscribeGesture(100,ghandler); // (threshold,*handler)

PXCSmartPtr<AlertHandler> ahandler(new AlertHandler);
sts=gestureDetector->SubscribeAlert(ahandler);

// Create a capture device
PXCSmartPtr<GestureRender> colorRender(new GestureRender(L"Skeleton Viewer")); // declare a PXCSmartPtr<GestureRender> named colorRender
PXCSmartPtr<UtilRender> blobRender(gesture_profile.blobs?new GestureRender(L"Blob Viewer"):0); // these two lines seem to create the display windows

///////////////////////////////////////////////opencv configuration start
CvSize size_rgb;
size_rgb.height=480;
size_rgb.width=640;
//img_rgb=cvCreateImage(size_rgb,8,3);

//IplImage *img_depth=0;
CvSize size_depth;
size_depth.height=240;
size_depth.width=320;
//img_depth=cvCreateImage(size_depth,8,1);

unsigned short *depth_data;
unsigned int *rgb_data;
//////////////////////////////////////////////opencv configuration end

for (g_nframes=0;g_nframes<(int)cmdline.m_nframes; g_nframes++) {
PXCSmartArray<PXCImage> images; // the image information is stored here

PXCSmartSPArray sps(2);
pxcStatus sts=capture.ReadStreamAsync(images,&sps[0]);
if (IsCameraDisconnected(sts)) continue; // device disconnected
if (sts<PXC_STATUS_NO_ERROR) break; // EOF

PXCCapture::VideoStream::Images images2; // declare a PXCCapture::VideoStream::Images named images2
capture.MapImages(0,images,images2); // map images to the gesture module's view
sts=gestureDetector->ProcessImageAsync(images2,&sps[1]);

if (sts<PXC_STATUS_NO_ERROR && sts!=PXC_STATUS_EXEC_ABORTED) break; // error in the gesture module
sps.SynchronizeEx();
sts=sps[0]->Synchronize(0);
if (IsCameraDisconnected(sts)) continue; // device disconnected
if (sts<PXC_STATUS_NO_ERROR) break; // EOF

PXCImage *rgbImage=capture.QueryImage(images,PXCImage::IMAGE_TYPE_COLOR); // executed once per frame
PXCImage *depthImage=capture.QueryImage(images,PXCImage::IMAGE_TYPE_DEPTH);

//mine start
PXCImage::ImageData dcolor;
PXCImage::ImageData ddepth;
rgbImage->AcquireAccess(PXCImage::ACCESS_READ,PXCImage::COLOR_FORMAT_RGB32,&dcolor);
depthImage->AcquireAccess(PXCImage::ACCESS_READ,PXCImage::COLOR_FORMAT_DEPTH,&ddepth);

float *uvmap=(float*)ddepth.planes[2];
if (!uvmap) break;

rgb_data=(unsigned int *)dcolor.planes[0];
IplImage* rgb_img=cvCreateImage(size_rgb,8,3);
unsigned char temp_r;
unsigned char temp_g;
unsigned char temp_b;

for(int y=0; y<480; y++)
{
for(int x=0; x<640; x++)
{
//for(int k=0; k<3 ; k++)
//{
temp_r=rgb_data[y*640+x];
temp_g=((rgb_data[y*640+x])>>8);
temp_b=((rgb_data[y*640+x])>>16);

//rgb_img->imageData[y*640*3+x*3+k]=rgb_data[y*640*3+x*3+k];
rgb_img->imageData[y*640*3+x*3]=temp_r;
rgb_img->imageData[y*640*3+x*3+1]=temp_g;
rgb_img->imageData[y*640*3+x*3+2]=temp_b; // why is the order different from this? ((pxcU32 *)dcolor.planes[0])[yy*pcolor.imageInfo.width+xx]|=0x000000FF; // the order is (..)(R)(G)(B)
//}
}
}

depth_data=(unsigned short*)ddepth.planes[0];
IplImage* depth_img=cvCreateImage(size_depth,8,1);
unsigned char temp_d=0;

cv::Mat depthImage_size(240/*DepthSize.height*/, 320/*DepthSize.width*/,CV_16SC1, ddepth.planes[0], ddepth.pitches[0]);

double m,M;

//double m=30.0;
//double M=700.0;

cv::minMaxIdx(depthImage_size, &m, &M, 0, 0, depthImage_size < 32000);
cv::Mat dstImage(depthImage_size.size(), CV_8UC1);
depthImage_size.convertTo(dstImage, CV_8UC1, 255/(M-m), 1.0*(-m)/(M-m));
dstImage =255 - dstImage;

IplImage depth_header = dstImage; // wrap the cv::Mat in an IplImage header instead of taking the address of a temporary
depth_img = &depth_header;

/*
IplImage* hehe=cvCreateImage(size_depth,8,3);
cvCvtColor(depth_img, hehe, CV_GRAY2BGR);
cvDrawRect(hehe, cvPoint(100, 50), cvPoint(100+100,50+100), CV_RGB(0, 128, 0));
unsigned char temp0=(unsigned char)hehe->imageData[51*320*3+101*3+0];//((hehe->imageData[(50+5)*320*3+100*3])+(hehe->imageData[(50+5)*320*3+100*3+1])+(hehe->imageData[(50+5)*320*3+100*3+2]))/3;
unsigned char temp1=(unsigned char)hehe->imageData[51*320*3+101*3+1];
unsigned char temp2=(unsigned char)hehe->imageData[51*320*3+101*3+2];
unsigned char temppp=(temp0+temp1+temp2)/3;

cvShowImage("hehe",hehe);
cvWaitKey(1);
cvReleaseImage(&hehe);
*/

IplImage* yuv=cvCreateImage(size_rgb, IPL_DEPTH_8U, 3);
IplImage* yyy=cvCreateImage(size_rgb, IPL_DEPTH_8U, 1);
IplImage* uuu=cvCreateImage(size_rgb, IPL_DEPTH_8U, 1);
IplImage* vvv=cvCreateImage(size_rgb, IPL_DEPTH_8U, 1);
IplImage* u_n_v=cvCreateImage(size_rgb, IPL_DEPTH_8U, 1);
IplImage* output=cvCreateImage(size_rgb, IPL_DEPTH_8U, 3);

cvCvtColor(rgb_img, yuv, CV_BGR2YUV);
cvSplit( yuv,0 , uuu, 0, 0 );
cvSplit( yuv, 0, 0, vvv, 0 );

haH_Threshold(uuu);
haS_Threshold(vvv);
haAND(uuu, vvv, u_n_v);

cvErode(u_n_v,u_n_v,0,1);
cvDilate(u_n_v,u_n_v,0,1);

for(int y=0;y<240;y++)
for(int x=0;x<320;x++)
{
if(depth_img->imageData[y*320+x]>40)
depth_img->imageData[y*320+x]=0;

}

cvShowImage("depth_img",depth_img);
cvWaitKey(10);

IplImage *rgb_d_img=cvCreateImage(size_rgb,8,3);
uvmap_gy(depth_img,u_n_v,uvmap,rgb_d_img); //,IplImage *rgb_d_img);

///////////////////////////////////////////////////////////IplImage to PXCImage. start
//IPL_TO_PXC(session,PXCImage::, rgb_d_img);
PXCAccelerator *accelerator;
session->CreateAccelerator(PXCAccelerator::ACCEL_TYPE_CPU,&accelerator);

PXCImage::ImageInfo info;
memset(&info,0,sizeof(info));
info.height=depth_img->height;
info.width=depth_img->width;
info.format=PXCImage::COLOR_FORMAT_DEPTH;
PXCImage::ImageData data;
memset(&data,0,sizeof(data));

data.format=PXCImage::COLOR_FORMAT_DEPTH;
data.planes[0]=(unsigned char*)depth_img->imageData;
data.pitches[0]=depth_img->widthStep;
PXCImage *dst_img=0;
accelerator->CreateImage(&info,0,&data,&dst_img);

PXCImage *hihi=dst_img;
//////////////////////////////////////////////////////////IplImage to PXCImage. end
//mine end

/*
cvShowImage("rgb_d_img",rgb_d_img);
cvWaitKey(10);
cvReleaseImage(&rgb_d_img);

cvShowImage("u_n_v",u_n_v);
cvWaitKey(1);
cvReleaseImage(&u_n_v);

cvShowImage("test_rgb",rgb_img);
cvWaitKey(1);
cvReleaseImage(&rgb_img);
*/

bool flag=colorRender->RenderFrame(hihi, gestureDetector, &g_gdata,0); // This is the main call of the loop; let's analyze it in detail.
if (gesture_profile.blobs) {
PXCSmartPtr<PXCImage> blobImage;
gestureDetector->QueryBlobImage(PXCGesture::Blob::LABEL_SCENE,0,&blobImage);
if (!blobRender->RenderFrame(blobImage)) flag=false;
}
// if (!flag) break;

rgbImage->ReleaseAccess(&dcolor);
depthImage->ReleaseAccess(&ddepth);

}
return 0;
}
