Modifications for BAdaCost multi-view detection.
jmbuena committed Feb 20, 2018
1 parent e6e563e commit eec6698
Showing 41 changed files with 3,266 additions and 83 deletions.
19 changes: 18 additions & 1 deletion README.md
@@ -1 +1,18 @@
http://vision.ucsd.edu/~pdollar/toolbox/doc/
# Multi-class object detection with BAdaCost.

This repo contains a modified version of [Piotr Dollar's toolbox](http://vision.ucsd.edu/~pdollar/toolbox/doc/) (Matlab and C++ code) to replicate the experiments from our cost-sensitive multi-class boosting paper. If you use this code for your own research, **you must reference our journal paper**:

**BAdaCost: Multi-class Boosting with Costs.**
Antonio Fernández-Baldera, José M. Buenaposada, and Luis Baumela.
Pattern Recognition, Elsevier. In press, 2018.
[DOI:10.1016/j.patcog.2018.02.022](https://doi.org/10.1016/j.patcog.2018.02.022)


# Replicate the paper experiments or simply use our trained classifiers.

Our modifications to P. Dollar's toolbox have only been tested with Matlab on GNU/Linux. To replicate the paper experiments you have to (a short setup sketch follows the list):

* Clone this repo
* Execute toolboxCompile
* Clone the [multi-view car detection scripts repo](https://github.com/jmbuena/toolbox.badacost.kitti.public) and follow instructions there.
* Clone the [multi-view face detection scripts repo](https://github.com/jmbuena/toolbox.badacost.faces.public) and follow instructions there.
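
A minimal sketch of the first two steps from the Matlab prompt (the clone destination folder name is illustrative, and a mex-capable C++ compiler is assumed to be already configured):

```matlab
% Assumes the repo was cloned into ./toolbox.badacost.public (illustrative name)
% and that `mex -setup` has already been run with a C++ compiler.
addpath(genpath('toolbox.badacost.public'));  % add all toolbox folders to the path
toolboxCompile;                               % build the C++ mex files used by the channels code
```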
2 changes: 1 addition & 1 deletion channels/chnsCompute.m
@@ -2,7 +2,7 @@
% Compute channel features at a single scale given an input image.
%
% Compute the channel features as described in:
% P. Dollár, Z. Tu, P. Perona and S. Belongie
% P. Dollár, Z. Tu, P. Perona and S. Belongie
% "Integral Channel Features", BMVC 2009.
% Channel features have proven very effective in sliding window object
% detection, both in terms of *accuracy* and *speed*. Numerous feature
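
For orientation, a hedged sketch of how chnsCompute is typically called from Matlab (the default parameters and the output layout follow the toolbox help text; the image file is illustrative):

```matlab
% Hedged sketch: compute channel features for one image at a single scale.
I = imread('peppers.png');     % any RGB image (illustrative file)
chns = chnsCompute(I);         % default parameters: color, gradient magnitude and
                               % gradient histogram channels returned in chns.data
```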
5 changes: 3 additions & 2 deletions channels/chnsPyramid.m
@@ -22,7 +22,7 @@
% can be used to approximate feature responses at nearby scales. The
% approximation is accurate at least within an entire scale octave. For
% details and to understand why this unexpected result holds, please see:
% P. Dollár, R. Appel, S. Belongie and P. Perona
% P. Dollár, R. Appel, S. Belongie and P. Perona
% "Fast Feature Pyramids for Object Detection", PAMI 2014.
%
% The parameter "nApprox" determines how many intermediate scales are
@@ -123,7 +123,8 @@
if(p.nApprox<0), p.nApprox=p.nPerOct-1; end
end
if(nargin==0), pyramid=p; return; end; pPyramid=p;
vs=struct2cell(p); [pChns,nPerOct,nOctUp,nApprox,lambdas,...
vs=struct2cell(p);
[pChns,nPerOct,nOctUp,nApprox,lambdas,...
pad,minDs,smooth,concat,~]=deal(vs{:}); shrink=pChns.shrink;

% convert I to appropriate color space (or simply normalize)
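
To make the approximation described in the comments above concrete, here is a hedged sketch comparing an exact pyramid with the default approximated one (the image and parameter values are illustrative):

```matlab
% Hedged sketch: exact vs. approximated feature pyramid ("Fast Feature Pyramids", PAMI 2014).
I = imread('peppers.png');               % illustrative input image
pEx = chnsPyramid();  pEx.nApprox = 0;   % compute the channels at every scale exactly
pAp = chnsPyramid();  pAp.nApprox = -1;  % default: approximate nPerOct-1 scales per octave
pyrExact  = chnsPyramid(I, pEx);
pyrApprox = chnsPyramid(I, pAp);         % faster: intermediate scales are extrapolated from
                                         % nearby ones via a power law in the scale factor
```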
5 changes: 3 additions & 2 deletions channels/private/convConst.cpp
@@ -197,15 +197,16 @@ void convMax( float *I, float *O, int h, int w, int d, int r ) {
// B=convConst(type,A,r,s); fast 2D convolutions (see convTri.m and convBox.m)
#ifdef MATLAB_MEX_FILE
void mexFunction( int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[] ) {
int *ns, ms[3], nDims, d, m, r, s; float *A, *B, p;
int nDims; float *A, *B, p;
mwSize ms[3]; mwSize *ns, d, m, r, s;
mxClassID id; char type[1024];

// error checking on arguments
if(nrhs!=4) mexErrMsgTxt("Four inputs required.");
if(nlhs > 1) mexErrMsgTxt("One output expected.");
nDims = mxGetNumberOfDimensions(prhs[1]);
id = mxGetClassID(prhs[1]);
ns = (int*) mxGetDimensions(prhs[1]);
ns = (mwSize*) mxGetDimensions(prhs[1]);
d = (nDims == 3) ? ns[2] : 1;
m = (ns[0] < ns[1]) ? ns[0] : ns[1];
if( (nDims!=2 && nDims!=3) || id!=mxSINGLE_CLASS || m<4 )
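
For context, the Matlab wrappers named in the comment above (convTri.m, convBox.m) call into this mex; a hedged usage sketch (filter radii are illustrative):

```matlab
% Hedged sketch: fast smoothing through the convConst mex.
I = single(imread('peppers.png')) / 255;  % the convolutions operate on single-precision data
J = convTri(I, 2);                        % smooth with a triangle filter of radius 2
K = convBox(I, 3);                        % smooth with a box filter of radius 3
```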
33 changes: 21 additions & 12 deletions channels/private/gradientMex.cpp
@@ -7,6 +7,7 @@
#include <math.h>
#include "string.h"
#include "sse.hpp"
#include <iostream>

#define PI 3.14159265f

@@ -152,11 +153,11 @@ void gradHist( float *M, float *O, float *H, int h, int w,
float *H0, *H1, *M0, *M1; int x, y; int *O0, *O1; float xb, init;
O0=(int*)alMalloc(h*sizeof(int),16); M0=(float*) alMalloc(h*sizeof(float),16);
O1=(int*)alMalloc(h*sizeof(int),16); M1=(float*) alMalloc(h*sizeof(float),16);

// main loop
for( x=0; x<w0; x++ ) {
// compute target orientation bins for entire column - very fast
gradQuantize(O+x*h,M+x*h,O0,O1,M0,M1,nb,h0,sInv2,nOrients,full,softBin>=0);

if( softBin<0 && softBin%2==0 ) {
// no interpolation w.r.t. either orientation or spatial bin
H1=H+(x/bin)*hb;
@@ -167,7 +168,6 @@
else if( bin==4 ) for(y=0; y<h0;) { GH; GH; GH; GH; H1++; }
else for( y=0; y<h0;) { for( int y1=0; y1<bin; y1++ ) { GH; } H1++; }
#undef GH

} else if( softBin%2==0 || bin==1 ) {
// interpolate w.r.t. orientation only, not spatial bin
H1=H+(x/bin)*hb;
@@ -178,7 +178,6 @@
else if( bin==4 ) for(y=0; y<h0;) { GH; GH; GH; GH; H1++; }
else for( y=0; y<h0;) { for( int y1=0; y1<bin; y1++ ) { GH; } H1++; }
#undef GH

} else {
// interpolate using trilinear interpolation
float ms[4], xyd, yb, xd, yd; __m128 _m, _m0, _m1;
@@ -312,14 +311,17 @@ void fhog( float *M, float *O, float *H, int h, int w, int binSize,
hogChannels( H+nbo*0, R1, N, hb, wb, nOrients*2, clip, 1 );
hogChannels( H+nbo*2, R2, N, hb, wb, nOrients*1, clip, 1 );
hogChannels( H+nbo*3, R1, N, hb, wb, nOrients*2, clip, 2 );
wrFree(N); mxFree(R1); wrFree(R2);
// Change from toolbox v3.5
// wrFree(N); mxFree(R1); wrFree(R2);
wrFree(N); wrFree(R1); wrFree(R2);
}

/******************************************************************************/
#ifdef MATLAB_MEX_FILE
// Create [hxwxd] mxArray array, initialize to 0 if c=true
mxArray* mxCreateMatrix3( int h, int w, int d, mxClassID id, bool c, void **I ){
const int dims[3]={h,w,d}, n=h*w*d; int b; mxArray* M;
mxArray* mxCreateMatrix3( mwSize h, mwSize w, mwSize d, mxClassID id, bool c, void **I ){
//const size_t dims[3]={size_t(h),size_t(w),size_t(d)}, n=h*w*d; int b; mxArray* M;
const mwSize dims[3]={h,w,d}, n=h*w*d; int b; mxArray* M;
if( id==mxINT32_CLASS ) b=sizeof(int);
else if( id==mxDOUBLE_CLASS ) b=sizeof(double);
else if( id==mxSINGLE_CLASS ) b=sizeof(float);
@@ -331,9 +333,10 @@ mxArray* mxCreateMatrix3( int h, int w, int d, mxClassID id, bool c, void **I ){

// Check inputs and outputs to mex, retrieve first input I
void checkArgs( int nl, mxArray *pl[], int nr, const mxArray *pr[], int nl0,
int nl1, int nr0, int nr1, int *h, int *w, int *d, mxClassID id, void **I )
int nl1, int nr0, int nr1, mwSize *h, mwSize *w, mwSize *d, mxClassID id, void **I )
{
const int *dims; int nDims;
// const size_t *dims; int nDims;
const mwSize *dims; int nDims;
if( nl<nl0 || nl>nl1 ) mexErrMsgTxt("Incorrect number of outputs.");
if( nr<nr0 || nr>nr1 ) mexErrMsgTxt("Incorrect number of inputs.");
nDims = mxGetNumberOfDimensions(pr[0]); dims = mxGetDimensions(pr[0]);
@@ -344,7 +347,8 @@ void checkArgs( int nl, mxArray *pl[], int nr, const mxArray *pr[], int nl0,

// [Gx,Gy] = grad2(I) - see gradient2.m
void mGrad2( int nl, mxArray *pl[], int nr, const mxArray *pr[] ) {
int h, w, d; float *I, *Gx, *Gy;
//int h, w, d; float *I, *Gx, *Gy;
mwSize h, w, d; float *I, *Gx, *Gy;
checkArgs(nl,pl,nr,pr,1,2,1,1,&h,&w,&d,mxSINGLE_CLASS,(void**)&I);
if(h<2 || w<2) mexErrMsgTxt("I must be at least 2x2.");
pl[0]= mxCreateMatrix3( h, w, d, mxSINGLE_CLASS, 0, (void**) &Gx );
@@ -354,7 +358,9 @@ void mGrad2( int nl, mxArray *pl[], int nr, const mxArray *pr[] ) {

// [M,O] = gradMag( I, channel, full ) - see gradientMag.m
void mGradMag( int nl, mxArray *pl[], int nr, const mxArray *pr[] ) {
int h, w, d, c, full; float *I, *M, *O=0;
// int h, w, d, c, full; float *I, *M, *O=0;
mwSize h, w, d;
int c, full; float *I, *M, *O=0;
checkArgs(nl,pl,nr,pr,1,2,3,3,&h,&w,&d,mxSINGLE_CLASS,(void**)&I);
if(h<2 || w<2) mexErrMsgTxt("I must be at least 2x2.");
c = (int) mxGetScalar(pr[1]); full = (int) mxGetScalar(pr[2]);
@@ -366,7 +372,8 @@ void mGradMag( int nl, mxArray *pl[], int nr, const mxArray *pr[] ) {

// gradMagNorm( M, S, norm ) - operates on M - see gradientMag.m
void mGradMagNorm( int nl, mxArray *pl[], int nr, const mxArray *pr[] ) {
int h, w, d; float *M, *S, norm;
//int h, w, d; float *M, *S, norm;
mwSize h, w, d; float *M, *S, norm;
checkArgs(nl,pl,nr,pr,0,0,3,3,&h,&w,&d,mxSINGLE_CLASS,(void**)&M);
if( mxGetM(pr[1])!=h || mxGetN(pr[1])!=w || d!=1 ||
mxGetClassID(pr[1])!=mxSINGLE_CLASS ) mexErrMsgTxt("M or S is bad.");
@@ -376,7 +383,9 @@ void mGradMagNorm( int nl, mxArray *pl[], int nr, const mxArray *pr[] ) {

// H=gradHist(M,O,[...]) - see gradientHist.m
void mGradHist( int nl, mxArray *pl[], int nr, const mxArray *pr[] ) {
int h, w, d, hb, wb, nChns, binSize, nOrients, softBin, useHog;
//int h, w, d, hb, wb, nChns, binSize, nOrients, softBin, useHog;
mwSize h, w, d, hb, wb;
int nChns, binSize, nOrients, softBin, useHog;
bool full; float *M, *O, *H, clipHog;
checkArgs(nl,pl,nr,pr,1,3,2,8,&h,&w,&d,mxSINGLE_CLASS,(void**)&M);
O = (float*) mxGetPr(pr[1]);
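
These entry points are reached through the Matlab wrappers gradient2.m, gradientMag.m and gradientHist.m named in the comments above; a hedged sketch of those calls (the argument defaults are assumptions):

```matlab
% Hedged sketch of the Matlab wrappers around this mex file.
I = single(imread('peppers.png')) / 255;  % gradients expect single-precision input
[Gx, Gy] = gradient2(I);                  % x/y derivatives               (mGrad2)
[M, O]   = gradientMag(I);                % magnitude and orientation     (mGradMag)
H        = gradientHist(M, O, 4, 6);      % 4x4-pixel bins, 6 orientations (mGradHist)
```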
19 changes: 13 additions & 6 deletions channels/private/imPadMex.cpp
@@ -5,6 +5,7 @@
*******************************************************************************/
#include "wrappers.hpp"
#include "string.h"
#include <iostream>
typedef unsigned char uchar;

// pad A by [pt,pb,pl,pr] and store result in B
@@ -69,14 +70,16 @@ template<class T> void imPad( T *A, T *B, int h, int w, int d, int pt, int pb,
// B = imPadMex(A,pad,type); see imPad.m for usage details
#ifdef MATLAB_MEX_FILE
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
int *ns, ms[3], nCh, nDims, pt, pb, pl, pr, flag, k; double *p;
int nCh, nDims, pt, pb, pl, pr, flag, k; double *p;
mwSize *ns, ms[3], *ns_out;
void *A, *B; mxClassID id; double val=0; char type[1024];

// Error checking on arguments
if( nrhs!=3 ) mexErrMsgTxt("Three inputs expected.");
if( nlhs>1 ) mexErrMsgTxt("One output expected.");
nDims=mxGetNumberOfDimensions(prhs[0]); id=mxGetClassID(prhs[0]);
ns = (int*) mxGetDimensions(prhs[0]); nCh=(nDims==2) ? 1 : ns[2];
ns = (mwSize*) mxGetDimensions(prhs[0]); nCh=(nDims==2) ? 1 : ns[2];

if( (nDims!=2 && nDims!=3) ||
(id!=mxSINGLE_CLASS && id!=mxDOUBLE_CLASS && id!=mxUINT8_CLASS) )
mexErrMsgTxt("A should be 2D or 3D single, double or uint8 array.");
@@ -102,10 +105,14 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
if( ns[0]==0 || ns[1]==0 ) flag=0;

// create output array
ms[0]=ns[0]+pt+pb; ms[1]=ns[1]+pl+pr; ms[2]=nCh;
if( ms[0]<0 || ns[0]<=-pt || ns[0]<=-pb ) ms[0]=0;
if( ms[1]<0 || ns[1]<=-pl || ns[1]<=-pr ) ms[1]=0;
plhs[0] = mxCreateNumericArray(3, (const mwSize*) ms, id, mxREAL);
ms[0]=ns[0]+mwSize(pt)+mwSize(pb);
ms[1]=ns[1]+mwSize(pl)+mwSize(pr);
ms[2]=mwSize(nCh);

if( ms[0]<0 || int(ns[0])<=-int(pt) || int(ns[0])<=-int(pb) ) ms[0]=0;
if( ms[1]<0 || int(ns[1])<=-int(pl) || int(ns[1])<=-int(pr) ) ms[1]=0;
plhs[0] = mxCreateNumericArray(3, ms, id, mxREAL);

if( ms[0]==0 || ms[1]==0 ) return;

// pad array
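
For reference, the Matlab-side wrapper is imPad.m; a hedged usage sketch (pad amounts and pad types are illustrative):

```matlab
% Hedged sketch: padding an image through imPadMex.
I  = uint8(reshape(1:12, 3, 4));            % tiny illustrative image
J1 = imPad(I, [10 10 20 20], 'replicate');  % pad [top bottom left right], replicating the border
J2 = imPad(I, 5, 0);                        % pad 5 pixels on every side with the value 0
```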
17 changes: 13 additions & 4 deletions channels/private/imResampleMex.cpp
@@ -8,6 +8,8 @@
#include <math.h>
#include <typeinfo>
#include "sse.hpp"
#include <iostream>

typedef unsigned char uchar;

// compute interpolation values for single column for resapling
@@ -117,7 +119,12 @@ void resample( T *A, T *B, int ha, int hb, int wa, int wb, int d, T r ) {
if(ybd[0]==2) for(; y<hb; y++) { ya=yas[y*4]; B0[y]=U(0)+U(1); }
if(ybd[0]==3) for(; y<hb; y++) { ya=yas[y*4]; B0[y]=U(0)+U(1)+U(2); }
if(ybd[0]==4) for(; y<hb; y++) { ya=yas[y*4]; B0[y]=U(0)+U(1)+U(2)+U(3); }
if(ybd[0]>4) for(; y<hn; y++) { B0[ybs[y]] += C[yas[y]] * ywts[y]; }
// JMBUENA: Fixed initialization of B0
// if(ybd[0]>4) for(; y<hn; y++) { B0[ybs[y]] += C[yas[y]] * ywts[y]; }
if(ybd[0]>4) {
for(int i=0; i<hb; i++) { B0[i] = 0; }
for(; y<hn; y++) { B0[ybs[y]] += C[yas[y]] * ywts[y]; } }
// JMBUENA: Fixed initialization of B0
#undef U
} else {
for(y=0; y<ybd[0]; y++) B0[y] = C[yas[y]]*ywts[y];
@@ -132,21 +139,23 @@
// B = imResampleMex(A,hb,wb,nrm); see imResample.m for usage details
#ifdef MATLAB_MEX_FILE
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
int *ns, ms[3], n, m, nCh, nDims;
int n, m, nCh, nDims;
mwSize ms[3];
mwSize *ns;
void *A, *B; mxClassID id; double nrm;

// Error checking on arguments
if( nrhs!=4) mexErrMsgTxt("Four inputs expected.");
if( nlhs>1 ) mexErrMsgTxt("One output expected.");
nDims=mxGetNumberOfDimensions(prhs[0]); id=mxGetClassID(prhs[0]);
ns = (int*) mxGetDimensions(prhs[0]); nCh=(nDims==2) ? 1 : ns[2];
ns = (mwSize*) mxGetDimensions(prhs[0]); nCh=(nDims==2) ? 1 : ns[2];
if( (nDims!=2 && nDims!=3) ||
(id!=mxSINGLE_CLASS && id!=mxDOUBLE_CLASS && id!=mxUINT8_CLASS) )
mexErrMsgTxt("A should be 2D or 3D single, double or uint8 array.");
ms[0]=(int)mxGetScalar(prhs[1]); ms[1]=(int)mxGetScalar(prhs[2]); ms[2]=nCh;
if( ms[0]<=0 || ms[1]<=0 ) mexErrMsgTxt("downsampling factor too small.");
nrm=(double)mxGetScalar(prhs[3]);

// create output array
plhs[0] = mxCreateNumericArray(3, (const mwSize*) ms, id, mxREAL);
n=ns[0]*ns[1]*nCh; m=ms[0]*ms[1]*nCh;
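
The corresponding Matlab wrapper is imResample.m; a hedged sketch of the calls this mex serves (target sizes are illustrative):

```matlab
% Hedged sketch: bilinear resampling through imResampleMex.
I  = single(rand(48, 64));       % illustrative single-channel input
I2 = imResample(I, 0.5);         % halve the resolution
I3 = imResample(I, [240 320]);   % resample to an explicit [height width]
```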
5 changes: 3 additions & 2 deletions channels/private/rgbConvertMex.cpp
@@ -168,13 +168,14 @@ oT* rgbConvert( iT *I, int n, int d, int flag, oT nrm ) {
// J = rgbConvertMex(I,flag,single); see rgbConvert.m for usage details
#ifdef MATLAB_MEX_FILE
void mexFunction(int nl, mxArray *pl[], int nr, const mxArray *pr[]) {
const int *dims; int nDims, n, d, dims1[3]; void *I; void *J; int flag;
const mwSize *dims; int nDims, n, d; void *I; void *J; int flag;
mwSize dims1[3];
bool single; mxClassID idIn, idOut;

// Error checking
if( nr!=3 ) mexErrMsgTxt("Three inputs expected.");
if( nl>1 ) mexErrMsgTxt("One output expected.");
dims = (const int*) mxGetDimensions(pr[0]); n=dims[0]*dims[1];
dims = (const mwSize*) mxGetDimensions(pr[0]); n=dims[0]*dims[1];
nDims = mxGetNumberOfDimensions(pr[0]);
d = 1; for( int i=2; i<nDims; i++ ) d*=dims[i];

2 changes: 2 additions & 0 deletions channels/private/sse.hpp
@@ -18,6 +18,8 @@ RETf LD( const float &x ) { return _mm_load_ps(&x); }
RETf LDu( const float &x ) { return _mm_loadu_ps(&x); }
RETf STR( float &x, const __m128 y ) { _mm_store_ps(&x,y); return y; }
RETf STR1( float &x, const __m128 y ) { _mm_store_ss(&x,y); return y; }
// Fix suggested by shmulik509 on GitHub (issue #8 of the Piotr Dollar toolbox).
//RETf STRlow( __m64 &x, const __m128 y ) { _mm_storel_pi(&x,y); return y; }
RETf STRu( float &x, const __m128 y ) { _mm_storeu_ps(&x,y); return y; }
RETf STR( float &x, const float y ) { return STR(x,SET(y)); }

37 changes: 37 additions & 0 deletions classify/badacostApply.m
@@ -0,0 +1,37 @@
function predicted = badacostApply(X, classfr)
% Apply a learned BAdaCost weak-learner ensemble.
%
% USAGE
%  predicted = badacostApply( X, classfr )
%
% INPUTS
%  X         - [FxN] data matrix: N feature vectors to classify, each of dimension F
%  classfr   - learned BAdaCost classifier (boosted ensemble of weak learners)
%
% OUTPUTS
%  predicted - [Nx1] predicted output labels
%
% EXAMPLE
%
% See also badacostTrain
%
% Author: Antonio Baldera, modified by Jose M. Buenaposada

n = size(X,2);
margin_vec = zeros(classfr.num_classes, n);

for i=1:length(classfr.WEAK_LEARNERS)
% z is a row vector with the labels
z = classfr.classify_weak_learner(classfr.WEAK_LEARNERS{i}, X);

for j=1:n
margin_vec(:,j) = margin_vec(:,j) + (classfr.WEIGHTS(i).*classfr.Y(:, z(j)));
end
end

% WARNING: changed to accommodate the theory (2016/11)
%[~, predicted] = min(classfr.Cprime' * margin_vec);
[~, predicted] = min(classfr.Cprime * margin_vec);
predicted = predicted(:);

end
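
A hedged usage sketch, assuming `classfr` is the multi-class classifier struct produced by badacostTrain and carrying the fields accessed above (WEAK_LEARNERS, WEIGHTS, Y, Cprime, num_classes, classify_weak_learner); the feature matrix is illustrative:

```matlab
% Hedged sketch: classify a batch of feature vectors with a trained BAdaCost ensemble.
% classfr is assumed to come from badacostTrain and to expose the fields used above.
X = rand(192, 1000);                  % F=192 features for N=1000 illustrative samples
labels = badacostApply(X, classfr);   % [Nx1] labels in 1..classfr.num_classes
```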