Basic Image AlgorithmS Library  2.8.0
 All Classes Namespaces Functions Variables Typedefs Enumerations Enumerator Friends Groups Pages
Condensation.cpp
1 /*
2 This file is part of the BIAS library (Basic ImageAlgorithmS).
3 
4 Copyright (C) 2003-2009 (see file CONTACT for details)
5  Multimediale Systeme der Informationsverarbeitung
6  Institut fuer Informatik
7  Christian-Albrechts-Universitaet Kiel
8 
9 
10 BIAS is free software; you can redistribute it and/or modify
11 it under the terms of the GNU Lesser General Public License as published by
12 the Free Software Foundation; either version 2.1 of the License, or
13 (at your option) any later version.
14 
15 BIAS is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU Lesser General Public License for more details.
19 
20 You should have received a copy of the GNU Lesser General Public License
21 along with BIAS; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24 
25 
26 #include "Condensation.hh"
27 #include <float.h>
28 #include <Base/Common/W32Compat.hh>
29 
30 //#define TIMING
31 #ifdef TIMING
32 #include <Base/Debug/TimeMeasure.hh>
33 #endif
34 
35 using namespace BIAS;
36 using namespace std;
37 
{
 // stateDim_ starts unset; it must be assigned before Init() is called
 // (Init() reports an error and returns -1 if it is still 0)
 stateDim_=0;
 // fraction of samples re-seeded from the prior distribution each step
 defaultInitFraction_=0.1;
 // fraction of samples drawn via importance sampling (0 disables it)
 importanceFraction_=0.0;
 // use the second order auto-regressive process model by default
 _doSecondOrderARP=true;
 // scratch buffer for random numbers, allocated in Init()
 _randomArray=NULL;
 // register the debug levels used throughout this class
 NewDebugLevel("D_COND_INIT");
 NewDebugLevel("D_COND_PROCESS");
 NewDebugLevel("D_COND_WEIGHTS");
 NewDebugLevel("D_COND_DEBUG");
 NewDebugLevel("D_COND_ERROR");
 NewDebugLevel("D_COND_MEANVAR");
}
52 
54 {
55  if (_randomArray) delete[] _randomArray;
56 }
57 
58 int Condensation::Init(unsigned int N)
59 {
60  initial_=true;
61  Nsamples_=N;
62  sumOfWeights_=0;
63  random_.Reset();
64  _randomArray=new double[N];
65  // memory allocation
66  samplePosNew_.resize(N);
67  samplePosOld_.resize(N);
68  samplePosTminus2_.resize(N);
69  indexTminus2_.resize(N);
70  oldIndices_.resize(N);
71  sampleWeights_.resize(N);
72  sampleWeights_.assign(N,1);
73  cumulProbArray_.resize(N);
74  sampleImportanceWeights_.resize(N);
75  cumulProbArrayImportance_.resize(N);
76  correctionOfImportanceWeights_.resize(N);
77  correctionOfImportanceWeights_.assign(N,1);
78 
79  if (stateDim_==0) {
80  BIASERR("Error at Init: stateDim_==0, set this at constructor");
81  return -1;
82  }
83 
84  // further memory allocation
85  diffusionSigma_.newsize(stateDim_);
86  diffusionSigma_.Set(0);
87  processFirstOrderScale_.newsize(stateDim_);
88  processFirstOrderScale_.Set(0);
89  processSecondOrderScale_.newsize(stateDim_);
90  processSecondOrderScale_.Set(0);
91  processMean_.newsize(stateDim_);
92  processMean_.Set(0);
93  offSet_.newsize(stateDim_);
94  offSet_.Set(0);
95  mean_.newsize(stateDim_);
96  mean_.Set(0);
97  meanTminus1_.newsize(stateDim_);
98  meanTminus1_.Set(0);
99  BCDOUT(D_COND_INIT,"allocating memory for sample vectors\n");
100 
101  InitModelDefaults();
102  BCDOUT(D_COND_INIT,"InitModelDefaults() passed.\n");
103 
104  // initial positions
105  for (unsigned int n=0;n<Nsamples_;n++) {
106  samplePosNew_[n].newsize(stateDim_);
107  samplePosOld_[n].newsize(stateDim_);
108  samplePosTminus2_[n].newsize(stateDim_);
109  }
110  return 0;
111 }
112 
{
#ifdef TIMING
 TimeMeasure timer;
 timer.Start();
#endif
 BCDOUT(D_COND_INIT,"Initial Positions set.\n");
 // draw ALL samples from the user-supplied prior distribution
 InitPriorPositions(Nsamples_);
 // the history buffers start out identical to the initial sample set
 samplePosOld_ = samplePosNew_;
 samplePosTminus2_ = samplePosOld_;
 sumOfWeights_=0;
 // build the cumulative probability array from the (initially uniform,
 // see Init()) sample weights; indexTminus2_ starts as identity mapping
 for (unsigned int n=0; n<Nsamples_; n++) {
 BCDOUT(D_COND_INIT,"prior position of sample "<<n<<": "
 <<samplePosOld_[n]<<endl);
 sumOfWeights_ += sampleWeights_[n];
 cumulProbArray_[n] = sumOfWeights_;
 indexTminus2_[n] = n;
 }
 BCDOUT(D_COND_INIT,"init done\n");
#ifdef TIMING
 timer.Stop();
 cerr << "Condensation::InitPrior() took "<<timer.GetRealTime()<<" us"<<endl;
#endif

 return 0;
}
139 
140 
141 
{
#ifdef TIMING
 TimeMeasure timer;
 timer.Start();
#endif

 unsigned int sampleIndex=0;
 // the number of samples distributed by PriorPositions
 // samples with index < initSamples are distributed in this way
 unsigned int initSamples=(unsigned int)((double)Nsamples_*
 (defaultInitFraction_));
 // samples with index < importanceSamples and index >= initSamples
 // are distributed using importance sampling
 unsigned int importanceSamples=(unsigned int)((double)Nsamples_*
 importanceFraction_+
 initSamples);

 // group 1: indices [0, initSamples) are re-seeded from the prior
 InitPriorPositions(initSamples);

#ifdef TIMING
 timer.Stop();
 cerr << "Condensation::PredictNewSamplePositions() pick prior samples took "<<timer.GetRealTime()<<" us"<<endl;
 timer.Reset(); timer.Start();
#endif


 // group 2: indices [initSamples, importanceSamples) are drawn by
 // importance sampling -- only if an importance fraction was configured
 if (importanceSamples!=initSamples && importanceSamples!=0) {
 //// importance sampling
 // virtual hook: fills sampleImportanceWeights_
 EvaluateImportanceWeights();

 // build the cumulative array of importance weights for resampling
 sumOfImportanceWeights_ = 0;
 for (unsigned int n=0; n<Nsamples_; n++) {
 BCDOUT(D_COND_WEIGHTS,"importance weight for sample "<<n<<": "
 <<sampleImportanceWeights_[n]<<endl);
 sumOfImportanceWeights_ += sampleImportanceWeights_[n];
 cumulProbArrayImportance_[n] = sumOfImportanceWeights_;
 }
 for (unsigned int i=initSamples; i<importanceSamples; i++) {
 // NOTE(review): this debug line prints the index picked in the
 // PREVIOUS iteration -- sampleIndex is only assigned below it
 BCDOUT(D_COND_PROCESS,"importance sample with index:"
 <<sampleIndex<<endl);
 sampleIndex = PickImportanceSample_();
 // indexTminus2_[i]=sampleIndex;
 oldIndices_[i]=sampleIndex;
 SecondOrderARP_(sampleIndex, samplePosNew_[i]);
 // correction factor (posterior weight / importance weight);
 // the 1E-9 guards against division by zero
 correctionOfImportanceWeights_[i]=
 sampleWeights_[sampleIndex] /
 (sampleImportanceWeights_[sampleIndex]+1E-9);
 BCDOUT(D_COND_PROCESS,"adding diffusion for sampleIndex:"
 <<sampleIndex<<endl);
 }
 }

#ifdef TIMING
 timer.Stop();
 cerr << "Condensation::PredictNewSamplePositions() pick importance samples took "<<timer.GetRealTime()<<" us"<<endl;
 timer.Reset(); timer.Start();
#endif

 //// standard
 /*
 int num=Nsamples_-importanceSamples;
 random_.GetUniformDistributed(0.0, sumOfWeights_, num, _randomArray);
 double r;
 int p;

 // fast
 qsort((void*)_randomArray, num, sizeof(double), Condensation::compdouble);
 p=0;
 if (_doSecondOrderARP){
 for (int n=0; n<num; n++){
 r=_randomArray[n];
 while (cumulProbArray_[p]<r) p++;
 indexTminus2_[n]=p;
 SecondOrderARP_(sampleIndex, samplePosNew_[n]);
 }
 } else {
 for (int n=0; n<num; n++){
 r=_randomArray[n];
 while (cumulProbArray_[p]<r) p++;
 samplePosNew_[n]=samplePosOld_[p];
 }
 }
 */
 // this is astonishingly faster ?
 /// now pick samples from the last time step using the weights
 // group 3: indices [importanceSamples, Nsamples_) are resampled from
 // the previous posterior via the cumulative weight array
 for (unsigned int n=importanceSamples; n<Nsamples_; n++) {
 sampleIndex = PickOneSample_();
 BCDOUT(D_COND_PROCESS,"predicting sample with index:"<<sampleIndex<<endl);
 oldIndices_[n]=sampleIndex;
 //indexTminus2_[n]=sampleIndex;
 BCDOUT(D_COND_DEBUG,"Nsamples in SecondOrderARP: "<<n<<" SampleIndex: "
 <<sampleIndex<<endl);
 SecondOrderARP_(sampleIndex, samplePosNew_[n]);
 }
 indexTminus2_=oldIndices_;

#ifdef TIMING
 timer.Stop();
 cerr << "Condensation::PredictNewSamplePositions() pick standard samples took "<<timer.GetRealTime()<<" us"<<endl;
 timer.Reset(); timer.Start();
#endif

 /// most probably from the process model
 AddOffsets_();

 /// most probably gaussian noise
 AddDiffusions_();

#ifdef TIMING
 timer.Stop();
 cerr << "Condensation::PredictNewSamplePositions() took "<<timer.GetRealTime()<<" us"<<endl;
#endif

 return 0;
}
258 
259 
261 {
262  if (fraction<0) defaultInitFraction_=0;
263  else if (fraction>1) defaultInitFraction_=1;
264  else defaultInitFraction_=fraction;
265 }
266 
267 
269 {
270  if (fraction<0) importanceFraction_=0;
271  else if (fraction>1) importanceFraction_=1;
272  else importanceFraction_=fraction;
273 }
274 
{
 // do not add diffusion to newly generated samples
 // (indices below StartIndex were just re-seeded from the prior)
 const unsigned StartIndex=
 (unsigned int)((double)Nsamples_*defaultInitFraction_);

 std::vector<double> r;
 r.reserve(Nsamples_-StartIndex);
 // per state dimension: draw one gaussian value per remaining sample
 // and add it to that sample's coordinate
 for (unsigned int d=0; d<stateDim_; d++) {
 // NOTE(review): r is only reserve()d here; this assumes
 // GetNormalDistributed() sizes/fills r itself -- confirm in Random
 random_.GetNormalDistributed(0.0, diffusionSigma_[d],
 Nsamples_-StartIndex, r);
 for (unsigned i=StartIndex; i<Nsamples_; i++){
 samplePosNew_[i][d] += r[i-StartIndex];
 }
 }
}
291 
293 {
294  if (offSet_.size()>0)
295  for (unsigned int n=0;n<Nsamples_;n++) {
296  samplePosNew_[n] += offSet_;
297  samplePosOld_[n] += offSet_;
298  }
299 }
300 
{
#ifdef TIMING
 TimeMeasure timer;
 timer.Start();
#endif
 double cumul_total;

 cumul_total = 0.0;
 // virtual hook: the derived class evaluates the observation density at
 // every predicted sample position and stores it in sampleWeights_
 EvaluateObservationDensities();

#ifdef TIMING
 timer.Stop();
 std::cout<< "Condensation::CalculateBaseWeights_() Eavl took "<<timer.GetRealTime()<<" us"<<std::endl;
 timer.Reset();
 timer.Start();
#endif

 // accumulate the weights into the cumulative probability array that
 // drives the resampling in the next prediction step
 for (unsigned int n=0; n<Nsamples_; n++) {
 BCDOUT(D_COND_WEIGHTS,"weight for sample "<<n<<": "<<sampleWeights_[n]<<endl);
 cumul_total += sampleWeights_[n];
 cumulProbArray_[n] = cumul_total;
 }
 sumOfWeights_ = cumul_total;

#ifdef TIMING
 timer.Stop();
 std::cerr << "Condensation::CalculateBaseWeights_() took "<<timer.GetRealTime()<<" us"<<std::endl;
#endif
}
331 
{
#ifdef TIMING
 TimeMeasure timer, ot;
 timer.Start();
 ot.Start();
#endif

 BCDOUT(D_COND_PROCESS,"Calculating Weights\n");
 // evaluate the observation density for every sample and rebuild the
 // cumulative probability array (sampleWeights_/cumulProbArray_)
 CalculateBaseWeights_();

#ifdef TIMING
 timer.Stop();
 std::cerr << "Condensation::Process() CalculateBaseWeights_() took "<<timer.GetRealTime()<<" us"<<std::endl;
 timer.Reset();
 timer.Start();
#endif

 // shift the sample history: t-1 becomes t-2, t becomes t-1
 samplePosTminus2_=samplePosOld_;
 samplePosOld_=samplePosNew_;
 meanTminus1_=mean_;

#ifdef TIMING
 timer.Stop();
 std::cerr << "Condensation::Process() copy of vector took "<<timer.GetRealTime()<<" us"<<std::endl;
 timer.Reset();
 timer.Start();
#endif

 CalculateMean_();
 // on the very first iteration there is no previous mean yet, so reuse
 // the current one (presumably consumed by the second-order ARP model)
 if (initial_) {
 meanTminus1_=mean_;
 initial_=false;
 }

#ifdef TIMING
 timer.Stop();
 std::cerr << "Condensation::Process() calculate mean took "<<timer.GetRealTime()<<" us"<<std::endl;
 ot.Stop();
 std::cerr << "Condensation::Process() took "<<ot.GetRealTime()<<" us"<<std::endl;
#endif
}
374 
{
 // reject NaN/inf weight sums; "x != x" below is the classic NaN test
 BIASASSERT(!isnan(sumOfWeights_) && !isinf(sumOfWeights_));
 if (sumOfWeights_<1E-20||sumOfWeights_!=sumOfWeights_) return 0;
 double var=0;
 // NOTE(review): the declaration of `diff` is elided in this listing
 // (presumably Vector<double> diff;) -- confirm against the repository
 for (unsigned int n=0;n<Nsamples_;n++) {
 diff = samplePosOld_[n] - mean_;
 // diff*diff yields a scalar (squared distance from the mean),
 // weighted by the sample's observation weight
 var += (diff*diff) * sampleWeights_[n];
 }
 var /= sumOfWeights_;
 // map a NaN result to 0 rather than returning it to the caller
 if (var!=var) return 0;
 return var;
}
389 
{
 var.newsize(stateDim_);
 var.SetZero();
 // skip samples that were re-seeded from the prior or drawn by
 // importance sampling; only the "standard" samples enter the estimate
 unsigned start=(unsigned)rint(Nsamples_*(defaultInitFraction_+
 importanceFraction_));
 // weight mass of the considered samples (cumulProbArray_ is cumulative)
 double SumOfWeights=cumulProbArray_[Nsamples_-1]-cumulProbArray_[start];
 if (SumOfWeights<DBL_EPSILON) {
 BCDOUT(D_COND_MEANVAR, "sum of weights top small for variance "
 <<"calculation "<<SumOfWeights);
 return false;
 }
 Vector<double> diff;
 for (unsigned int n=start;n<Nsamples_;n++) {
 diff = samplePosOld_[n] - mean_;
 //diff *= sampleWeights_[n];
 // elementwise square of the deviation, weighted by the sample weight
 var += (diff.ElementwiseProduct(diff)) * sampleWeights_[n];
 }
 var /= SumOfWeights;
 return true;
}
411 
413 {
414  var.newsize(stateDim_);
415  var.SetZero();
416  unsigned start=(unsigned)rint(Nsamples_*(defaultInitFraction_+
417  importanceFraction_));
418  if (Nsamples_-start<=1) {
419  BCDOUT(D_COND_MEANVAR, "not enough samples "<<Nsamples_-start<<endl);
420  return false;
421  }
422  Vector<double> diff;
423 
424  for (unsigned int n=start;n<Nsamples_;n++) {
425  diff = samplePosOld_[n] - mean_;
426  var += (diff.ElementwiseProduct(diff));
427  }
428  var /= (double)(Nsamples_-start-1);
429  return true;
430 }
431 
433 {
434  double mean=0.0;
435  unsigned start=(unsigned)rint(Nsamples_*(defaultInitFraction_+
436  importanceFraction_));
437  if (Nsamples_-start<=0){
438  BCDOUT(D_COND_MEANVAR, "not enough samples "<<Nsamples_-start<<endl);
439  return -1.0;
440  }
441  Vector<double> diff;
442 
443  for (unsigned n=start;n<Nsamples_;n++) {
444  //cout << setw(5) << n << setw(15) << sampleWeights_[n]<<endl;
445  mean += sampleWeights_[n];
446  }
447  mean /= (double)(Nsamples_-start);
448  return mean;
449 }
bool GetWeightedVariance(Vector< double > &var) const
Returns the "weighted variance" of the sample positions in var.
int InitPrior()
sets the sample positions for first time step
int Init(unsigned int nrSamples)
Init the Condensation directly after the constructor with the desired number of samples; the more samples, the better the approximation of the real densities.
virtual void AddOffsets_()
adds offSet_ to the samplePosNew_, can be overwritten if some other offset mechanism is needed ...
void SetDefaultInitFraction(double fraction)
Set the amount of samples, whose position is always taken by the prior distribution.
void Process()
This really does one iteration of Condensation.
double GetMeanWeight() const
Returns the mean weight of all samples.
virtual void AddDiffusions_()
Adds gaussian noise to the position, can be overwritten if some other (i.e.
void ElementwiseProduct(const Vector< T > &arg, Vector< T > &dest) const
multiply this with arg elementwise and store the result in dest
Definition: Vector.hh:491
virtual ~Condensation()
void SetImportanceSampleFraction(double fraction)
Only specify a non-zero value if EvaluateImportanceWeights() is overloaded.
void SetZero()
equivalent to matrix call
Definition: Vector.hh:156
Vector< T > & newsize(Subscript N)
Definition: vec.h:220
double GetRealTime() const
return real time (=wall time clock) in usec JW For Win32: real-time is measured differently from user...
virtual void CalculateBaseWeights_()
Once all the unweighted sample positions have been computed using predict_new_bases, this routine computes the weights by evaluating the observation density at each of the positions.
double GetVariance() const
returns the weighted variance as a scalar
class TimeMeasure contains functions for timing real time and cpu time.
Definition: TimeMeasure.hh:111