Basic Image AlgorithmS Library  2.8.0
 All Classes Namespaces Functions Variables Typedefs Enumerations Enumerator Friends Groups Pages
Condensation.hh
1 /*
2 This file is part of the BIAS library (Basic ImageAlgorithmS).
3 
4 Copyright (C) 2003-2009 (see file CONTACT for details)
5 Multimediale Systeme der Informationsverarbeitung
6 Institut fuer Informatik
7 Christian-Albrechts-Universitaet Kiel
8 
9 
10 BIAS is free software; you can redistribute it and/or modify
11 it under the terms of the GNU Lesser General Public License as published by
12 the Free Software Foundation; either version 2.1 of the License, or
13 (at your option) any later version.
14 
15 BIAS is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU Lesser General Public License for more details.
19 
20 You should have received a copy of the GNU Lesser General Public License
21 along with BIAS; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24 #ifndef _CONDENSATION_GENERAL_HH_
25 #define _CONDENSATION_GENERAL_HH_
26 #include <Base/Common/BIASpragmaStart.hh>
27 
28 #include <vector>
29 #include "Base/Debug/Debug.hh"
30 #include "Base/Math/Random.hh"
31 #include "Base/Math/Vector.hh"
32 
33 //#define TIMING true
34 #ifdef TIMING
35 #include "Base/Debug/TimeMeasure.hh"
36 #endif
37 
38 namespace BIAS {
39 
40  /** @class Condensation
41  * @ingroup g_stateestimation
42  @brief CONDENSATION (CONditional DENSity EstimATION) is a state estimator
43  very similiar to the Kalman Filter, not only for Gaussian-, but also for
44  any multi-modal distribution.
45  Also it is a rather simple algorithm.
46 
47  The current state of a system, described by a state vector, is
48  estimated using a certain number of samples, which approximate the real
49  densities. This is done by incorporating measurements ( p(Z|X) )
50  with a prior distribution p(X)= p(X_t-1 | z_t).
51 
52  Usage:
53  -# Write a class derived from Condensation and overload the
54  three necessary functions. See CondensImg and Examples/condensImgExample
55  for examples.
56 
57  -# In your main program, just call Init() once with the desired
58  amount of samples
59  -# optional, set parameter for the initial prior
60  distribution(write your own function)
61 
62  -# call InitPrior(), which calls InitPriorPositions() and
63  precalculates the weigths etc.
64 
65  In the loop:
66 
67  -# call PredictNewSamplePositions();
68 
69  -# GetSamplePositions() and measure at those positions
70 
71  -# call your own necessary functions to give the condensation
72  your observation values, e. g.SetObservation()
73 
74  -# call Process(), which calls EvaluateObservationDensities()
75  and calculates the mean etc.
76 
77  -# Get the real state of the system, e.g. the mean with GetMean()
78  @author Daniel Grest, Feb. 2004
79  */
80 
81  class BIASStateEstimator_EXPORT Condensation : public Debug
82  {
83  public:
84  Condensation();
85  virtual ~Condensation();
86 
87  /** Init the Condensation directly after constructor, with the desired
88  amount of Samples, there more the better the approximation of
89  the real densities.
90  The other Init functions are called here also. */
91  int Init(unsigned int nrSmaples);
92 
93  /** sets the sample positions for first time step */
94  int InitPrior();
95 
96  /* This routine computes all of the new (unweighted) sample
97  positions. For each sample, first a base is chosen, then the new
98  sample position is computed by sampling from the prediction density
99  p(x_t|x_t-1 = base) */
100  int PredictNewSamplePositions();
101 
102  /** all samples are moved by this offset at the prediction step */
104  { offSet_=offSet; }
105 
106  inline const std::vector<BIAS::Vector<double> >&
107  GetSamplePositions() { return samplePosNew_;}
108 
109  /** This really does one iteration of Condensation. It calls
110  PredictSamplePosition() and EvaluateObservationDensity()
111  in the necessary order. */
112  void Process();
113 
114  /** returns the mean state of the density distribution */
115  Vector<double> GetMean() { return mean_;}
116 
117  /** returns the weighted variance as a scalar
118  @author grest */
119  double GetVariance() const;
120 
121  /** Returns the "weighted variance" of the sample positions in var.
122  Initialization samples and importance samples are not used in
123  calculation.
124  @author woelk 12/2004 */
125  bool GetWeightedVariance(Vector<double>& var) const;
126 
127  /** Returns the variance of the sample positions in var.
128  Initialization samples and importance samples are not used in
129  calculation.
130  @author woelk 12/2004 */
131  bool GetVariance(Vector<double>& var) const;
132 
133  /** Returns the mean weight of all samples. Initialization samples and
134  importance samples are not used in calculation.
135  @author woelk 12/2004 */
136  double GetMeanWeight() const;
137 
138  /** Set the amount of samples, whose position is always taken
139  by the prior distribution.
140  Default is 0.2. <br>
141  fraction must be in [0..1]; */
142  void SetDefaultInitFraction(double fraction);
143 
144  /** Only specifiy a value unlike zero, if EvaluateImportanceWeights()
145  is overloaded. */
146  void SetImportanceSampleFraction(double fraction);
147 
148 
149  /** set a new processMean for the PredictSamplePosition step,
150  see SecondOrderARP_()
151  */
152  inline void SetProcessMean(const BIAS::Vector<double> &newProcessMean)
153  { processMean_=newProcessMean; }
154 
155 
156  /** Here u can specify the model and process defaults */
157  virtual void InitModelDefaults() = 0;
158 
159  /** The Prior condition is used to init the first sample positions.
160  Calculates a initial pos for all samples.
161  The prior cond. maybe defined by an image (if state vector is 2-dim) or
162  by a mixture of gaussions or Uniform_distributed or ... or ... */
163  virtual void InitPriorPositions(unsigned int nrOfInitSamples) = 0;
164 
165 
166  /** most important function, here u have to calculate a probability for each
167  sample position based on your observations
168  and assign this prob. as a weight to the sample */
169  virtual void EvaluateObservationDensities()=0;
170 
171  /** If you want to use importance sampling, then overload this function,
172  which is the importance function. It should be a density distribution,
173  which gives hint where to measure the object state.<br>
174  Evaluate here the samplePosOld_, not the new ones <br>
175  IMPORTANT: the scale of the importance weights must be equal to the
176  normal weights, like in EvaluateObservationDensities() */
177  virtual void EvaluateImportanceWeights() {};
178 
179  protected:
180  bool _doSecondOrderARP; // should SecondOrderARP_ be used
181  /** question: real pos_tminus2 of each sample
182  or better old mean value for all samples? */
183  inline void SecondOrderARP_(unsigned int indexOldSample,
184  Vector<double> &newSample);
185 
186  /** The process model for a first-order auto-regressive process is:<br>
187  x_{t+1} - mean = (x_t - mean)*scaling + sigma*w_t <br>
188  where w_t is unit iid Gaussian noise, which is added by AddDiffusion
189  */
190  //void FirstOrderARP_(unsigned int indexOldSample,
191  // Vector<double> &new_sample);
192 
193  /** This is binary search using cumulative probabilities to pick a base
194  sample. The use of this routine makes Condensation O(NlogN) where N
195  is the number of samples. It is probably better to pick base
196  samples deterministically, since then the algorithm is O(N) and
197  probably marginally more efficient, but this routine is kept here
198  for conceptual simplicity and because it maps better to the
199  published literature. */
200  inline unsigned int PickOneSample_();
201 
202 
203  inline unsigned int PickImportanceSample_();
204 
205  /** Once all the unweighted sample positions have been computed using
206  predict_new_bases, this routine computes the weights by evaluating
207  the observation density at each of the positions. Cumulative
208  probabilities are also computed at the same time, to permit an
209  efficient implementation of pick_base_sample using binary
210  search. */
211  virtual void CalculateBaseWeights_();
212 
213  /** Adds gaussian noise to the position, _can_ be overwritten if some
214  other (i.e. non gaussian) noise is needed*/
215  virtual void AddDiffusions_();
216 
217  /** adds offSet_ to the samplePosNew_, can be overwritten if some other
218  offset mechanism is needed */
219  virtual void AddOffsets_();
220 
221  inline void CalculateMean_();
222 
223  unsigned int Nsamples_;
224  unsigned int stateDim_;
225  std::vector<Vector<double> > samplePosNew_;
226  std::vector<Vector<double> > samplePosOld_;
227  std::vector<Vector<double> > samplePosTminus2_;
228  // indexTminus2_[i] is the index in samplePosTminus2_ for sample
229  // samplePosOld_[i]
230  std::vector<unsigned int> indexTminus2_,oldIndices_;
231 
233 
234  std::vector<double> sampleWeights_, cumulProbArray_;
235  std::vector<double> sampleImportanceWeights_, cumulProbArrayImportance_;
236  std::vector<double> correctionOfImportanceWeights_;
237 
238  double sumOfWeights_, sumOfImportanceWeights_;
239 
240  /** Process parameters */
241  Vector<double> diffusionSigma_, processMean_;
242  Vector<double> processFirstOrderScale_, processSecondOrderScale_;
243 
244  bool initial_;
247 
249  double *_randomArray; // size Nsamples
251 
252  inline static int compdouble(const void*l, const void*r)
253  { return (*((double*)l))>(*((double*)r)); }
254 
255  };
256 
257 
258  inline unsigned int Condensation::PickOneSample_()
259  {
260  if (sumOfWeights_<1E-12)
261  return (int)random_.GetUniformDistributed(0, Nsamples_);
262 
263  double choice = random_.GetUniformDistributed(0.0, sumOfWeights_);
264 
265  register unsigned int low, middle, high;
266 
267  low = 0;
268  high = Nsamples_-1;
269 
270  while (high>(low+1)) {
271  middle = (high+low) >> 1;
272  if (choice > cumulProbArray_[middle])
273  low = middle;
274  else high = middle;
275  }
276 
277  if (cumulProbArray_[low]==cumulProbArray_[high])
278  return low;
279  else return high;
280  }
281 
283  {
285 
286  unsigned int low, middle, high;
287 
288  low = 0;
289  high = Nsamples_;
290 
291  while (high>(low+1)) {
292  middle = (high+low)/2;
293  if (choice > cumulProbArrayImportance_[middle])
294  low = middle;
295  else high = middle;
296  }
297 
299  return low;
300  else return high;
301  }
302 
303  inline void Condensation::SecondOrderARP_(unsigned int indexOldSample,
304  Vector<double> &newSample)
305  {
306  // mean_ is in this moment the mean_ of the previous iteration
307  for (unsigned int d=0;d<stateDim_;d++){
308  BCDOUT(D_COND_DEBUG,"in SOARP d: "<<d<<std::endl);
309  // newSample[d]= processMean_[d] +
310  // (samplePosOld_[indexOldSample][d] - processMean_[d]) *
311  // processFirstOrderScale_[d] +
312  // (mean_[d] - meanTminus1_[d]) * processSecondOrderScale_[d];
313 
314  newSample[d]= processMean_[d] +
315  (samplePosOld_[indexOldSample][d] - processMean_[d]) *
317  (samplePosOld_[indexOldSample][d]-
318  samplePosTminus2_[indexTminus2_[indexOldSample]][d]) *
320  }
321  }
322 
323 
325  {
326 #ifdef TIMING
327  TimeMeasure timer;
328  timer.Start();
329 #endif
330 
331  if (sumOfWeights_<1E-20||sumOfWeights_!=sumOfWeights_) return; // new mean is old mean
332  mean_.Set(0);
333  for (unsigned int n=0;n<Nsamples_;n++)
336 
337 #ifdef TIMING
338  timer.Stop();
339  std::cerr << "Condensation::CalculateMean_() took "<<timer.GetRealTime()<<" us"<<std::endl;
340 #endif
341  }
342 
343 }
344 
345 #include <Base/Common/BIASpragmaEnd.hh>
346 
347 #endif
virtual void EvaluateImportanceWeights()
If you want to use importance sampling, then overload this function, which is the importance function...
BIAS::Vector< double > offSet_
unsigned int PickImportanceSample_()
CONDENSATION (CONditional DENSity EstimATION) is a state estimator very similar to the Kalman Filter...
Definition: Condensation.hh:81
std::vector< double > sampleWeights_
Vector< double > processFirstOrderScale_
unsigned int Nsamples_
void SetPredictionOffset(const BIAS::Vector< double > &offSet)
all samples are moved by this offset at the prediction step
std::vector< Vector< double > > samplePosNew_
Vector< double > meanTminus1_
double GetUniformDistributed(const double min, const double max)
on successive calls return uniform distributed random variable between min and max ...
Definition: Random.hh:84
std::vector< unsigned int > oldIndices_
Vector< double > GetMean()
returns the mean state of the density distribution
Vector< double > processMean_
Vector< double > processSecondOrderScale_
double sumOfImportanceWeights_
unsigned int stateDim_
std::vector< double > correctionOfImportanceWeights_
void SetProcessMean(const BIAS::Vector< double > &newProcessMean)
set a new processMean for the PredictSamplePosition step, see SecondOrderARP_()
void Set(const T &scalar)
Definition: Vector.hh:153
void SecondOrderARP_(unsigned int indexOldSample, Vector< double > &newSample)
question: real pos_tminus2 of each sample or better old mean value for all samples?
double GetRealTime() const
return real time (=wall time clock) in usec JW For Win32: real-time is measured differently from user...
static int compdouble(const void *l, const void *r)
std::vector< double > sampleImportanceWeights_
std::vector< Vector< double > > samplePosOld_
std::vector< unsigned int > indexTminus2_
std::vector< Vector< double > > samplePosTminus2_
Vector< double > mean_
std::vector< double > cumulProbArray_
const std::vector< BIAS::Vector< double > > & GetSamplePositions()
class for producing random numbers from different distributions
Definition: Random.hh:51
class TimeMeasure contains functions for timing real time and cpu time.
Definition: TimeMeasure.hh:111
unsigned int PickOneSample_()
The process model for a first-order auto-regressive process is: x_{t+1} - mean = (x_t - mean)*scalin...
void MultiplyIP(const T &scalar)
in place multiplication with scalar
Definition: Vector.hh:202
std::vector< double > cumulProbArrayImportance_