pf.c
/***************************************************************************
 *  pf.c: Simple particle filter for localization
 *
 *  Created: Wed May 16 16:04:41 2012
 *  Copyright  2000  Brian Gerkey
 *             2000  Kasper Stoy
 *             2012  Tim Niemueller [www.niemueller.de]
 ****************************************************************************/

/*  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Library General Public License for more details.
 *
 *  Read the full text in the LICENSE.GPL file in the doc directory.
 */

/* From:
 * Player - One Hell of a Robot Server (LGPL)
 * Copyright (C) 2000  Brian Gerkey & Kasper Stoy
 * gerkey@usc.edu    kaspers@robotics.usc.edu
 */
/**************************************************************************
 * Desc: Simple particle filter for localization.
 * Author: Andrew Howard
 * Date: 10 Dec 2002
 *************************************************************************/

#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>

#include "pf.h"
#include "pf_pdf.h"
#include "pf_kdtree.h"

/// @cond EXTERNAL

// Compute the required number of samples, given that there are k bins
// with samples in them.
static int pf_resample_limit(pf_t *pf, int k);

// Re-compute the cluster statistics for a sample set
static void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set);


// Create a new filter
pf_t *pf_alloc(int min_samples, int max_samples,
               double alpha_slow, double alpha_fast,
               pf_init_model_fn_t random_pose_fn, void *random_pose_data)
{
  int i, j;
  pf_t *pf;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  srand48(time(NULL));

  pf = calloc(1, sizeof(pf_t));

  pf->random_pose_fn = random_pose_fn;
  pf->random_pose_data = random_pose_data;

  pf->min_samples = min_samples;
  pf->max_samples = max_samples;

  // Control parameters for the population size calculation.  [err] is
  // the maximum error between the true distribution and the estimated
  // distribution.  [z] is the upper standard normal quantile for (1 - p),
  // where p is the probability that the error on the estimated
  // distribution will be less than [err].
  pf->pop_err = 0.01;
  pf->pop_z = 3;

  pf->current_set = 0;
  for (j = 0; j < 2; j++)
  {
    set = pf->sets + j;

    set->sample_count = max_samples;
    set->samples = calloc(max_samples, sizeof(pf_sample_t));

    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      sample->pose.v[0] = 0.0;
      sample->pose.v[1] = 0.0;
      sample->pose.v[2] = 0.0;
      sample->weight = 1.0 / max_samples;
    }

    // HACK: is 3 times max_samples enough?
    set->kdtree = pf_kdtree_alloc(3 * max_samples);

    set->cluster_count = 0;
    set->cluster_max_count = max_samples;
    set->clusters = calloc(set->cluster_max_count, sizeof(pf_cluster_t));

    set->mean = pf_vector_zero();
    set->cov = pf_matrix_zero();
  }

  pf->w_slow = 0.0;
  pf->w_fast = 0.0;

  pf->alpha_slow = alpha_slow;
  pf->alpha_fast = alpha_fast;

  return pf;
}
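
/* Usage sketch: a typical filter lifecycle.  Here uniform_pose() and map
 * are hypothetical placeholders for a user-supplied pf_init_model_fn_t and
 * its data; the alpha values (0.001 slow, 0.1 fast) follow the ranges
 * suggested for augmented MCL in Probabilistic Robotics.
 *
 *   pf_t *pf = pf_alloc(100, 5000, 0.001, 0.1, uniform_pose, map);
 *   pf_vector_t mean = pf_vector_zero();
 *   pf_matrix_t cov  = pf_matrix_zero();
 *   cov.m[0][0] = cov.m[1][1] = 0.5 * 0.5;    // 0.5 m std. dev. in x/y
 *   cov.m[2][2] = (M_PI / 12) * (M_PI / 12);  // 15 deg std. dev. in theta
 *   pf_init(pf, &mean, &cov);
 *   // ... alternate pf_update_action() / pf_update_sensor() /
 *   //     pf_update_resample() as odometry and sensor data arrive ...
 *   pf_free(pf);
 */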


// Free an existing filter
void pf_free(pf_t *pf)
{
  int i;

  for (i = 0; i < 2; i++)
  {
    free(pf->sets[i].clusters);
    pf_kdtree_free(pf->sets[i].kdtree);
    free(pf->sets[i].samples);
  }
  free(pf);

  return;
}


// Initialize the filter using a Gaussian
void pf_init(pf_t *pf, pf_vector_t *mean, pf_matrix_t *cov)
{
  int i;
  pf_sample_set_t *set;
  pf_pdf_gaussian_t *pdf;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  pdf = pf_pdf_gaussian_alloc(mean, cov);

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++)
  {
    pf_sample_t *sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = pf_pdf_gaussian_sample(pdf);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  pf_pdf_gaussian_free(pdf);

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  return;
}


// Initialize the filter using some model
void pf_init_model(pf_t *pf, pf_init_model_fn_t init_fn, void *init_data)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = (*init_fn) (init_data);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  return;
}


// Update the filter with some new action
void pf_update_action(pf_t *pf, pf_action_model_fn_t action_fn, void *action_data)
{
  pf_sample_set_t *set;

  set = pf->sets + pf->current_set;

  (*action_fn) (action_data, set);

  return;
}


// Update the filter with some new sensor observation
void pf_update_sensor(pf_t *pf, pf_sensor_model_fn_t sensor_fn, void *sensor_data)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  double total;

  set = pf->sets + pf->current_set;

  // Compute the sample weights
  total = (*sensor_fn) (sensor_data, set);

  if (total > 0.0)
  {
    // Normalize weights
    double w_avg = 0.0;
    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      w_avg += sample->weight;
      sample->weight /= total;
    }
    // Update running averages of the likelihood of samples
    // (Probabilistic Robotics, p. 258)
    w_avg /= set->sample_count;
    if (pf->w_slow == 0.0)
      pf->w_slow = w_avg;
    else
      pf->w_slow += pf->alpha_slow * (w_avg - pf->w_slow);
    if (pf->w_fast == 0.0)
      pf->w_fast = w_avg;
    else
      pf->w_fast += pf->alpha_fast * (w_avg - pf->w_fast);
    //printf("w_avg: %e slow: %e fast: %e\n",
    //       w_avg, pf->w_slow, pf->w_fast);
  }
  else
  {
    //PLAYER_WARN("pdf has zero probability");

    // Handle zero total
    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      sample->weight = 1.0 / set->sample_count;
    }
  }

  return;
}
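
/* Note on w_slow / w_fast: these are long- and short-term exponential
 * averages of the mean likelihood w_avg, as in augmented MCL
 * (Probabilistic Robotics, Table 8.3).  pf_update_resample() below draws a
 * random sample with probability
 *
 *   w_diff = max(0.0, 1.0 - w_fast / w_slow)
 *
 * For example, if the short-term average w_fast = 0.02 has dropped below
 * the long-term average w_slow = 0.05, then w_diff = 1 - 0.02/0.05 = 0.6,
 * and roughly 60% of the resampled particles are drawn from
 * random_pose_fn, allowing recovery from kidnapping or divergence.
 */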


// Resample the distribution
void pf_update_resample(pf_t *pf)
{
  int i;
  double total;
  pf_sample_set_t *set_a, *set_b;
  pf_sample_t *sample_a, *sample_b;

  //double r, c, U;
  //int m;
  //double count_inv;
  double *c;

  double w_diff;

  set_a = pf->sets + pf->current_set;
  set_b = pf->sets + (pf->current_set + 1) % 2;

  // Build up cumulative probability table for resampling.
  // TODO: Replace this with a more efficient procedure
  // (e.g., http://www.network-theory.co.uk/docs/gslref/GeneralDiscreteDistributions.html)
  c = (double *)malloc(sizeof(double) * (set_a->sample_count + 1));
  c[0] = 0.0;
  for (i = 0; i < set_a->sample_count; i++)
    c[i + 1] = c[i] + set_a->samples[i].weight;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set_b->kdtree);

  // Draw samples from set a to create set b.
  total = 0;
  set_b->sample_count = 0;

  w_diff = 1.0 - pf->w_fast / pf->w_slow;
  if (w_diff < 0.0)
    w_diff = 0.0;
  //printf("w_diff: %9.6f\n", w_diff);

  // Can't (easily) combine low-variance sampler with KLD adaptive
  // sampling, so we'll take the more traditional route.
  /*
  // Low-variance resampler, taken from Probabilistic Robotics, p. 110
  count_inv = 1.0 / set_a->sample_count;
  r = drand48() * count_inv;
  c = set_a->samples[0].weight;
  i = 0;
  m = 0;
  */
  while (set_b->sample_count < pf->max_samples)
  {
    sample_b = set_b->samples + set_b->sample_count++;

    if (drand48() < w_diff)
      sample_b->pose = (pf->random_pose_fn)(pf->random_pose_data);
    else
    {
      // Can't (easily) combine low-variance sampler with KLD adaptive
      // sampling, so we'll take the more traditional route.
      /*
      // Low-variance resampler, taken from Probabilistic Robotics, p. 110
      U = r + m * count_inv;
      while (U > c)
      {
        i++;
        // Handle wrap-around by resetting counters and picking a new random
        // number
        if (i >= set_a->sample_count)
        {
          r = drand48() * count_inv;
          c = set_a->samples[0].weight;
          i = 0;
          m = 0;
          U = r + m * count_inv;
          continue;
        }
        c += set_a->samples[i].weight;
      }
      m++;
      */

      // Naive discrete event sampler
      double r;
      r = drand48();
      for (i = 0; i < set_a->sample_count; i++)
      {
        if ((c[i] <= r) && (r < c[i + 1]))
          break;
      }
      assert(i < set_a->sample_count);

      sample_a = set_a->samples + i;

      assert(sample_a->weight > 0);

      // Add sample to list
      sample_b->pose = sample_a->pose;
    }

    sample_b->weight = 1.0;
    total += sample_b->weight;

    // Add sample to histogram
    pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);

    // See if we have enough samples yet
    if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count))
      break;
  }

  // Reset averages, to avoid spiraling off into complete randomness.
  if (w_diff > 0.0)
    pf->w_slow = pf->w_fast = 0.0;

  //fprintf(stderr, "\n\n");

  // Normalize weights
  for (i = 0; i < set_b->sample_count; i++)
  {
    sample_b = set_b->samples + i;
    sample_b->weight /= total;
  }

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set_b);

  // Use the newly created sample set
  pf->current_set = (pf->current_set + 1) % 2;

  free(c);
  return;
}
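
/* Sketch for the TODO above: the naive sampler costs O(n) per draw.  Since
 * c[] is non-decreasing with c[0] = 0 and c[n] equal to the total weight
 * (1.0 after normalization), a binary search finds the index i with
 * c[i] <= r < c[i+1] in O(log n).  Hypothetical helper, assuming
 * 0 <= r < c[n]:
 *
 *   static int pf_cumulative_find(const double *c, int n, double r)
 *   {
 *     int lo = 0, hi = n;        // invariant: c[lo] <= r < c[hi]
 *     while (hi - lo > 1) {
 *       int mid = lo + (hi - lo) / 2;
 *       if (c[mid] <= r)
 *         lo = mid;
 *       else
 *         hi = mid;
 *     }
 *     return lo;
 *   }
 *
 * The linear scan in the loop above would then become
 * i = pf_cumulative_find(c, set_a->sample_count, drand48());
 */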


// Compute the required number of samples, given that there are k bins
// with samples in them.  This is taken directly from Fox et al.
int pf_resample_limit(pf_t *pf, int k)
{
  double a, b, c, x;
  int n;

  if (k <= 1)
    return pf->max_samples;

  a = 1;
  b = 2 / (9 * ((double) k - 1));
  c = sqrt(2 / (9 * ((double) k - 1))) * pf->pop_z;
  x = a - b + c;

  n = (int) ceil((k - 1) / (2 * pf->pop_err) * x * x * x);

  if (n < pf->min_samples)
    return pf->min_samples;
  if (n > pf->max_samples)
    return pf->max_samples;

  return n;
}
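
/* Note: this is the sample-size bound from Fox's KLD-sampling ("Adapting
 * the Sample Size in Particle Filters Through KLD-Sampling"), using the
 * Wilson-Hilferty approximation of the chi-square quantile:
 *
 *   n = ceil( (k-1)/(2*pop_err) *
 *             (1 - 2/(9(k-1)) + sqrt(2/(9(k-1))) * pop_z)^3 )
 *
 * With the defaults pop_err = 0.01, pop_z = 3 and, say, k = 50 occupied
 * bins, this gives n of roughly 4200; the bound shrinks as the particles
 * concentrate into fewer bins.
 */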


// Re-compute the cluster statistics for a sample set
void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set)
{
  int i, j, k;
  pf_cluster_t *cluster;

  // Workspace
  double m[4], c[2][2];
  size_t count;
  double weight;

  // Cluster the samples
  pf_kdtree_cluster(set->kdtree);

  // Initialize cluster stats
  set->cluster_count = 0;

  for (i = 0; i < set->cluster_max_count; i++)
  {
    cluster = set->clusters + i;
    cluster->count = 0;
    cluster->weight = 0;
    cluster->mean = pf_vector_zero();
    cluster->cov = pf_matrix_zero();

    for (j = 0; j < 4; j++)
      cluster->m[j] = 0.0;
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->c[j][k] = 0.0;
  }

  // Initialize overall filter stats
  count = 0;
  weight = 0.0;
  set->mean = pf_vector_zero();
  set->cov = pf_matrix_zero();
  for (j = 0; j < 4; j++)
    m[j] = 0.0;
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      c[j][k] = 0.0;

  // Compute cluster stats
  for (i = 0; i < set->sample_count; i++)
  {
    pf_sample_t *sample = set->samples + i;

    //printf("%d %f %f %f\n", i, sample->pose.v[0], sample->pose.v[1], sample->pose.v[2]);

    // Get the cluster label for this sample
    int cidx = pf_kdtree_get_cluster(set->kdtree, sample->pose);
    assert(cidx >= 0);
    if (cidx >= set->cluster_max_count)
      continue;
    if (cidx + 1 > set->cluster_count)
      set->cluster_count = cidx + 1;

    cluster = set->clusters + cidx;

    cluster->count += 1;
    cluster->weight += sample->weight;

    count += 1;
    weight += sample->weight;

    // Compute mean
    cluster->m[0] += sample->weight * sample->pose.v[0];
    cluster->m[1] += sample->weight * sample->pose.v[1];
    cluster->m[2] += sample->weight * cos(sample->pose.v[2]);
    cluster->m[3] += sample->weight * sin(sample->pose.v[2]);

    m[0] += sample->weight * sample->pose.v[0];
    m[1] += sample->weight * sample->pose.v[1];
    m[2] += sample->weight * cos(sample->pose.v[2]);
    m[3] += sample->weight * sin(sample->pose.v[2]);

    // Compute covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
      {
        cluster->c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
        c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
      }
  }

  // Normalize
  for (i = 0; i < set->cluster_count; i++)
  {
    cluster = set->clusters + i;

    cluster->mean.v[0] = cluster->m[0] / cluster->weight;
    cluster->mean.v[1] = cluster->m[1] / cluster->weight;
    cluster->mean.v[2] = atan2(cluster->m[3], cluster->m[2]);

    cluster->cov = pf_matrix_zero();

    // Covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->cov.m[j][k] = cluster->c[j][k] / cluster->weight -
          cluster->mean.v[j] * cluster->mean.v[k];

    // Covariance in angular components; I think this is the correct
    // formula for circular statistics.
    cluster->cov.m[2][2] = -2 * log(sqrt(cluster->m[2] * cluster->m[2] +
                                         cluster->m[3] * cluster->m[3]));

    //printf("cluster %d %d %f (%f %f %f)\n", i, cluster->count, cluster->weight,
    //       cluster->mean.v[0], cluster->mean.v[1], cluster->mean.v[2]);
    //pf_matrix_fprintf(cluster->cov, stdout, "%e");
  }

  // Compute overall filter stats
  set->mean.v[0] = m[0] / weight;
  set->mean.v[1] = m[1] / weight;
  set->mean.v[2] = atan2(m[3], m[2]);

  // Covariance in linear components
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      set->cov.m[j][k] = c[j][k] / weight - set->mean.v[j] * set->mean.v[k];

  // Covariance in angular components; I think this is the correct
  // formula for circular statistics.
  set->cov.m[2][2] = -2 * log(sqrt(m[2] * m[2] + m[3] * m[3]));

  return;
}
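
/* Note on the angular terms: this is directional statistics.  With
 * normalized weights, (m[2], m[3]) is the mean of (cos theta, sin theta),
 * so atan2(m[3], m[2]) is the mean heading and R = sqrt(m[2]^2 + m[3]^2)
 * is the mean resultant length (R -> 1 when all headings agree, R -> 0
 * when they are spread out).  For a wrapped normal distribution
 * R = exp(-sigma^2 / 2), so -2*ln(R) recovers the angular variance.
 * Caveat: the per-cluster version above plugs in the unnormalized sums
 * (cluster->m[2,3] are not divided by cluster->weight), so its angular
 * variance is only comparable when the cluster holds most of the weight.
 */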


// Compute the CEP statistics (mean and variance).
void pf_get_cep_stats(pf_t *pf, pf_vector_t *mean, double *var)
{
  int i;
  double mn, mx, my, mrr;
  pf_sample_set_t *set;

  set = pf->sets + pf->current_set;

  mn = 0.0;
  mx = 0.0;
  my = 0.0;
  mrr = 0.0;

  for (i = 0; i < set->sample_count; i++)
  {
    pf_sample_t *sample = set->samples + i;

    mn += sample->weight;
    mx += sample->weight * sample->pose.v[0];
    my += sample->weight * sample->pose.v[1];
    mrr += sample->weight * sample->pose.v[0] * sample->pose.v[0];
    mrr += sample->weight * sample->pose.v[1] * sample->pose.v[1];
  }

  mean->v[0] = mx / mn;
  mean->v[1] = my / mn;
  mean->v[2] = 0.0;

  *var = mrr / mn - (mx * mx / (mn * mn) + my * my / (mn * mn));

  return;
}
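
/* Note: with weight sum mn, this returns the weighted x/y mean and the
 * scalar variance
 *
 *   var = E[x^2 + y^2] - (E[x]^2 + E[y]^2) = Var(x) + Var(y),
 *
 * i.e. the expected squared distance of a sample from the weighted mean --
 * the radial spread used for circular-error-probable (CEP) style
 * confidence measures.
 */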


// Get the statistics for a particular cluster.
int pf_get_cluster_stats(pf_t *pf, int clabel, double *weight,
                         pf_vector_t *mean, pf_matrix_t *cov)
{
  pf_sample_set_t *set;
  pf_cluster_t *cluster;

  set = pf->sets + pf->current_set;

  if (clabel >= set->cluster_count)
    return 0;
  cluster = set->clusters + clabel;

  *weight = cluster->weight;
  *mean = cluster->mean;
  *cov = cluster->cov;

  return 1;
}
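
/* Usage sketch: extracting the best pose hypothesis after an update by
 * picking the heaviest cluster (the return value of pf_get_cluster_stats()
 * terminates the loop once clabel runs past cluster_count):
 *
 *   int i;
 *   double w, best_w = 0.0;
 *   pf_vector_t mean, best_mean = pf_vector_zero();
 *   pf_matrix_t cov;
 *   for (i = 0; pf_get_cluster_stats(pf, i, &w, &mean, &cov); i++) {
 *     if (w > best_w) {
 *       best_w = w;
 *       best_mean = mean;
 *     }
 *   }
 *   // best_mean now holds (x, y, theta) of the most likely cluster
 */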

/// @endcond