// DMP_BBO library — BasisFunction.cpp
// Implementations of the Gaussian and Cosine basis-function activation routines.
25 #include "BasisFunction.hpp"
26 
27 #include <iostream>
28 
29 #include <eigen3/Eigen/SVD>
30 #include <eigen3/Eigen/LU>
31 
32 using namespace Eigen;
33 using namespace std;
34 
35 namespace DmpBbo {
36 
37 namespace BasisFunction {
38 
39 void Gaussian::activations(
40  const std::vector<Eigen::VectorXd>& mus,
41  const std::vector<Eigen::MatrixXd>& covars,
42  std::vector<double> priors,
43  const Eigen::Ref<const Eigen::MatrixXd>& inputs,
44  Eigen::MatrixXd& kernel_activations,
45  bool normalized_basis_functions)
46 {
47 
48  unsigned int n_basis_functions = mus.size();
49  int n_samples = inputs.rows();
50 
51  assert(n_basis_functions>0);
52  assert(n_basis_functions==covars.size());
53 #ifndef NDEBUG // Variables below are only required for asserts; check for NDEBUG to avoid warnings.
54  int n_dims = mus[0].size();
55 #endif
56  assert(n_dims==covars[0].cols());
57  assert(n_dims==covars[0].rows());
58  assert(n_dims==inputs.cols());
59 
60  kernel_activations.resize(n_samples,n_basis_functions);
61 
62  if (normalized_basis_functions && n_basis_functions==1)
63  {
64  // Locally Weighted Regression with only one basis function is pretty odd.
65  // Essentially, you are taking the "Locally Weighted" part out of the regression, and it becomes
66  // standard least squares
67  // Anyhow, for those that still want to "abuse" LWR as R (i.e. without LW), we explicitly
68  // set the normalized kernels to 1 here, to avoid numerical issues in the normalization below.
69  // (normalizing a Gaussian basis function with itself leads to 1 everywhere).
70  kernel_activations.fill(1.0);
71  return;
72  }
73 
74  VectorXd mu,diff,exp_term;
75  MatrixXd covar_inv;
76  double prior;
77 
78  for (unsigned int bb=0; bb<n_basis_functions; bb++)
79  {
80  mu = mus[bb];
81  covar_inv = covars[bb].inverse();
82  prior = priors[bb];
83  for (int tt=0; tt<n_samples; tt++)
84  {
85  // Here, we compute the values of a (unnormalized) multi-variate Gaussian:
86  // activation = exp(-0.5*(x-mu)*Sigma^-1*(x-mu))
87  diff = inputs.row(tt)-mu.transpose();
88  exp_term = -0.5*diff.transpose()*covar_inv*diff;
89  assert(exp_term.size()==1);
90  kernel_activations(tt,bb) = prior*exp(exp_term[0]);
91  }
92  }
93 
94  if (normalized_basis_functions)
95  {
96  // Compute sum for each row (each value in input_vector)
97  MatrixXd sum_kernel_activations = kernel_activations.rowwise().sum(); // n_samples x 1
98 
99  // Add small number to avoid division by zero. Not full-proof...
100  if ((sum_kernel_activations.array()==0).any())
101  sum_kernel_activations.array() += sum_kernel_activations.maxCoeff()/100000.0;
102 
103  // Normalize for each row (each value in input_vector)
104  kernel_activations = kernel_activations.array()/sum_kernel_activations.replicate(1,n_basis_functions).array();
105  }
106 
107 
108 }
109 
110 void Gaussian::activations(const Eigen::MatrixXd& centers, const Eigen::MatrixXd& widths, const Eigen::Ref<const Eigen::MatrixXd>& inputs, Eigen::MatrixXd& kernel_activations, bool normalized_basis_functions, bool asymmetric_kernels)
111 {
112  ENTERING_REAL_TIME_CRITICAL_CODE
113 
114  // Check and set sizes
115  // centers = n_basis_functions x n_dim
116  // widths = n_basis_functions x n_dim
117  // inputs = n_samples x n_dim
118  // activations = n_samples x n_basis_functions
119  int n_basis_functions = centers.rows();
120  int n_samples = inputs.rows();
121  int n_dims = centers.cols();
122  assert( (n_basis_functions==widths.rows()) & (n_dims==widths.cols()) );
123  assert( (n_samples==inputs.rows() ) & (n_dims==inputs.cols()) );
124 
125  // If kernel_activations is passed with the right size, this
126  // function will not allocate memory.
127  kernel_activations.resize(n_samples,n_basis_functions);
128 
129  if (normalized_basis_functions && n_basis_functions==1)
130  {
131  // Locally Weighted Regression with only one basis function is pretty odd.
132  // Essentially, you are taking the "Locally Weighted" part out of the regression, and it becomes
133  // standard least squares
134  // Anyhow, for those that still want to "abuse" LWR as R (i.e. without LW), we explicitly
135  // set the normalized kernels to 1 here, to avoid numerical issues in the normalization below.
136  // (normalizing a Gaussian basis function with itself leads to 1 everywhere).
137  kernel_activations.fill(1.0);
138  EXITING_REAL_TIME_CRITICAL_CODE
139  return;
140  }
141 
142 
143  double c,w,x;
144  for (int bb=0; bb<n_basis_functions; bb++)
145  {
146 
147  // Here, we compute the values of a (unnormalized) multi-variate Gaussian:
148  // activation = exp(-0.5*(x-mu)*Sigma^-1*(x-mu))
149  // Because Sigma is diagonal in our case, this simplifies to
150  // activation = exp(\sum_d=1^D [-0.5*(x_d-mu_d)^2/Sigma_(d,d)])
151  // = \prod_d=1^D exp(-0.5*(x_d-mu_d)^2/Sigma_(d,d))
152  // This last product is what we compute below incrementally
153 
154  kernel_activations.col(bb).fill(1.0);
155  for (int i_dim=0; i_dim<n_dims; i_dim++)
156  {
157  c = centers(bb,i_dim);
158  for (int i_s=0; i_s<n_samples; i_s++)
159  {
160  x = inputs(i_s,i_dim);
161  w = widths(bb,i_dim);
162 
163  if (asymmetric_kernels && x<c && bb>0)
164  // Get the width of the previous basis function
165  // This is the part that makes it assymetric
166  w = widths(bb-1,i_dim);
167 
168  kernel_activations(i_s,bb) *= exp(-0.5*pow(x-c,2)/(w*w));
169  }
170  }
171  }
172 
173 
174  if (normalized_basis_functions)
175  {
176  // Normalize the basis value; they should sum to 1.0 for each time step.
177  double sum_kernel_activations;
178  for (int i_sample=0; i_sample<n_samples; i_sample++)
179  {
180  sum_kernel_activations = kernel_activations.row(i_sample).sum();
181  for (int i_basis=0; i_basis<n_basis_functions; i_basis++)
182  {
183  if (sum_kernel_activations==0.0)
184  // Apparently, no basis function was active. Set all to same value
185  kernel_activations(i_sample,i_basis) = 1.0/n_basis_functions;
186  else
187  // Standard case, normalize so that they sum to 1.0
188  kernel_activations(i_sample,i_basis) /= sum_kernel_activations;
189 
190  }
191  }
192  }
193 
194  EXITING_REAL_TIME_CRITICAL_CODE
195 }
196 
197 void Cosine::activations(
198  const std::vector<Eigen::MatrixXd>& angular_frequencies,
199  const std::vector<Eigen::VectorXd>& phases,
200  const Eigen::Ref<const Eigen::MatrixXd>& inputs,
201  Eigen::MatrixXd& activations)
202 {
203  unsigned int n_basis_functions = angular_frequencies.size();
204  int n_samples = inputs.rows();
205 
206  assert(n_basis_functions>0);
207  assert(phases.size()==n_basis_functions);
208  assert(phases[0].size()==1);
209  // input_cols is input dim
210  assert(angular_frequencies[0].size()==inputs.cols());
211 
212 
213  activations.resize(n_samples,n_basis_functions);
214 
215  for (unsigned int bb=0; bb<n_basis_functions; bb++)
216  {
217  for (int i_s=0; i_s<n_samples; i_s++)
218  {
219  activations(i_s,bb) = cos(angular_frequencies[bb].row(0).dot(inputs.row(i_s)) + phases[bb][0]);
220  }
221  }
222 
223 }
224 
225 void Cosine::activations(
226  const Eigen::MatrixXd& angular_frequencies,
227  const Eigen::VectorXd& phases,
228  const Eigen::Ref<const Eigen::MatrixXd>& inputs,
229  Eigen::MatrixXd& activations)
230 {
231  // Activations for each basis function are computed with:
232  // activation(bb) = cos(inputs(bb)*freqs(bb).transpose() + phase(bb))
233  activations = inputs * angular_frequencies.transpose();
234  activations.rowwise() += phases.transpose();
235  activations = activations.array().cos();
236 }
237 
238 
239 } // namespace BasisFunction
240 
241 } // namespace DmpBbo
242 
243 
// The corresponding declarations live in the BasisFunction header file (BasisFunction.hpp).