24 #include <boost/serialization/export.hpp> 25 #include <boost/archive/text_iarchive.hpp> 26 #include <boost/archive/text_oarchive.hpp> 27 #include <boost/archive/xml_iarchive.hpp> 28 #include <boost/archive/xml_oarchive.hpp> 35 #include <eigen3/Eigen/LU> 36 #include <eigen3/Eigen/Cholesky> 47 using namespace Eigen;
// NOTE(review): extraction-garbled fragments. Line 57 is presumably a NULL
// guard before using/deleting model_parameters in a destructor or clone
// path -- TODO confirm against the full source file.
57 if (model_parameters!=NULL)
// Constructor taking GMR model parameters (signature split across lines by
// the extraction; the body lines are missing from this listing).
65 FunctionApproximatorGMR::FunctionApproximatorGMR(
const ModelParametersGMR *
const model_parameters)
76 void FunctionApproximatorGMR::preallocateMatrices(
int n_gaussians,
int n_input_dims,
int n_output_dims)
78 probabilities_prealloc_ = VectorXd::Zero(n_gaussians);
79 probabilities_dot_prealloc_ = VectorXd::Zero(n_gaussians);
80 diff_prealloc_ = VectorXd::Zero(n_input_dims);
81 covar_times_diff_prealloc_ = VectorXd::Zero(n_input_dims);
82 mean_output_prealloc_ = VectorXd::Zero(n_output_dims);
84 covar_input_times_output_ = MatrixXd::Zero(n_input_dims,n_output_dims);
85 covar_output_times_input_ = MatrixXd::Zero(n_output_dims,n_input_dims);
86 covar_output_prealloc_ = MatrixXd::Zero(n_output_dims,n_output_dims);
88 empty_prealloc_ = MatrixXd::Zero(0,0);
// ---------------------------------------------------------------------------
// FunctionApproximatorGMR::train(inputs, targets) -- extraction-garbled
// fragment: braces, blank lines and several statements (e.g. the calls that
// actually run the initialization / EM between lines 148 and 160) are
// missing from this listing.  Code lines below are kept byte-identical.
// ---------------------------------------------------------------------------
// Guard: training twice is not allowed for this approximator.
106 cerr <<
"WARNING: You may not call FunctionApproximatorGMR::train more than once. Doing nothing." << endl;
107 cerr <<
" (if you really want to retrain, call reTrain function instead)" << endl;
// Inputs and targets must have one row per training sample.
111 assert(inputs.rows() == targets.rows());
// Number of Gaussians comes from meta-parameters if available, otherwise
// from already-present model parameters.
121 if(meta_parameters_GMR!=NULL)
122 n_gaussians = meta_parameters_GMR->number_of_gaussians_;
123 else if(model_parameters_GMR!=NULL)
124 n_gaussians = model_parameters_GMR->priors_.size();
126 cerr <<
"FunctionApproximatorGMR::train Something wrong happened, both ModelParameters and MetaParameters are not initialized." << endl;
// The GMM is fit in the joint (input, output) space.
128 int n_dims_in = inputs.cols();
129 int n_dims_out = targets.cols();
130 int n_dims_gmm = n_dims_in + n_dims_out;
// Allocate per-Gaussian means/covariances in the joint space.
133 std::vector<VectorXd> means(n_gaussians);
134 std::vector<MatrixXd> covars(n_gaussians);
135 std::vector<double> priors(n_gaussians);
136 int n_observations = 0;
138 for (
int i = 0; i < n_gaussians; i++)
140 means[i] = VectorXd(n_dims_gmm);
142 covars[i] = MatrixXd(n_dims_gmm, n_dims_gmm);
// Stack inputs and targets column-wise into one joint data matrix.
146 MatrixXd data = MatrixXd(inputs.rows(), n_dims_gmm);
147 data << inputs, targets;
148 n_observations = data.rows();
// NOTE(review): presumably selects first-dimension slicing initialization
// for 1-D inputs (branch body missing here) -- confirm against full source.
151 if (inputs.cols() == 1)
// Split the fitted joint-space Gaussians into the x / y / y-x sub-blocks
// that Gaussian Mixture Regression needs at prediction time.
160 std::vector<Eigen::VectorXd> means_x(n_gaussians);
161 std::vector<Eigen::VectorXd> means_y(n_gaussians);
162 std::vector<Eigen::MatrixXd> covars_x(n_gaussians);
163 std::vector<Eigen::MatrixXd> covars_y(n_gaussians);
164 std::vector<Eigen::MatrixXd> covars_y_x(n_gaussians);
165 for (
int i_gau = 0; i_gau < n_gaussians; i_gau++)
167 means_x[i_gau] = means[i_gau].segment(0, n_dims_in);
168 means_y[i_gau] = means[i_gau].segment(n_dims_in, n_dims_out);
170 covars_x[i_gau] = covars[i_gau].block(0, 0, n_dims_in, n_dims_in);
171 covars_y[i_gau] = covars[i_gau].block(n_dims_in, n_dims_in, n_dims_out, n_dims_out);
172 covars_y_x[i_gau] = covars[i_gau].block(n_dims_in, 0, n_dims_out, n_dims_in);
// Preallocate prediction-time scratch matrices (real-time safety).
178 preallocateMatrices(n_gaussians,n_dims_in,n_dims_out);
// ---------------------------------------------------------------------------
// FunctionApproximatorGMR::trainIncremental(inputs, targets) -- extraction-
// garbled fragment; code lines kept byte-identical.  When the approximator
// is not yet trained it presumably falls back to a plain train() (line 216).
// ---------------------------------------------------------------------------
216 train(inputs,targets);
// Reload the existing GMM (priors/means/covars) into joint-space form so
// the incremental EM can update it with the new batch.
223 int n_gaussians = model_parameters_GMR->priors_.size();
224 int n_dims_in = inputs.cols();
225 int n_dims_out = targets.cols();
226 int n_dims_gmm = n_dims_in + n_dims_out;
229 std::vector<VectorXd> means(n_gaussians);
230 std::vector<MatrixXd> covars(n_gaussians);
231 std::vector<double> priors(n_gaussians);
232 int n_observations = 0;
233 for (
int i = 0; i < n_gaussians; i++)
235 means[i] = VectorXd(n_dims_gmm);
237 covars[i] = MatrixXd(n_dims_gmm, n_dims_gmm);
// Copy current model parameters into the joint-space representation.
241 for (
int i = 0; i < n_gaussians; i++)
243 means[i].segment(0, n_dims_in) = model_parameters_GMR->means_x_[i];
244 means[i].segment(n_dims_in, n_dims_out) = model_parameters_GMR->means_y_[i];
246 covars[i].block(0, 0, n_dims_in, n_dims_in) = model_parameters_GMR->covars_x_[i];
247 covars[i].block(n_dims_in, n_dims_in, n_dims_out, n_dims_out) = model_parameters_GMR->covars_y_[i];
248 covars[i].block(n_dims_in, 0, n_dims_out, n_dims_in) = model_parameters_GMR->covars_y_x_[i];
250 priors[i] = model_parameters_GMR->priors_[i];
252 n_observations = model_parameters_GMR->n_observations_;
// Joint (input, target) data matrix for the incremental EM update.
255 MatrixXd data = MatrixXd(inputs.rows(), n_dims_gmm);
256 data << inputs, targets;
// Split the updated joint Gaussians back into GMR's x / y / y-x sub-blocks.
262 std::vector<Eigen::VectorXd> means_x(n_gaussians);
263 std::vector<Eigen::VectorXd> means_y(n_gaussians);
264 std::vector<Eigen::MatrixXd> covars_x(n_gaussians);
265 std::vector<Eigen::MatrixXd> covars_y(n_gaussians);
266 std::vector<Eigen::MatrixXd> covars_y_x(n_gaussians);
267 for (
int i_gau = 0; i_gau < n_gaussians; i_gau++)
269 means_x[i_gau] = means[i_gau].segment(0, n_dims_in);
270 means_y[i_gau] = means[i_gau].segment(n_dims_in, n_dims_out);
272 covars_x[i_gau] = covars[i_gau].block(0, 0, n_dims_in, n_dims_in);
273 covars_y[i_gau] = covars[i_gau].block(n_dims_in, n_dims_in, n_dims_out, n_dims_out);
274 covars_y_x[i_gau] = covars[i_gau].block(n_dims_in, 0, n_dims_out, n_dims_in);
// Preallocate prediction-time scratch matrices (real-time safety).
280 preallocateMatrices(n_gaussians,n_dims_in,n_dims_out);
// ---------------------------------------------------------------------------
// normalPDF / normalPDFDamped -- extraction-garbled fragments (the function
// headers are missing from this listing).  Code lines kept byte-identical.
// ---------------------------------------------------------------------------
// normalPDF: Gaussian exponent exp(-0.5 (x-mu)^T Sigma^-1 (x-mu)).
285 MatrixXd covar_inverse = covar.inverse();
286 double output = exp(-0.5*(input-mu).transpose()*covar_inverse*(input-mu));
// Normalization ((2*pi)^d / det(Sigma^-1))^(-1/2); this equals the usual
// ((2*pi)^d * det(Sigma))^(-1/2) because det(Sigma) = 1/det(Sigma^-1).
290 output *= pow(pow(2*M_PI,mu.size())/covar_inverse.determinant(),-0.5);
// normalPDFDamped: same computation, but only when the covariance is
// non-degenerate; otherwise return the smallest positive double so callers
// never end up with an exactly-zero probability.
299 if(covar.determinant() > 0)
301 MatrixXd covar_inverse = covar.inverse();
303 double output = exp(-0.5*(input-mu).transpose()*covar_inverse*(input-mu));
312 output *= pow(pow(2*M_PI,mu.size())/(covar_inverse.determinant()),-0.5);
318 return std::numeric_limits<double>::min();
// predict(inputs, outputs): means only -- passing the empty preallocated
// matrix tells the full predict() below to skip variance computation.
325 ENTERING_REAL_TIME_CRITICAL_CODE
327 predict(inputs,outputs,empty_prealloc_);
328 EXITING_REAL_TIME_CRITICAL_CODE
// predictVariance(inputs, variances): variances only (means skipped).
333 ENTERING_REAL_TIME_CRITICAL_CODE
335 predict(inputs,empty_prealloc_,variances);
336 EXITING_REAL_TIME_CRITICAL_CODE
// ---------------------------------------------------------------------------
// predict(inputs, outputs, variances) -- Gaussian Mixture Regression.
// Extraction-garbled fragment; code lines kept byte-identical.  All scratch
// storage was preallocated, so this section stays allocation-free
// (real-time critical).
// ---------------------------------------------------------------------------
341 ENTERING_REAL_TIME_CRITICAL_CODE
353 cerr <<
"WARNING: You may not call FunctionApproximatorGMR::predict if you have not trained yet. Doing nothing." << endl;
// Empty output/variance matrices mean "do not compute that quantity".
365 bool compute_means =
false;
366 if (outputs.rows()>0)
370 compute_means =
true;
374 bool compute_variance =
false;
375 if (variances.rows()>0)
377 compute_variance =
true;
383 for (
int i_input=0; i_input<inputs.rows(); i_input++)
// Responsibility of each Gaussian for this input:
// prior * N(x; mean_x, covar_x), computed without temporaries.
405 diff_prealloc_ = inputs.row(i_input).transpose() - gmm->means_x_[i_gau];
407 covar_times_diff_prealloc_.noalias() = gmm->covars_x_inv_[i_gau]*diff_prealloc_;
409 probabilities_prealloc_[i_gau] = exp(-0.5*diff_prealloc_.dot(covar_times_diff_prealloc_));
413 probabilities_prealloc_[i_gau] *= gmm->mvgd_scale_[i_gau];
416 probabilities_prealloc_[i_gau] *= gmm->priors_[i_gau];
// Normalize responsibilities so they sum to 1.
421 probabilities_prealloc_ /= probabilities_prealloc_.sum();
// Conditional mean per Gaussian:
//   mean_y + covar_yx * covar_x^-1 * (x - mean_x)
433 diff_prealloc_ = inputs.row(i_input).transpose() - gmm->means_x_[i_gau];
435 covar_times_diff_prealloc_.noalias() = gmm->covars_x_inv_[i_gau]*diff_prealloc_;
437 mean_output_prealloc_.noalias() = gmm->covars_y_x_[i_gau]*covar_times_diff_prealloc_;
439 mean_output_prealloc_ += gmm->means_y_[i_gau];
443 outputs.row(i_input) += probabilities_prealloc_[i_gau] * mean_output_prealloc_;
447 if (compute_variance)
// Conditional covariance: covar_y - covar_yx * covar_x^-1 * covar_yx^T
454 covar_input_times_output_.noalias() = gmm->covars_x_inv_[i_gau]*gmm->covars_y_x_[i_gau].transpose();
456 covar_output_prealloc_.noalias() = gmm->covars_y_x_[i_gau] * covar_input_times_output_;
459 covar_output_prealloc_.noalias() = - covar_output_prealloc_;
461 covar_output_prealloc_ += gmm->covars_y_[i_gau];
463 variances.row(i_input) += probabilities_prealloc_[i_gau]*probabilities_prealloc_[i_gau] * ( covar_output_prealloc_ ).diagonal();
// Clamp tiny negative variances caused by floating-point round-off.
468 if (variances(i_input,i_output_dim)<0.0)
469 variances(i_input,i_output_dim) = 0.0;
476 EXITING_REAL_TIME_CRITICAL_CODE
// predictDot(inputs, outputs, outputs_dot): means and their time derivative;
// the empty preallocated matrix skips variance computation.
481 ENTERING_REAL_TIME_CRITICAL_CODE
483 predictDot(inputs,outputs,outputs_dot,empty_prealloc_);
484 EXITING_REAL_TIME_CRITICAL_CODE
// ---------------------------------------------------------------------------
// predictDot(inputs, outputs, outputs_dot, variances) -- GMR prediction plus
// the derivative of the prediction w.r.t. the (scalar) input.  Extraction-
// garbled fragment; code lines kept byte-identical.
// ---------------------------------------------------------------------------
489 ENTERING_REAL_TIME_CRITICAL_CODE
501 cerr <<
"WARNING: You may not call FunctionApproximatorGMR::predict if you have not trained yet. Doing nothing." << endl;
// The derivative computation below assumes a single input dimension.
508 assert(inputs.cols() == 1);
// Empty output/variance matrices mean "do not compute that quantity".
515 bool compute_means =
false;
516 if (outputs.rows()>0)
522 compute_means =
true;
526 bool compute_variance =
false;
527 if (variances.rows()>0)
529 compute_variance =
true;
535 for (
int i_input=0; i_input<inputs.rows(); i_input++)
// Responsibilities and their derivatives w.r.t. the input.
557 diff_prealloc_ = inputs.row(i_input).transpose() - gmm->means_x_[i_gau];
559 covar_times_diff_prealloc_.noalias() = gmm->covars_x_inv_[i_gau]*diff_prealloc_;
561 probabilities_prealloc_[i_gau] = exp(-0.5*diff_prealloc_.dot(covar_times_diff_prealloc_));
565 probabilities_prealloc_[i_gau] *= gmm->mvgd_scale_[i_gau];
// NOTE(review): `*=` compounds onto whatever value probabilities_dot_prealloc_
// held from the previous call; a plain `=` looks intended here -- confirm
// against the full source before relying on outputs_dot.
568 probabilities_dot_prealloc_[i_gau] *= - probabilities_prealloc_[i_gau] * covar_times_diff_prealloc_(0,0);
571 probabilities_prealloc_[i_gau] *= gmm->priors_[i_gau];
574 probabilities_dot_prealloc_[i_gau] *= gmm->priors_[i_gau];
// Normalize responsibilities; derivatives follow via the quotient rule.
579 probabilities_prealloc_sum_ = probabilities_prealloc_.sum();
580 probabilities_prealloc_ /= probabilities_prealloc_sum_;
583 probabilities_dot_prealloc_sum_ = probabilities_dot_prealloc_.sum();
584 probabilities_dot_prealloc_ = (probabilities_dot_prealloc_ * probabilities_prealloc_sum_ - probabilities_prealloc_ * probabilities_dot_prealloc_sum_)/pow(probabilities_prealloc_sum_,2);
// Conditional mean per Gaussian and its contribution to outputs.
594 diff_prealloc_ = inputs.row(i_input).transpose() - gmm->means_x_[i_gau];
596 covar_times_diff_prealloc_.noalias() = gmm->covars_x_inv_[i_gau]*diff_prealloc_;
598 mean_output_prealloc_.noalias() = gmm->covars_y_x_[i_gau]*covar_times_diff_prealloc_;
600 mean_output_prealloc_ += gmm->means_y_[i_gau];
604 outputs.row(i_input) += probabilities_prealloc_[i_gau] * mean_output_prealloc_;
608 covar_output_times_input_.noalias() = gmm->covars_y_x_[i_gau] * gmm->covars_x_inv_[i_gau];
// Product rule: d(p * mean)/dx = p_dot * mean + p * d(mean)/dx.
610 outputs_dot.row(i_input) += probabilities_dot_prealloc_[i_gau] * mean_output_prealloc_ + probabilities_prealloc_[i_gau] * covar_output_times_input_;
615 if (compute_variance)
// Conditional covariance, as in predict().
622 covar_input_times_output_.noalias() = gmm->covars_x_inv_[i_gau]*gmm->covars_y_x_[i_gau].transpose();
624 covar_output_prealloc_.noalias() = gmm->covars_y_x_[i_gau] * covar_input_times_output_;
627 covar_output_prealloc_.noalias() = - covar_output_prealloc_;
629 covar_output_prealloc_ += gmm->covars_y_[i_gau];
631 variances.row(i_input) += probabilities_prealloc_[i_gau]*probabilities_prealloc_[i_gau] * ( covar_output_prealloc_ ).diagonal();
// Clamp round-off negatives.
636 if (variances(i_input,i_output_dim)<0.0)
637 variances(i_input,i_output_dim) = 0.0;
644 EXITING_REAL_TIME_CRITICAL_CODE
// ---------------------------------------------------------------------------
// firstDimSlicingInit(data, centers, priors, covars) -- initialize the GMM
// by slicing the range of the first input dimension into equal-width bins.
// Extraction-garbled fragment; code lines kept byte-identical.
// ---------------------------------------------------------------------------
648 std::vector<MatrixXd>& covars)
651 VectorXd first_dim = data.col(0);
653 VectorXi assign(data.rows());
656 double min_val = first_dim.minCoeff();
657 double max_val = first_dim.maxCoeff();
// Assign each sample to a bin along the first dimension.
659 for (
int i_first_dim = 0; i_first_dim < first_dim.size(); i_first_dim++)
661 unsigned int center = int((first_dim[i_first_dim]-min_val)/(max_val-min_val)*centers.size());
// The sample at max_val would land one past the last bin; fold it back.
662 if (center==centers.size())
664 assign[i_first_dim] = center;
// Centers = per-bin means.
668 VectorXi nbPoints = VectorXi::Zero(centers.size());
669 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
670 centers[i_gau].setZero();
671 for (
int iData = 0; iData < data.rows(); iData++)
673 centers[assign[iData]] += data.row(iData).transpose();
674 nbPoints[assign[iData]]++;
676 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
677 centers[i_gau] /= nbPoints[i_gau];
// Covariances = per-bin scatter matrices.
// NOTE(review): an empty bin gives nbPoints == 0 and a division by zero --
// presumably the intended data never produces empty bins; confirm.
680 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
681 covars[i_gau].setZero();
682 for (
int iData = 0; iData < data.rows(); iData++)
683 covars[assign[iData]] += (data.row(iData).transpose() - centers[assign[iData]]) * (data.row(iData).transpose() - centers[assign[iData]]).transpose();
684 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
685 covars[i_gau] /= nbPoints[i_gau];
// Small diagonal regularization keeps covariances invertible.
688 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
689 covars[i_gau] += MatrixXd::Identity(covars[i_gau].rows(), covars[i_gau].cols()) * 1e-5;
// Uniform priors.
692 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
693 priors[i_gau] = 1. / centers.size();
// ---------------------------------------------------------------------------
// kMeansInit(data, centers, priors, covars, n_max_iter) -- k-means under the
// Mahalanobis metric of the overall data covariance.  Extraction-garbled
// fragment; code lines kept byte-identical.
// ---------------------------------------------------------------------------
697 std::vector<MatrixXd>& covars,
int n_max_iter)
// Whitening metric: inverse of the overall data covariance.
700 MatrixXd dataCentered = data.rowwise() - data.colwise().mean();
701 MatrixXd dataCov = dataCentered.transpose() * dataCentered / data.rows();
702 MatrixXd dataCovInverse = dataCov.inverse();
// Pick initial centers as random distinct data points.
// NOTE(review): std::random_shuffle was removed in C++17; std::shuffle with
// an explicit URBG is the modern replacement.
704 std::vector<int> dataIndex;
705 for (
int i = 0; i < data.rows(); i++)
706 dataIndex.push_back(i);
707 std::random_shuffle (dataIndex.begin(), dataIndex.end());
709 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
710 centers[i_gau] = data.row(dataIndex[i_gau]);
712 VectorXi assign(data.rows());
715 bool converged =
false;
716 for (
int iIter = 0; iIter < n_max_iter && !converged; iIter++)
// Assignment step: move each point to its nearest center (Mahalanobis).
722 for (
int iData = 0; iData < data.rows(); iData++)
724 VectorXd v = (centers[assign[iData]] - data.row(iData).transpose());
726 double minDist = v.transpose() * dataCovInverse * v;
728 for (
int i_gau = 0; i_gau < (int)centers.size(); i_gau++)
730 if (i_gau == assign[iData])
733 v = (centers[i_gau] - data.row(iData).transpose());
734 double dist = v.transpose() * dataCovInverse * v;
739 assign[iData] = i_gau;
// Update step: recompute centers as means of their assigned points.
745 VectorXi nbPoints = VectorXi::Zero(centers.size());
746 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
747 centers[i_gau].setZero();
748 for (
int iData = 0; iData < data.rows(); iData++)
750 centers[assign[iData]] += data.row(iData).transpose();
751 nbPoints[assign[iData]]++;
753 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
754 centers[i_gau] /= nbPoints[i_gau];
// Per-cluster covariances from the final assignment.
758 VectorXi nbPoints = VectorXi::Zero(centers.size());
759 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
760 covars[i_gau].setZero();
761 for (
int iData = 0; iData < data.rows(); iData++)
763 covars[assign[iData]] += (data.row(iData).transpose() - centers[assign[iData]]) * (data.row(iData).transpose() - centers[assign[iData]]).transpose();
764 nbPoints[assign[iData]]++;
766 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
767 covars[i_gau] /= nbPoints[i_gau];
// Diagonal regularization + uniform priors (as in the other init methods).
770 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
771 covars[i_gau] += MatrixXd::Identity(covars[i_gau].rows(), covars[i_gau].cols()) * 1e-5f;
774 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
775 priors[i_gau] = 1. / centers.size();
// ---------------------------------------------------------------------------
// expectationMaximization(data, centers, priors, covars, n_max_iter) --
// standard EM for a GMM.  Extraction-garbled fragment; code lines kept
// byte-identical.
// ---------------------------------------------------------------------------
779 std::vector<MatrixXd>& covars,
int n_max_iter)
// assign(i_gau, iData): responsibility of Gaussian i_gau for sample iData.
781 MatrixXd assign(centers.size(), data.rows());
784 std::vector<double> E(centers.size());
786 double oldLoglik = -1e10f;
789 for (
int iIter = 0; iIter < n_max_iter; iIter++)
// E-step (the statement filling `assign` is missing from this listing).
796 for (
int iData = 0; iData < data.rows(); iData++)
797 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
// Average log-likelihood, used as the convergence criterion.
802 double sum_tmp = 0.0;
803 for (
int iData = 0; iData < data.rows(); iData++)
805 sum_tmp = assign.col(iData).sum();
806 loglik += log(sum_tmp);
808 loglik /= data.rows();
810 if (fabs(loglik / oldLoglik - 1) < 1e-8f)
// Normalize responsibilities per sample.
813 for (
int iData = 0; iData < data.rows(); iData++)
814 assign.col(iData) /= assign.col(iData).sum();
816 if (fabs(loglik / oldLoglik - 1) < 1e-8f)
// M-step: recompute means, priors, covariances from responsibilities.
820 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
822 centers[i_gau].setZero();
823 covars[i_gau].setZero();
825 E[i_gau] = assign.row(i_gau).sum();
828 for (
int iData = 0; iData < data.rows(); iData++)
830 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
832 centers[i_gau] += assign(i_gau, iData) * data.row(iData).transpose();
833 priors[i_gau] += assign(i_gau, iData);
837 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
839 centers[i_gau] /= assign.row(i_gau).sum();
840 priors[i_gau] /= assign.cols();
843 for (
int iData = 0; iData < data.rows(); iData++)
844 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
845 covars[i_gau] += assign(i_gau, iData) * (data.row(iData).transpose() - centers[i_gau]) * (data.row(iData).transpose() - centers[i_gau]).transpose();
847 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
848 covars[i_gau] /= assign.row(i_gau).sum();
// Diagonal regularization keeps covariances invertible.
851 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
852 covars[i_gau] += MatrixXd::Identity(covars[i_gau].rows(), covars[i_gau].cols()) * 1e-5f;
// ---------------------------------------------------------------------------
// expectationMaximizationIncremental(data, centers, priors, covars,
// n_observations, n_max_iter) -- EM update that merges the new batch with
// the previous model via its sufficient statistics (E_prev).  Extraction-
// garbled fragment; code lines kept byte-identical.
// ---------------------------------------------------------------------------
884 std::vector<MatrixXd>& covars,
int& n_observations,
int n_max_iter)
// Snapshot the previous model so old and new statistics can be blended.
887 std::vector<VectorXd> centers_prev = centers;
888 std::vector<double> priors_prev = priors;
889 std::vector<MatrixXd> covars_prev = covars;
890 int n_observations_prev = n_observations;
891 std::vector<double> E_prev;
892 std::vector<double> E(centers.size());
893 n_observations = data.rows();
895 double oldLoglik = -1e10f;
898 MatrixXd assign(centers.size(), data.rows());
// Effective sample count of the previous model, per Gaussian.
902 E_prev.resize(centers.size());
903 for (
size_t i_gau = 0; i_gau<centers.size(); i_gau++)
904 E_prev[i_gau] = priors_prev[i_gau] * n_observations_prev;
906 for (
int iIter = 0; iIter < n_max_iter; iIter++)
// E-step on the new batch (statement filling `assign` missing here).
913 for (
int iData = 0; iData < data.rows(); iData++)
914 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
// Average log-likelihood for the convergence test.
919 double sum_tmp = 0.0;
920 for (
int iData = 0; iData < data.rows(); iData++)
922 sum_tmp = assign.col(iData).sum();
923 loglik += log(sum_tmp);
925 loglik /= data.rows();
927 for (
int iData = 0; iData < data.rows(); iData++)
928 assign.col(iData) /= assign.col(iData).sum();
930 if (fabs(loglik / oldLoglik - 1) < 1e-8f)
// M-step: accumulate new-batch statistics ...
934 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
936 centers[i_gau].setZero();
937 covars[i_gau].setZero();
939 E[i_gau] = assign.row(i_gau).sum();
943 for (
int iData = 0; iData < data.rows(); iData++)
945 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
947 centers[i_gau] += assign(i_gau, iData) * data.row(iData).transpose();
// ... and blend with previous statistics, weighted by sample counts.
951 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
953 centers[i_gau] = (E_prev[i_gau] * centers_prev[i_gau] + centers[i_gau])/(E[i_gau] + E_prev[i_gau]);
954 priors[i_gau] = (E[i_gau] + E_prev[i_gau])/(n_observations + n_observations_prev);
957 for (
int iData = 0; iData < data.rows(); iData++)
958 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
959 covars[i_gau] += assign(i_gau, iData) * (data.row(iData).transpose() - centers[i_gau]) * (data.row(iData).transpose() - centers[i_gau]).transpose();
961 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
962 covars[i_gau] = covars[i_gau] + E_prev[i_gau] * (covars_prev[i_gau] + (centers_prev[i_gau] - centers[i_gau]) * (centers_prev[i_gau] - centers[i_gau]).transpose());
964 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
965 covars[i_gau] /= (E[i_gau] + E_prev[i_gau]);
// Diagonal regularization, as elsewhere.
968 for (
size_t i_gau = 0; i_gau < centers.size(); i_gau++)
969 covars[i_gau] += MatrixXd::Identity(covars[i_gau].rows(), covars[i_gau].cols()) * 1e-5f;
// Total observation count now covers both the old and the new batch.
973 n_observations += n_observations_prev;
// Boost.Serialization hook for this class (the body is missing from this
// listing; BOOST_CLASS_EXPORT_IMPLEMENT below registers the type).
976 template<
class Archive>
977 void FunctionApproximatorGMR::serialize(Archive & ar,
const unsigned int version)
FunctionApproximatorGMR class header file.
void train(const Eigen::Ref< const Eigen::MatrixXd > &inputs, const Eigen::Ref< const Eigen::MatrixXd > &targets)
Train the function approximator with corresponding input and target examples.
bool isTrained(void) const
Determine whether the function approximator has already been trained with data or not...
BOOST_CLASS_EXPORT_IMPLEMENT(DmpBbo::FunctionApproximatorGMR)
For boost::serialization.
void kMeansInit(const Eigen::MatrixXd &data, std::vector< Eigen::VectorXd > &means, std::vector< double > &priors, std::vector< Eigen::MatrixXd > &covars, int n_max_iter=1000)
Initialize Gaussian for EM algorithm using k-means.
static double normalPDF(const Eigen::VectorXd &mu, const Eigen::MatrixXd &covar, const Eigen::VectorXd &input)
The probability density function (PDF) of the multi-variate normal distribution.
GMR (Gaussian Mixture Regression) function approximator.
virtual int getExpectedOutputDim(void) const
The expected dimensionality of the output data.
int getExpectedOutputDim(void) const
The expected dimensionality of the output data.
Base class for all function approximators.
void firstDimSlicingInit(const Eigen::MatrixXd &data, std::vector< Eigen::VectorXd > &means, std::vector< double > &priors, std::vector< Eigen::MatrixXd > &covars)
Initialize Gaussian for EM algorithm using a same-size slicing on the first dimension (method used in...
Model parameters for the GMR function approximator.
ModelParametersGMR class header file.
void predict(const Eigen::Ref< const Eigen::MatrixXd > &inputs, Eigen::MatrixXd &output)
Query the function approximator to make a prediction.
void expectationMaximization(const Eigen::MatrixXd &data, std::vector< Eigen::VectorXd > &means, std::vector< double > &priors, std::vector< Eigen::MatrixXd > &covars, int n_max_iter=50)
EM algorithm.
void predictVariance(const Eigen::Ref< const Eigen::MatrixXd > &inputs, Eigen::MatrixXd &variances)
Query the function approximator to get the variance of a prediction This function is not implemented ...
virtual int getExpectedInputDim(void) const
The expected dimensionality of the input data.
const MetaParameters * getMetaParameters(void) const
Accessor for FunctionApproximator::meta_parameters_.
static double normalPDFDamped(const Eigen::VectorXd &mu, const Eigen::MatrixXd &covar, const Eigen::VectorXd &input)
The damped probability density function (PDF) of the multi-variate normal distribution.
const ModelParameters * getModelParameters(void) const
Accessor for FunctionApproximator::model_parameters_.
virtual FunctionApproximator * clone(void) const
Return a pointer to a deep copy of the FunctionApproximator object.
void predictDot(const Eigen::Ref< const Eigen::MatrixXd > &inputs, Eigen::MatrixXd &outputs, Eigen::MatrixXd &outputs_dot)
Query the function approximator to make a prediction and to compute the derivate of that prediction...
unsigned int getNumberOfGaussians(void) const
Get the number of Gaussians in the GMM.
void setModelParameters(ModelParameters *model_parameters)
Accessor for FunctionApproximator::model_parameters_.
int getExpectedInputDim(void) const
The expected dimensionality of the input data.
void trainIncremental(const Eigen::Ref< const Eigen::MatrixXd > &inputs, const Eigen::Ref< const Eigen::MatrixXd > &targets)
Train the function approximator incrementally with corresponding input and target examples...
void expectationMaximizationIncremental(const Eigen::MatrixXd &data, std::vector< Eigen::VectorXd > &means, std::vector< double > &priors, std::vector< Eigen::MatrixXd > &covars, int &n_observations, int n_max_iter=50)
EM algorithm Incremental.
Header file for serialization of Eigen matrices.