SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
EPInferenceMethod.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2013 Roman Votyakov
8  *
9  * Based on ideas from GAUSSIAN PROCESS REGRESSION AND CLASSIFICATION Toolbox
10  * Copyright (C) 2005-2013 by Carl Edward Rasmussen & Hannes Nickisch under the
11  * FreeBSD License
12  * http://www.gaussianprocess.org/gpml/code/matlab/doc/
13  */
14 
16 
17 #ifdef HAVE_EIGEN3
18 
24 
26 
27 using namespace shogun;
28 using namespace Eigen;
29 
// Try to reuse previously allocated memory for an SGVector: only allocate a
// fresh vector when none exists yet or the length differs.
//
// NOTE(review): function-like macro kept for backward compatibility with the
// rest of this file. The macro parameters are now parenthesized so that
// expression arguments (e.g. a computed length) expand safely; `sg_type` is
// used as a template argument and cannot be parenthesized.
#define CREATE_SGVECTOR(vec, len, sg_type) \
	{ \
		if (!(vec).vector || (vec).vlen!=(len)) \
			(vec)=SGVector<sg_type>(len); \
	}
36 
// Try to reuse previously allocated memory for an SGMatrix: only allocate a
// fresh matrix when none exists yet or the dimensions differ.
//
// NOTE(review): function-like macro kept for backward compatibility with the
// rest of this file. The macro parameters are now parenthesized so that
// expression arguments expand safely; `sg_type` is used as a template
// argument and cannot be parenthesized.
#define CREATE_SGMATRIX(mat, rows, cols, sg_type) \
	{ \
		if (!(mat).matrix || (mat).num_rows!=(rows) || (mat).num_cols!=(cols)) \
			(mat)=SGMatrix<sg_type>(rows, cols); \
	}
43 
45 {
46  init();
47 }
48 
50  CMeanFunction* mean, CLabels* labels, CLikelihoodModel* model)
51  : CInferenceMethod(kernel, features, mean, labels, model)
52 {
53  init();
54 }
55 
57 {
58 }
59 
60 void CEPInferenceMethod::init()
61 {
62  m_max_sweep=15;
63  m_min_sweep=2;
64  m_tol=1e-4;
65 }
66 
68 {
70  update();
71 
72  return m_nlZ;
73 }
74 
76 {
78  update();
79 
81 }
82 
84 {
86  update();
87 
88  return SGMatrix<float64_t>(m_L);
89 }
90 
92 {
94  update();
95 
96  return SGVector<float64_t>(m_sttau);
97 }
98 
100 {
102  update();
103 
104  return SGVector<float64_t>(m_mu);
105 }
106 
108 {
110  update();
111 
112  return SGMatrix<float64_t>(m_Sigma);
113 }
114 
116 {
117  SG_DEBUG("entering\n");
118 
119  // update kernel and feature matrix
121 
 122  // get number of labels (training examples)
124 
125  // try to use tilde values from previous call
126  if (m_ttau.vlen==n)
127  {
128  update_chol();
132  }
133 
134  // get mean vector
136 
137  // get and scale diagonal of the kernel matrix
139  ktrtr_diag.scale(CMath::sq(m_scale));
140 
141  // marginal likelihood for ttau = tnu = 0
143  mean, ktrtr_diag, m_labels));
144 
145  // use zero values if we have no better guess or it's better
146  if (m_ttau.vlen!=n || m_nlZ>nlZ0)
147  {
148  CREATE_SGVECTOR(m_ttau, n, float64_t);
149  m_ttau.zero();
150 
151  CREATE_SGVECTOR(m_sttau, n, float64_t);
152  m_sttau.zero();
153 
154  CREATE_SGVECTOR(m_tnu, n, float64_t);
155  m_tnu.zero();
156 
158 
159  // copy data manually, since we don't have appropriate method
160  for (index_t i=0; i<m_ktrtr.num_rows; i++)
161  for (index_t j=0; j<m_ktrtr.num_cols; j++)
162  m_Sigma(i,j)=m_ktrtr(i,j)*CMath::sq(m_scale);
163 
164  CREATE_SGVECTOR(m_mu, n, float64_t);
165  m_mu.zero();
166 
167  // set marginal likelihood
168  m_nlZ=nlZ0;
169  }
170 
171  // create vector of the random permutation
172  SGVector<index_t> v(n);
173  v.range_fill();
174 
175  // cavity tau and nu vectors
176  SGVector<float64_t> tau_n(n);
177  SGVector<float64_t> nu_n(n);
178 
179  // cavity mu and s2 vectors
180  SGVector<float64_t> mu_n(n);
181  SGVector<float64_t> s2_n(n);
182 
183  float64_t nlZ_old=CMath::INFTY;
184  uint32_t sweep=0;
185 
186  while ((CMath::abs(m_nlZ-nlZ_old)>m_tol && sweep<m_max_sweep) ||
187  sweep<m_min_sweep)
188  {
189  nlZ_old=m_nlZ;
190  sweep++;
191 
192  // shuffle random permutation
193  CMath::permute(v);
194 
195  for (index_t j=0; j<n; j++)
196  {
197  index_t i=v[j];
198 
 199  // find cavity parameters
200  tau_n[i]=1.0/m_Sigma(i,i)-m_ttau[i];
201  nu_n[i]=m_mu[i]/m_Sigma(i,i)+mean[i]*tau_n[i]-m_tnu[i];
202 
203  // compute cavity mean and variance
204  mu_n[i]=nu_n[i]/tau_n[i];
205  s2_n[i]=1.0/tau_n[i];
206 
207  // get moments
208  float64_t mu=m_model->get_first_moment(mu_n, s2_n, m_labels, i);
209  float64_t s2=m_model->get_second_moment(mu_n, s2_n, m_labels, i);
210 
211  // save old value of ttau
212  float64_t ttau_old=m_ttau[i];
213 
214  // compute ttau and sqrt(ttau)
215  m_ttau[i]=CMath::max(1.0/s2-tau_n[i], 0.0);
216  m_sttau[i]=CMath::sqrt(m_ttau[i]);
217 
218  // compute tnu
219  m_tnu[i]=mu/s2-nu_n[i];
220 
221  // compute difference ds2=ttau_new-ttau_old
222  float64_t ds2=m_ttau[i]-ttau_old;
223 
224  // create eigen representation of Sigma, tnu and mu
225  Map<MatrixXd> eigen_Sigma(m_Sigma.matrix, m_Sigma.num_rows,
226  m_Sigma.num_cols);
227  Map<VectorXd> eigen_tnu(m_tnu.vector, m_tnu.vlen);
228  Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);
229 
230  VectorXd eigen_si=eigen_Sigma.col(i);
231 
232  // rank-1 update Sigma
233  eigen_Sigma=eigen_Sigma-ds2/(1.0+ds2*eigen_si(i))*eigen_si*
234  eigen_si.adjoint();
235 
236  // update mu
237  eigen_mu=eigen_Sigma*eigen_tnu;
238  }
239 
240  // update upper triangular factor (L^T) of Cholesky decomposition of
241  // matrix B, approximate posterior covariance and mean, negative
242  // marginal likelihood
243  update_chol();
247  }
248 
249  if (sweep==m_max_sweep && CMath::abs(m_nlZ-nlZ_old)>m_tol)
250  {
251  SG_ERROR("Maximum number (%d) of sweeps reached, but tolerance (%f) was "
252  "not yet reached. You can manually set maximum number of sweeps "
253  "or tolerance to fix this problem.\n", m_max_sweep, m_tol);
254  }
255 
256  // update vector alpha
257  update_alpha();
258 
259  // update matrices to compute derivatives
260  update_deriv();
261 
262  // update hash of the parameters
264 
265  SG_DEBUG("leaving\n");
266 }
267 
269 {
270  // create eigen representations kernel matrix, L^T, sqrt(ttau) and tnu
271  Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
272  Map<VectorXd> eigen_tnu(m_tnu.vector, m_tnu.vlen);
273  Map<VectorXd> eigen_sttau(m_sttau.vector, m_sttau.vlen);
274  Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
275 
276  // create shogun and eigen representation of the alpha vector
278  Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);
279 
280  // solve LL^T * v = tS^(1/2) * K * tnu
281  VectorXd eigen_v=eigen_L.triangularView<Upper>().adjoint().solve(
282  eigen_sttau.cwiseProduct(eigen_K*CMath::sq(m_scale)*eigen_tnu));
283  eigen_v=eigen_L.triangularView<Upper>().solve(eigen_v);
284 
285  // compute alpha = (I - tS^(1/2) * B^(-1) * tS(1/2) * K) * tnu =
286  // tnu - tS(1/2) * (L^T)^(-1) * L^(-1) * tS^(1/2) * K * tnu =
287  // tnu - tS(1/2) * v
288  eigen_alpha=eigen_tnu-eigen_sttau.cwiseProduct(eigen_v);
289 }
290 
292 {
293  // create eigen representations of kernel matrix and sqrt(ttau)
294  Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
295  Map<VectorXd> eigen_sttau(m_sttau.vector, m_sttau.vlen);
296 
297  // create shogun and eigen representation of the upper triangular factor
298  // (L^T) of the Cholesky decomposition of the matrix B
300  Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
301 
 302  // compute upper triangular factor L^T of the Cholesky decomposition of the
303  // matrix: B = tS^(1/2) * K * tS^(1/2) + I
304  LLT<MatrixXd> eigen_chol((eigen_sttau*eigen_sttau.adjoint()).cwiseProduct(
305  eigen_K*CMath::sq(m_scale))+
306  MatrixXd::Identity(m_L.num_rows, m_L.num_cols));
307 
308  eigen_L=eigen_chol.matrixU();
309 }
310 
312 {
313  // create eigen representations of kernel matrix, L^T matrix and sqrt(ttau)
314  Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
315  Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
316  Map<VectorXd> eigen_sttau(m_sttau.vector, m_sttau.vlen);
317 
318  // create shogun and eigen representation of the approximate covariance
319  // matrix
321  Map<MatrixXd> eigen_Sigma(m_Sigma.matrix, m_Sigma.num_rows, m_Sigma.num_cols);
322 
323  // compute V = L^(-1) * tS^(1/2) * K, using upper triangular factor L^T
324  MatrixXd eigen_V=eigen_L.triangularView<Upper>().adjoint().solve(
325  eigen_sttau.asDiagonal()*eigen_K*CMath::sq(m_scale));
326 
327  // compute covariance matrix of the posterior:
328  // Sigma = K - K * tS^(1/2) * (L * L^T)^(-1) * tS^(1/2) * K =
329  // K - (K * tS^(1/2)) * (L^T)^(-1) * L^(-1) * tS^(1/2) * K =
330  // K - (tS^(1/2) * K)^T * (L^(-1))^T * L^(-1) * tS^(1/2) * K = K - V^T * V
331  eigen_Sigma=eigen_K*CMath::sq(m_scale)-eigen_V.adjoint()*eigen_V;
332 }
333 
335 {
336  // create eigen representation of posterior covariance matrix and tnu
337  Map<MatrixXd> eigen_Sigma(m_Sigma.matrix, m_Sigma.num_rows, m_Sigma.num_cols);
338  Map<VectorXd> eigen_tnu(m_tnu.vector, m_tnu.vlen);
339 
340  // create shogun and eigen representation of the approximate mean vector
341  CREATE_SGVECTOR(m_mu, m_tnu.vlen, float64_t);
342  Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);
343 
344  // compute mean vector of the approximate posterior: mu = Sigma * tnu
345  eigen_mu=eigen_Sigma*eigen_tnu;
346 }
347 
349 {
350  // create eigen representation of Sigma, L, mu, tnu, ttau
351  Map<MatrixXd> eigen_Sigma(m_Sigma.matrix, m_Sigma.num_rows, m_Sigma.num_cols);
352  Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
353  Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);
354  Map<VectorXd> eigen_tnu(m_tnu.vector, m_tnu.vlen);
355  Map<VectorXd> eigen_ttau(m_ttau.vector, m_ttau.vlen);
356 
357  // get and create eigen representation of the mean vector
359  Map<VectorXd> eigen_m(m.vector, m.vlen);
360 
361  // compute vector of cavity parameter tau_n
362  VectorXd eigen_tau_n=(VectorXd::Ones(m_ttau.vlen)).cwiseQuotient(
363  eigen_Sigma.diagonal())-eigen_ttau;
364 
365  // compute vector of cavity parameter nu_n
366  VectorXd eigen_nu_n=eigen_mu.cwiseQuotient(eigen_Sigma.diagonal())-
367  eigen_tnu+eigen_m.cwiseProduct(eigen_tau_n);
368 
369  // compute cavity mean: mu_n=nu_n/tau_n
370  SGVector<float64_t> mu_n(m_ttau.vlen);
371  Map<VectorXd> eigen_mu_n(mu_n.vector, mu_n.vlen);
372 
373  eigen_mu_n=eigen_nu_n.cwiseQuotient(eigen_tau_n);
374 
375  // compute cavity variance: s2_n=1.0/tau_n
376  SGVector<float64_t> s2_n(m_ttau.vlen);
377  Map<VectorXd> eigen_s2_n(s2_n.vector, s2_n.vlen);
378 
379  eigen_s2_n=(VectorXd::Ones(m_ttau.vlen)).cwiseQuotient(eigen_tau_n);
380 
382  m_model->get_log_zeroth_moments(mu_n, s2_n, m_labels));
383 
384  // compute nlZ_part1=sum(log(diag(L)))-sum(lZ)-tnu'*Sigma*tnu/2
385  float64_t nlZ_part1=eigen_L.diagonal().array().log().sum()-lZ-
386  (eigen_tnu.adjoint()*eigen_Sigma).dot(eigen_tnu)/2.0;
387 
388  // compute nlZ_part2=sum(tnu.^2./(tau_n+ttau))/2-sum(log(1+ttau./tau_n))/2
389  float64_t nlZ_part2=(eigen_tnu.array().square()/
390  (eigen_tau_n+eigen_ttau).array()).sum()/2.0-(1.0+eigen_ttau.array()/
391  eigen_tau_n.array()).log().sum()/2.0;
392 
393  // compute nlZ_part3=-(nu_n-m.*tau_n)'*((ttau./tau_n.*(nu_n-m.*tau_n)-2*tnu)
394  // ./(ttau+tau_n))/2
395  float64_t nlZ_part3=-(eigen_nu_n-eigen_m.cwiseProduct(eigen_tau_n)).dot(
396  ((eigen_ttau.array()/eigen_tau_n.array()*(eigen_nu_n.array()-
397  eigen_m.array()*eigen_tau_n.array())-2*eigen_tnu.array())/
398  (eigen_ttau.array()+eigen_tau_n.array())).matrix())/2.0;
399 
400  // compute nlZ=nlZ_part1+nlZ_part2+nlZ_part3
401  m_nlZ=nlZ_part1+nlZ_part2+nlZ_part3;
402 }
403 
405 {
406  // create eigen representation of L, sstau, alpha
407  Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
408  Map<VectorXd> eigen_sttau(m_sttau.vector, m_sttau.vlen);
409  Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);
410 
411  // create shogun and eigen representation of F
413  Map<MatrixXd> eigen_F(m_F.matrix, m_F.num_rows, m_F.num_cols);
414 
415  // solve L*L^T * V = diag(sqrt(ttau))
416  MatrixXd V=eigen_L.triangularView<Upper>().adjoint().solve(
417  MatrixXd(eigen_sttau.asDiagonal()));
418  V=eigen_L.triangularView<Upper>().solve(V);
419 
420  // compute F=alpha*alpha'-repmat(sW,1,n).*solve_chol(L,diag(sW))
421  eigen_F=eigen_alpha*eigen_alpha.adjoint()-eigen_sttau.asDiagonal()*V;
422 }
423 
425  const TParameter* param)
426 {
427  REQUIRE(!strcmp(param->m_name, "scale"), "Can't compute derivative of "
428  "the nagative log marginal likelihood wrt %s.%s parameter\n",
429  get_name(), param->m_name)
430 
431  Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
432  Map<MatrixXd> eigen_F(m_F.matrix, m_F.num_rows, m_F.num_cols);
433 
434  SGVector<float64_t> result(1);
435 
436  // compute derivative wrt kernel scale: dnlZ=-sum(F.*K*scale*2)/2
437  result[0]=-(eigen_F.cwiseProduct(eigen_K)*m_scale*2.0).sum()/2.0;
438 
439  return result;
440 }
441 
443  const TParameter* param)
444 {
446  return SGVector<float64_t>();
447 }
448 
450  const TParameter* param)
451 {
452  // create eigen representation of the matrix Q
453  Map<MatrixXd> eigen_F(m_F.matrix, m_F.num_rows, m_F.num_cols);
454 
455  SGVector<float64_t> result;
456 
457  if (param->m_datatype.m_ctype==CT_VECTOR ||
458  param->m_datatype.m_ctype==CT_SGVECTOR)
459  {
461  "Length of the parameter %s should not be NULL\n", param->m_name)
462  result=SGVector<float64_t>(*(param->m_datatype.m_length_y));
463  }
464  else
465  {
466  result=SGVector<float64_t>(1);
467  }
468 
469  for (index_t i=0; i<result.vlen; i++)
470  {
472 
473  if (result.vlen==1)
474  dK=m_kernel->get_parameter_gradient(param);
475  else
476  dK=m_kernel->get_parameter_gradient(param, i);
477 
478  Map<MatrixXd> eigen_dK(dK.matrix, dK.num_rows, dK.num_cols);
479 
480  // compute derivative wrt kernel parameter: dnlZ=-sum(F.*dK*scale^2)/2.0
481  result[i]=-(eigen_F.cwiseProduct(eigen_dK)*CMath::sq(m_scale)).sum()/2.0;
482  }
483 
484  return result;
485 }
486 
488  const TParameter* param)
489 {
491  return SGVector<float64_t>();
492 }
493 
494 #endif /* HAVE_EIGEN3 */

SHOGUN Machine Learning Toolbox - Documentation