EPInferenceMethod.cpp
/*
 * Copyright (c) The Shogun Machine Learning Toolbox
 * Written (W) 2013 Roman Votyakov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are those
 * of the authors and should not be interpreted as representing official policies,
 * either expressed or implied, of the Shogun Development Team.
 *
 * Based on ideas from GAUSSIAN PROCESS REGRESSION AND CLASSIFICATION Toolbox
 * http://www.gaussianprocess.org/gpml/code/matlab/doc/
 */

#include <shogun/machine/gp/EPInferenceMethod.h>

#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/eigen3.h>

using namespace shogun;
using namespace Eigen;

// try to use previously allocated memory for SGVector
#define CREATE_SGVECTOR(vec, len, sg_type) \
    { \
        if (!vec.vector || vec.vlen!=len) \
            vec=SGVector<sg_type>(len); \
    }

// try to use previously allocated memory for SGMatrix
#define CREATE_SGMATRIX(mat, rows, cols, sg_type) \
    { \
        if (!mat.matrix || mat.num_rows!=rows || mat.num_cols!=cols) \
            mat=SGMatrix<sg_type>(rows, cols); \
    }
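
// For example, CREATE_SGVECTOR(m_mu, n, float64_t) keeps the existing buffer
// whenever m_mu already holds n elements and only allocates a fresh
// SGVector<float64_t>(n) otherwise, so repeated EP sweeps in update() do not
// reallocate the site-parameter vectors on every call.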

CEPInferenceMethod::CEPInferenceMethod()
{
    init();
}

CEPInferenceMethod::CEPInferenceMethod(CKernel* kernel, CFeatures* features,
        CMeanFunction* mean, CLabels* labels, CLikelihoodModel* model)
        : CInference(kernel, features, mean, labels, model)
{
    init();
}

CEPInferenceMethod::~CEPInferenceMethod()
{
}

void CEPInferenceMethod::register_minimizer(Minimizer* minimizer)
{
    SG_WARNING("The method does not require a minimizer. The provided minimizer will not be used.\n");
}

void CEPInferenceMethod::init()
{
    m_max_sweep=15;
    m_min_sweep=2;
    m_tol=1e-4;
    m_fail_on_non_convergence=true;
}

CEPInferenceMethod* CEPInferenceMethod::obtain_from_generic(
        CInference* inference)
{
    if (inference==NULL)
        return NULL;

    if (inference->get_inference_type()!=INF_EP)
        SG_SERROR("Provided inference is not of type CEPInferenceMethod!\n")

    SG_REF(inference);
    return (CEPInferenceMethod*)inference;
}
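
// A minimal usage sketch for the helper above (hypothetical variables):
//
//   CInference* inf=...; // e.g. taken from a trained GP machine
//   CEPInferenceMethod* ep=CEPInferenceMethod::obtain_from_generic(inf);
//   ...
//   SG_UNREF(ep); // obtain_from_generic() increases the reference count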

float64_t CEPInferenceMethod::get_negative_log_marginal_likelihood()
{
    if (parameter_hash_changed())
        update();

    return m_nlZ;
}

SGVector<float64_t> CEPInferenceMethod::get_alpha()
{
    if (parameter_hash_changed())
        update();

    return SGVector<float64_t>(m_alpha);
}

SGMatrix<float64_t> CEPInferenceMethod::get_cholesky()
{
    if (parameter_hash_changed())
        update();

    return SGMatrix<float64_t>(m_L);
}

SGVector<float64_t> CEPInferenceMethod::get_diagonal_vector()
{
    if (parameter_hash_changed())
        update();

    return SGVector<float64_t>(m_sttau);
}

SGVector<float64_t> CEPInferenceMethod::get_posterior_mean()
{
    compute_gradient();

    return SGVector<float64_t>(m_mu);
}

SGMatrix<float64_t> CEPInferenceMethod::get_posterior_covariance()
{
    compute_gradient();

    return SGMatrix<float64_t>(m_Sigma);
}
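
// A minimal usage sketch for the getters above, assuming kernel, features,
// mean, labels and likelihood objects (hypothetical names) are already set up:
//
//   CEPInferenceMethod* ep=new CEPInferenceMethod(kernel, features, mean,
//       labels, likelihood);
//   float64_t nlZ=ep->get_negative_log_marginal_likelihood();
//   SGVector<float64_t> mu=ep->get_posterior_mean();
//   SGMatrix<float64_t> Sigma=ep->get_posterior_covariance();
//
// Each getter triggers the expensive update() (or compute_gradient()) lazily
// via parameter_hash_changed(), so repeated queries with unchanged parameters
// reuse the cached results.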

void CEPInferenceMethod::compute_gradient()
{
    CInference::compute_gradient();

    if (!m_gradient_update)
    {
        // update matrices to compute derivatives
        update_deriv();
        m_gradient_update=true;
        update_parameter_hash();
    }
}

void CEPInferenceMethod::update()
{
    SG_DEBUG("entering\n");

    // update kernel and feature matrix
    CInference::update();

    // get number of labels (training examples)
    const index_t n=m_labels->get_num_labels();

    // try to use tilde values from previous call
    if (m_ttau.vlen==n)
    {
        update_chol();
        update_approx_cov();
        update_approx_mean();
        update_negative_ml();
    }

    // get mean vector
    SGVector<float64_t> mean=m_mean->get_mean_vector(m_features);

    // get and scale diagonal of the kernel matrix
    SGVector<float64_t> ktrtr_diag=m_ktrtr.get_diagonal_vector();
    ktrtr_diag.scale(CMath::exp(m_log_scale*2.0));

    // marginal likelihood for ttau = tnu = 0
    float64_t nlZ0=-SGVector<float64_t>::sum(m_model->get_log_zeroth_moments(
            mean, ktrtr_diag, m_labels));
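
    // Note: with all site parameters ttau and tnu set to zero the EP
    // approximation collapses to the GP prior, so nlZ0 is the negative log
    // marginal likelihood of the prior-only model: minus the sum of the log
    // zeroth moments of the likelihood under the marginals N(mean[i],
    // diag(K)[i]). It serves as a baseline below: warm-started site
    // parameters are kept only if their negative log marginal likelihood
    // beats nlZ0.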

    // use zero values if we have no better guess or it's better
    if (m_ttau.vlen!=n || m_nlZ>nlZ0)
    {
        CREATE_SGVECTOR(m_ttau, n, float64_t);
        m_ttau.zero();

        CREATE_SGVECTOR(m_sttau, n, float64_t);
        m_sttau.zero();

        CREATE_SGVECTOR(m_tnu, n, float64_t);
        m_tnu.zero();

        CREATE_SGMATRIX(m_Sigma, m_ktrtr.num_rows, m_ktrtr.num_cols, float64_t);

        // copy data manually, since we don't have an appropriate method
        for (index_t i=0; i<m_ktrtr.num_rows; i++)
            for (index_t j=0; j<m_ktrtr.num_cols; j++)
                m_Sigma(i,j)=m_ktrtr(i,j)*CMath::exp(m_log_scale*2.0);

        CREATE_SGVECTOR(m_mu, n, float64_t);
        m_mu.zero();

        // set marginal likelihood
        m_nlZ=nlZ0;
    }

    // create vector of the random permutation
    SGVector<index_t> v(n);
    v.range_fill();

    // cavity tau and nu vectors
    SGVector<float64_t> tau_n(n);
    SGVector<float64_t> nu_n(n);

    // cavity mu and s2 vectors
    SGVector<float64_t> mu_n(n);
    SGVector<float64_t> s2_n(n);

    float64_t nlZ_old=CMath::INFTY;
    uint32_t sweep=0;
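
    // EP sweeps: each sweep visits every site in random order and refits its
    // site parameters (ttau, tnu) by moment matching. Sweeping continues
    // until the change in the negative log marginal likelihood drops below
    // m_tol, but at least m_min_sweep and at most m_max_sweep times.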
    while ((CMath::abs(m_nlZ-nlZ_old)>m_tol && sweep<m_max_sweep) ||
            sweep<m_min_sweep)
    {
        nlZ_old=m_nlZ;
        sweep++;

        // shuffle random permutation
        CMath::permute(v);
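
        // Per-site EP update (cf. Rasmussen & Williams, "Gaussian Processes
        // for Machine Learning", Section 3.6): first the cavity distribution
        // removes site i from the approximate posterior,
        //
        //   tau_n[i] = 1/Sigma(i,i) - ttau[i]
        //   nu_n[i]  = mu[i]/Sigma(i,i) + mean[i]*tau_n[i] - tnu[i]
        //
        // then new site parameters are chosen so that cavity times site
        // matches the first two moments of cavity times exact likelihood,
        // and Sigma is corrected with a rank-1 Sherman-Morrison update
        // instead of a full re-factorization.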
        for (index_t j=0; j<n; j++)
        {
            index_t i=v[j];

            // find cavity parameters
            tau_n[i]=1.0/m_Sigma(i,i)-m_ttau[i];
            nu_n[i]=m_mu[i]/m_Sigma(i,i)+mean[i]*tau_n[i]-m_tnu[i];

            // compute cavity mean and variance
            mu_n[i]=nu_n[i]/tau_n[i];
            s2_n[i]=1.0/tau_n[i];

            // get moments
            float64_t mu=m_model->get_first_moment(mu_n, s2_n, m_labels, i);
            float64_t s2=m_model->get_second_moment(mu_n, s2_n, m_labels, i);

            // save old value of ttau
            float64_t ttau_old=m_ttau[i];

            // compute ttau and sqrt(ttau)
            m_ttau[i]=CMath::max(1.0/s2-tau_n[i], 0.0);
            m_sttau[i]=CMath::sqrt(m_ttau[i]);

            // compute tnu
            m_tnu[i]=mu/s2-nu_n[i];

            // compute difference ds2=ttau_new-ttau_old
            float64_t ds2=m_ttau[i]-ttau_old;

            // create eigen representation of Sigma, tnu and mu
            Map<MatrixXd> eigen_Sigma(m_Sigma.matrix, m_Sigma.num_rows,
                    m_Sigma.num_cols);
            Map<VectorXd> eigen_tnu(m_tnu.vector, m_tnu.vlen);
            Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);

            VectorXd eigen_si=eigen_Sigma.col(i);

            // rank-1 update Sigma
            eigen_Sigma=eigen_Sigma-ds2/(1.0+ds2*eigen_si(i))*eigen_si*
                    eigen_si.adjoint();

            // update mu
            eigen_mu=eigen_Sigma*eigen_tnu;
        }

        // update upper triangular factor (L^T) of Cholesky decomposition of
        // matrix B, approximate posterior covariance and mean, negative
        // marginal likelihood
        update_chol();
        update_approx_cov();
        update_approx_mean();
        update_negative_ml();
    }

    if (sweep==m_max_sweep && CMath::abs(m_nlZ-nlZ_old)>m_tol)
    {
        SG_WARNING("Maximum number (%d) of sweeps reached, but tolerance (%f) was "
                "not yet reached. You can increase or decrease both.\n",
                m_max_sweep, m_tol);

        if (m_fail_on_non_convergence)
            SG_ERROR("EP did not converge. This error can be explicitly disabled.\n")
    }

    // update vector alpha
    update_alpha();

    m_gradient_update=false;

    // update hash of the parameters
    update_parameter_hash();

    SG_DEBUG("leaving\n");
}

void CEPInferenceMethod::update_alpha()
{
    // create eigen representations of kernel matrix, L^T, sqrt(ttau) and tnu
    Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
    Map<VectorXd> eigen_tnu(m_tnu.vector, m_tnu.vlen);
    Map<VectorXd> eigen_sttau(m_sttau.vector, m_sttau.vlen);
    Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);

    // create shogun and eigen representation of the alpha vector
    CREATE_SGVECTOR(m_alpha, m_tnu.vlen, float64_t);
    Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

    // solve L * L^T * v = tS^(1/2) * K * tnu
    VectorXd eigen_v=eigen_L.triangularView<Upper>().adjoint().solve(
            eigen_sttau.cwiseProduct(eigen_K*CMath::exp(m_log_scale*2.0)*eigen_tnu));
    eigen_v=eigen_L.triangularView<Upper>().solve(eigen_v);

    // compute alpha = (I - tS^(1/2) * B^(-1) * tS^(1/2) * K) * tnu =
    // tnu - tS^(1/2) * (L^T)^(-1) * L^(-1) * tS^(1/2) * K * tnu =
    // tnu - tS^(1/2) * v
    eigen_alpha=eigen_tnu-eigen_sttau.cwiseProduct(eigen_v);
}
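
// The two triangular solves above apply B^(-1)=(L*L^T)^(-1) without forming
// an explicit inverse: the adjoint() solve is a forward substitution with L,
// the second solve a back substitution with L^T. Since Sigma=K-K*tS^(1/2)*
// B^(-1)*tS^(1/2)*K, the resulting alpha satisfies mu=Sigma*tnu=K*alpha
// (with the scaled kernel), i.e. alpha is the usual representer weight
// vector of the posterior mean.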

void CEPInferenceMethod::update_chol()
{
    // create eigen representations of kernel matrix and sqrt(ttau)
    Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
    Map<VectorXd> eigen_sttau(m_sttau.vector, m_sttau.vlen);

    // create shogun and eigen representation of the upper triangular factor
    // (L^T) of the Cholesky decomposition of the matrix B
    CREATE_SGMATRIX(m_L, m_ktrtr.num_rows, m_ktrtr.num_cols, float64_t);
    Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);

    // compute upper triangular factor L^T of the Cholesky decomposition of the
    // matrix: B = tS^(1/2) * K * tS^(1/2) + I
    LLT<MatrixXd> eigen_chol((eigen_sttau*eigen_sttau.adjoint()).cwiseProduct(
            eigen_K*CMath::exp(m_log_scale*2.0))+
            MatrixXd::Identity(m_L.num_rows, m_L.num_cols));

    eigen_L=eigen_chol.matrixU();
}
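
// B is symmetric positive definite by construction: tS^(1/2)*K*tS^(1/2) is
// positive semi-definite (update() clamps ttau at zero, and K is a valid
// covariance), and adding the identity bounds all eigenvalues of B below by
// one. The LLT factorization above therefore cannot fail for a valid kernel.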

void CEPInferenceMethod::update_approx_cov()
{
    // create eigen representations of kernel matrix, L^T matrix and sqrt(ttau)
    Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
    Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
    Map<VectorXd> eigen_sttau(m_sttau.vector, m_sttau.vlen);

    // create shogun and eigen representation of the approximate covariance
    // matrix
    CREATE_SGMATRIX(m_Sigma, m_ktrtr.num_rows, m_ktrtr.num_cols, float64_t);
    Map<MatrixXd> eigen_Sigma(m_Sigma.matrix, m_Sigma.num_rows, m_Sigma.num_cols);

    // compute V = L^(-1) * tS^(1/2) * K, using upper triangular factor L^T
    MatrixXd eigen_V=eigen_L.triangularView<Upper>().adjoint().solve(
            eigen_sttau.asDiagonal()*eigen_K*CMath::exp(m_log_scale*2.0));

    // compute covariance matrix of the posterior:
    // Sigma = K - K * tS^(1/2) * (L * L^T)^(-1) * tS^(1/2) * K =
    // K - (K * tS^(1/2)) * (L^T)^(-1) * L^(-1) * tS^(1/2) * K =
    // K - (tS^(1/2) * K)^T * (L^(-1))^T * L^(-1) * tS^(1/2) * K = K - V^T * V
    eigen_Sigma=eigen_K*CMath::exp(m_log_scale*2.0)-eigen_V.adjoint()*eigen_V;
}

void CEPInferenceMethod::update_approx_mean()
{
    // create eigen representation of posterior covariance matrix and tnu
    Map<MatrixXd> eigen_Sigma(m_Sigma.matrix, m_Sigma.num_rows, m_Sigma.num_cols);
    Map<VectorXd> eigen_tnu(m_tnu.vector, m_tnu.vlen);

    // create shogun and eigen representation of the approximate mean vector
    CREATE_SGVECTOR(m_mu, m_tnu.vlen, float64_t);
    Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);

    // compute mean vector of the approximate posterior: mu = Sigma * tnu
    eigen_mu=eigen_Sigma*eigen_tnu;
}

void CEPInferenceMethod::update_negative_ml()
{
    // create eigen representation of Sigma, L, mu, tnu, ttau
    Map<MatrixXd> eigen_Sigma(m_Sigma.matrix, m_Sigma.num_rows, m_Sigma.num_cols);
    Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
    Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);
    Map<VectorXd> eigen_tnu(m_tnu.vector, m_tnu.vlen);
    Map<VectorXd> eigen_ttau(m_ttau.vector, m_ttau.vlen);

    // get and create eigen representation of the mean vector
    SGVector<float64_t> m=m_mean->get_mean_vector(m_features);
    Map<VectorXd> eigen_m(m.vector, m.vlen);

    // compute vector of cavity parameter tau_n
    VectorXd eigen_tau_n=(VectorXd::Ones(m_ttau.vlen)).cwiseQuotient(
            eigen_Sigma.diagonal())-eigen_ttau;

    // compute vector of cavity parameter nu_n
    VectorXd eigen_nu_n=eigen_mu.cwiseQuotient(eigen_Sigma.diagonal())-
            eigen_tnu+eigen_m.cwiseProduct(eigen_tau_n);

    // compute cavity mean: mu_n=nu_n/tau_n
    SGVector<float64_t> mu_n(m_ttau.vlen);
    Map<VectorXd> eigen_mu_n(mu_n.vector, mu_n.vlen);

    eigen_mu_n=eigen_nu_n.cwiseQuotient(eigen_tau_n);

    // compute cavity variance: s2_n=1.0/tau_n
    SGVector<float64_t> s2_n(m_ttau.vlen);
    Map<VectorXd> eigen_s2_n(s2_n.vector, s2_n.vlen);

    eigen_s2_n=(VectorXd::Ones(m_ttau.vlen)).cwiseQuotient(eigen_tau_n);

    // get sum of the log zeroth moments
    float64_t lZ=SGVector<float64_t>::sum(
            m_model->get_log_zeroth_moments(mu_n, s2_n, m_labels));

    // compute nlZ_part1=sum(log(diag(L)))-sum(lZ)-tnu'*Sigma*tnu/2
    float64_t nlZ_part1=eigen_L.diagonal().array().log().sum()-lZ-
            (eigen_tnu.adjoint()*eigen_Sigma).dot(eigen_tnu)/2.0;

    // compute nlZ_part2=sum(tnu.^2./(tau_n+ttau))/2-sum(log(1+ttau./tau_n))/2
    float64_t nlZ_part2=(eigen_tnu.array().square()/
            (eigen_tau_n+eigen_ttau).array()).sum()/2.0-(1.0+eigen_ttau.array()/
            eigen_tau_n.array()).log().sum()/2.0;

    // compute nlZ_part3=-(nu_n-m.*tau_n)'*((ttau./tau_n.*(nu_n-m.*tau_n)-2*tnu)
    // ./(ttau+tau_n))/2
    float64_t nlZ_part3=-(eigen_nu_n-eigen_m.cwiseProduct(eigen_tau_n)).dot(
            ((eigen_ttau.array()/eigen_tau_n.array()*(eigen_nu_n.array()-
            eigen_m.array()*eigen_tau_n.array())-2*eigen_tnu.array())/
            (eigen_ttau.array()+eigen_tau_n.array())).matrix())/2.0;

    // compute nlZ=nlZ_part1+nlZ_part2+nlZ_part3
    m_nlZ=nlZ_part1+nlZ_part2+nlZ_part3;
}
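
// The three parts above assemble the EP negative log marginal likelihood in
// the same split as the GPML toolbox this file is based on (cf. Rasmussen &
// Williams, 2006, Section 3.6):
//
//   -log Z_EP = sum(log(diag(L))) - sum(log Z_i) - tnu'*Sigma*tnu/2
//               + sum(tnu.^2./(tau_n+ttau))/2 - sum(log(1+ttau./tau_n))/2
//               - (nu_n-m.*tau_n)'*((ttau./tau_n.*(nu_n-m.*tau_n)-2*tnu)
//                 ./(ttau+tau_n))/2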

void CEPInferenceMethod::update_deriv()
{
    // create eigen representation of L, sttau, alpha
    Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
    Map<VectorXd> eigen_sttau(m_sttau.vector, m_sttau.vlen);
    Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

    // create shogun and eigen representation of F
    CREATE_SGMATRIX(m_F, m_ktrtr.num_rows, m_ktrtr.num_cols, float64_t);
    Map<MatrixXd> eigen_F(m_F.matrix, m_F.num_rows, m_F.num_cols);

    // solve L * L^T * V = diag(sqrt(ttau))
    MatrixXd V=eigen_L.triangularView<Upper>().adjoint().solve(
            MatrixXd(eigen_sttau.asDiagonal()));
    V=eigen_L.triangularView<Upper>().solve(V);

    // compute F=alpha*alpha'-repmat(sW,1,n).*solve_chol(L,diag(sW))
    eigen_F=eigen_alpha*eigen_alpha.adjoint()-eigen_sttau.asDiagonal()*V;
}
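
// F collects everything the hyperparameter derivatives below need: for a
// kernel hyperparameter theta with derivative matrix dK, the EP
// approximation gives dnlZ/dtheta=-sum(F.*dK)/2 (up to the kernel scaling
// applied by the callers), with F=alpha*alpha^T-tS^(1/2)*B^(-1)*tS^(1/2).
// The second term again reuses the Cholesky factor of B via two triangular
// solves instead of an explicit inverse.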

SGVector<float64_t> CEPInferenceMethod::get_derivative_wrt_inference_method(
        const TParameter* param)
{
    REQUIRE(!strcmp(param->m_name, "log_scale"), "Can't compute derivative of "
            "the negative log marginal likelihood wrt %s.%s parameter\n",
            get_name(), param->m_name)

    Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
    Map<MatrixXd> eigen_F(m_F.matrix, m_F.num_rows, m_F.num_cols);

    SGVector<float64_t> result(1);

    // compute derivative wrt kernel scale: dnlZ=-sum(F.*K*scale*2)/2
    result[0]=-(eigen_F.cwiseProduct(eigen_K)).sum();
    result[0]*=CMath::exp(m_log_scale*2.0);

    return result;
}
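
// Chain rule behind the scale derivative above: the effective kernel is
// K_eff=exp(2*log_scale)*K, hence dK_eff/dlog_scale=2*exp(2*log_scale)*K and
//
//   dnlZ/dlog_scale = -sum(F.*K)*2*exp(2*log_scale)/2
//                   = -sum(F.*K)*exp(2*log_scale),
//
// which is exactly result[0] as computed above.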

SGVector<float64_t> CEPInferenceMethod::get_derivative_wrt_likelihood_model(
        const TParameter* param)
{
    SG_NOTIMPLEMENTED
    return SGVector<float64_t>();
}

SGVector<float64_t> CEPInferenceMethod::get_derivative_wrt_kernel(
        const TParameter* param)
{
    // create eigen representation of the matrix F
    Map<MatrixXd> eigen_F(m_F.matrix, m_F.num_rows, m_F.num_cols);

    REQUIRE(param, "Param not set\n");
    SGVector<float64_t> result;
    int64_t len=const_cast<TParameter *>(param)->m_datatype.get_num_elements();
    result=SGVector<float64_t>(len);

    for (index_t i=0; i<result.vlen; i++)
    {
        SGMatrix<float64_t> dK;

        if (result.vlen==1)
            dK=m_kernel->get_parameter_gradient(param);
        else
            dK=m_kernel->get_parameter_gradient(param, i);

        Map<MatrixXd> eigen_dK(dK.matrix, dK.num_rows, dK.num_cols);

        // compute derivative wrt kernel parameter: dnlZ=-sum(F.*dK*scale^2)/2.0
        result[i]=-(eigen_F.cwiseProduct(eigen_dK)).sum();
        result[i]*=CMath::exp(m_log_scale*2.0)/2.0;
    }

    return result;
}

SGVector<float64_t> CEPInferenceMethod::get_derivative_wrt_mean(
        const TParameter* param)
{
    SG_NOTIMPLEMENTED
    return SGVector<float64_t>();
}