SHOGUN  5.0.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules
GaussianProcessMachine.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) The Shogun Machine Learning Toolbox
3  * Written (w) 2014 Wu Lin
4  * Written (W) 2013 Roman Votyakov
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice, this
11  * list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  * The views and conclusions contained in the software and documentation are those
28  * of the authors and should not be interpreted as representing official policies,
29  * either expressed or implied, of the Shogun Development Team.
30  *
31  * Code adapted from
32  * Gaussian Process Machine Learning Toolbox
33  * http://www.gaussianprocess.org/gpml/code/matlab/doc/
34  * and
35  * https://gist.github.com/yorkerlin/8a36e8f9b298aa0246a4
36  */
37 
38 
39 #include <shogun/lib/config.h>
42 #include <shogun/kernel/Kernel.h>
45 
46 using namespace shogun;
47 using namespace Eigen;
48 
50 {
51  init();
52 }
53 
55 {
56  init();
57  set_inference_method(method);
58 }
59 
// Shared initialisation used by all constructors: clears the inference
// method pointer and registers it as a model parameter.
void CGaussianProcessMachine::init()
{
	// no inference method attached until explicitly set
	m_method=NULL;

	// register m_method for serialization and model selection
	SG_ADD((CSGObject**) &m_method, "inference_method", "Inference method",
	    MS_AVAILABLE);
}
67 
69 {
70  SG_UNREF(m_method);
71 }
72 
74 {
75  REQUIRE(m_method, "Inference method should not be NULL\n")
76 
77  CFeatures* feat;
78 
79  CSingleSparseInference* sparse_method=
80  dynamic_cast<CSingleSparseInference *>(m_method);
81  // use inducing features for sparse inference method
82  if (sparse_method)
83  {
84  sparse_method->optimize_inducing_features();
85  feat=sparse_method->get_inducing_features();
86  }
87  else
88  feat=m_method->get_features();
89 
90  // get kernel and compute kernel matrix: K(feat, data)*scale^2
91  CKernel* training_kernel=m_method->get_kernel();
92  CKernel* kernel=CKernel::obtain_from_generic(training_kernel->clone());
93  SG_UNREF(training_kernel);
94 
95  kernel->init(feat, data);
96 
97  // get kernel matrix and create eigen representation of it
98  SGMatrix<float64_t> k_trts=kernel->get_kernel_matrix();
99  Map<MatrixXd> eigen_Ks(k_trts.matrix, k_trts.num_rows, k_trts.num_cols);
100 
101  // compute Ks=Ks*scale^2
102  eigen_Ks*=CMath::sq(m_method->get_scale());
103 
104  // cleanup
105  SG_UNREF(feat);
106  SG_UNREF(kernel);
107 
108  // get alpha and create eigen representation of it
109  SGVector<float64_t> alpha=m_method->get_alpha();
110  Map<VectorXd> eigen_alpha(alpha.vector, alpha.vlen);
111 
112  // get mean and create eigen representation of it
113  CMeanFunction* mean_function=m_method->get_mean();
114  SGVector<float64_t> mean=mean_function->get_mean_vector(data);
115  Map<VectorXd> eigen_mean(mean.vector, mean.vlen);
116  SG_UNREF(mean_function);
117 
118  const index_t C=alpha.vlen/k_trts.num_rows;
119  const index_t n=k_trts.num_rows;
120  const index_t m=k_trts.num_cols;
121 
122  // compute mean: mu=Ks'*alpha+m
123  SGVector<float64_t> mu(C*m);
124  Map<MatrixXd> eigen_mu_matrix(mu.vector,C,m);
125 
126  for(index_t bl=0; bl<C; bl++)
127  eigen_mu_matrix.block(bl,0,1,m)=(eigen_Ks.adjoint()*eigen_alpha.block(bl*n,0,n,1)+eigen_mean).transpose();
128 
129  return mu;
130 }
131 
133  CFeatures* data)
134 {
135  REQUIRE(m_method, "Inference method should not be NULL\n")
136 
137  CFeatures* feat;
138 
139  bool is_sparse=false;
140  CSingleSparseInference* sparse_method=
141  dynamic_cast<CSingleSparseInference *>(m_method);
142  // use inducing features for sparse inference method
143  if (sparse_method)
144  {
145  sparse_method->optimize_inducing_features();
146  feat=sparse_method->get_inducing_features();
147  is_sparse=true;
148  }
149  else
150  feat=m_method->get_features();
151 
152  SG_REF(data);
153 
154  // get kernel and compute kernel matrix: K(data, data)*scale^2
155  CKernel* training_kernel=m_method->get_kernel();
156  CKernel* kernel=CKernel::obtain_from_generic(training_kernel->clone());
157  SG_UNREF(training_kernel);
158  kernel->init(data, data);
159 
160  // get kernel matrix and create eigen representation of it
161  SGVector<float64_t> k_tsts=kernel->get_kernel_diagonal();
162  Map<VectorXd> eigen_Kss_diag(k_tsts.vector, k_tsts.vlen);
163 
164  // compute Kss=Kss*scale^2
165  eigen_Kss_diag*=CMath::sq(m_method->get_scale());
166 
167  // compute kernel matrix: K(feat, data)*scale^2
168  kernel->init(feat, data);
169 
170  // get kernel matrix and create eigen representation of it
171  SGMatrix<float64_t> k_trts=kernel->get_kernel_matrix();
172  Map<MatrixXd> eigen_Ks(k_trts.matrix, k_trts.num_rows, k_trts.num_cols);
173 
174  // compute Ks=Ks*scale^2
175  eigen_Ks*=CMath::sq(m_method->get_scale());
176 
177  // cleanup
178  SG_UNREF(kernel);
179  SG_UNREF(feat);
180  SG_UNREF(data);
181 
182  // get shogun representation of cholesky and create eigen representation
183  SGMatrix<float64_t> L=m_method->get_cholesky();
184  Map<MatrixXd> eigen_L(L.matrix, L.num_rows, L.num_cols);
185 
186  SGVector<float64_t> alpha=m_method->get_alpha();
187  const index_t n=k_trts.num_rows;
188  const index_t m=k_tsts.vlen;
189  const index_t C=alpha.vlen/n;
190  // result variance vector
191  SGVector<float64_t> s2(m*C*C);
192  Map<VectorXd> eigen_s2(s2.vector, s2.vlen);
193 
194  if (eigen_L.isUpperTriangular() && !is_sparse)
195  {
196  if (alpha.vlen==L.num_rows)
197  {
198  //binary case
199  // get shogun of diagonal sigma vector and create eigen representation
200  SGVector<float64_t> sW=m_method->get_diagonal_vector();
201  Map<VectorXd> eigen_sW(sW.vector, sW.vlen);
202  // solve L' * V = sW * Ks and compute V.^2
203  MatrixXd eigen_V=eigen_L.triangularView<Upper>().adjoint().solve(
204  eigen_sW.asDiagonal()*eigen_Ks);
205  MatrixXd eigen_sV=eigen_V.cwiseProduct(eigen_V);
206 
207  eigen_s2=eigen_Kss_diag-eigen_sV.colwise().sum().adjoint();
208  }
209  else
210  {
211  if (m_method->supports_multiclass())
212  {
213  //multiclass case
214  //see the reference code of the gist link, which is based on the algorithm 3.4 of the GPML textbook
215  Map<MatrixXd> &eigen_M=eigen_L;
216  eigen_s2.fill(0);
217 
218  SGMatrix<float64_t> E=m_method->get_multiclass_E();
219  Map<MatrixXd> eigen_E(E.matrix, E.num_rows, E.num_cols);
220  ASSERT(E.num_cols==alpha.vlen);
221  for(index_t bl_i=0; bl_i<C; bl_i++)
222  {
223  //n by m
224  MatrixXd bi=eigen_E.block(0,bl_i*n,n,n)*eigen_Ks;
225  MatrixXd c_cav=eigen_M.triangularView<Upper>().adjoint().solve(bi);
226  c_cav=eigen_M.triangularView<Upper>().solve(c_cav);
227 
228  for(index_t bl_j=0; bl_j<C; bl_j++)
229  {
230  MatrixXd bj=eigen_E.block(0,bl_j*n,n,n)*eigen_Ks;
231  for (index_t idx_m=0; idx_m<m; idx_m++)
232  eigen_s2[bl_j+(bl_i+idx_m*C)*C]=(bj.block(0,idx_m,n,1).array()*c_cav.block(0,idx_m,n,1).array()).sum();
233  }
234  for (index_t idx_m=0; idx_m<m; idx_m++)
235  eigen_s2[bl_i+(bl_i+idx_m*C)*C]+=eigen_Kss_diag(idx_m)-(eigen_Ks.block(0,idx_m,n,1).array()*bi.block(0,idx_m,n,1).array()).sum();
236  }
237  }
238  else
239  {
240  SG_ERROR("Unsupported inference method!\n");
241  return s2;
242  }
243  }
244  }
245  else
246  {
247  // M = Ks .* (L * Ks)
248  MatrixXd eigen_M=eigen_Ks.cwiseProduct(eigen_L*eigen_Ks);
249  eigen_s2=eigen_Kss_diag+eigen_M.colwise().sum().adjoint();
250  }
251 
252  return s2;
253 }
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:98
int32_t index_t
Definition: common.h:62
virtual CSGObject * clone()
Definition: SGObject.cpp:747
SGVector< float64_t > get_posterior_variances(CFeatures *data)
static T sq(T x)
Definition: Math.h:450
Definition: SGMatrix.h:20
#define SG_ERROR(...)
Definition: SGIO.h:129
#define REQUIRE(x,...)
Definition: SGIO.h:206
SGVector< float64_t > get_posterior_means(CFeatures *data)
index_t num_cols
Definition: SGMatrix.h:376
virtual SGVector< float64_t > get_mean_vector(const CFeatures *features) const =0
An abstract class of the mean function.
Definition: MeanFunction.h:49
SGMatrix< float64_t > get_kernel_matrix()
Definition: Kernel.h:220
#define SG_REF(x)
Definition: SGObject.h:54
index_t num_rows
Definition: SGMatrix.h:374
index_t vlen
Definition: SGVector.h:494
#define ASSERT(x)
Definition: SGIO.h:201
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:115
The sparse inference base class for classification and regression for 1-D labels (1D regression and b...
virtual CFeatures * get_inducing_features()
static CKernel * obtain_from_generic(CSGObject *kernel)
Definition: Kernel.cpp:897
#define SG_UNREF(x)
Definition: SGObject.h:55
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
The Inference Method base class.
Definition: Inference.h:81
The class Features is the base class of all feature objects.
Definition: Features.h:68
SGVector< float64_t > get_kernel_diagonal(SGVector< float64_t > preallocated=SGVector< float64_t >())
Definition: Kernel.h:231
The Kernel base class.
Definition: Kernel.h:159
#define SG_ADD(...)
Definition: SGObject.h:84

SHOGUN Machine Learning Toolbox - Documentation