SHOGUN  5.0.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules
MCLDA.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2013 Kevin Hughes
8  * Copyright (C) 2013 Kevin Hughes
9  *
10  * Thanks to Fernando Jose Iglesias Garcia (shogun)
11  * and Matthieu Perrot (scikit-learn)
12  */
13 
14 #include <shogun/lib/common.h>
15 
16 
20 #include <shogun/labels/Labels.h>
23 
25 
26 using namespace shogun;
27 using namespace Eigen;
28 
29 CMCLDA::CMCLDA(float64_t tolerance, bool store_cov)
31 {
32  init();
33  m_tolerance=tolerance;
34  m_store_cov=store_cov;
35 
36 }
37 
38 CMCLDA::CMCLDA(CDenseFeatures<float64_t>* traindat, CLabels* trainlab, float64_t tolerance, bool store_cov)
40 {
41  init();
42 
43  m_tolerance=tolerance;
44  m_store_cov=store_cov;
45 
46  set_features(traindat);
47  set_labels(trainlab);
48 }
49 
// Destructor. NOTE(review): the signature line (CMCLDA::~CMCLDA()) is missing
// from this extract; per the cross-reference index this is the virtual dtor.
{
	// Drop the reference held on the feature object (set in set_features).
	SG_UNREF(m_features);

	// Reset trained state (currently only m_num_classes).
	cleanup();
}
56 
// Register serializable parameters and give every member a safe default.
// Called from every constructor before any user-supplied value is stored.
// NOTE: the SG_ADD registration order is part of the serialization layout;
// do not reorder these calls casually.
void CMCLDA::init()
{
	SG_ADD(&m_tolerance, "m_tolerance", "Tolerance member.", MS_AVAILABLE);
	SG_ADD(&m_store_cov, "m_store_cov", "Store covariance member", MS_NOT_AVAILABLE);
	SG_ADD((CSGObject**) &m_features, "m_features", "Feature object.", MS_NOT_AVAILABLE);
	SG_ADD(&m_means, "m_means", "Mean vectors list", MS_NOT_AVAILABLE);
	SG_ADD(&m_cov, "m_cov", "covariance matrix", MS_NOT_AVAILABLE);
	SG_ADD(&m_xbar, "m_xbar", "total mean", MS_NOT_AVAILABLE);
	SG_ADD(&m_scalings, "m_scalings", "scalings", MS_NOT_AVAILABLE);
	SG_ADD(&m_rank, "m_rank", "rank", MS_NOT_AVAILABLE);
	SG_ADD(&m_coef, "m_coef", "weight vector", MS_NOT_AVAILABLE);
	SG_ADD(&m_intercept, "m_intercept", "intercept", MS_NOT_AVAILABLE);

	// Defaults for the untrained state.
	m_features = NULL;
	m_num_classes=0;
	m_dim=0;
	m_rank=0;
}
75 
// Reset the trained model state. Only the class count is cleared here;
// the SG* containers (m_means, m_cov, ...) release their memory via
// their own reference counting when reassigned in train_machine.
void CMCLDA::cleanup()
{
	m_num_classes = 0;
}
80 
// Classify every vector of the (optionally supplied) feature set.
// NOTE(review): the signature line is missing from this extract; per the
// cross-reference index this is
//   CMulticlassLabels* CMCLDA::apply_multiclass(CFeatures* data)
// Returns NULL when no features are set. Assumes train_machine has already
// populated m_xbar, m_scalings, m_coef, m_intercept and m_rank.
{
	if (data)
	{
		// Only dot-features are supported; SG_ERROR raises on mismatch.
		if (!data->has_property(FP_DOT))
			SG_ERROR("Specified features are not of type CDotFeatures\n")

		set_features((CDotFeatures*) data);
	}

	if (!m_features)
		return NULL;

	int32_t num_vecs = m_features->get_num_vectors();
	ASSERT(num_vecs > 0)
	// The feature dimensionality must match what the model was trained on.
	ASSERT( m_dim == m_features->get_dim_feature_space() );

	// collect features into a matrix
	// NOTE(review): a line is missing from this extract here — `rf` (used
	// below) is presumably declared as
	//   CDenseFeatures<float64_t>* rf = (CDenseFeatures<float64_t>*) m_features;

	MatrixXd X(num_vecs, m_dim);

	int32_t vlen;
	bool vfree;
	float64_t* vec;
	// Map the stored total mean so each sample can be centered in place.
	Map< VectorXd > Em_xbar(m_xbar, m_dim);
	for (int i = 0; i < num_vecs; i++)
	{
		vec = rf->get_feature_vector(i, vlen, vfree);
		ASSERT(vec)

		Map< VectorXd > Evec(vec, vlen);

		// Center each sample by the training-set mean.
		X.row(i) = Evec - Em_xbar;

		rf->free_feature_vector(vec, i, vfree);
	}

#ifdef DEBUG_MCLDA
	SG_PRINT("\n>>> Displaying X ...\n")
	SGMatrix< float64_t >::display_matrix(X.data(), num_vecs, m_dim);
#endif

	// center and scale data: project the centered samples into the
	// discriminant subspace found during training (m_dim -> m_rank dims).
	MatrixXd Xs(num_vecs, m_rank);
	Map< MatrixXd > Em_scalings(m_scalings.matrix, m_dim, m_rank);
	Xs = X*Em_scalings;

#ifdef DEBUG_MCLDA
	SG_PRINT("\n>>> Displaying Xs ...\n")
	SGMatrix< float64_t >::display_matrix(Xs.data(), num_vecs, m_rank);
#endif

	// decision function: one linear score per class,
	// d(i,k) = <Xs.row(i), coef.row(k)> + intercept[k]
	MatrixXd d(num_vecs, m_num_classes);
	Map< MatrixXd > Em_coef(m_coef.matrix, m_num_classes, m_rank);
	Map< VectorXd > Em_intercept(m_intercept.vector, m_num_classes);
	d = (Xs*Em_coef.transpose()).rowwise() + Em_intercept.transpose();

#ifdef DEBUG_MCLDA
	SG_PRINT("\n>>> Displaying d ...\n")
	SGMatrix< float64_t >::display_matrix(d.data(), num_vecs, m_num_classes);
#endif

	// argmax to apply labels: d is column-major, so row i lives at
	// d.data()+i with stride num_vecs and length m_num_classes — exactly
	// the (vec, inc, len) arguments CMath::arg_max expects.
	CMulticlassLabels* out = new CMulticlassLabels(num_vecs);
	for (int i = 0; i < num_vecs; i++)
		out->set_label(i, CMath::arg_max(d.data()+i, num_vecs, m_num_classes));

	return out;
}
152 
// Train the multiclass LDA model (SVD-based solver, following
// scikit-learn's approach; see the file header credits).
// NOTE(review): the signature line is missing from this extract; per the
// cross-reference index this is
//   bool CMCLDA::train_machine(CFeatures* data)
// On success, fills m_means, m_xbar, m_scalings, m_coef, m_intercept,
// m_rank and (optionally) m_cov, then returns true.
{
	// Validation. SG_ERROR raises, so execution only continues with
	// usable labels and dot-features.
	if (!m_labels)
		SG_ERROR("No labels allocated in MCLDA training\n")

	if (data)
	{
		if (!data->has_property(FP_DOT))
			SG_ERROR("Speficied features are not of type CDotFeatures\n")

		set_features((CDotFeatures*) data);
	}

	if (!m_features)
		SG_ERROR("No features allocated in MCLDA training\n")

	SGVector< int32_t > train_labels = ((CMulticlassLabels*) m_labels)->get_int_labels();

	if (!train_labels.vector)
		SG_ERROR("No train_labels allocated in MCLDA training\n")

	// Reset any previously trained state.
	cleanup();

	m_num_classes = ((CMulticlassLabels*) m_labels)->get_num_classes();
	m_dim = m_features->get_dim_feature_space();
	int32_t num_vec = m_features->get_num_vectors();

	if (num_vec != train_labels.vlen)
		SG_ERROR("Dimension mismatch between features and labels in MCLDA training")

	// Bucket sample indices by class: column k of class_idxs (stride
	// num_vec) lists the samples of class k; class_nums[k] counts them.
	int32_t* class_idxs = SG_MALLOC(int32_t, num_vec*m_num_classes);
	int32_t* class_nums = SG_MALLOC(int32_t, m_num_classes); // number of examples of each class
	memset(class_nums, 0, m_num_classes*sizeof(int32_t));

	for (int i = 0; i < train_labels.vlen; i++)
	{
		int32_t class_idx = train_labels.vector[i];

		if (class_idx < 0 || class_idx >= m_num_classes)
		{
			// NOTE(review): SG_ERROR raises, so this return (and the one
			// below) is unreachable; class_idxs/class_nums leak on these
			// paths — verify against SG_ERROR semantics.
			SG_ERROR("found label out of {0, 1, 2, ..., num_classes-1}...")
			return false;
		}
		else
		{
			class_idxs[ class_idx*num_vec + class_nums[class_idx]++ ] = i;
		}
	}

	// Every class must contribute at least one sample.
	for (int i = 0; i < m_num_classes; i++)
	{
		if (class_nums[i] <= 0)
		{
			SG_ERROR("What? One class with no elements\n")
			return false;
		}
	}

	// NOTE(review): a line is missing from this extract here — `rf` (used
	// below) is presumably declared as
	//   CDenseFeatures<float64_t>* rf = (CDenseFeatures<float64_t>*) m_features;

	// if ( m_store_cov )
	// Per-class covariance scratch space (allocated regardless of
	// m_store_cov; only filled when it is set).
	index_t * cov_dims = SG_MALLOC(index_t, 3);
	cov_dims[0] = m_dim;
	cov_dims[1] = m_dim;
	cov_dims[2] = m_num_classes;
	SGNDArray< float64_t > covs(cov_dims, 3);

	m_means = SGMatrix< float64_t >(m_dim, m_num_classes, true);

	// matrix of all samples
	MatrixXd X = MatrixXd::Zero(num_vec, m_dim);
	int32_t iX = 0;

	m_means.zero();
	m_cov.zero();

	int32_t vlen;
	bool vfree;
	float64_t* vec;
	for (int k = 0; k < m_num_classes; k++)
	{
		// gather all the samples for class k into buffer and calculate the mean of class k
		MatrixXd buffer(class_nums[k], m_dim);
		Map< VectorXd > Em_mean(m_means.get_column_vector(k), m_dim);
		for (int i = 0; i < class_nums[k]; i++)
		{
			vec = rf->get_feature_vector(class_idxs[k*num_vec + i], vlen, vfree);
			ASSERT(vec)

			Map< VectorXd > Evec(vec, vlen);
			Em_mean += Evec;
			buffer.row(i) = Evec;

			rf->free_feature_vector(vec, class_idxs[k*num_vec + i], vfree);
		}

		Em_mean /= class_nums[k];

		// subtract the mean of class k from each sample of class k and store the centered data in Xc
		for (int i = 0; i < class_nums[k]; i++)
		{
			buffer.row(i) -= Em_mean;
			X.row(iX) += buffer.row(i);
			iX++;
		}

		if (m_store_cov)
		{
			// calc cov = buffer.T * buffer (scatter matrix of class k)
			Map< MatrixXd > Em_cov_k(covs.get_matrix(k), m_dim, m_dim);
			Em_cov_k = buffer.transpose() * buffer;
		}
	}

#ifdef DEBUG_MCLDA
	SG_PRINT("\n>>> Displaying means ...\n")
	SGMatrix< float64_t >::display_matrix(m_means.matrix, m_dim, m_num_classes);
#endif

	if (m_store_cov)
	{
		// Average the per-class scatter matrices into m_cov.
		m_cov = SGMatrix< float64_t >(m_dim, m_dim, true);
		m_cov.zero();
		Map< MatrixXd > Em_cov(m_cov.matrix, m_dim, m_dim);

		for (int k = 0; k < m_num_classes; k++)
		{
			Map< MatrixXd > Em_cov_k(covs.get_matrix(k), m_dim, m_dim);
			Em_cov += Em_cov_k;
		}

		Em_cov /= m_num_classes;
	}

#ifdef DEBUG_MCLDA
	if (m_store_cov)
	{
		SG_PRINT("\n>>> Displaying cov ...\n")
		SGMatrix< float64_t >::display_matrix(m_cov.matrix, m_dim, m_dim);
	}
#endif

	// 1) within-class (univariate) scaling by the within-class std-dev

	// total mean and per-dimension std-dev of the class-centered data X
	m_xbar = SGVector< float64_t >(m_dim);
	m_xbar.zero();
	Map< VectorXd > Em_xbar(m_xbar.vector, m_dim);
	Em_xbar = X.colwise().sum();
	Em_xbar /= num_vec;

	VectorXd std = VectorXd::Zero(m_dim);
	std = (X.rowwise() - Em_xbar.transpose()).array().pow(2).colwise().sum();
	std = std.array() / num_vec;

	// Guard against division by zero for constant dimensions.
	for (int j = 0; j < m_dim; j++)
		if(std[j] == 0)
			std[j] = 1;

	// Bias-corrected normalization factor (n - n_classes degrees of freedom).
	float64_t fac = 1.0 / (num_vec - m_num_classes);

#ifdef DEBUG_MCLDA
	SG_PRINT("\n>>> Displaying m_xbar ...\n")
	// NOTE(review): the display_vector(m_xbar...) call appears to be
	// missing from this extract.

	SG_PRINT("\n>>> Displaying std ...\n")
	SGVector< float64_t >::display_vector(std.data(), m_dim);
#endif

	// 2) Within variance scaling
	for (int i = 0; i < num_vec; i++)
		X.row(i) = sqrt(fac) * X.row(i).array() / std.transpose().array();


	// SVD of centered (within)scaled data; only the right singular
	// vectors (V) are needed.
	VectorXd S(m_dim);
	MatrixXd V(m_dim, m_dim);

	Eigen::JacobiSVD<MatrixXd> eSvd;
	eSvd.compute(X,Eigen::ComputeFullV);
	memcpy(S.data(), eSvd.singularValues().data(), m_dim*sizeof(float64_t));
	memcpy(V.data(), eSvd.matrixV().data(), m_dim*m_dim*sizeof(float64_t));
	V.transposeInPlace();

	// Numerical rank: count singular values above the tolerance
	// (singularValues() is sorted in decreasing order).
	int rank = 0;
	while (rank < m_dim && S[rank] > m_tolerance)
	{
		rank++;
	}

	// NOTE(review): despite the "Warning" text, SG_ERROR raises and
	// aborts training here — verify this is intended (scikit-learn only
	// warns on collinearity).
	if (rank < m_dim)
		SG_ERROR("Warning: Variables are collinear\n")

	MatrixXd scalings(m_dim, rank);
	for (int i = 0; i < m_dim; i++)
		for (int j = 0; j < rank; j++)
			// NOTE(review): std is indexed by the component index j here,
			// whereas the scikit-learn reference divides each feature
			// dimension by its own std (i.e. std[i]); the two agree only
			// when rank == m_dim and std is uniform — verify.
			scalings(i,j) = V(j,i) / std[j] / S[j];

#ifdef DEBUG_MCLDA
	SG_PRINT("\n>>> Displaying scalings ...\n")
	SGMatrix< float64_t >::display_matrix(scalings.data(), m_dim, rank);
#endif

	// 3) Between variance scaling

	// Xc = m_means dot scalings — class centers projected into the
	// within-scaled space, weighted by class frequency.
	MatrixXd Xc(m_num_classes, rank);
	Map< MatrixXd > Em_means(m_means.matrix, m_dim, m_num_classes);
	Xc = (Em_means.transpose()*scalings);

	for (int i = 0; i < m_num_classes; i++)
		Xc.row(i) *= sqrt(class_nums[i] * fac);

	// Centers are living in a space with n_classes-1 dim (maximum)
	// Use svd to find projection in the space spanned by the
	// (n_classes) centers
	S = VectorXd(rank);
	V = MatrixXd(rank, rank);

	eSvd.compute(Xc,Eigen::ComputeFullV);
	memcpy(S.data(), eSvd.singularValues().data(), rank*sizeof(float64_t));
	memcpy(V.data(), eSvd.matrixV().data(), rank*rank*sizeof(float64_t));

	// Final rank: relative tolerance against the largest singular value.
	m_rank = 0;
	while (m_rank < rank && S[m_rank] > m_tolerance*S[0])
	{
		m_rank++;
	}

	// compose the scalings: full projection = within-scaling followed by
	// the between-class rotation (first m_rank columns of V).
	m_scalings = SGMatrix< float64_t >(rank, m_rank);
	Map< MatrixXd > Em_scalings(m_scalings.matrix, rank, m_rank);
	Em_scalings = scalings * V.leftCols(m_rank);

#ifdef DEBUG_MCLDA
	SG_PRINT("\n>>> Displaying m_scalings ...\n")
	SGMatrix< float64_t >::display_matrix(m_scalings.matrix, rank, m_rank);
#endif

	// weight vectors / centroids: class means centered by the total mean,
	// projected into the discriminant space.
	MatrixXd meansc(m_dim, m_num_classes);
	meansc = Em_means.colwise() - Em_xbar;

	m_coef = SGMatrix< float64_t >(m_num_classes, m_rank);
	Map< MatrixXd > Em_coef(m_coef.matrix, m_num_classes, m_rank);
	Em_coef = meansc.transpose() * Em_scalings;

#ifdef DEBUG_MCLDA
	SG_PRINT("\n>>> Displaying m_coefs ...\n")
	SGMatrix< float64_t >::display_matrix(m_coef.matrix, m_num_classes, m_rank);
#endif

	// intercept: -0.5*||coef_j||^2 + log(prior_j)
	m_intercept = SGVector< float64_t >(m_num_classes);
	m_intercept.zero();
	for (int j = 0; j < m_num_classes; j++)
		// NOTE(review): m_coef[j]*m_coef[j] squares a single flat-indexed
		// element; the scikit-learn reference uses the squared norm of
		// row j (sum over all m_rank coefficients) — verify.
		m_intercept[j] = -0.5*m_coef[j]*m_coef[j] + log(class_nums[j]/float(num_vec));

#ifdef DEBUG_MCLDA
	SG_PRINT("\n>>> Displaying m_intercept ...\n")
	SGVector< float64_t >::display_vector(m_intercept.vector, m_num_classes);
#endif

	SG_FREE(class_idxs);
	SG_FREE(class_nums);

	return true;
}
424 
void display_matrix(const char *name="matrix") const
Definition: SGMatrix.cpp:390
CMCLDA(float64_t tolerance=1e-4, bool store_cov=false)
Definition: MCLDA.cpp:29
static int32_t arg_max(T *vec, int32_t inc, int32_t len, T *maxv_ptr=NULL)
Definition: Math.h:262
ST * get_feature_vector(int32_t num, int32_t &len, bool &dofree)
experimental abstract native multiclass machine class
virtual ~CMCLDA()
Definition: MCLDA.cpp:50
int32_t index_t
Definition: common.h:62
T * data() const
Definition: SGMatrix.h:179
virtual CMulticlassLabels * apply_multiclass(CFeatures *data=NULL)
Definition: MCLDA.cpp:81
virtual bool train_machine(CFeatures *data=NULL)
Definition: MCLDA.cpp:153
The class Labels models labels, i.e. class assignments of objects.
Definition: Labels.h:43
Definition: SGMatrix.h:20
virtual int32_t get_num_vectors() const =0
Definition: basetag.h:132
CLabels * m_labels
Definition: Machine.h:361
#define SG_ERROR(...)
Definition: SGIO.h:129
virtual void set_features(CDotFeatures *feat)
Definition: MCLDA.h:90
Features that support dot products among other operations.
Definition: DotFeatures.h:44
T * get_matrix(index_t matIdx) const
Definition: SGNDArray.h:72
virtual int32_t get_dim_feature_space() const =0
void display_vector(const char *name="vector", const char *prefix="") const
Definition: SGVector.cpp:354
Multiclass Labels for multi-class classification.
index_t vlen
Definition: SGVector.h:494
#define SG_PRINT(...)
Definition: SGIO.h:137
#define ASSERT(x)
Definition: SGIO.h:201
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:115
double float64_t
Definition: common.h:50
T * get_column_vector(index_t col) const
Definition: SGMatrix.h:113
Matrix< float64_t,-1,-1, 0,-1,-1 > MatrixXd
Definition: KLInference.h:52
#define SG_UNREF(x)
Definition: SGObject.h:55
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
void free_feature_vector(ST *feat_vec, int32_t num, bool dofree)
The class Features is the base class of all feature objects.
Definition: Features.h:68
#define SG_ADD(...)
Definition: SGObject.h:84
bool has_property(EFeatureProperty p) const
Definition: Features.cpp:295
virtual void set_labels(CLabels *lab)

SHOGUN Machine Learning Toolbox - Documentation