CustomKernel.cpp (SHOGUN 3.2.1)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 1999-2009 Soeren Sonnenburg
 * Written (W) 2012 Heiko Strathmann
 * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
 */

#include <shogun/lib/common.h>
#include <shogun/kernel/CustomKernel.h>
#include <shogun/features/Features.h>
#include <shogun/features/DummyFeatures.h>
#include <shogun/features/IndexFeatures.h>
#include <shogun/io/SGIO.h>
#include <shogun/base/ParameterMap.h>
#include <shogun/mathematics/eigen3.h>

using namespace shogun;

#ifdef HAVE_EIGEN3
using namespace Eigen;
#endif // HAVE_EIGEN3

void CCustomKernel::init()
{
    m_row_subset_stack=new CSubsetStack();
    SG_REF(m_row_subset_stack)
    m_col_subset_stack=new CSubsetStack();
    SG_REF(m_col_subset_stack)
    m_is_symmetric=false;
    m_free_km=true;

    SG_ADD((CSGObject**)&m_row_subset_stack, "row_subset_stack",
            "Subset stack of rows", MS_NOT_AVAILABLE);
    SG_ADD((CSGObject**)&m_col_subset_stack, "col_subset_stack",
            "Subset stack of columns", MS_NOT_AVAILABLE);
    SG_ADD(&m_free_km, "free_km", "Whether kernel matrix should be freed in "
            "destructor", MS_NOT_AVAILABLE);
    SG_ADD(&m_is_symmetric, "is_symmetric", "Whether kernel matrix is symmetric",
            MS_NOT_AVAILABLE);
    SG_ADD(&kmatrix, "kmatrix", "Kernel matrix.", MS_NOT_AVAILABLE);
    SG_ADD(&upper_diagonal, "upper_diagonal", "Upper diagonal", MS_NOT_AVAILABLE);

    /* new parameter from param version 0 to 1 */
    m_parameter_map->put(
            new SGParamInfo("free_km", CT_SCALAR, ST_NONE, PT_BOOL, 1),
            new SGParamInfo()
    );

    /* new parameter from param version 0 to 1 */
    m_parameter_map->put(
            new SGParamInfo("row_subset_stack", CT_SCALAR, ST_NONE, PT_SGOBJECT, 1),
            new SGParamInfo()
    );

    /* new parameter from param version 0 to 1 */
    m_parameter_map->put(
            new SGParamInfo("col_subset_stack", CT_SCALAR, ST_NONE, PT_SGOBJECT, 1),
            new SGParamInfo()
    );
    m_parameter_map->finalize_map();
}

CCustomKernel::CCustomKernel()
: CKernel(10), kmatrix(), upper_diagonal(false)
{
    SG_DEBUG("created CCustomKernel\n")
    init();
}

CCustomKernel::CCustomKernel(CKernel* k)
: CKernel(10)
{
    SG_DEBUG("created CCustomKernel\n")
    init();

    /* if constructed from a custom kernel, use same kernel matrix */
    if (k->get_kernel_type()==K_CUSTOM)
    {
        CCustomKernel* casted=(CCustomKernel*)k;
        /* share the existing matrix, do not copy or free it */
        kmatrix=casted->get_float32_kernel_matrix();
        m_free_km=false;
    }
    else
    {
        /* compute all kernel values and store them */
        set_full_kernel_matrix_from_full(k->get_kernel_matrix());
    }
}

CCustomKernel::CCustomKernel(SGMatrix<float64_t> km)
: CKernel(10), upper_diagonal(false)
{
    SG_DEBUG("Entering\n")
    init();
    set_full_kernel_matrix_from_full(km);
    SG_DEBUG("Leaving\n")
}

CCustomKernel::CCustomKernel(SGMatrix<float32_t> km)
: CKernel(10), upper_diagonal(false)
{
    SG_DEBUG("Entering\n")
    init();
    set_full_kernel_matrix_from_full(km);
    SG_DEBUG("Leaving\n")
}

CCustomKernel::~CCustomKernel()
{
    SG_DEBUG("Entering\n")
    cleanup();
    SG_UNREF(m_row_subset_stack);
    SG_UNREF(m_col_subset_stack);
    SG_DEBUG("Leaving\n")
}

bool CCustomKernel::dummy_init(int32_t rows, int32_t cols)
{
    return init(new CDummyFeatures(rows), new CDummyFeatures(cols));
}
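
/* Illustrative note (not part of the upstream file): dummy_init() lets a
 * precomputed kernel be used without real feature objects, e.g.
 *
 *   CCustomKernel* k=new CCustomKernel(km);    // km: precomputed SGMatrix<float64_t>
 *   k->dummy_init(km.num_rows, km.num_cols);   // attach placeholder CDummyFeatures
 */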

bool CCustomKernel::init(CFeatures* l, CFeatures* r)
{
    /* make it possible to call with NULL values since features are useless
     * for custom kernel matrix */
    if (!l)
        l=lhs;

    if (!r)
        r=rhs;

    /* make sure l and r are not NULL */
    REQUIRE(l, "CFeatures l should not be NULL\n")
    REQUIRE(r, "CFeatures r should not be NULL\n")

    /* make sure l and r have the same type of CFeatures */
    REQUIRE(l->get_feature_class()==r->get_feature_class(),
            "Different FeatureClass: l is %d, r is %d\n",
            l->get_feature_class(), r->get_feature_class())
    REQUIRE(l->get_feature_type()==r->get_feature_type(),
            "Different FeatureType: l is %d, r is %d\n",
            l->get_feature_type(), r->get_feature_type())

    /* If l and r are of type CIndexFeatures, init adds row/col subsets to the
     * kernel matrix, so a subsequent get_kernel_matrix() call returns the
     * corresponding submatrix of the kernel matrix.
     */
    if (l->get_feature_class()==C_INDEX && r->get_feature_class()==C_INDEX)
    {
        CIndexFeatures* l_idx = (CIndexFeatures*)l;
        CIndexFeatures* r_idx = (CIndexFeatures*)r;

        remove_all_col_subsets();
        remove_all_row_subsets();

        add_row_subset(l_idx->get_feature_index());
        add_col_subset(r_idx->get_feature_index());

        lhs_equals_rhs=m_is_symmetric;

        return true;
    }

    /* for other types of CFeatures do the default actions below */
    CKernel::init(l, r);

    lhs_equals_rhs=m_is_symmetric;

    SG_DEBUG("num_vec_lhs: %d vs num_rows %d\n", l->get_num_vectors(), kmatrix.num_rows)
    SG_DEBUG("num_vec_rhs: %d vs num_cols %d\n", r->get_num_vectors(), kmatrix.num_cols)
    ASSERT(l->get_num_vectors()==kmatrix.num_rows)
    ASSERT(r->get_num_vectors()==kmatrix.num_cols)
    return init_normalizer();
}
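
/* Illustrative sketch (not part of the upstream file): with CIndexFeatures,
 * init() above selects a submatrix view, here assuming a precomputed 4x4 matrix:
 *
 *   SGMatrix<float64_t> km(4, 4);               // precomputed kernel values
 *   CCustomKernel* k=new CCustomKernel(km);
 *
 *   SGVector<index_t> ridx(2); ridx[0]=0; ridx[1]=3;
 *   SGVector<index_t> cidx(2); cidx[0]=1; cidx[1]=2;
 *   CIndexFeatures* rfeat=new CIndexFeatures(ridx);
 *   CIndexFeatures* cfeat=new CIndexFeatures(cidx);
 *
 *   k->init(rfeat, cfeat);                      // adds row/col subsets
 *   SGMatrix<float64_t> sub=k->get_kernel_matrix();   // 2x2 submatrix
 *
 *   SG_UNREF(rfeat); SG_UNREF(cfeat); SG_UNREF(k);
 */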

#ifdef HAVE_EIGEN3
float64_t CCustomKernel::sum_symmetric_block(index_t block_begin,
        index_t block_size, bool no_diag)
{
    SG_DEBUG("Entering\n");

    if (m_row_subset_stack->has_subsets() || m_col_subset_stack->has_subsets())
    {
        SG_INFO("Row/col subsets initialized! Falling back to "
                "CKernel::sum_symmetric_block (slower)!\n");
        return CKernel::sum_symmetric_block(block_begin, block_size, no_diag);
    }

    REQUIRE(kmatrix.matrix, "The kernel matrix is not initialized!\n")
    REQUIRE(m_is_symmetric, "The kernel matrix is not symmetric!\n")
    REQUIRE(block_begin>=0 && block_begin<kmatrix.num_cols,
            "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
    REQUIRE(block_begin+block_size<=kmatrix.num_cols,
            "Invalid block size (%d) at starting index (%d, %d)! "
            "Please use smaller blocks!", block_size, block_begin, block_begin)
    REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

    Map<MatrixXf> k_m(kmatrix.matrix, kmatrix.num_rows, kmatrix.num_cols);
    const MatrixXf& k_m_block=k_m.block(block_begin, block_begin, block_size,
            block_size);

    // since the block is symmetric with the main diagonal inside, we can save
    // half the computation by using only the strictly upper triangular part
    const MatrixXf& k_m_upper=k_m_block.triangularView<StrictlyUpper>();
    float64_t sum=k_m_upper.sum();

    // the actual off-diagonal sum is twice what we computed
    sum*=2;

    // add the diagonal elements if required
    if (!no_diag)
        sum+=k_m_block.diagonal().sum();

    SG_DEBUG("Leaving\n");

    return sum;
}
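
/* Worked example for the triangular trick above (illustrative): for a symmetric
 * 3x3 block with entries k_ij, the strictly upper triangular sum is
 * k_01+k_02+k_12, so
 *
 *   sum = 2*(k_01+k_02+k_12)                        if no_diag
 *   sum = 2*(k_01+k_02+k_12) + (k_00+k_11+k_22)     otherwise
 *
 * which equals the full block sum while touching roughly half the entries.
 */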

float64_t CCustomKernel::sum_block(index_t block_begin_row,
        index_t block_begin_col, index_t block_size_row,
        index_t block_size_col, bool no_diag)
{
    SG_DEBUG("Entering\n");

    if (m_row_subset_stack->has_subsets() || m_col_subset_stack->has_subsets())
    {
        SG_INFO("Row/col subsets initialized! Falling back to "
                "CKernel::sum_block (slower)!\n");
        return CKernel::sum_block(block_begin_row, block_begin_col,
                block_size_row, block_size_col, no_diag);
    }

    REQUIRE(kmatrix.matrix, "The kernel matrix is not initialized!\n")
    REQUIRE(block_begin_row>=0 && block_begin_row<kmatrix.num_rows &&
            block_begin_col>=0 && block_begin_col<kmatrix.num_cols,
            "Invalid block begin index (%d, %d)!\n",
            block_begin_row, block_begin_col)
    REQUIRE(block_begin_row+block_size_row<=kmatrix.num_rows &&
            block_begin_col+block_size_col<=kmatrix.num_cols,
            "Invalid block size (%d, %d) at starting index (%d, %d)! "
            "Please use smaller blocks!", block_size_row, block_size_col,
            block_begin_row, block_begin_col)
    REQUIRE(block_size_row>=1 && block_size_col>=1,
            "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)

    // check if removal of the diagonal is required/valid
    if (no_diag && block_size_row!=block_size_col)
    {
        SG_WARNING("Not removing the main diagonal since block is not square!\n");
        no_diag=false;
    }

    Map<MatrixXf> k_m(kmatrix.matrix, kmatrix.num_rows, kmatrix.num_cols);
    const MatrixXf& k_m_block=k_m.block(block_begin_row, block_begin_col,
            block_size_row, block_size_col);

    float64_t sum=k_m_block.sum();

    // remove the diagonal if specified
    if (no_diag)
        sum-=k_m_block.diagonal().sum();

    SG_DEBUG("Leaving\n");

    return sum;
}
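
/* Note (illustrative): for a rectangular block, e.g. 2x3, the result is simply
 * the sum of all six entries; a no_diag request is ignored (with the warning
 * above), since removing the main diagonal is only well defined for square
 * blocks.
 */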

SGVector<float64_t> CCustomKernel::row_wise_sum_symmetric_block(index_t
        block_begin, index_t block_size, bool no_diag)
{
    SG_DEBUG("Entering\n");

    if (m_row_subset_stack->has_subsets() || m_col_subset_stack->has_subsets())
    {
        SG_INFO("Row/col subsets initialized! Falling back to "
                "CKernel::row_wise_sum_symmetric_block (slower)!\n");
        return CKernel::row_wise_sum_symmetric_block(block_begin, block_size,
                no_diag);
    }

    REQUIRE(kmatrix.matrix, "The kernel matrix is not initialized!\n")
    REQUIRE(m_is_symmetric, "The kernel matrix is not symmetric!\n")
    REQUIRE(block_begin>=0 && block_begin<kmatrix.num_cols,
            "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
    REQUIRE(block_begin+block_size<=kmatrix.num_cols,
            "Invalid block size (%d) at starting index (%d, %d)! "
            "Please use smaller blocks!", block_size, block_begin, block_begin)
    REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

    // initialize the vector that accumulates the row/col-wise sum on the go
    SGVector<float64_t> row_sum(block_size);
    row_sum.set_const(0.0);

    Map<MatrixXf> k_m(kmatrix.matrix, kmatrix.num_rows, kmatrix.num_cols);
    const MatrixXf& k_m_block=k_m.block(block_begin, block_begin, block_size,
            block_size);

    // compute the row/col-wise sum
    for (index_t i=0; i<block_size; ++i)
        row_sum[i]=k_m_block.col(i).sum();

    // remove the diagonal elements if specified
    if (no_diag)
    {
        const VectorXf& diag=k_m_block.diagonal();
        for (index_t i=0; i<block_size; ++i)
            row_sum[i]-=diag[i];
    }

    SG_DEBUG("Leaving\n");

    return row_sum;
}

SGMatrix<float64_t> CCustomKernel::row_wise_sum_squared_sum_symmetric_block(
        index_t block_begin, index_t block_size, bool no_diag)
{
    SG_DEBUG("Entering\n");

    if (m_row_subset_stack->has_subsets() || m_col_subset_stack->has_subsets())
    {
        SG_INFO("Row/col subsets initialized! Falling back to "
                "CKernel::row_wise_sum_squared_sum_symmetric_block (slower)!\n");
        return CKernel::row_wise_sum_squared_sum_symmetric_block(block_begin,
                block_size, no_diag);
    }

    REQUIRE(kmatrix.matrix, "The kernel matrix is not initialized!\n")
    REQUIRE(m_is_symmetric, "The kernel matrix is not symmetric!\n")
    REQUIRE(block_begin>=0 && block_begin<kmatrix.num_cols,
            "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
    REQUIRE(block_begin+block_size<=kmatrix.num_cols,
            "Invalid block size (%d) at starting index (%d, %d)! "
            "Please use smaller blocks!", block_size, block_begin, block_begin)
    REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

    // initialize the matrix that accumulates the row/col-wise sums on the go:
    // the first column stores the sum of kernel values,
    // the second column stores the sum of squared kernel values
    SGMatrix<float64_t> row_sum(block_size, 2);
    row_sum.set_const(0.0);

    Map<MatrixXf> k_m(kmatrix.matrix, kmatrix.num_rows, kmatrix.num_cols);
    const MatrixXf& k_m_block=k_m.block(block_begin, block_begin, block_size,
            block_size);

    // compute the row/col-wise sum - includes diagonal entries
    for (index_t i=0; i<block_size; ++i)
        row_sum(i, 0)=k_m_block.col(i).sum();

    // compute the row/col-wise sum of squared kernel values;
    // since the block is symmetric with the main diagonal inside, we can save
    // half the computation by using only the upper triangular part
    for (index_t i=0; i<block_size; ++i)
    {
        // use the kernel values on the upper triangular part of the kernel
        // matrix and compute the row-wise sum and squared sum on the fly;
        // this does not include diagonal entries
        for (index_t j=i+1; j<block_size; ++j)
        {
            float64_t k=k_m_block(i, j);
            row_sum(i, 1)+=k*k;
            row_sum(j, 1)+=k*k;
        }
    }

    // add/remove the diagonal elements as specified
    const VectorXf& diag=k_m_block.diagonal();
    if (no_diag)
    {
        for (index_t i=0; i<block_size; ++i)
            row_sum(i, 0)-=diag[i];
    }
    else
    {
        for (index_t i=0; i<block_size; ++i)
            row_sum(i, 1)+=diag[i]*diag[i];
    }

    SG_DEBUG("Leaving\n");

    return row_sum;
}
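
/* Layout note (illustrative): for block_size=3 the returned SGMatrix is 3x2,
 * where row_sum(i, 0) holds sum_j k(i, j) and row_sum(i, 1) holds
 * sum_j k(i, j)^2 for the i-th row of the block, with diagonal terms included
 * or excluded according to no_diag as handled above.
 */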

SGVector<float64_t> CCustomKernel::row_col_wise_sum_block(index_t
        block_begin_row, index_t block_begin_col, index_t block_size_row,
        index_t block_size_col, bool no_diag)
{
    SG_DEBUG("Entering\n");

    if (m_row_subset_stack->has_subsets() || m_col_subset_stack->has_subsets())
    {
        SG_INFO("Row/col subsets initialized! Falling back to "
                "CKernel::row_col_wise_sum_block (slower)!\n");
        return CKernel::row_col_wise_sum_block(block_begin_row, block_begin_col,
                block_size_row, block_size_col, no_diag);
    }

    REQUIRE(kmatrix.matrix, "The kernel matrix is not initialized!\n")
    REQUIRE(block_begin_row>=0 && block_begin_row<kmatrix.num_rows &&
            block_begin_col>=0 && block_begin_col<kmatrix.num_cols,
            "Invalid block begin index (%d, %d)!\n",
            block_begin_row, block_begin_col)
    REQUIRE(block_begin_row+block_size_row<=kmatrix.num_rows &&
            block_begin_col+block_size_col<=kmatrix.num_cols,
            "Invalid block size (%d, %d) at starting index (%d, %d)! "
            "Please use smaller blocks!", block_size_row, block_size_col,
            block_begin_row, block_begin_col)
    REQUIRE(block_size_row>=1 && block_size_col>=1,
            "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)

    // check if removal of the diagonal is required/valid
    if (no_diag && block_size_row!=block_size_col)
    {
        SG_WARNING("Not removing the main diagonal since block is not square!\n");
        no_diag=false;
    }

    // initialize the vector that accumulates the row/col-wise sums on the go:
    // the first block_size_row entries store the row-wise sums of kernel values,
    // the next block_size_col entries store the col-wise sums of kernel values
    SGVector<float64_t> sum(block_size_row+block_size_col);
    sum.set_const(0.0);

    Map<MatrixXf> k_m(kmatrix.matrix, kmatrix.num_rows, kmatrix.num_cols);
    const MatrixXf& k_m_block=k_m.block(block_begin_row, block_begin_col,
            block_size_row, block_size_col);

    // compute the row-wise sums
    for (index_t i=0; i<block_size_row; ++i)
        sum[i]=k_m_block.row(i).sum();

    // compute the col-wise sums
    for (index_t i=0; i<block_size_col; ++i)
        sum[i+block_size_row]=k_m_block.col(i).sum();

    // remove diagonal entries if specified
    if (no_diag)
    {
        const VectorXf& diag=k_m_block.diagonal();
        for (index_t i=0; i<diag.rows(); ++i)
        {
            sum[i]-=diag[i];
            sum[i+block_size_row]-=diag[i];
        }
    }

    SG_DEBUG("Leaving\n");

    return sum;
}
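
/* Layout note (illustrative): for a 2x3 block the returned vector has 2+3=5
 * entries; sum[0..1] are the row-wise sums and sum[2..4] are the column-wise
 * sums of the block.
 */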
#endif // HAVE_EIGEN3

void CCustomKernel::cleanup_custom()
{
    SG_DEBUG("Entering\n")
    remove_all_row_subsets();
    remove_all_col_subsets();

    kmatrix=SGMatrix<float32_t>();
    upper_diagonal=false;

    SG_DEBUG("Leaving\n")
}

void CCustomKernel::cleanup()
{
    cleanup_custom();
    CKernel::cleanup();
}

void CCustomKernel::add_row_subset(SGVector<index_t> subset)
{
    m_row_subset_stack->add_subset(subset);
    row_subset_changed_post();
}

void CCustomKernel::add_row_subset_in_place(SGVector<index_t> subset)
{
    m_row_subset_stack->add_subset_in_place(subset);
    row_subset_changed_post();
}

void CCustomKernel::remove_row_subset()
{
    m_row_subset_stack->remove_subset();
    row_subset_changed_post();
}

void CCustomKernel::remove_all_row_subsets()
{
    m_row_subset_stack->remove_all_subsets();
    row_subset_changed_post();
}

void CCustomKernel::row_subset_changed_post()
{
    if (m_row_subset_stack->has_subsets())
        num_lhs=m_row_subset_stack->get_size();
    else
        num_lhs=kmatrix.num_rows;
}

void CCustomKernel::add_col_subset(SGVector<index_t> subset)
{
    m_col_subset_stack->add_subset(subset);
    col_subset_changed_post();
}

void CCustomKernel::add_col_subset_in_place(SGVector<index_t> subset)
{
    m_col_subset_stack->add_subset_in_place(subset);
    col_subset_changed_post();
}

void CCustomKernel::remove_col_subset()
{
    m_col_subset_stack->remove_subset();
    col_subset_changed_post();
}

void CCustomKernel::remove_all_col_subsets()
{
    m_col_subset_stack->remove_all_subsets();
    col_subset_changed_post();
}

void CCustomKernel::col_subset_changed_post()
{
    if (m_col_subset_stack->has_subsets())
        num_rhs=m_col_subset_stack->get_size();
    else
        num_rhs=kmatrix.num_cols;
}
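
/* Usage sketch (illustrative, not part of the upstream file): the subset API
 * above selects rows/columns of the wrapped matrix directly; the values below
 * are made up for the example.
 *
 *   SGMatrix<float64_t> km(3, 3);
 *   for (index_t i=0; i<3; ++i)
 *       for (index_t j=0; j<3; ++j)
 *           km(i, j)=i+j;
 *
 *   CCustomKernel* kernel=new CCustomKernel(km);
 *
 *   SGVector<index_t> rows(2);
 *   rows[0]=0; rows[1]=2;
 *   kernel->add_row_subset(rows);           // num_lhs now reflects the subset
 *   SGMatrix<float64_t> sub=kernel->get_kernel_matrix();   // 2x3 submatrix
 *   kernel->remove_row_subset();
 *
 *   SG_UNREF(kernel);
 */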
