SHOGUN  6.0.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules
CombinedKernel.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 1999-2008 Gunnar Raetsch
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
12 #include <shogun/lib/common.h>
13 #include <shogun/io/SGIO.h>
14 #include <shogun/lib/Signal.h>
15 #include <shogun/base/Parallel.h>
17 #include <shogun/kernel/Kernel.h>
21 #include <string.h>
24 
25 using namespace shogun;
26 using namespace Eigen;
27 
// Constructor: builds a combined (weighted-sum-of-kernels) kernel with cache
// size `size`; `asw` selects whether per-subkernel weights are "appended"
// (i.e. exposed individually) rather than collapsed to one weight per kernel.
// NOTE(review): this listing drops original line 33 — presumably an
// `if (append_subkernel_weights)` guard before the first SG_INFO; confirm
// against the upstream file before editing.
28 CCombinedKernel::CCombinedKernel(int32_t size, bool asw)
29 : CKernel(size), append_subkernel_weights(asw)
30 {
31  init();
32 
34  SG_INFO("(subkernel weights are appended)\n")
35 
36  SG_INFO("Combined kernel created (%p)\n", this)
37 }
38 
// Destructor (signature line is missing from this listing; this is the body
// of ~CCombinedKernel): frees the lazily allocated subkernel-weight buffer
// and releases all subkernel state via cleanup().
40 {
41  SG_FREE(subkernel_weights_buffer);
43 
44  cleanup();
46 
47  SG_INFO("Combined kernel deleted (%p).\n", this)
48 }
49 
// init_subkernel_weights (signature line missing from this listing):
// converts subkernel_log_weights into normalized linear-domain weights
// (a softmax), using the log-sum-exp trick for numerical stability, and
// marks the weights as updated.
// NOTE(review): the declarations of `wt` and `eigen_log_wt` (original
// lines 53-54/57) are not visible here — verify against upstream.
51 {
52  weight_update=true;
55 
56  Map<VectorXd> eigen_wt(wt.vector, wt.vlen);
58 
59  // log_sum_exp trick: subtract the max before exponentiating so the
59b// intermediate exp() values cannot overflow.
60  float64_t max_coeff=eigen_log_wt.maxCoeff();
61  VectorXd tmp = eigen_log_wt.array() - max_coeff;
62  float64_t sum = CMath::log(tmp.array().exp().sum());
63  eigen_wt = tmp.array() - sum;
64  eigen_wt = eigen_wt.array().exp();
66 }
67 
// Initializes the combined kernel on left/right feature sets. Both sides
// must be (or are wrapped into) CCombinedFeatures with one feature object
// per non-custom subkernel; each subkernel is initialized with its matching
// feature pair, CUSTOM kernels are only size-checked against num_lhs/num_rhs.
// Returns true on success.
// NOTE(review): original lines 70-73, 78-79 and 103-104 are missing from
// this listing (an early guard block and part of the single-feature-type
// condition) — confirm against upstream before editing.
68 bool CCombinedKernel::init(CFeatures* l, CFeatures* r)
69 {
71  {
73  }
74 
75  /* if the specified features are not combined features, but a single other
76  * feature type, assume that the caller wants to use all kernels on these */
77  if (l && r && l->get_feature_class()==r->get_feature_class() &&
80  {
81  SG_DEBUG("Initialising combined kernel's combined features with the "
82  "same instance from parameters\n");
83  /* construct combined features with each element being the parameter */
84  CCombinedFeatures* combined_l=new CCombinedFeatures();
85  CCombinedFeatures* combined_r=new CCombinedFeatures();
86  for (index_t i=0; i<get_num_subkernels(); ++i)
87  {
88  combined_l->append_feature_obj(l);
89  combined_r->append_feature_obj(r);
90  }
91 
92  /* recursive call with constructed combined kernel */
93  return init(combined_l, combined_r);
94  }
95 
96  CKernel::init(l,r);
97  REQUIRE(l->get_feature_class()==C_COMBINED, "%s::init(): LHS features are"
98  " of class %s but need to be combined features!\n",
99  get_name(), l->get_name());
100  REQUIRE(r->get_feature_class()==C_COMBINED, "%s::init(): RHS features are"
101  " of class %s but need to be combined features!\n",
102  get_name(), r->get_name());
105 
106  CFeatures* lf=NULL;
107  CFeatures* rf=NULL;
108  CKernel* k=NULL;
109 
110  bool result=true;
111  index_t f_idx = 0;
112 
113  SG_DEBUG("Starting for loop for kernels\n")
114  for (index_t k_idx=0; k_idx<get_num_kernels() && result; k_idx++)
115  {
116  k = get_kernel(k_idx);
117 
118  if (!k)
119  SG_ERROR("Kernel at position %d is NULL\n", k_idx);
120 
121  // skip over features - the custom kernel does not need any
122  if (k->get_kernel_type() != K_CUSTOM)
123  {
124  lf = ((CCombinedFeatures*) l)->get_feature_obj(f_idx);
125  rf = ((CCombinedFeatures*) r)->get_feature_obj(f_idx);
126  f_idx++;
127  if (!lf || !rf)
128  {
129  SG_UNREF(lf);
130  SG_UNREF(rf);
131  SG_UNREF(k);
132  SG_ERROR("CombinedKernel: Number of features/kernels does not match - bailing out\n")
133  }
134 
135  SG_DEBUG("Initializing 0x%p - \"%s\"\n", this, k->get_name())
136  result=k->init(lf,rf);
137  SG_UNREF(lf);
138  SG_UNREF(rf);
139 
140  if (!result)
141  break;
142  }
143  else
144  {
145  SG_DEBUG("Initializing 0x%p - \"%s\" (skipping init, this is a CUSTOM kernel)\n", this, k->get_name())
146  if (!k->has_features())
147  SG_ERROR("No kernel matrix was assigned to this Custom kernel\n")
148  if (k->get_num_vec_lhs() != num_lhs)
149  SG_ERROR("Number of lhs-feature vectors (%d) not match with number of rows (%d) of custom kernel\n", num_lhs, k->get_num_vec_lhs())
150  if (k->get_num_vec_rhs() != num_rhs)
151  SG_ERROR("Number of rhs-feature vectors (%d) not match with number of cols (%d) of custom kernel\n", num_rhs, k->get_num_vec_rhs())
152  }
153 
154  SG_UNREF(k);
155  }
156 
157  if (!result)
158  {
159  SG_INFO("CombinedKernel: Initialising the following kernel failed\n")
160  if (k)
161  {
162  k->list_kernel();
163  SG_UNREF(k);
164  }
165  else
166  SG_INFO("<NULL>\n")
167  return false;
168  }
169 
170  if ( ((CCombinedFeatures*) l)->get_num_feature_obj()<=0 ||
171  ((CCombinedFeatures*) l)->get_num_feature_obj() != ((CCombinedFeatures*) r)->get_num_feature_obj() )
172  SG_ERROR("CombinedKernel: Number of features/kernels does not match - bailing out\n")
173 
174  init_normalizer();
175  initialized=true;
176  return true;
177 }
178 
// remove_lhs (signature line missing from this listing): detaches the
// left-hand-side features from every non-CUSTOM subkernel and resets the
// lhs vector count. CUSTOM kernels keep their precomputed matrices.
180 {
182 
183  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
184  {
185  CKernel* k = get_kernel(k_idx);
186  if (k->get_kernel_type() != K_CUSTOM)
187  k->remove_lhs();
188 
189  SG_UNREF(k);
190  }
192 
193  num_lhs=0;
194 }
195 
// remove_rhs (signature line missing from this listing): detaches the
// right-hand-side features from every non-CUSTOM subkernel and resets the
// rhs vector count. CUSTOM kernels keep their precomputed matrices.
197 {
199 
200  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
201  {
202  CKernel* k = get_kernel(k_idx);
203  if (k->get_kernel_type() != K_CUSTOM)
204  k->remove_rhs();
205 
206  SG_UNREF(k);
207  }
209 
210  num_rhs=0;
211 }
212 
// remove_lhs_and_rhs (signature line missing from this listing): detaches
// both feature sides from every non-CUSTOM subkernel and resets both vector
// counts.
214 {
216 
217  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
218  {
219  CKernel* k = get_kernel(k_idx);
220  if (k->get_kernel_type() != K_CUSTOM)
221  k->remove_lhs_and_rhs();
222 
223  SG_UNREF(k);
224  }
225 
227 
228  num_lhs=0;
229  num_rhs=0;
230 }
231 
// cleanup (signature line missing from this listing): runs cleanup() on
// every subkernel (including CUSTOM ones) and resets the vector counts.
// NOTE(review): original lines 241-243 are missing here — likely additional
// base-class cleanup calls; confirm against upstream.
233 {
234  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
235  {
236  CKernel* k = get_kernel(k_idx);
237  k->cleanup();
238  SG_UNREF(k);
239  }
240 
242 
244 
245  num_lhs=0;
246  num_rhs=0;
247 }
248 
// list_kernels (signature line missing from this listing): logs this
// combined kernel followed by every subkernel, wrapped in BEGIN/END markers.
250 {
251  SG_INFO("BEGIN COMBINED KERNEL LIST - ")
252  this->list_kernel();
253 
254  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
255  {
256  CKernel* k = get_kernel(k_idx);
257  k->list_kernel();
258  SG_UNREF(k);
259  }
260  SG_INFO("END COMBINED KERNEL LIST - ")
261 }
262 
263 float64_t CCombinedKernel::compute(int32_t x, int32_t y)
264 {
265  float64_t result=0;
266  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
267  {
268  CKernel* k = get_kernel(k_idx);
269  if (k->get_combined_kernel_weight()!=0)
270  result += k->get_combined_kernel_weight() * k->kernel(x,y);
271  SG_UNREF(k);
272  }
273 
274  return result;
275 }
276 
// init_optimization (first signature line missing from this listing):
// prepares LINADD optimization for the given support vectors. Subkernels
// with the KP_LINADD property get their own init_optimization(); if any
// subkernel cannot be optimized, the SV indices/weights are copied so
// compute_optimized() can fall back to explicit kernel sums. Always
// returns true and marks the kernel as initialized.
278  int32_t count, int32_t *IDX, float64_t *weights)
279 {
280  SG_DEBUG("initializing CCombinedKernel optimization\n")
281 
283 
284  bool have_non_optimizable=false;
285 
286  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
287  {
288  CKernel* k = get_kernel(k_idx);
289 
290  bool ret=true;
291 
292  if (k && k->has_property(KP_LINADD))
293  ret=k->init_optimization(count, IDX, weights);
294  else
295  {
296  SG_WARNING("non-optimizable kernel 0x%X in kernel-list\n", k)
297  have_non_optimizable=true;
298  }
299 
300  if (!ret)
301  {
302  have_non_optimizable=true;
303  SG_WARNING("init_optimization of kernel 0x%X failed\n", k)
304  }
305 
306  SG_UNREF(k);
307  }
308 
309  if (have_non_optimizable)
310  {
311  SG_WARNING("some kernels in the kernel-list are not optimized\n")
312 
313  // keep a private copy of the SV set for the non-LINADD fallback path
314  sv_idx=SG_MALLOC(int32_t, count);
314b sv_weight=SG_MALLOC(float64_t, count);
315  sv_count=count;
316  for (int32_t i=0; i<count; i++)
317  {
318  sv_idx[i]=IDX[i];
319  sv_weight[i]=weights[i];
320  }
321  }
322  set_is_initialized(true);
323 
324  return true;
325 }
326 
// delete_optimization (signature line missing from this listing): tears
// down LINADD optimization on every LINADD-capable subkernel, frees the
// fallback SV copies, and clears the initialized flag.
328 {
329  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
330  {
331  CKernel* k = get_kernel(k_idx);
332  if (k->has_property(KP_LINADD))
333  k->delete_optimization();
334 
335  SG_UNREF(k);
336  }
337 
338  SG_FREE(sv_idx);
339  sv_idx = NULL;
340 
341  SG_FREE(sv_weight);
342  sv_weight = NULL;
343 
344  sv_count = 0;
345  set_is_initialized(false);
346 
347  return true;
348 }
349 
// compute_batch (first signature line missing from this listing):
// accumulates, into result[0..num_vec-1], the weighted batch outputs of all
// subkernels for the given rhs indices and support-vector set. Subkernels
// with KP_BATCHEVALUATION use their native compute_batch(); all others go
// through emulate_compute_batch().
// NOTE(review): original lines 361 and 378 are missing — likely
// delete_optimization()/cleanup calls around the loop; confirm upstream.
351  int32_t num_vec, int32_t* vec_idx, float64_t* result, int32_t num_suppvec,
352  int32_t* IDX, float64_t* weights, float64_t factor)
353 {
354  ASSERT(num_vec<=get_num_vec_rhs())
355  ASSERT(num_vec>0)
356  ASSERT(vec_idx)
357  ASSERT(result)
358 
359  //we have to do the optimization business ourselves but lets
360  //make sure we start cleanly
362 
363  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
364  {
365  CKernel* k = get_kernel(k_idx);
366  if (k && k->has_property(KP_BATCHEVALUATION))
367  {
368  if (k->get_combined_kernel_weight()!=0)
369  k->compute_batch(num_vec, vec_idx, result, num_suppvec, IDX, weights, k->get_combined_kernel_weight());
370  }
371  else
372  emulate_compute_batch(k, num_vec, vec_idx, result, num_suppvec, IDX, weights);
373 
374  SG_UNREF(k);
375  }
376 
377  //clean up
379 }
380 
// emulate_compute_batch (first signature line missing from this listing):
// batch-evaluation fallback for a single subkernel `k` without native batch
// support. For LINADD kernels, sets up optimization once and accumulates
// weighted compute_optimized() values; otherwise sums weight[j]*k(IDX[j],v)
// explicitly over all support vectors. Both paths parallelize over the
// output vectors with OpenMP and skip zero-weight kernels entirely.
382  CKernel* k, int32_t num_vec, int32_t* vec_idx, float64_t* result,
383  int32_t num_suppvec, int32_t* IDX, float64_t* weights)
384 {
385  ASSERT(k)
386  ASSERT(result)
387 
388  if (k->has_property(KP_LINADD))
389  {
390  if (k->get_combined_kernel_weight()!=0)
391  {
392  k->init_optimization(num_suppvec, IDX, weights);
393 
394  #pragma omp parallel for
395  for (int32_t i=0; i<num_vec; ++i)
396  result[i] += k->get_combined_kernel_weight()*k->compute_optimized(vec_idx[i]);
397 
398  k->delete_optimization();
399  }
400  }
401  else
402  {
403  ASSERT(IDX!=NULL || num_suppvec==0)
404  ASSERT(weights!=NULL || num_suppvec==0)
405 
406  if (k->get_combined_kernel_weight()!=0)
407  { // compute the usual way for any non-optimized kernel
408  #pragma omp parallel for
409  for (int32_t i=0; i<num_vec; i++)
410  {
411  float64_t sub_result=0;
412  for (int32_t j=0; j<num_suppvec; j++)
413  sub_result += weights[j] * k->kernel(IDX[j], vec_idx[i]);
414 
415  result[i] += k->get_combined_kernel_weight()*sub_result;
416  }
417  }
418  }
419 }
420 
422 {
423  if (!get_is_initialized())
424  {
425  SG_ERROR("CCombinedKernel optimization not initialized\n")
426  return 0;
427  }
428 
429  float64_t result=0;
430 
431  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
432  {
433  CKernel* k = get_kernel(k_idx);
434  if (k->has_property(KP_LINADD) &&
435  k->get_is_initialized())
436  {
437  if (k->get_combined_kernel_weight()!=0)
438  {
439  result +=
441  }
442  }
443  else
444  {
445  ASSERT(sv_idx!=NULL || sv_count==0)
446  ASSERT(sv_weight!=NULL || sv_count==0)
447 
448  if (k->get_combined_kernel_weight()!=0)
449  { // compute the usual way for any non-optimized kernel
450  float64_t sub_result=0;
451  for (int32_t j=0; j<sv_count; j++)
452  sub_result += sv_weight[j] * k->kernel(sv_idx[j], idx);
453 
454  result += k->get_combined_kernel_weight()*sub_result;
455  }
456  }
457 
458  SG_UNREF(k);
459  }
460 
461  return result;
462 }
463 
464 void CCombinedKernel::add_to_normal(int32_t idx, float64_t weight)
465 {
466  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
467  {
468  CKernel* k = get_kernel(k_idx);
469  k->add_to_normal(idx, weight);
470  SG_UNREF(k);
471  }
472  set_is_initialized(true) ;
473 }
474 
// clear_normal (signature line missing from this listing): resets the
// normal vector of every subkernel.
// NOTE(review): setting is_initialized to *true* after clearing looks
// surprising, but it matches the behavior visible here and mirrors
// add_to_normal(); do not "fix" without checking callers.
476 {
477  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
478  {
479  CKernel* k = get_kernel(k_idx);
480  k->clear_normal() ;
481  SG_UNREF(k);
482  }
483  set_is_initialized(true) ;
484 }
485 
// compute_by_subkernel (first signature line missing from this listing):
// writes per-subkernel contributions for vector `idx` into
// subkernel_contrib. The missing condition on original line 489 is
// presumably `if (append_subkernel_weights)`: in that mode multi-weight
// subkernels report each inner contribution separately (advancing `i` by
// their subkernel count); otherwise each kernel contributes exactly one
// weighted compute_optimized() entry.
487  int32_t idx, float64_t * subkernel_contrib)
488 {
490  {
491  int32_t i=0 ;
492  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
493  {
494  CKernel* k = get_kernel(k_idx);
495  int32_t num = -1 ;
496  k->get_subkernel_weights(num);
497  if (num>1)
498  k->compute_by_subkernel(idx, &subkernel_contrib[i]) ;
499  else
500  subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;
501 
502  SG_UNREF(k);
503  i += num ;
504  }
505  }
506  else
507  {
508  int32_t i=0 ;
509  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
510  {
511  CKernel* k = get_kernel(k_idx);
512  if (k->get_combined_kernel_weight()!=0)
513  subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;
514 
515  SG_UNREF(k);
516  i++ ;
517  }
518  }
519 }
520 
// get_subkernel_weights(int32_t& num_weights) (signature line missing from
// this listing): (re)allocates subkernel_weights_buffer and fills it with
// either every inner subkernel weight (append mode) or one combined weight
// per kernel. Returns a pointer owned by this object — callers must not
// free it.
// NOTE(review): the non-append branch's assignment on original line 554
// (presumably `subkernel_weights_buffer[i]=k->get_combined_kernel_weight();`)
// is missing from this listing — confirm against upstream.
522 {
523  SG_DEBUG("entering CCombinedKernel::get_subkernel_weights()\n")
524 
525  num_weights = get_num_subkernels() ;
526  SG_FREE(subkernel_weights_buffer);
527  subkernel_weights_buffer = SG_MALLOC(float64_t, num_weights);
528 
530  {
531  SG_DEBUG("appending kernel weights\n")
532 
533  int32_t i=0 ;
534  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
535  {
536  CKernel* k = get_kernel(k_idx);
537  int32_t num = -1 ;
538  const float64_t *w = k->get_subkernel_weights(num);
539  ASSERT(num==k->get_num_subkernels())
540  for (int32_t j=0; j<num; j++)
541  subkernel_weights_buffer[i+j]=w[j] ;
542 
543  SG_UNREF(k);
544  i += num ;
545  }
546  }
547  else
548  {
549  SG_DEBUG("not appending kernel weights\n")
550  int32_t i=0 ;
551  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
552  {
553  CKernel* k = get_kernel(k_idx);
555 
556  SG_UNREF(k);
557  i++ ;
558  }
559  }
560 
561  SG_DEBUG("leaving CCombinedKernel::get_subkernel_weights()\n")
562  return subkernel_weights_buffer ;
563 }
564 
// get_subkernel_weights() returning SGVector (signature line missing from
// this listing): copies the pointer-based weight buffer into a freshly
// allocated SGVector the caller owns.
// NOTE(review): original lines 567-571 are missing — likely a guard for
// the subkernel-weight-learning mode; confirm against upstream.
566 {
568  {
571  }
572 
573  int32_t num=0;
574  const float64_t* w=get_subkernel_weights(num);
575 
576  float64_t* weights = SG_MALLOC(float64_t, num);
577  for (int32_t i=0; i<num; i++)
578  weights[i] = w[i];
579 
580 
581  return SGVector<float64_t>(weights, num);
582 }
583 
// set_subkernel_weights (signature line missing from this listing):
// distributes `weights` back onto the subkernels. The missing condition on
// original line 586 is presumably `if (append_subkernel_weights)`: in that
// mode each kernel consumes a slice of `weights` equal to its subkernel
// count (passed as a non-owning SGVector view); otherwise each kernel gets
// exactly one combined weight.
585 {
587  {
588  int32_t i=0 ;
589  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
590  {
591  CKernel* k = get_kernel(k_idx);
592  int32_t num = k->get_num_subkernels() ;
593  ASSERT(i<weights.vlen)
594  k->set_subkernel_weights(SGVector<float64_t>(&weights.vector[i],num, false));
595 
596  SG_UNREF(k);
597  i += num ;
598  }
599  }
600  else
601  {
602  int32_t i=0 ;
603  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
604  {
605  CKernel* k = get_kernel(k_idx);
606  ASSERT(i<weights.vlen)
607  k->set_combined_kernel_weight(weights.vector[i]);
608 
609  SG_UNREF(k);
610  i++ ;
611  }
612  }
613 }
614 
// set_optimization_type (signature line missing from this listing):
// propagates the optimization type to every subkernel.
// NOTE(review): original line 625 is missing — presumably the call to
// CKernel::set_optimization_type(t) on this object itself; confirm upstream.
616 {
617  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
618  {
619  CKernel* k = get_kernel(k_idx);
620  k->set_optimization_type(t);
621 
622  SG_UNREF(k);
623  }
624 
626 }
627 
// precompute_subkernels (signature line missing from this listing):
// replaces every subkernel with a CCustomKernel wrapping its precomputed
// kernel matrix. Returns false when there are no subkernels.
// NOTE(review): original lines 643/645 around the array swap are missing —
// likely SG_UNREF(kernel_array)/SG_REF(kernel_array) refcount bookkeeping;
// confirm against upstream.
629 {
630  if (get_num_kernels()==0)
631  return false;
632 
633  CDynamicObjectArray* new_kernel_array = new CDynamicObjectArray();
634 
635  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
636  {
637  CKernel* k = get_kernel(k_idx);
638  new_kernel_array->append_element(new CCustomKernel(k));
639 
640  SG_UNREF(k);
641  }
642 
644  kernel_array=new_kernel_array;
646 
647  return true;
648 }
649 
// Private one-time initializer shared by the constructors: zeroes the
// support-vector fallback state and registers all serializable/model-
// selection parameters with the parameter framework.
// NOTE(review): this listing drops several lines (655, 658-660, 671,
// 673-674) — member initializations and parts of the SG_ADD registrations,
// including the allocation of subkernel_log_weights before element 0 is
// written; confirm against upstream before editing.
650 void CCombinedKernel::init()
651 {
652  sv_count=0;
653  sv_idx=NULL;
654  sv_weight=NULL;
656  initialized=false;
657 
661 
662  SG_ADD((CSGObject**) &kernel_array, "kernel_array", "Array of kernels.",
663  MS_AVAILABLE);
664  m_parameters->add_vector(&sv_idx, &sv_count, "sv_idx",
665  "Support vector index.");
666  m_parameters->add_vector(&sv_weight, &sv_count, "sv_weight",
667  "Support vector weights.");
668  SG_ADD(&append_subkernel_weights, "append_subkernel_weights",
669  "If subkernel weights are appended.", MS_AVAILABLE);
670  SG_ADD(&initialized, "initialized", "Whether kernel is ready to be used.",
672 
675  subkernel_log_weights[0] = 0;
676  SG_ADD(&subkernel_log_weights, "subkernel_log_weights",
677  "subkernel weights", MS_AVAILABLE, GRADIENT_AVAILABLE);
678  SG_ADD(&enable_subkernel_weight_opt, "enable_subkernel_weight_opt",
679  "enable subkernel weight opt", MS_NOT_AVAILABLE);
680 
681  weight_update = false;
682  SG_ADD(&weight_update, "weight_update",
683  "weight update", MS_NOT_AVAILABLE);
684 }
685 
// enable_subkernel_weight_learning (signature line missing from this
// listing): switches the kernel into learnable-subkernel-weight mode by
// converting the current (strictly positive) weights into log domain.
// NOTE(review): original lines 689-692 are missing — presumably setting
// enable_subkernel_weight_opt and fetching the current weights into
// subkernel_log_weights before the loop; confirm against upstream.
687 {
688  weight_update = false;
693  for(index_t idx=0; idx<subkernel_log_weights.vlen; idx++)
694  {
695  ASSERT(subkernel_log_weights[idx]>0);//weight should be positive
696  subkernel_log_weights[idx]=CMath::log(subkernel_log_weights[idx]);//in log domain
697  }
698 }
699 
// get_parameter_gradient (first signature line missing from this listing):
// returns the gradient matrix of the combined kernel w.r.t. `param`.
// Three cases: (a) "combined_kernel_weight" — delegate to subkernels (or
// return a plain kernel matrix, depending on the missing condition at
// original line 707, presumably the weight-learning flag); (b)
// "subkernel_log_weights" — d/d(log w_i) of the softmax-weighted sum for
// kernel `index`, computed via the log-sum-exp trick (zero matrix when the
// missing guard at line 737/738 fails); (c) any other parameter — sum the
// per-subkernel gradients, scaled by each kernel's combined weight when the
// missing condition at line 778 holds.
701  const TParameter* param, index_t index)
702 {
703  SGMatrix<float64_t> result;
704 
705  if (!strcmp(param->m_name, "combined_kernel_weight"))
706  {
708  {
709  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
710  {
711  CKernel* k=get_kernel(k_idx);
712  result=k->get_parameter_gradient(param, index);
713 
714  SG_UNREF(k);
715 
716  if (result.num_cols*result.num_rows>0)
717  return result;
718  }
719  }
720  else
721  {
722  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
723  {
724  CKernel* k=get_kernel(k_idx);
725  result=k->get_kernel_matrix();
726 
727  SG_UNREF(k);
728 
729  return result;
730  }
731  }
732  }
733  else
734  {
735  if (!strcmp(param->m_name, "subkernel_log_weights"))
736  {
738  {
739  ASSERT(index>=0 && index<subkernel_log_weights.vlen);
740  CKernel* k=get_kernel(index);
741  result=k->get_kernel_matrix();
742  SG_UNREF(k);
743  if (weight_update)
744  weight_update = false;
745  float64_t factor = 1.0;
747  // log_sum_exp trick for a numerically stable softmax gradient
748  float64_t max_coeff = eigen_log_wt.maxCoeff();
749  VectorXd tmp = eigen_log_wt.array() - max_coeff;
750  float64_t log_sum = CMath::log(tmp.array().exp().sum());
751 
752  factor = subkernel_log_weights[index] - max_coeff - log_sum;
753  factor = CMath::exp(factor) - CMath::exp(factor*2.0);
754 
755  Map<MatrixXd> eigen_res(result.matrix, result.num_rows, result.num_cols);
756  eigen_res = eigen_res * factor;
757  }
758  else
759  {
760  // gradient not applicable: return an all-zero matrix of the right size
760b CKernel* k=get_kernel(0);
761  result=k->get_kernel_matrix();
762  SG_UNREF(k);
763  result.zero();
764  }
765  return result;
766  }
767  else
768  {
769  float64_t coeff;
770  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
771  {
772  CKernel* k=get_kernel(k_idx);
773  SGMatrix<float64_t> derivative=
774  k->get_parameter_gradient(param, index);
775 
776  coeff=1.0;
777 
779  coeff=k->get_combined_kernel_weight();
780 
781  for (index_t g=0; g<derivative.num_rows; g++)
782  {
783  for (index_t h=0; h<derivative.num_cols; h++)
784  derivative(g,h)*=coeff;
785  }
786 
787  if (derivative.num_cols*derivative.num_rows>0)
788  {
789  if (result.num_cols==0 && result.num_rows==0)
790  result=derivative;
791  else
792  {
793  for (index_t g=0; g<derivative.num_rows; g++)
794  {
795  for (index_t h=0; h<derivative.num_cols; h++)
796  result(g,h)+=derivative(g,h);
797  }
798  }
799  }
800 
801  SG_UNREF(k);
802  }
803  }
804  }
805 
806  return result;
807 }
808 
// obtain_from_generic (signature line missing from this listing): checked
// downcast of a generic CKernel* to CCombinedKernel*. Errors out if the
// kernel type does not match; otherwise returns the kernel with an ADDED
// reference — the caller owns one SG_UNREF.
810 {
811  if (kernel->get_kernel_type()!=K_COMBINED)
812  {
813  SG_SERROR("CCombinedKernel::obtain_from_generic(): provided kernel is "
814  "not of type CCombinedKernel!\n");
815  }
816 
817  /* since an additional reference is returned */
818  SG_REF(kernel);
819  return (CCombinedKernel*)kernel;
820 }
821 
// combine_kernels (static; signature line missing from this listing):
// given a list of kernel lists, builds the Cartesian product — one
// CCombinedKernel per combination, each taking exactly one kernel from
// every sub-list — and returns them in a new CList (caller owns a ref).
// The product is materialized positionally: the first sub-list cycles
// fastest (pattern a,b,c,a,b,c,...), each later sub-list repeats every
// element `freq` times, where `freq` is the product of the sizes of all
// earlier sub-lists. All kernels within one sub-list must share the same
// kernel type.
823 {
824  CList* return_list = new CList(true);
825  SG_REF(return_list);
826 
827  if (!kernel_list)
828  return return_list;
829 
830  if (kernel_list->get_num_elements()==0)
831  return return_list;
832 
833  int32_t num_combinations = 1;
834  int32_t list_index = 0;
835 
836  /* calculation of total combinations */
837  CSGObject* list = kernel_list->get_first_element();
838  while (list)
839  {
840  CList* c_list= dynamic_cast<CList* >(list);
841  if (!c_list)
842  {
843  SG_SERROR("CCombinedKernel::combine_kernels() : Failed to cast list of type "
844  "%s to type CList\n", list->get_name());
845  }
846 
847  if (c_list->get_num_elements()==0)
848  {
849  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position %d "
850  "is empty.\n", list_index);
851  }
852 
853  num_combinations *= c_list->get_num_elements();
854 
855  if (kernel_list->get_delete_data())
856  SG_UNREF(list);
857 
858  list = kernel_list->get_next_element();
859  ++list_index;
860  }
861 
862  /* creation of CCombinedKernels */
863  CDynamicObjectArray kernel_array(num_combinations);
864  for (index_t i=0; i<num_combinations; ++i)
865  {
866  CCombinedKernel* c_kernel = new CCombinedKernel();
867  return_list->append_element(c_kernel);
868  kernel_array.push_back(c_kernel);
869  }
870 
871  /* first pass */
872  list = kernel_list->get_first_element();
873  CList* c_list = dynamic_cast<CList* >(list);
874 
875  /* kernel index in the list */
876  index_t kernel_index = 0;
877 
878  /* here we duplicate the first list in the following form
879  * a,b,c,d, a,b,c,d ...... a,b,c,d ---- for a total of num_combinations elements
880  */
881  EKernelType prev_kernel_type = K_UNKNOWN;
882  bool first_kernel = true;
883  for (CSGObject* kernel=c_list->get_first_element(); kernel; kernel=c_list->get_next_element())
884  {
885  CKernel* c_kernel = dynamic_cast<CKernel* >(kernel);
886 
887  if (first_kernel)
888  first_kernel = false;
889  else if (c_kernel->get_kernel_type()!=prev_kernel_type)
890  {
891  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position "
892  "0 contains different types of kernels\n");
893  }
894 
895  prev_kernel_type = c_kernel->get_kernel_type();
896 
897  for (index_t index=kernel_index; index<num_combinations; index+=c_list->get_num_elements())
898  {
899  CCombinedKernel* comb_kernel =
900  dynamic_cast<CCombinedKernel* >(kernel_array.get_element(index));
901  comb_kernel->append_kernel(c_kernel);
902  SG_UNREF(comb_kernel);
903  }
904  ++kernel_index;
905  if (c_list->get_delete_data())
906  SG_UNREF(kernel);
907  }
908 
909  if (kernel_list->get_delete_data())
910  SG_UNREF(list);
911 
912  /* how often each kernel of the sub-list must appear */
913  int32_t freq = c_list->get_num_elements();
914 
915  /* in this loop we replicate each kernel freq times
916  * until we assign to all the CombinedKernels a sub-kernel from this list
917  * That is for num_combinations */
918  list = kernel_list->get_next_element();
919  list_index = 1;
920  while (list)
921  {
922  c_list = dynamic_cast<CList* >(list);
923 
924  /* index of kernel in the list */
925  kernel_index = 0;
926  first_kernel = true;
927  for (CSGObject* kernel=c_list->get_first_element(); kernel; kernel=c_list->get_next_element())
928  {
929  CKernel* c_kernel = dynamic_cast<CKernel* >(kernel);
930 
931  if (first_kernel)
932  first_kernel = false;
933  else if (c_kernel->get_kernel_type()!=prev_kernel_type)
934  {
935  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position "
936  "%d contains different types of kernels\n", list_index);
937  }
938 
939  prev_kernel_type = c_kernel->get_kernel_type();
940 
941  /* moves the index so that we keep filling in, the way we do, until we reach the end of the list of combinedkernels */
942  for (index_t base=kernel_index*freq; base<num_combinations; base+=c_list->get_num_elements()*freq)
943  {
944  /* inserts freq consecutives times the current kernel */
945  for (index_t index=0; index<freq; ++index)
946  {
947  CCombinedKernel* comb_kernel =
948  dynamic_cast<CCombinedKernel* >(kernel_array.get_element(base+index));
949  comb_kernel->append_kernel(c_kernel);
950  SG_UNREF(comb_kernel);
951  }
952  }
953  ++kernel_index;
954 
955  if (c_list->get_delete_data())
956  SG_UNREF(kernel);
957  }
958 
959  freq *= c_list->get_num_elements();
960  if (kernel_list->get_delete_data())
961  SG_UNREF(list);
962  list = kernel_list->get_next_element();
963  ++list_index;
964  }
965 
966  return return_list;
967 }
virtual void clear_normal()
Definition: Kernel.cpp:834
virtual const char * get_name() const =0
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:96
virtual void compute_by_subkernel(int32_t idx, float64_t *subkernel_contrib)
#define SG_INFO(...)
Definition: SGIO.h:117
virtual void cleanup()
Definition: Kernel.cpp:171
virtual const char * get_name() const
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
CSGObject * get_next_element()
Definition: List.h:185
SGVector< float64_t > subkernel_log_weights
virtual void compute_by_subkernel(int32_t vector_idx, float64_t *subkernel_contrib)
Definition: Kernel.cpp:844
virtual void set_subkernel_weights(SGVector< float64_t > weights)
int32_t index_t
Definition: common.h:72
int32_t num_rhs
number of feature vectors on right hand side
The Custom Kernel allows for custom user provided kernel matrices.
Definition: CustomKernel.h:36
virtual bool init(CFeatures *lhs, CFeatures *rhs)
SGMatrix< float64_t > get_parameter_gradient(const TParameter *param, index_t index=-1)
bool append_kernel(CKernel *k)
Definition: SGMatrix.h:24
parameter struct
#define SG_ERROR(...)
Definition: SGIO.h:128
#define REQUIRE(x,...)
Definition: SGIO.h:205
void set_is_initialized(bool p_init)
virtual bool delete_optimization()
Definition: Kernel.cpp:810
CDynamicObjectArray * kernel_array
Parameter * m_parameters
Definition: SGObject.h:567
index_t num_cols
Definition: SGMatrix.h:465
float64_t kernel(int32_t idx_a, int32_t idx_b)
virtual void set_optimization_type(EOptimizationType t)
bool get_delete_data()
Definition: List.h:575
virtual void set_optimization_type(EOptimizationType t)
virtual void remove_rhs()
takes all necessary steps if the rhs is removed from kernel
Definition: Kernel.cpp:668
virtual int32_t get_num_vec_lhs()
SGMatrix< float64_t > get_kernel_matrix()
#define SG_REF(x)
Definition: SGObject.h:52
index_t num_rows
Definition: SGMatrix.h:463
bool get_is_initialized()
virtual void remove_lhs_and_rhs()
Definition: Kernel.cpp:635
bool has_property(EKernelProperty p)
index_t vlen
Definition: SGVector.h:545
CSGObject * get_first_element()
Definition: List.h:151
virtual void remove_lhs_and_rhs()
virtual SGVector< float64_t > get_subkernel_weights()
#define ASSERT(x)
Definition: SGIO.h:200
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:125
CKernel * get_kernel(int32_t idx)
double float64_t
Definition: common.h:60
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
void set_combined_kernel_weight(float64_t nw)
virtual float64_t compute(int32_t x, int32_t y)
virtual float64_t compute_optimized(int32_t vector_idx)
Definition: Kernel.cpp:816
void list_kernel()
Definition: Kernel.cpp:683
float64_t get_combined_kernel_weight()
static CList * combine_kernels(CList *kernel_list)
virtual const float64_t * get_subkernel_weights(int32_t &num_weights)
Definition: Kernel.cpp:850
virtual EFeatureClass get_feature_class() const =0
int32_t get_num_elements()
Definition: List.h:145
The Combined kernel is used to combine a number of kernels into a single CombinedKernel object by lin...
Dynamic array class for CSGObject pointers that creates an array that can be used like a list or an a...
int32_t num_lhs
number of feature vectors on left hand side
virtual int32_t get_num_vec_rhs()
virtual void set_subkernel_weights(SGVector< float64_t > weights)
Definition: Kernel.cpp:863
virtual bool init_normalizer()
Definition: Kernel.cpp:166
#define SG_UNREF(x)
Definition: SGObject.h:53
void add_vector(bool **param, index_t *length, const char *name, const char *description="")
Definition: Parameter.cpp:335
#define SG_DEBUG(...)
Definition: SGIO.h:106
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
Definition: Kernel.cpp:822
T sum(const Container< T > &a, bool no_diag=false)
static CCombinedKernel * obtain_from_generic(CKernel *kernel)
EOptimizationType
Definition: kernel/Kernel.h:50
virtual EKernelType get_kernel_type()=0
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
Definition: Kernel.cpp:803
virtual bool delete_optimization()
The class Features is the base class of all feature objects.
Definition: Features.h:68
bool append_element(CSGObject *data)
Definition: List.h:331
#define SG_SERROR(...)
Definition: SGIO.h:178
static float64_t exp(float64_t x)
Definition: Math.h:616
virtual SGMatrix< float64_t > get_parameter_gradient(const TParameter *param, index_t index=-1)
virtual void add_to_normal(int32_t idx, float64_t weight)
void emulate_compute_batch(CKernel *k, int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *weights)
static float64_t log(float64_t v)
Definition: Math.h:917
virtual void remove_lhs()
Definition: Kernel.cpp:654
virtual int32_t get_num_subkernels()
Definition: Kernel.cpp:839
virtual float64_t compute_optimized(int32_t idx)
static float base
Definition: JLCoverTree.h:89
The Kernel base class.
CSGObject * get_element(int32_t index) const
float64_t * subkernel_weights_buffer
#define SG_WARNING(...)
Definition: SGIO.h:127
#define SG_ADD(...)
Definition: SGObject.h:94
virtual bool has_features()
The class CombinedFeatures is used to combine a number of of feature objects into a single CombinedFe...
virtual void add_to_normal(int32_t vector_idx, float64_t weight)
Definition: Kernel.cpp:829
virtual EFeatureType get_feature_type() const =0
Class List implements a doubly connected list for low-level-objects.
Definition: List.h:84
bool append_feature_obj(CFeatures *obj)
CCombinedKernel(int32_t size=10, bool append_subkernel_weights=false)
bool append_element(CSGObject *e)
virtual void init_subkernel_weights()
virtual void enable_subkernel_weight_learning()

SHOGUN Machine Learning Toolbox - Documentation