SHOGUN  5.0.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules
CombinedKernel.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 1999-2008 Gunnar Raetsch
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
12 #include <shogun/lib/common.h>
13 #include <shogun/io/SGIO.h>
14 #include <shogun/lib/Signal.h>
15 #include <shogun/base/Parallel.h>
17 #include <shogun/kernel/Kernel.h>
21 #include <string.h>
24 
25 
26 #ifndef WIN32
27 #include <pthread.h>
28 #endif
29 
30 using namespace shogun;
31 using namespace Eigen;
32 
33 #ifndef DOXYGEN_SHOULD_SKIP_THIS
34 struct S_THREAD_PARAM_COMBINED_KERNEL
35 {
36  CKernel* kernel;
37  float64_t* result;
38  int32_t* vec_idx;
39  int32_t start;
40  int32_t end;
42  float64_t* weights;
43  int32_t* IDX;
44  int32_t num_suppvec;
45 };
46 #endif // DOXYGEN_SHOULD_SKIP_THIS
47 
48 CCombinedKernel::CCombinedKernel(int32_t size, bool asw)
49 : CKernel(size), append_subkernel_weights(asw)
50 {
51  init();
52 
54  SG_INFO("(subkernel weights are appended)\n")
55 
56  SG_INFO("Combined kernel created (%p)\n", this)
57 }
58 
60 {
61  SG_FREE(subkernel_weights_buffer);
63 
64  cleanup();
66 
67  SG_INFO("Combined kernel deleted (%p).\n", this)
68 }
69 
71 {
72  weight_update=true;
75 
76  Map<VectorXd> eigen_wt(wt.vector, wt.vlen);
78 
79  // log_sum_exp trick
80  float64_t max_coeff=eigen_log_wt.maxCoeff();
81  VectorXd tmp = eigen_log_wt.array() - max_coeff;
82  float64_t sum = CMath::log(tmp.array().exp().sum());
83  eigen_wt = tmp.array() - sum;
84  eigen_wt = eigen_wt.array().exp();
86 }
87 
88 bool CCombinedKernel::init(CFeatures* l, CFeatures* r)
89 {
91  {
93  }
94 
95  /* if the specified features are not combined features, but a single other
96  * feature type, assume that the caller wants to use all kernels on these */
97  if (l && r && l->get_feature_class()==r->get_feature_class() &&
100  {
101  SG_DEBUG("Initialising combined kernel's combined features with the "
102  "same instance from parameters\n");
103  /* construct combined features with each element being the parameter */
104  CCombinedFeatures* combined_l=new CCombinedFeatures();
105  CCombinedFeatures* combined_r=new CCombinedFeatures();
106  for (index_t i=0; i<get_num_subkernels(); ++i)
107  {
108  combined_l->append_feature_obj(l);
109  combined_r->append_feature_obj(r);
110  }
111 
112  /* recursive call with constructed combined kernel */
113  return init(combined_l, combined_r);
114  }
115 
116  CKernel::init(l,r);
117  REQUIRE(l->get_feature_class()==C_COMBINED, "%s::init(): LHS features are"
118  " of class %s but need to be combined features!\n",
119  get_name(), l->get_name());
120  REQUIRE(r->get_feature_class()==C_COMBINED, "%s::init(): RHS features are"
121  " of class %s but need to be combined features!\n",
122  get_name(), r->get_name());
125 
126  CFeatures* lf=NULL;
127  CFeatures* rf=NULL;
128  CKernel* k=NULL;
129 
130  bool result=true;
131  index_t f_idx = 0;
132 
133  SG_DEBUG("Starting for loop for kernels\n")
134  for (index_t k_idx=0; k_idx<get_num_kernels() && result; k_idx++)
135  {
136  k = get_kernel(k_idx);
137 
138  if (!k)
139  SG_ERROR("Kernel at position %d is NULL\n", k_idx);
140 
141  // skip over features - the custom kernel does not need any
142  if (k->get_kernel_type() != K_CUSTOM)
143  {
144  lf = ((CCombinedFeatures*) l)->get_feature_obj(f_idx);
145  rf = ((CCombinedFeatures*) r)->get_feature_obj(f_idx);
146  f_idx++;
147  if (!lf || !rf)
148  {
149  SG_UNREF(lf);
150  SG_UNREF(rf);
151  SG_UNREF(k);
152  SG_ERROR("CombinedKernel: Number of features/kernels does not match - bailing out\n")
153  }
154 
155  SG_DEBUG("Initializing 0x%p - \"%s\"\n", this, k->get_name())
156  result=k->init(lf,rf);
157  SG_UNREF(lf);
158  SG_UNREF(rf);
159 
160  if (!result)
161  break;
162  }
163  else
164  {
165  SG_DEBUG("Initializing 0x%p - \"%s\" (skipping init, this is a CUSTOM kernel)\n", this, k->get_name())
166  if (!k->has_features())
167  SG_ERROR("No kernel matrix was assigned to this Custom kernel\n")
168  if (k->get_num_vec_lhs() != num_lhs)
169  SG_ERROR("Number of lhs-feature vectors (%d) not match with number of rows (%d) of custom kernel\n", num_lhs, k->get_num_vec_lhs())
170  if (k->get_num_vec_rhs() != num_rhs)
171  SG_ERROR("Number of rhs-feature vectors (%d) not match with number of cols (%d) of custom kernel\n", num_rhs, k->get_num_vec_rhs())
172  }
173 
174  SG_UNREF(k);
175  }
176 
177  if (!result)
178  {
179  SG_INFO("CombinedKernel: Initialising the following kernel failed\n")
180  if (k)
181  {
182  k->list_kernel();
183  SG_UNREF(k);
184  }
185  else
186  SG_INFO("<NULL>\n")
187  return false;
188  }
189 
190  if ( ((CCombinedFeatures*) l)->get_num_feature_obj()<=0 ||
191  ((CCombinedFeatures*) l)->get_num_feature_obj() != ((CCombinedFeatures*) r)->get_num_feature_obj() )
192  SG_ERROR("CombinedKernel: Number of features/kernels does not match - bailing out\n")
193 
194  init_normalizer();
195  initialized=true;
196  return true;
197 }
198 
200 {
202 
203  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
204  {
205  CKernel* k = get_kernel(k_idx);
206  if (k->get_kernel_type() != K_CUSTOM)
207  k->remove_lhs();
208 
209  SG_UNREF(k);
210  }
212 
213  num_lhs=0;
214 }
215 
217 {
219 
220  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
221  {
222  CKernel* k = get_kernel(k_idx);
223  if (k->get_kernel_type() != K_CUSTOM)
224  k->remove_rhs();
225 
226  SG_UNREF(k);
227  }
229 
230  num_rhs=0;
231 }
232 
234 {
236 
237  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
238  {
239  CKernel* k = get_kernel(k_idx);
240  if (k->get_kernel_type() != K_CUSTOM)
241  k->remove_lhs_and_rhs();
242 
243  SG_UNREF(k);
244  }
245 
247 
248  num_lhs=0;
249  num_rhs=0;
250 }
251 
253 {
254  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
255  {
256  CKernel* k = get_kernel(k_idx);
257  k->cleanup();
258  SG_UNREF(k);
259  }
260 
262 
264 
265  num_lhs=0;
266  num_rhs=0;
267 }
268 
270 {
271  SG_INFO("BEGIN COMBINED KERNEL LIST - ")
272  this->list_kernel();
273 
274  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
275  {
276  CKernel* k = get_kernel(k_idx);
277  k->list_kernel();
278  SG_UNREF(k);
279  }
280  SG_INFO("END COMBINED KERNEL LIST - ")
281 }
282 
283 float64_t CCombinedKernel::compute(int32_t x, int32_t y)
284 {
285  float64_t result=0;
286  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
287  {
288  CKernel* k = get_kernel(k_idx);
289  if (k->get_combined_kernel_weight()!=0)
290  result += k->get_combined_kernel_weight() * k->kernel(x,y);
291  SG_UNREF(k);
292  }
293 
294  return result;
295 }
296 
298  int32_t count, int32_t *IDX, float64_t *weights)
299 {
300  SG_DEBUG("initializing CCombinedKernel optimization\n")
301 
303 
304  bool have_non_optimizable=false;
305 
306  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
307  {
308  CKernel* k = get_kernel(k_idx);
309 
310  bool ret=true;
311 
312  if (k && k->has_property(KP_LINADD))
313  ret=k->init_optimization(count, IDX, weights);
314  else
315  {
316  SG_WARNING("non-optimizable kernel 0x%X in kernel-list\n", k)
317  have_non_optimizable=true;
318  }
319 
320  if (!ret)
321  {
322  have_non_optimizable=true;
323  SG_WARNING("init_optimization of kernel 0x%X failed\n", k)
324  }
325 
326  SG_UNREF(k);
327  }
328 
329  if (have_non_optimizable)
330  {
331  SG_WARNING("some kernels in the kernel-list are not optimized\n")
332 
333  sv_idx=SG_MALLOC(int32_t, count);
334  sv_weight=SG_MALLOC(float64_t, count);
335  sv_count=count;
336  for (int32_t i=0; i<count; i++)
337  {
338  sv_idx[i]=IDX[i];
339  sv_weight[i]=weights[i];
340  }
341  }
342  set_is_initialized(true);
343 
344  return true;
345 }
346 
348 {
349  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
350  {
351  CKernel* k = get_kernel(k_idx);
352  if (k->has_property(KP_LINADD))
353  k->delete_optimization();
354 
355  SG_UNREF(k);
356  }
357 
358  SG_FREE(sv_idx);
359  sv_idx = NULL;
360 
361  SG_FREE(sv_weight);
362  sv_weight = NULL;
363 
364  sv_count = 0;
365  set_is_initialized(false);
366 
367  return true;
368 }
369 
371  int32_t num_vec, int32_t* vec_idx, float64_t* result, int32_t num_suppvec,
372  int32_t* IDX, float64_t* weights, float64_t factor)
373 {
374  ASSERT(num_vec<=get_num_vec_rhs())
375  ASSERT(num_vec>0)
376  ASSERT(vec_idx)
377  ASSERT(result)
378 
379  //we have to do the optimization business ourselves but lets
380  //make sure we start cleanly
382 
383  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
384  {
385  CKernel* k = get_kernel(k_idx);
386  if (k && k->has_property(KP_BATCHEVALUATION))
387  {
388  if (k->get_combined_kernel_weight()!=0)
389  k->compute_batch(num_vec, vec_idx, result, num_suppvec, IDX, weights, k->get_combined_kernel_weight());
390  }
391  else
392  emulate_compute_batch(k, num_vec, vec_idx, result, num_suppvec, IDX, weights);
393 
394  SG_UNREF(k);
395  }
396 
397  //clean up
399 }
400 
402 {
403  S_THREAD_PARAM_COMBINED_KERNEL* params= (S_THREAD_PARAM_COMBINED_KERNEL*) p;
404  int32_t* vec_idx=params->vec_idx;
405  CKernel* k=params->kernel;
406  float64_t* result=params->result;
407 
408  for (int32_t i=params->start; i<params->end; i++)
409  result[i] += k->get_combined_kernel_weight()*k->compute_optimized(vec_idx[i]);
410 
411  return NULL;
412 }
413 
415 {
416  S_THREAD_PARAM_COMBINED_KERNEL* params= (S_THREAD_PARAM_COMBINED_KERNEL*) p;
417  int32_t* vec_idx=params->vec_idx;
418  CKernel* k=params->kernel;
419  float64_t* result=params->result;
420  float64_t* weights=params->weights;
421  int32_t* IDX=params->IDX;
422  int32_t num_suppvec=params->num_suppvec;
423 
424  for (int32_t i=params->start; i<params->end; i++)
425  {
426  float64_t sub_result=0;
427  for (int32_t j=0; j<num_suppvec; j++)
428  sub_result += weights[j] * k->kernel(IDX[j], vec_idx[i]);
429 
430  result[i] += k->get_combined_kernel_weight()*sub_result;
431  }
432 
433  return NULL;
434 }
435 
437  CKernel* k, int32_t num_vec, int32_t* vec_idx, float64_t* result,
438  int32_t num_suppvec, int32_t* IDX, float64_t* weights)
439 {
440  ASSERT(k)
441  ASSERT(result)
442 
443  if (k->has_property(KP_LINADD))
444  {
445  if (k->get_combined_kernel_weight()!=0)
446  {
447  k->init_optimization(num_suppvec, IDX, weights);
448 
449  int32_t num_threads=parallel->get_num_threads();
450  ASSERT(num_threads>0)
451 
452  if (num_threads < 2)
453  {
454  S_THREAD_PARAM_COMBINED_KERNEL params;
455  params.kernel=k;
456  params.result=result;
457  params.start=0;
458  params.end=num_vec;
459  params.vec_idx = vec_idx;
460  compute_optimized_kernel_helper((void*) &params);
461  }
462 #ifdef HAVE_PTHREAD
463  else
464  {
465  pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
466  S_THREAD_PARAM_COMBINED_KERNEL* params = SG_MALLOC(S_THREAD_PARAM_COMBINED_KERNEL, num_threads);
467  int32_t step= num_vec/num_threads;
468 
469  int32_t t;
470 
471  for (t=0; t<num_threads-1; t++)
472  {
473  params[t].kernel = k;
474  params[t].result = result;
475  params[t].start = t*step;
476  params[t].end = (t+1)*step;
477  params[t].vec_idx = vec_idx;
478  pthread_create(&threads[t], NULL, CCombinedKernel::compute_optimized_kernel_helper, (void*)&params[t]);
479  }
480 
481  params[t].kernel = k;
482  params[t].result = result;
483  params[t].start = t*step;
484  params[t].end = num_vec;
485  params[t].vec_idx = vec_idx;
486  compute_optimized_kernel_helper((void*) &params[t]);
487 
488  for (t=0; t<num_threads-1; t++)
489  pthread_join(threads[t], NULL);
490 
491  SG_FREE(params);
492  SG_FREE(threads);
493  }
494 #endif /* HAVE_PTHREAD */
495 
496  k->delete_optimization();
497  }
498  }
499  else
500  {
501  ASSERT(IDX!=NULL || num_suppvec==0)
502  ASSERT(weights!=NULL || num_suppvec==0)
503 
504  if (k->get_combined_kernel_weight()!=0)
505  { // compute the usual way for any non-optimized kernel
506  int32_t num_threads=parallel->get_num_threads();
507  ASSERT(num_threads>0)
508 
509  if (num_threads < 2)
510  {
511  S_THREAD_PARAM_COMBINED_KERNEL params;
512  params.kernel=k;
513  params.result=result;
514  params.start=0;
515  params.end=num_vec;
516  params.vec_idx = vec_idx;
517  params.IDX = IDX;
518  params.weights = weights;
519  params.num_suppvec = num_suppvec;
520  compute_kernel_helper((void*) &params);
521  }
522 #ifdef HAVE_PTHREAD
523  else
524  {
525  pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
526  S_THREAD_PARAM_COMBINED_KERNEL* params = SG_MALLOC(S_THREAD_PARAM_COMBINED_KERNEL, num_threads);
527  int32_t step= num_vec/num_threads;
528 
529  int32_t t;
530 
531  for (t=0; t<num_threads-1; t++)
532  {
533  params[t].kernel = k;
534  params[t].result = result;
535  params[t].start = t*step;
536  params[t].end = (t+1)*step;
537  params[t].vec_idx = vec_idx;
538  params[t].IDX = IDX;
539  params[t].weights = weights;
540  params[t].num_suppvec = num_suppvec;
541  pthread_create(&threads[t], NULL, CCombinedKernel::compute_kernel_helper, (void*)&params[t]);
542  }
543 
544  params[t].kernel = k;
545  params[t].result = result;
546  params[t].start = t*step;
547  params[t].end = num_vec;
548  params[t].vec_idx = vec_idx;
549  params[t].IDX = IDX;
550  params[t].weights = weights;
551  params[t].num_suppvec = num_suppvec;
552  compute_kernel_helper(&params[t]);
553 
554  for (t=0; t<num_threads-1; t++)
555  pthread_join(threads[t], NULL);
556 
557  SG_FREE(params);
558  SG_FREE(threads);
559  }
560 #endif /* HAVE_PTHREAD */
561  }
562  }
563 }
564 
566 {
567  if (!get_is_initialized())
568  {
569  SG_ERROR("CCombinedKernel optimization not initialized\n")
570  return 0;
571  }
572 
573  float64_t result=0;
574 
575  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
576  {
577  CKernel* k = get_kernel(k_idx);
578  if (k->has_property(KP_LINADD) &&
579  k->get_is_initialized())
580  {
581  if (k->get_combined_kernel_weight()!=0)
582  {
583  result +=
585  }
586  }
587  else
588  {
589  ASSERT(sv_idx!=NULL || sv_count==0)
590  ASSERT(sv_weight!=NULL || sv_count==0)
591 
592  if (k->get_combined_kernel_weight()!=0)
593  { // compute the usual way for any non-optimized kernel
594  float64_t sub_result=0;
595  for (int32_t j=0; j<sv_count; j++)
596  sub_result += sv_weight[j] * k->kernel(sv_idx[j], idx);
597 
598  result += k->get_combined_kernel_weight()*sub_result;
599  }
600  }
601 
602  SG_UNREF(k);
603  }
604 
605  return result;
606 }
607 
608 void CCombinedKernel::add_to_normal(int32_t idx, float64_t weight)
609 {
610  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
611  {
612  CKernel* k = get_kernel(k_idx);
613  k->add_to_normal(idx, weight);
614  SG_UNREF(k);
615  }
616  set_is_initialized(true) ;
617 }
618 
620 {
621  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
622  {
623  CKernel* k = get_kernel(k_idx);
624  k->clear_normal() ;
625  SG_UNREF(k);
626  }
627  set_is_initialized(true) ;
628 }
629 
631  int32_t idx, float64_t * subkernel_contrib)
632 {
634  {
635  int32_t i=0 ;
636  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
637  {
638  CKernel* k = get_kernel(k_idx);
639  int32_t num = -1 ;
640  k->get_subkernel_weights(num);
641  if (num>1)
642  k->compute_by_subkernel(idx, &subkernel_contrib[i]) ;
643  else
644  subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;
645 
646  SG_UNREF(k);
647  i += num ;
648  }
649  }
650  else
651  {
652  int32_t i=0 ;
653  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
654  {
655  CKernel* k = get_kernel(k_idx);
656  if (k->get_combined_kernel_weight()!=0)
657  subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;
658 
659  SG_UNREF(k);
660  i++ ;
661  }
662  }
663 }
664 
666 {
667  SG_DEBUG("entering CCombinedKernel::get_subkernel_weights()\n")
668 
669  num_weights = get_num_subkernels() ;
670  SG_FREE(subkernel_weights_buffer);
671  subkernel_weights_buffer = SG_MALLOC(float64_t, num_weights);
672 
674  {
675  SG_DEBUG("appending kernel weights\n")
676 
677  int32_t i=0 ;
678  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
679  {
680  CKernel* k = get_kernel(k_idx);
681  int32_t num = -1 ;
682  const float64_t *w = k->get_subkernel_weights(num);
683  ASSERT(num==k->get_num_subkernels())
684  for (int32_t j=0; j<num; j++)
685  subkernel_weights_buffer[i+j]=w[j] ;
686 
687  SG_UNREF(k);
688  i += num ;
689  }
690  }
691  else
692  {
693  SG_DEBUG("not appending kernel weights\n")
694  int32_t i=0 ;
695  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
696  {
697  CKernel* k = get_kernel(k_idx);
699 
700  SG_UNREF(k);
701  i++ ;
702  }
703  }
704 
705  SG_DEBUG("leaving CCombinedKernel::get_subkernel_weights()\n")
706  return subkernel_weights_buffer ;
707 }
708 
710 {
712  {
715  }
716 
717  int32_t num=0;
718  const float64_t* w=get_subkernel_weights(num);
719 
720  float64_t* weights = SG_MALLOC(float64_t, num);
721  for (int32_t i=0; i<num; i++)
722  weights[i] = w[i];
723 
724 
725  return SGVector<float64_t>(weights, num);
726 }
727 
729 {
731  {
732  int32_t i=0 ;
733  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
734  {
735  CKernel* k = get_kernel(k_idx);
736  int32_t num = k->get_num_subkernels() ;
737  ASSERT(i<weights.vlen)
738  k->set_subkernel_weights(SGVector<float64_t>(&weights.vector[i],num, false));
739 
740  SG_UNREF(k);
741  i += num ;
742  }
743  }
744  else
745  {
746  int32_t i=0 ;
747  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
748  {
749  CKernel* k = get_kernel(k_idx);
750  ASSERT(i<weights.vlen)
751  k->set_combined_kernel_weight(weights.vector[i]);
752 
753  SG_UNREF(k);
754  i++ ;
755  }
756  }
757 }
758 
760 {
761  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
762  {
763  CKernel* k = get_kernel(k_idx);
764  k->set_optimization_type(t);
765 
766  SG_UNREF(k);
767  }
768 
770 }
771 
773 {
774  if (get_num_kernels()==0)
775  return false;
776 
777  CDynamicObjectArray* new_kernel_array = new CDynamicObjectArray();
778 
779  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
780  {
781  CKernel* k = get_kernel(k_idx);
782  new_kernel_array->append_element(new CCustomKernel(k));
783 
784  SG_UNREF(k);
785  }
786 
788  kernel_array=new_kernel_array;
790 
791  return true;
792 }
793 
794 void CCombinedKernel::init()
795 {
796  sv_count=0;
797  sv_idx=NULL;
798  sv_weight=NULL;
800  initialized=false;
801 
805 
806  SG_ADD((CSGObject**) &kernel_array, "kernel_array", "Array of kernels.",
807  MS_AVAILABLE);
808  m_parameters->add_vector(&sv_idx, &sv_count, "sv_idx",
809  "Support vector index.");
810  m_parameters->add_vector(&sv_weight, &sv_count, "sv_weight",
811  "Support vector weights.");
812  SG_ADD(&append_subkernel_weights, "append_subkernel_weights",
813  "If subkernel weights are appended.", MS_AVAILABLE);
814  SG_ADD(&initialized, "initialized", "Whether kernel is ready to be used.",
816 
819  subkernel_log_weights[0] = 0;
820  SG_ADD(&subkernel_log_weights, "subkernel_log_weights",
821  "subkernel weights", MS_AVAILABLE, GRADIENT_AVAILABLE);
822  SG_ADD(&enable_subkernel_weight_opt, "enable_subkernel_weight_opt",
823  "enable subkernel weight opt", MS_NOT_AVAILABLE);
824 
825  weight_update = false;
826  SG_ADD(&weight_update, "weight_update",
827  "weight update", MS_NOT_AVAILABLE);
828 }
829 
831 {
832  weight_update = false;
837  for(index_t idx=0; idx<subkernel_log_weights.vlen; idx++)
838  {
839  ASSERT(subkernel_log_weights[idx]>0);//weight should be positive
840  subkernel_log_weights[idx]=CMath::log(subkernel_log_weights[idx]);//in log domain
841  }
842 }
843 
845  const TParameter* param, index_t index)
846 {
847  SGMatrix<float64_t> result;
848 
849  if (!strcmp(param->m_name, "combined_kernel_weight"))
850  {
852  {
853  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
854  {
855  CKernel* k=get_kernel(k_idx);
856  result=k->get_parameter_gradient(param, index);
857 
858  SG_UNREF(k);
859 
860  if (result.num_cols*result.num_rows>0)
861  return result;
862  }
863  }
864  else
865  {
866  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
867  {
868  CKernel* k=get_kernel(k_idx);
869  result=k->get_kernel_matrix();
870 
871  SG_UNREF(k);
872 
873  return result;
874  }
875  }
876  }
877  else
878  {
879  if (!strcmp(param->m_name, "subkernel_log_weights"))
880  {
882  {
883  ASSERT(index>=0 && index<subkernel_log_weights.vlen);
884  CKernel* k=get_kernel(index);
885  result=k->get_kernel_matrix();
886  SG_UNREF(k);
887  if (weight_update)
888  weight_update = false;
889  float64_t factor = 1.0;
891  // log_sum_exp trick
892  float64_t max_coeff = eigen_log_wt.maxCoeff();
893  VectorXd tmp = eigen_log_wt.array() - max_coeff;
894  float64_t log_sum = CMath::log(tmp.array().exp().sum());
895 
896  factor = subkernel_log_weights[index] - max_coeff - log_sum;
897  factor = CMath::exp(factor) - CMath::exp(factor*2.0);
898 
899  Map<MatrixXd> eigen_res(result.matrix, result.num_rows, result.num_cols);
900  eigen_res = eigen_res * factor;
901  }
902  else
903  {
904  CKernel* k=get_kernel(0);
905  result=k->get_kernel_matrix();
906  SG_UNREF(k);
907  result.zero();
908  }
909  return result;
910  }
911  else
912  {
913  float64_t coeff;
914  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
915  {
916  CKernel* k=get_kernel(k_idx);
917  SGMatrix<float64_t> derivative=
918  k->get_parameter_gradient(param, index);
919 
920  coeff=1.0;
921 
923  coeff=k->get_combined_kernel_weight();
924 
925  for (index_t g=0; g<derivative.num_rows; g++)
926  {
927  for (index_t h=0; h<derivative.num_cols; h++)
928  derivative(g,h)*=coeff;
929  }
930 
931  if (derivative.num_cols*derivative.num_rows>0)
932  {
933  if (result.num_cols==0 && result.num_rows==0)
934  result=derivative;
935  else
936  {
937  for (index_t g=0; g<derivative.num_rows; g++)
938  {
939  for (index_t h=0; h<derivative.num_cols; h++)
940  result(g,h)+=derivative(g,h);
941  }
942  }
943  }
944 
945  SG_UNREF(k);
946  }
947  }
948  }
949 
950  return result;
951 }
952 
954 {
955  if (kernel->get_kernel_type()!=K_COMBINED)
956  {
957  SG_SERROR("CCombinedKernel::obtain_from_generic(): provided kernel is "
958  "not of type CGaussianKernel!\n");
959  }
960 
961  /* since an additional reference is returned */
962  SG_REF(kernel);
963  return (CCombinedKernel*)kernel;
964 }
965 
967 {
968  CList* return_list = new CList(true);
969  SG_REF(return_list);
970 
971  if (!kernel_list)
972  return return_list;
973 
974  if (kernel_list->get_num_elements()==0)
975  return return_list;
976 
977  int32_t num_combinations = 1;
978  int32_t list_index = 0;
979 
980  /* calculation of total combinations */
981  CSGObject* list = kernel_list->get_first_element();
982  while (list)
983  {
984  CList* c_list= dynamic_cast<CList* >(list);
985  if (!c_list)
986  {
987  SG_SERROR("CCombinedKernel::combine_kernels() : Failed to cast list of type "
988  "%s to type CList\n", list->get_name());
989  }
990 
991  if (c_list->get_num_elements()==0)
992  {
993  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position %d "
994  "is empty.\n", list_index);
995  }
996 
997  num_combinations *= c_list->get_num_elements();
998 
999  if (kernel_list->get_delete_data())
1000  SG_UNREF(list);
1001 
1002  list = kernel_list->get_next_element();
1003  ++list_index;
1004  }
1005 
1006  /* creation of CCombinedKernels */
1007  CDynamicObjectArray kernel_array(num_combinations);
1008  for (index_t i=0; i<num_combinations; ++i)
1009  {
1010  CCombinedKernel* c_kernel = new CCombinedKernel();
1011  return_list->append_element(c_kernel);
1012  kernel_array.push_back(c_kernel);
1013  }
1014 
1015  /* first pass */
1016  list = kernel_list->get_first_element();
1017  CList* c_list = dynamic_cast<CList* >(list);
1018 
1019  /* kernel index in the list */
1020  index_t kernel_index = 0;
1021 
1022  /* here we duplicate the first list in the following form
1023  * a,b,c,d, a,b,c,d ...... a,b,c,d ---- for a total of num_combinations elements
1024  */
1025  EKernelType prev_kernel_type = K_UNKNOWN;
1026  bool first_kernel = true;
1027  for (CSGObject* kernel=c_list->get_first_element(); kernel; kernel=c_list->get_next_element())
1028  {
1029  CKernel* c_kernel = dynamic_cast<CKernel* >(kernel);
1030 
1031  if (first_kernel)
1032  first_kernel = false;
1033  else if (c_kernel->get_kernel_type()!=prev_kernel_type)
1034  {
1035  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position "
1036  "0 contains different types of kernels\n");
1037  }
1038 
1039  prev_kernel_type = c_kernel->get_kernel_type();
1040 
1041  for (index_t index=kernel_index; index<num_combinations; index+=c_list->get_num_elements())
1042  {
1043  CCombinedKernel* comb_kernel =
1044  dynamic_cast<CCombinedKernel* >(kernel_array.get_element(index));
1045  comb_kernel->append_kernel(c_kernel);
1046  SG_UNREF(comb_kernel);
1047  }
1048  ++kernel_index;
1049  if (c_list->get_delete_data())
1050  SG_UNREF(kernel);
1051  }
1052 
1053  if (kernel_list->get_delete_data())
1054  SG_UNREF(list);
1055 
1056  /* how often each kernel of the sub-list must appear */
1057  int32_t freq = c_list->get_num_elements();
1058 
1059  /* in this loop we replicate each kernel freq times
1060  * until we assign to all the CombinedKernels a sub-kernel from this list
1061  * That is for num_combinations */
1062  list = kernel_list->get_next_element();
1063  list_index = 1;
1064  while (list)
1065  {
1066  c_list = dynamic_cast<CList* >(list);
1067 
1068  /* index of kernel in the list */
1069  kernel_index = 0;
1070  first_kernel = true;
1071  for (CSGObject* kernel=c_list->get_first_element(); kernel; kernel=c_list->get_next_element())
1072  {
1073  CKernel* c_kernel = dynamic_cast<CKernel* >(kernel);
1074 
1075  if (first_kernel)
1076  first_kernel = false;
1077  else if (c_kernel->get_kernel_type()!=prev_kernel_type)
1078  {
1079  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position "
1080  "%d contains different types of kernels\n", list_index);
1081  }
1082 
1083  prev_kernel_type = c_kernel->get_kernel_type();
1084 
1085  /* moves the index so that we keep filling in, the way we do, until we reach the end of the list of combinedkernels */
1086  for (index_t base=kernel_index*freq; base<num_combinations; base+=c_list->get_num_elements()*freq)
1087  {
1088  /* inserts freq consecutives times the current kernel */
1089  for (index_t index=0; index<freq; ++index)
1090  {
1091  CCombinedKernel* comb_kernel =
1092  dynamic_cast<CCombinedKernel* >(kernel_array.get_element(base+index));
1093  comb_kernel->append_kernel(c_kernel);
1094  SG_UNREF(comb_kernel);
1095  }
1096  }
1097  ++kernel_index;
1098 
1099  if (c_list->get_delete_data())
1100  SG_UNREF(kernel);
1101  }
1102 
1103  freq *= c_list->get_num_elements();
1104  if (kernel_list->get_delete_data())
1105  SG_UNREF(list);
1106  list = kernel_list->get_next_element();
1107  ++list_index;
1108  }
1109 
1110  return return_list;
1111 }
virtual void clear_normal()
Definition: Kernel.cpp:859
virtual const char * get_name() const =0
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:98
virtual void compute_by_subkernel(int32_t idx, float64_t *subkernel_contrib)
#define SG_INFO(...)
Definition: SGIO.h:118
virtual void cleanup()
Definition: Kernel.cpp:173
virtual const char * get_name() const
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
CSGObject * get_next_element()
Definition: List.h:185
SGVector< float64_t > subkernel_log_weights
virtual void compute_by_subkernel(int32_t vector_idx, float64_t *subkernel_contrib)
Definition: Kernel.cpp:869
EKernelType
Definition: Kernel.h:57
virtual void set_subkernel_weights(SGVector< float64_t > weights)
int32_t get_num_threads() const
Definition: Parallel.cpp:78
int32_t index_t
Definition: common.h:62
int32_t num_rhs
number of feature vectors on right hand side
Definition: Kernel.h:1070
The Custom Kernel allows for custom user provided kernel matrices.
Definition: CustomKernel.h:36
virtual bool init(CFeatures *lhs, CFeatures *rhs)
SGMatrix< float64_t > get_parameter_gradient(const TParameter *param, index_t index=-1)
bool append_kernel(CKernel *k)
Definition: SGMatrix.h:20
parameter struct
#define SG_ERROR(...)
Definition: SGIO.h:129
#define REQUIRE(x,...)
Definition: SGIO.h:206
void set_is_initialized(bool p_init)
Definition: Kernel.h:900
virtual bool delete_optimization()
Definition: Kernel.cpp:835
CDynamicObjectArray * kernel_array
Parameter * m_parameters
Definition: SGObject.h:546
index_t num_cols
Definition: SGMatrix.h:376
float64_t kernel(int32_t idx_a, int32_t idx_b)
Definition: Kernel.h:207
virtual void set_optimization_type(EOptimizationType t)
Definition: Kernel.h:748
uint64_t properties
Definition: Kernel.h:1083
bool get_delete_data()
Definition: List.h:575
virtual void set_optimization_type(EOptimizationType t)
Parallel * parallel
Definition: SGObject.h:540
virtual void remove_rhs()
takes all necessary steps if the rhs is removed from kernel
Definition: Kernel.cpp:693
virtual int32_t get_num_vec_lhs()
Definition: Kernel.h:517
SGMatrix< float64_t > get_kernel_matrix()
Definition: Kernel.h:220
#define SG_REF(x)
Definition: SGObject.h:54
index_t num_rows
Definition: SGMatrix.h:374
bool get_is_initialized()
Definition: Kernel.h:754
virtual void remove_lhs_and_rhs()
Definition: Kernel.cpp:660
bool has_property(EKernelProperty p)
Definition: Kernel.h:724
static void * compute_optimized_kernel_helper(void *p)
index_t vlen
Definition: SGVector.h:494
CSGObject * get_first_element()
Definition: List.h:151
virtual void remove_lhs_and_rhs()
virtual SGVector< float64_t > get_subkernel_weights()
#define ASSERT(x)
Definition: SGIO.h:201
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:115
CKernel * get_kernel(int32_t idx)
double float64_t
Definition: common.h:50
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
void set_combined_kernel_weight(float64_t nw)
Definition: Kernel.h:809
virtual float64_t compute(int32_t x, int32_t y)
virtual float64_t compute_optimized(int32_t vector_idx)
Definition: Kernel.cpp:841
void list_kernel()
Definition: Kernel.cpp:708
float64_t get_combined_kernel_weight()
Definition: Kernel.h:803
static CList * combine_kernels(CList *kernel_list)
virtual const float64_t * get_subkernel_weights(int32_t &num_weights)
Definition: Kernel.cpp:875
virtual EFeatureClass get_feature_class() const =0
int32_t get_num_elements()
Definition: List.h:145
The Combined kernel is used to combine a number of kernels into a single CombinedKernel object by linear combination.
static void * compute_kernel_helper(void *p)
Dynamic array class for CSGObject pointers that creates an array that can be used like a list or an a...
int32_t num_lhs
number of feature vectors on left hand side
Definition: Kernel.h:1068
virtual int32_t get_num_vec_rhs()
Definition: Kernel.h:526
virtual void set_subkernel_weights(SGVector< float64_t > weights)
Definition: Kernel.cpp:888
virtual bool init_normalizer()
Definition: Kernel.cpp:168
#define SG_UNREF(x)
Definition: SGObject.h:55
void add_vector(bool **param, index_t *length, const char *name, const char *description="")
Definition: Parameter.cpp:334
#define SG_DEBUG(...)
Definition: SGIO.h:107
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
Definition: Kernel.cpp:847
static CCombinedKernel * obtain_from_generic(CKernel *kernel)
EOptimizationType
Definition: Kernel.h:50
virtual EKernelType get_kernel_type()=0
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
Definition: Kernel.cpp:828
virtual bool delete_optimization()
The class Features is the base class of all feature objects.
Definition: Features.h:68
bool append_element(CSGObject *data)
Definition: List.h:331
#define SG_SERROR(...)
Definition: SGIO.h:179
static float64_t exp(float64_t x)
Definition: Math.h:621
virtual SGMatrix< float64_t > get_parameter_gradient(const TParameter *param, index_t index=-1)
Definition: Kernel.h:851
virtual void add_to_normal(int32_t idx, float64_t weight)
void emulate_compute_batch(CKernel *k, int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *weights)
static float64_t log(float64_t v)
Definition: Math.h:922
virtual void remove_lhs()
Definition: Kernel.cpp:679
virtual int32_t get_num_subkernels()
Definition: Kernel.cpp:864
virtual float64_t compute_optimized(int32_t idx)
static float base
Definition: JLCoverTree.h:85
The Kernel base class.
Definition: Kernel.h:159
CSGObject * get_element(int32_t index) const
float64_t * subkernel_weights_buffer
#define SG_WARNING(...)
Definition: SGIO.h:128
#define SG_ADD(...)
Definition: SGObject.h:84
virtual bool has_features()
Definition: Kernel.h:535
The class CombinedFeatures is used to combine a number of of feature objects into a single CombinedFe...
virtual void add_to_normal(int32_t vector_idx, float64_t weight)
Definition: Kernel.cpp:854
virtual EFeatureType get_feature_type() const =0
Class List implements a doubly connected list for low-level-objects.
Definition: List.h:84
bool append_feature_obj(CFeatures *obj)
CCombinedKernel(int32_t size=10, bool append_subkernel_weights=false)
bool append_element(CSGObject *e)
virtual void init_subkernel_weights()
virtual void enable_subkernel_weight_learning()

SHOGUN Machine Learning Toolbox - Documentation