SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
CombinedKernel.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 1999-2008 Gunnar Raetsch
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
12 #include <shogun/lib/common.h>
13 #include <shogun/io/SGIO.h>
14 #include <shogun/lib/Signal.h>
15 #include <shogun/base/Parallel.h>
17 #include <shogun/kernel/Kernel.h>
21 #include <string.h>
22 
23 #ifndef WIN32
24 #include <pthread.h>
25 #endif
26 
27 using namespace shogun;
28 
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/** Parameter bundle handed to the pthread worker helpers
 * (compute_optimized_kernel_helper / compute_kernel_helper). Each thread
 * accumulates into result[start..end). */
struct S_THREAD_PARAM_COMBINED_KERNEL
{
	CKernel* kernel;     ///< subkernel to evaluate
	float64_t* result;   ///< shared output array (disjoint [start,end) per thread)
	int32_t* vec_idx;    ///< indices of the vectors to evaluate
	int32_t start;       ///< first result index handled by this thread (inclusive)
	int32_t end;         ///< one past the last result index (exclusive)
	float64_t* weights;  ///< SV weights; only read by compute_kernel_helper
	int32_t* IDX;        ///< SV indices; only read by compute_kernel_helper
	int32_t num_suppvec; ///< number of support vectors
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
43 
/** Constructor.
 * @param size cache size forwarded to CKernel
 * @param asw whether subkernel weights are appended (stored in
 *            append_subkernel_weights and queried throughout this class)
 */
CCombinedKernel::CCombinedKernel(int32_t size, bool asw)
: CKernel(size), append_subkernel_weights(asw)
{
	init();

	// NOTE(review): upstream guards this message with
	// "if (append_subkernel_weights)" -- that guard line appears to have been
	// lost in extraction; confirm against the original source.
	SG_INFO("(subkernel weights are appended)\n")

	SG_INFO("Combined kernel created (%p)\n", this)
}
54 
// Destructor (~CCombinedKernel) -- the signature line was lost in extraction.
// Frees the cached subkernel-weight buffer and releases all subkernels via
// cleanup().
{
	SG_FREE(subkernel_weights_buffer);

	cleanup();

	SG_INFO("Combined kernel deleted (%p).\n", this)
}
65 
/** Initialize the combined kernel on left/right feature sets.
 *
 * If l/r are plain (non-combined) features of matching class, they are
 * wrapped into CCombinedFeatures with one entry per subkernel and init() is
 * called recursively. Otherwise each subkernel is initialized on the
 * corresponding feature pair; CUSTOM kernels are skipped (they carry their
 * own precomputed matrix) but their dimensions are validated.
 *
 * @param l left-hand features (must be/become C_COMBINED)
 * @param r right-hand features (must be/become C_COMBINED)
 * @return true on success
 */
bool CCombinedKernel::init(CFeatures* l, CFeatures* r)
{
	/* if the specified features are not combined features, but a single other
	 * feature type, assume that the caller wants to use all kernels on these */
	// NOTE(review): the continuation of this condition (feature-type equality
	// and "!= C_COMBINED" check) was lost in extraction -- the visible text is
	// truncated; confirm against the original source.
	if (l && r && l->get_feature_class()==r->get_feature_class() &&
	{
		SG_DEBUG("Initialising combined kernel's combined features with the "
				"same instance from parameters\n");
		/* construct combined features with each element being the parameter */
		CCombinedFeatures* combined_l=new CCombinedFeatures();
		CCombinedFeatures* combined_r=new CCombinedFeatures();
		for (index_t i=0; i<get_num_subkernels(); ++i)
		{
			combined_l->append_feature_obj(l);
			combined_r->append_feature_obj(r);
		}

		/* recursive call with constructed combined kernel */
		return init(combined_l, combined_r);
	}

	CKernel::init(l,r);
	REQUIRE(l->get_feature_class()==C_COMBINED, "%s::init(): LHS features are"
			" of class %s but need to be combined features!\n",
			get_name(), l->get_name());
	REQUIRE(r->get_feature_class()==C_COMBINED, "%s::init(): RHS features are"
			" of class %s but need to be combined features!\n",
			get_name(), r->get_name());

	CFeatures* lf=NULL;
	CFeatures* rf=NULL;
	CKernel* k=NULL;

	bool result=true;
	// f_idx tracks position in the feature lists; it only advances for
	// non-CUSTOM kernels, so kernels and features stay aligned.
	index_t f_idx = 0;

	SG_DEBUG("Starting for loop for kernels\n")
	for (index_t k_idx=0; k_idx<get_num_kernels() && result; k_idx++)
	{
		k = get_kernel(k_idx);

		if (!k)
			SG_ERROR("Kernel at position %d is NULL\n", k_idx);

		// skip over features - the custom kernel does not need any
		if (k->get_kernel_type() != K_CUSTOM)
		{
			lf = ((CCombinedFeatures*) l)->get_feature_obj(f_idx);
			rf = ((CCombinedFeatures*) r)->get_feature_obj(f_idx);
			f_idx++;
			if (!lf || !rf)
			{
				// release references before bailing out
				SG_UNREF(lf);
				SG_UNREF(rf);
				SG_UNREF(k);
				SG_ERROR("CombinedKernel: Number of features/kernels does not match - bailing out\n")
			}

			SG_DEBUG("Initializing 0x%p - \"%s\"\n", this, k->get_name())
			result=k->init(lf,rf);
			SG_UNREF(lf);
			SG_UNREF(rf);

			if (!result)
				break;
		}
		else
		{
			SG_DEBUG("Initializing 0x%p - \"%s\" (skipping init, this is a CUSTOM kernel)\n", this, k->get_name())
			if (!k->has_features())
				SG_ERROR("No kernel matrix was assigned to this Custom kernel\n")
			// the precomputed matrix must match the combined feature counts
			if (k->get_num_vec_lhs() != num_lhs)
				SG_ERROR("Number of lhs-feature vectors (%d) not match with number of rows (%d) of custom kernel\n", num_lhs, k->get_num_vec_lhs())
			if (k->get_num_vec_rhs() != num_rhs)
				SG_ERROR("Number of rhs-feature vectors (%d) not match with number of cols (%d) of custom kernel\n", num_rhs, k->get_num_vec_rhs())
		}

		SG_UNREF(k);
	}

	if (!result)
	{
		SG_INFO("CombinedKernel: Initialising the following kernel failed\n")
		if (k)
		{
			k->list_kernel();
			SG_UNREF(k);
		}
		else
			SG_INFO("<NULL>\n")
		return false;
	}

	// every feature object must have been consumed by exactly one subkernel
	if ( (f_idx!=((CCombinedFeatures*) l)->get_num_feature_obj()) ||
			(f_idx!=((CCombinedFeatures*) r)->get_num_feature_obj()) )
		SG_ERROR("CombinedKernel: Number of features/kernels does not match - bailing out\n")

	init_normalizer();
	initialized=true;
	return true;
}
171 
// remove_lhs() -- signature line lost in extraction.
// Detaches left-hand features from all non-CUSTOM subkernels and resets
// num_lhs. CUSTOM kernels keep their precomputed matrix.
{

	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		if (k->get_kernel_type() != K_CUSTOM)
			k->remove_lhs();

		SG_UNREF(k);
	}

	num_lhs=0;
}
188 
// remove_rhs() -- signature line lost in extraction.
// Detaches right-hand features from all non-CUSTOM subkernels and resets
// num_rhs.
{

	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		if (k->get_kernel_type() != K_CUSTOM)
			k->remove_rhs();

		SG_UNREF(k);
	}

	num_rhs=0;
}
205 
// remove_lhs_and_rhs() -- signature line lost in extraction.
// Detaches both feature sides from all non-CUSTOM subkernels and resets the
// combined kernel's vector counts.
{

	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		if (k->get_kernel_type() != K_CUSTOM)
			k->remove_lhs_and_rhs();

		SG_UNREF(k);
	}

	num_lhs=0;
	num_rhs=0;
}
224 
// cleanup() -- signature line lost in extraction.
// Calls cleanup() on every subkernel (CUSTOM ones included) and resets the
// combined kernel's vector counts.
{
	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		k->cleanup();
		SG_UNREF(k);
	}

	num_lhs=0;
	num_rhs=0;
}
241 
// list_kernels() -- signature line lost in extraction.
// Logs this kernel's own description followed by each subkernel's.
{
	SG_INFO("BEGIN COMBINED KERNEL LIST - ")
	this->list_kernel();

	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		k->list_kernel();
		SG_UNREF(k);
	}
	SG_INFO("END COMBINED KERNEL LIST - ")
}
255 
256 float64_t CCombinedKernel::compute(int32_t x, int32_t y)
257 {
258  float64_t result=0;
259  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
260  {
261  CKernel* k = get_kernel(k_idx);
262  if (k->get_combined_kernel_weight()!=0)
263  result += k->get_combined_kernel_weight() * k->kernel(x,y);
264  SG_UNREF(k);
265  }
266 
267  return result;
268 }
269 
// init_optimization(count, IDX, weights) -- first signature line lost in
// extraction. Initializes LINADD optimization on every capable subkernel;
// if any subkernel is not optimizable (or its init fails), the SV
// indices/weights are copied into sv_idx/sv_weight so compute_optimized()
// can fall back to the direct computation for those kernels.
	int32_t count, int32_t *IDX, float64_t *weights)
{
	SG_DEBUG("initializing CCombinedKernel optimization\n")


	bool have_non_optimizable=false;

	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);

		bool ret=true;

		if (k && k->has_property(KP_LINADD))
			ret=k->init_optimization(count, IDX, weights);
		else
		{
			SG_WARNING("non-optimizable kernel 0x%X in kernel-list\n", k)
			have_non_optimizable=true;
		}

		if (!ret)
		{
			have_non_optimizable=true;
			SG_WARNING("init_optimization of kernel 0x%X failed\n", k)
		}

		SG_UNREF(k);
	}

	if (have_non_optimizable)
	{
		SG_WARNING("some kernels in the kernel-list are not optimized\n")

		// keep a private copy of the SV set for the non-optimized fallback
		sv_idx=SG_MALLOC(int32_t, count);
		sv_weight=SG_MALLOC(float64_t, count);
		sv_count=count;
		for (int32_t i=0; i<count; i++)
		{
			sv_idx[i]=IDX[i];
			sv_weight[i]=weights[i];
		}
	}
	set_is_initialized(true);

	return true;
}
319 
// delete_optimization() -- signature line lost in extraction.
// Tears down LINADD optimization on all capable subkernels and frees the
// fallback SV copies made by init_optimization().
{
	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		if (k->has_property(KP_LINADD))
			k->delete_optimization();

		SG_UNREF(k);
	}

	SG_FREE(sv_idx);
	sv_idx = NULL;

	SG_FREE(sv_weight);
	sv_weight = NULL;

	sv_count = 0;
	set_is_initialized(false);

	return true;
}
342 
// compute_batch(...) -- first signature line lost in extraction, as were the
// pre/post cleanup statements (original lines around the loop). Evaluates
// all subkernels over a batch of vectors: BATCHEVALUATION-capable kernels
// use their own compute_batch (scaled by their combined weight); others go
// through emulate_compute_batch().
	int32_t num_vec, int32_t* vec_idx, float64_t* result, int32_t num_suppvec,
	int32_t* IDX, float64_t* weights, float64_t factor)
{
	ASSERT(num_vec<=get_num_vec_rhs())
	ASSERT(num_vec>0)
	ASSERT(vec_idx)
	ASSERT(result)

	//we have to do the optimization business ourselves but lets
	//make sure we start cleanly

	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		if (k && k->has_property(KP_BATCHEVALUATION))
		{
			// zero-weight kernels contribute nothing; skip the batch call
			if (k->get_combined_kernel_weight()!=0)
				k->compute_batch(num_vec, vec_idx, result, num_suppvec, IDX, weights, k->get_combined_kernel_weight());
		}
		else
			emulate_compute_batch(k, num_vec, vec_idx, result, num_suppvec, IDX, weights);

		SG_UNREF(k);
	}

	//clean up
}
373 
// compute_optimized_kernel_helper(void* p) -- signature line lost in
// extraction. pthread entry point: accumulates the weighted
// compute_optimized() values of one subkernel into result[start..end).
{
	S_THREAD_PARAM_COMBINED_KERNEL* params= (S_THREAD_PARAM_COMBINED_KERNEL*) p;
	int32_t* vec_idx=params->vec_idx;
	CKernel* k=params->kernel;
	float64_t* result=params->result;

	for (int32_t i=params->start; i<params->end; i++)
		result[i] += k->get_combined_kernel_weight()*k->compute_optimized(vec_idx[i]);

	return NULL;
}
386 
// compute_kernel_helper(void* p) -- signature line lost in extraction.
// pthread entry point for non-LINADD subkernels: computes the weighted SV
// expansion sum_j weights[j]*k(IDX[j], vec_idx[i]) and accumulates it into
// result[start..end).
{
	S_THREAD_PARAM_COMBINED_KERNEL* params= (S_THREAD_PARAM_COMBINED_KERNEL*) p;
	int32_t* vec_idx=params->vec_idx;
	CKernel* k=params->kernel;
	float64_t* result=params->result;
	float64_t* weights=params->weights;
	int32_t* IDX=params->IDX;
	int32_t num_suppvec=params->num_suppvec;

	for (int32_t i=params->start; i<params->end; i++)
	{
		float64_t sub_result=0;
		for (int32_t j=0; j<num_suppvec; j++)
			sub_result += weights[j] * k->kernel(IDX[j], vec_idx[i]);

		result[i] += k->get_combined_kernel_weight()*sub_result;
	}

	return NULL;
}
408 
// emulate_compute_batch(...) -- first signature line lost in extraction.
// Batch evaluation for a single subkernel that lacks native batch support:
// either via its LINADD optimization (compute_optimized per vector) or via
// the plain SV expansion, parallelized over vectors with pthreads when
// parallel->get_num_threads() > 1.
	CKernel* k, int32_t num_vec, int32_t* vec_idx, float64_t* result,
	int32_t num_suppvec, int32_t* IDX, float64_t* weights)
{
	ASSERT(k)
	ASSERT(result)

	if (k->has_property(KP_LINADD))
	{
		if (k->get_combined_kernel_weight()!=0)
		{
			k->init_optimization(num_suppvec, IDX, weights);

			int32_t num_threads=parallel->get_num_threads();
			ASSERT(num_threads>0)

			if (num_threads < 2)
			{
				// single-threaded: run the helper inline
				S_THREAD_PARAM_COMBINED_KERNEL params;
				params.kernel=k;
				params.result=result;
				params.start=0;
				params.end=num_vec;
				params.vec_idx = vec_idx;
				compute_optimized_kernel_helper((void*) &params);
			}
#ifdef HAVE_PTHREAD
			else
			{
				pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
				S_THREAD_PARAM_COMBINED_KERNEL* params = SG_MALLOC(S_THREAD_PARAM_COMBINED_KERNEL, num_threads);
				int32_t step= num_vec/num_threads;

				int32_t t;

				// spawn num_threads-1 workers over equal-size chunks
				for (t=0; t<num_threads-1; t++)
				{
					params[t].kernel = k;
					params[t].result = result;
					params[t].start = t*step;
					params[t].end = (t+1)*step;
					params[t].vec_idx = vec_idx;
					pthread_create(&threads[t], NULL, CCombinedKernel::compute_optimized_kernel_helper, (void*)&params[t]);
				}

				// this thread handles the last chunk (including remainder)
				params[t].kernel = k;
				params[t].result = result;
				params[t].start = t*step;
				params[t].end = num_vec;
				params[t].vec_idx = vec_idx;
				compute_optimized_kernel_helper((void*) &params[t]);

				for (t=0; t<num_threads-1; t++)
					pthread_join(threads[t], NULL);

				SG_FREE(params);
				SG_FREE(threads);
			}
#endif /* HAVE_PTHREAD */

			k->delete_optimization();
		}
	}
	else
	{
		ASSERT(IDX!=NULL || num_suppvec==0)
		ASSERT(weights!=NULL || num_suppvec==0)

		if (k->get_combined_kernel_weight()!=0)
		{ // compute the usual way for any non-optimized kernel
			int32_t num_threads=parallel->get_num_threads();
			ASSERT(num_threads>0)

			if (num_threads < 2)
			{
				S_THREAD_PARAM_COMBINED_KERNEL params;
				params.kernel=k;
				params.result=result;
				params.start=0;
				params.end=num_vec;
				params.vec_idx = vec_idx;
				params.IDX = IDX;
				params.weights = weights;
				params.num_suppvec = num_suppvec;
				compute_kernel_helper((void*) &params);
			}
#ifdef HAVE_PTHREAD
			else
			{
				pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
				S_THREAD_PARAM_COMBINED_KERNEL* params = SG_MALLOC(S_THREAD_PARAM_COMBINED_KERNEL, num_threads);
				int32_t step= num_vec/num_threads;

				int32_t t;

				for (t=0; t<num_threads-1; t++)
				{
					params[t].kernel = k;
					params[t].result = result;
					params[t].start = t*step;
					params[t].end = (t+1)*step;
					params[t].vec_idx = vec_idx;
					params[t].IDX = IDX;
					params[t].weights = weights;
					params[t].num_suppvec = num_suppvec;
					pthread_create(&threads[t], NULL, CCombinedKernel::compute_kernel_helper, (void*)&params[t]);
				}

				params[t].kernel = k;
				params[t].result = result;
				params[t].start = t*step;
				params[t].end = num_vec;
				params[t].vec_idx = vec_idx;
				params[t].IDX = IDX;
				params[t].weights = weights;
				params[t].num_suppvec = num_suppvec;
				compute_kernel_helper(&params[t]);

				for (t=0; t<num_threads-1; t++)
					pthread_join(threads[t], NULL);

				SG_FREE(params);
				SG_FREE(threads);
			}
#endif /* HAVE_PTHREAD */
		}
	}
}
537 
// compute_optimized(idx) -- signature line lost in extraction, as was the
// right-hand side of the LINADD accumulation (original line 557, presumably
// "k->get_combined_kernel_weight()*k->compute_optimized(idx);" -- confirm
// against the original source). Evaluates the optimized kernel expansion at
// one vector, mixing per-subkernel LINADD results with the direct SV-sum
// fallback stored in sv_idx/sv_weight.
{
	if (!get_is_initialized())
	{
		SG_ERROR("CCombinedKernel optimization not initialized\n")
		return 0;
	}

	float64_t result=0;

	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		if (k->has_property(KP_LINADD) &&
			k->get_is_initialized())
		{
			if (k->get_combined_kernel_weight()!=0)
			{
				result +=
			}
		}
		else
		{
			ASSERT(sv_idx!=NULL || sv_count==0)
			ASSERT(sv_weight!=NULL || sv_count==0)

			if (k->get_combined_kernel_weight()!=0)
			{ // compute the usual way for any non-optimized kernel
				float64_t sub_result=0;
				for (int32_t j=0; j<sv_count; j++)
					sub_result += sv_weight[j] * k->kernel(sv_idx[j], idx);

				result += k->get_combined_kernel_weight()*sub_result;
			}
		}

		SG_UNREF(k);
	}

	return result;
}
580 
581 void CCombinedKernel::add_to_normal(int32_t idx, float64_t weight)
582 {
583  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
584  {
585  CKernel* k = get_kernel(k_idx);
586  k->add_to_normal(idx, weight);
587  SG_UNREF(k);
588  }
589  set_is_initialized(true) ;
590 }
591 
// clear_normal() -- signature line lost in extraction.
// Clears every subkernel's normal vector. NOTE(review): it then calls
// set_is_initialized(true), mirroring add_to_normal(); confirm this is the
// intended flag value upstream.
{
	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		k->clear_normal() ;
		SG_UNREF(k);
	}
	set_is_initialized(true) ;
}
602 
// compute_by_subkernel(idx, subkernel_contrib) -- first signature line and
// the branch condition (presumably "if (append_subkernel_weights)") were
// lost in extraction. Fills subkernel_contrib with each (sub)kernel's
// contribution at vector idx; with appended weights the output index
// advances by each kernel's subkernel count, otherwise one slot per kernel.
	int32_t idx, float64_t * subkernel_contrib)
{
	{
		int32_t i=0 ;
		for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
		{
			CKernel* k = get_kernel(k_idx);
			int32_t num = -1 ;
			k->get_subkernel_weights(num);
			if (num>1)
				k->compute_by_subkernel(idx, &subkernel_contrib[i]) ;
			else
				subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;

			SG_UNREF(k);
			i += num ;
		}
	}
	else
	{
		int32_t i=0 ;
		for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
		{
			CKernel* k = get_kernel(k_idx);
			if (k->get_combined_kernel_weight()!=0)
				subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;

			SG_UNREF(k);
			i++ ;
		}
	}
}
637 
639 {
640  SG_DEBUG("entering CCombinedKernel::get_subkernel_weights()\n")
641 
642  num_weights = get_num_subkernels() ;
643  SG_FREE(subkernel_weights_buffer);
644  subkernel_weights_buffer = SG_MALLOC(float64_t, num_weights);
645 
647  {
648  SG_DEBUG("appending kernel weights\n")
649 
650  int32_t i=0 ;
651  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
652  {
653  CKernel* k = get_kernel(k_idx);
654  int32_t num = -1 ;
655  const float64_t *w = k->get_subkernel_weights(num);
656  ASSERT(num==k->get_num_subkernels())
657  for (int32_t j=0; j<num; j++)
658  subkernel_weights_buffer[i+j]=w[j] ;
659 
660  SG_UNREF(k);
661  i += num ;
662  }
663  }
664  else
665  {
666  SG_DEBUG("not appending kernel weights\n")
667  int32_t i=0 ;
668  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
669  {
670  CKernel* k = get_kernel(k_idx);
672 
673  SG_UNREF(k);
674  i++ ;
675  }
676  }
677 
678  SG_DEBUG("leaving CCombinedKernel::get_subkernel_weights()\n")
679  return subkernel_weights_buffer ;
680 }
681 
// get_subkernel_weights() (SGVector overload) -- signature line lost in
// extraction. Copies the raw weight buffer into a freshly allocated
// SGVector so the caller owns independent storage.
{
	int32_t num=0;
	const float64_t* w=get_subkernel_weights(num);

	float64_t* weights = SG_MALLOC(float64_t, num);
	for (int32_t i=0; i<num; i++)
		weights[i] = w[i];

	return SGVector<float64_t>(weights, num);
}
693 
// set_subkernel_weights(weights) -- signature line and the branch condition
// (presumably "if (append_subkernel_weights)") were lost in extraction.
// Distributes the flat weight vector back onto the subkernels: either as
// per-subkernel weight slices, or as one combined weight per kernel.
{
	{
		int32_t i=0 ;
		for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
		{
			CKernel* k = get_kernel(k_idx);
			int32_t num = k->get_num_subkernels() ;
			ASSERT(i<weights.vlen)
			// non-owning view into the caller's vector slice
			k->set_subkernel_weights(SGVector<float64_t>(&weights.vector[i],num, false));

			SG_UNREF(k);
			i += num ;
		}
	}
	else
	{
		int32_t i=0 ;
		for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
		{
			CKernel* k = get_kernel(k_idx);
			ASSERT(i<weights.vlen)
			k->set_combined_kernel_weight(weights.vector[i]);

			SG_UNREF(k);
			i++ ;
		}
	}
}
724 
// set_optimization_type(t) -- signature line lost in extraction, as was the
// trailing call (presumably "CKernel::set_optimization_type(t);").
// Propagates the optimization type to every subkernel.
{
	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		k->set_optimization_type(t);

		SG_UNREF(k);
	}

}
737 
// precompute_subkernels() -- signature line lost in extraction, as were the
// ref-count statements around the kernel_array swap. Replaces every
// subkernel with a precomputed CCustomKernel built from its current kernel
// matrix; returns false when there are no subkernels.
{
	if (get_num_kernels()==0)
		return false;

	CDynamicObjectArray* new_kernel_array = new CDynamicObjectArray();

	for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
	{
		CKernel* k = get_kernel(k_idx);
		new_kernel_array->append_element(new CCustomKernel(k));

		SG_UNREF(k);
	}

	kernel_array=new_kernel_array;

	return true;
}
759 
// Default member initialization + parameter registration, called from the
// constructor. NOTE(review): several lines were lost in extraction here
// (likely the kernel_array allocation, subkernel_weights_buffer init, and
// the trailing MS_NOT_AVAILABLE argument of the last SG_ADD) -- confirm
// against the original source.
void CCombinedKernel::init()
{
	sv_count=0;
	sv_idx=NULL;
	sv_weight=NULL;
	initialized=false;


	SG_ADD((CSGObject**) &kernel_array, "kernel_array", "Array of kernels.",
	    MS_AVAILABLE);
	m_parameters->add_vector(&sv_idx, &sv_count, "sv_idx",
		  "Support vector index.");
	m_parameters->add_vector(&sv_weight, &sv_count, "sv_weight",
		  "Support vector weights.");
	SG_ADD(&append_subkernel_weights, "append_subkernel_weights",
	    "If subkernel weights are appended.", MS_AVAILABLE);
	SG_ADD(&initialized, "initialized", "Whether kernel is ready to be used.",
}
783 
// get_parameter_gradient(param, index) -- first signature line and two
// branch conditions were lost in extraction (the inner conditions likely
// distinguish "weight belongs to a subkernel" vs. "weight of this combined
// kernel", and whether to scale by the combined weight -- confirm upstream).
// Returns the gradient matrix of the kernel w.r.t. the given parameter,
// summing scaled subkernel gradients for parameters not named
// "combined_kernel_weight".
	const TParameter* param, index_t index)
{
	SGMatrix<float64_t> result;

	if (!strcmp(param->m_name, "combined_kernel_weight"))
	{
		{
			// delegate to the first subkernel that yields a non-empty gradient
			for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
			{
				CKernel* k=get_kernel(k_idx);
				result=k->get_parameter_gradient(param, index);

				SG_UNREF(k);

				if (result.num_cols*result.num_rows>0)
					return result;
			}
		}
		else
		{
			// gradient w.r.t. the combined weight itself is the kernel matrix
			for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
			{
				CKernel* k=get_kernel(k_idx);
				result=k->get_kernel_matrix();

				SG_UNREF(k);

				return result;
			}
		}
	}
	else
	{
		float64_t coeff;
		for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
		{
			CKernel* k=get_kernel(k_idx);
			SGMatrix<float64_t> derivative=
				k->get_parameter_gradient(param, index);

			coeff=1.0;

			coeff=k->get_combined_kernel_weight();

			// scale this subkernel's gradient by its combined weight
			for (index_t g=0; g<derivative.num_rows; g++)
			{
				for (index_t h=0; h<derivative.num_cols; h++)
					derivative(g,h)*=coeff;
			}

			if (derivative.num_cols*derivative.num_rows>0)
			{
				// accumulate: first non-empty gradient seeds the result
				if (result.num_cols==0 && result.num_rows==0)
					result=derivative;
				else
				{
					for (index_t g=0; g<derivative.num_rows; g++)
					{
						for (index_t h=0; h<derivative.num_cols; h++)
							result(g,h)+=derivative(g,h);
					}
				}
			}

			SG_UNREF(k);
		}
	}

	return result;
}
857 
859 {
860  if (kernel->get_kernel_type()!=K_COMBINED)
861  {
862  SG_SERROR("CCombinedKernel::obtain_from_generic(): provided kernel is "
863  "not of type CGaussianKernel!\n");
864  }
865 
866  /* since an additional reference is returned */
867  SG_REF(kernel);
868  return (CCombinedKernel*)kernel;
869 }
870 
872 {
873  CList* return_list = new CList(true);
874  SG_REF(return_list);
875 
876  if (!kernel_list)
877  return return_list;
878 
879  if (kernel_list->get_num_elements()==0)
880  return return_list;
881 
882  int32_t num_combinations = 1;
883  int32_t list_index = 0;
884 
885  /* calculation of total combinations */
886  CSGObject* list = kernel_list->get_first_element();
887  while (list)
888  {
889  CList* c_list= dynamic_cast<CList* >(list);
890  if (!c_list)
891  {
892  SG_SERROR("CCombinedKernel::combine_kernels() : Failed to cast list of type "
893  "%s to type CList\n", list->get_name());
894  }
895 
896  if (c_list->get_num_elements()==0)
897  {
898  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position %d "
899  "is empty.\n", list_index);
900  }
901 
902  num_combinations *= c_list->get_num_elements();
903 
904  if (kernel_list->get_delete_data())
905  SG_UNREF(list);
906 
907  list = kernel_list->get_next_element();
908  ++list_index;
909  }
910 
911  /* creation of CCombinedKernels */
912  CDynamicObjectArray kernel_array(num_combinations);
913  for (index_t i=0; i<num_combinations; ++i)
914  {
915  CCombinedKernel* c_kernel = new CCombinedKernel();
916  return_list->append_element(c_kernel);
917  kernel_array.push_back(c_kernel);
918  }
919 
920  /* first pass */
921  list = kernel_list->get_first_element();
922  CList* c_list = dynamic_cast<CList* >(list);
923 
924  /* kernel index in the list */
925  index_t kernel_index = 0;
926 
927  /* here we duplicate the first list in the following form
928  * a,b,c,d, a,b,c,d ...... a,b,c,d ---- for a total of num_combinations elements
929  */
930  EKernelType prev_kernel_type = K_UNKNOWN;
931  bool first_kernel = true;
932  for (CSGObject* kernel=c_list->get_first_element(); kernel; kernel=c_list->get_next_element())
933  {
934  CKernel* c_kernel = dynamic_cast<CKernel* >(kernel);
935 
936  if (first_kernel)
937  first_kernel = false;
938  else if (c_kernel->get_kernel_type()!=prev_kernel_type)
939  {
940  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position "
941  "0 contains different types of kernels\n");
942  }
943 
944  prev_kernel_type = c_kernel->get_kernel_type();
945 
946  for (index_t index=kernel_index; index<num_combinations; index+=c_list->get_num_elements())
947  {
948  CCombinedKernel* comb_kernel =
949  dynamic_cast<CCombinedKernel* >(kernel_array.get_element(index));
950  comb_kernel->append_kernel(c_kernel);
951  SG_UNREF(comb_kernel);
952  }
953  ++kernel_index;
954  if (c_list->get_delete_data())
955  SG_UNREF(kernel);
956  }
957 
958  if (kernel_list->get_delete_data())
959  SG_UNREF(list);
960 
961  /* how often each kernel of the sub-list must appear */
962  int32_t freq = c_list->get_num_elements();
963 
964  /* in this loop we replicate each kernel freq times
965  * until we assign to all the CombinedKernels a sub-kernel from this list
966  * That is for num_combinations */
967  list = kernel_list->get_next_element();
968  list_index = 1;
969  while (list)
970  {
971  c_list = dynamic_cast<CList* >(list);
972 
973  /* index of kernel in the list */
974  kernel_index = 0;
975  first_kernel = true;
976  for (CSGObject* kernel=c_list->get_first_element(); kernel; kernel=c_list->get_next_element())
977  {
978  CKernel* c_kernel = dynamic_cast<CKernel* >(kernel);
979 
980  if (first_kernel)
981  first_kernel = false;
982  else if (c_kernel->get_kernel_type()!=prev_kernel_type)
983  {
984  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position "
985  "%d contains different types of kernels\n", list_index);
986  }
987 
988  prev_kernel_type = c_kernel->get_kernel_type();
989 
990  /* moves the index so that we keep filling in, the way we do, until we reach the end of the list of combinedkernels */
991  for (index_t base=kernel_index*freq; base<num_combinations; base+=c_list->get_num_elements()*freq)
992  {
993  /* inserts freq consecutives times the current kernel */
994  for (index_t index=0; index<freq; ++index)
995  {
996  CCombinedKernel* comb_kernel =
997  dynamic_cast<CCombinedKernel* >(kernel_array.get_element(base+index));
998  comb_kernel->append_kernel(c_kernel);
999  SG_UNREF(comb_kernel);
1000  }
1001  }
1002  ++kernel_index;
1003 
1004  if (c_list->get_delete_data())
1005  SG_UNREF(kernel);
1006  }
1007 
1008  freq *= c_list->get_num_elements();
1009  if (kernel_list->get_delete_data())
1010  SG_UNREF(list);
1011  list = kernel_list->get_next_element();
1012  ++list_index;
1013  }
1014 
1015  return return_list;
1016 }

SHOGUN Machine Learning Toolbox - Documentation