SHOGUN  4.1.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
Kernel.cpp
Go to the documentation of this file.
1 /*
2  * EXCEPT FOR THE KERNEL CACHING FUNCTIONS WHICH ARE (W) THORSTEN JOACHIMS
3  * COPYRIGHT (C) 1999 UNIVERSITAET DORTMUND - ALL RIGHTS RESERVED
4  *
5  * this program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 3 of the License, or
8  * (at your option) any later version.
9  *
10  * Written (W) 1999-2009 Soeren Sonnenburg
11  * Written (W) 1999-2008 Gunnar Raetsch
12  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
13  */
14 
15 #include <shogun/lib/config.h>
16 #include <shogun/lib/common.h>
17 #include <shogun/io/SGIO.h>
18 #include <shogun/io/File.h>
19 #include <shogun/lib/Time.h>
20 #include <shogun/lib/Signal.h>
21 
22 #include <shogun/base/Parallel.h>
23 
24 #include <shogun/kernel/Kernel.h>
27 #include <shogun/base/Parameter.h>
28 
30 
31 #include <string.h>
32 #include <unistd.h>
34 
35 #ifdef HAVE_PTHREAD
36 #include <pthread.h>
37 #endif
38 
39 using namespace shogun;
40 
// NOTE(review): the default-constructor signature (orig. line 41,
// presumably "CKernel::CKernel() : CSGObject()") was elided in this
// extraction; only the body survives. All setup is delegated to init().
{
	init();
}
46 
/** Constructor taking a kernel-cache size in MB; values below 10 are
 *  clamped to 10. NOTE(review): orig. line 55 (likely an SG_INFO trace)
 *  was elided in this extraction. */
CKernel::CKernel(int32_t size) : CSGObject()
{
	init();

	// enforce the 10 MB minimum cache size
	if (size<10)
		size=10;

	cache_size=size;
}
57 
58 
/** Constructor taking lhs/rhs features and a cache size in MB (clamped
 *  to a minimum of 10); initializes the kernel on the given features.
 *  NOTE(review): orig. lines 68/70 (likely SG_INFO traces) were elided
 *  in this extraction. */
CKernel::CKernel(CFeatures* p_lhs, CFeatures* p_rhs, int32_t size) : CSGObject()
{
	init();

	// enforce the 10 MB minimum cache size
	if (size<10)
		size=10;

	cache_size=size;

	// attach the supplied feature objects (refcounting handled inside)
	init(p_lhs, p_rhs);
}
72 
// NOTE(review): destructor signature (orig. line 73, "CKernel::~CKernel()")
// and orig. lines 78-79 (presumably cleanup()/SG_UNREF(normalizer)) were
// elided in this extraction. Errors out if the kernel is still marked
// initialized at destruction time.
{
	if (get_is_initialized())
		SG_ERROR("Kernel still initialized on destruction.\n")

	SG_INFO("Kernel deleted (%p).\n", this)
}
83 
84 #ifdef USE_SVMLIGHT
/** Resize the SVMlight kernel cache to 'size' MB (minimum 10) and
 *  re-initialize it when features are present.
 *  NOTE(review): orig. line 90 (presumably kernel_cache_cleanup())
 *  was elided in this extraction. */
void CKernel::resize_kernel_cache(KERNELCACHE_IDX size, bool regression_hack)
{
	// clamp to the 10 MB minimum, mirroring the constructors
	if (size<10)
		size=10;

	cache_size=size;

	// only (re)build the cache when there is data to cache
	if (has_features() && get_num_vec_lhs())
		kernel_cache_init(cache_size, regression_hack);
}
96 #endif //USE_SVMLIGHT
97 
/** Attach left/right hand side features to the kernel, with temporary
 *  "safety" references so l==r is handled without premature deletion.
 *  NOTE(review): several lines were elided in this extraction — the
 *  REQUIRE(...) heads of both compatibility checks (orig. 111/117/121),
 *  orig. 125 (presumably remove_lhs_and_rhs()), and orig. 136-140
 *  (presumably num_lhs/num_rhs assignment) — leaving two string
 *  fragments below without their enclosing macro calls. */
bool CKernel::init(CFeatures* l, CFeatures* r)
{
	/* make sure that features are not deleted if same ones are used */
	SG_REF(l);
	SG_REF(r);

	//make sure features were indeed supplied
	REQUIRE(l, "CKernel::init(%p, %p): Left hand side features required!\n", l, r)
	REQUIRE(r, "CKernel::init(%p, %p): Right hand side features required!\n", l, r)

	//make sure features are compatible
	if (l->support_compatible_class())
	{
	"Right hand side of features (%s) must be compatible with left hand side features (%s)\n",
	l->get_name(), r->get_name());
	}
	else
	{
	"Right hand side of features (%s) must be compatible with left hand side features (%s)\n",
	l->get_name(), r->get_name())
	}

	//remove references to previous features

	//increase reference counts
	SG_REF(l);
	if (l==r)
		lhs_equals_rhs=true;
	else // l!=r
		SG_REF(r);

	lhs=l;
	rhs=r;

	/* unref "safety" refs from beginning */
	SG_UNREF(r);
	SG_UNREF(l);

	SG_DEBUG("leaving CKernel::init(%p, %p)\n", l, r)
	return true;
}
149 
// NOTE(review): signature (orig. line 150, presumably
// "bool CKernel::set_normalizer(CKernelNormalizer* n)") and orig. line
// 156 (presumably SG_UNREF(normalizer) of the old normalizer) were
// elided in this extraction. Stores a new normalizer, initializing it
// immediately if features are already attached.
{
	SG_REF(n);
	// initialize right away only when both feature sides exist
	if (lhs && rhs)
		n->init(this);

	normalizer=n;

	// success iff a non-NULL normalizer is now set
	return (normalizer!=NULL);
}
161 
// NOTE(review): signature (orig. line 162, presumably
// "CKernelNormalizer* CKernel::get_normalizer()") and orig. line 164
// (presumably SG_REF(normalizer)) were elided in this extraction.
{
	return normalizer;
}
167 
// NOTE(review): signature (orig. line 168, presumably
// "bool CKernel::init_normalizer()") was elided in this extraction.
// Re-initializes the attached normalizer against this kernel.
{
	return normalizer->init(this);
}
172 
// NOTE(review): signature (orig. line 173, "void CKernel::cleanup()")
// and the body line (orig. 175, presumably remove_lhs_and_rhs()) were
// elided in this extraction.
{
}
177 
178 #ifdef USE_SVMLIGHT
179 /****************************** Cache handling *******************************/
180 
/** Allocate and initialize the SVMlight kernel cache for 'buffsize' MB.
 *  With regression_hack the row count is doubled (regression constraints
 *  are modeled by duplicating training data).
 *  NOTE(review): orig. line 187 (the SG_ERROR argument line, presumably
 *  get_num_vec_lhs()/get_num_vec_rhs()) was elided in this extraction,
 *  leaving the SG_ERROR call visibly unterminated. */
void CKernel::kernel_cache_init(int32_t buffsize, bool regression_hack)
{
	int32_t totdoc=get_num_vec_lhs();
	if (totdoc<=0)
	{
	SG_ERROR("kernel has zero rows: num_lhs=%d num_rhs=%d\n",
	}
	uint64_t buffer_size=0;
	int32_t i;

	//in regression the additional constraints are made by doubling the training data
	if (regression_hack)
		totdoc*=2;

	// convert MB to element count, then cap at a full totdoc x totdoc matrix
	buffer_size=((uint64_t) buffsize)*1024*1024/sizeof(KERNELCACHE_ELEM);
	if (buffer_size>((uint64_t) totdoc)*totdoc)
		buffer_size=((uint64_t) totdoc)*totdoc;

	SG_INFO("using a kernel cache of size %lld MB (%lld bytes) for %s Kernel\n", buffer_size*sizeof(KERNELCACHE_ELEM)/1024/1024, buffer_size*sizeof(KERNELCACHE_ELEM), get_name())

	//make sure it fits in the *signed* KERNELCACHE_IDX type
	ASSERT(buffer_size < (((uint64_t) 1) << (sizeof(KERNELCACHE_IDX)*8-1)))

	// bookkeeping arrays: slot index per row, occupancy, LRU stamps,
	// reverse index, and active<->totdoc permutations
	kernel_cache.index = SG_MALLOC(int32_t, totdoc);
	kernel_cache.occu = SG_MALLOC(int32_t, totdoc);
	kernel_cache.lru = SG_MALLOC(int32_t, totdoc);
	kernel_cache.invindex = SG_MALLOC(int32_t, totdoc);
	kernel_cache.active2totdoc = SG_MALLOC(int32_t, totdoc);
	kernel_cache.totdoc2active = SG_MALLOC(int32_t, totdoc);
	kernel_cache.buffer = SG_MALLOC(KERNELCACHE_ELEM, buffer_size);
	kernel_cache.buffsize=buffer_size;
	// each cached element is a full row of length totdoc
	kernel_cache.max_elems=(int32_t) (kernel_cache.buffsize/totdoc);

	if(kernel_cache.max_elems>totdoc) {
		kernel_cache.max_elems=totdoc;
	}

	kernel_cache.elems=0; // initialize cache
	for(i=0;i<totdoc;i++) {
		kernel_cache.index[i]=-1;
		kernel_cache.lru[i]=0;
	}
	for(i=0;i<totdoc;i++) {
		kernel_cache.occu[i]=0;
		kernel_cache.invindex[i]=-1;
	}

	// initially all rows are active; identity permutation
	// (double ';;' is in the original source)
	kernel_cache.activenum=totdoc;;
	for(i=0;i<totdoc;i++) {
		kernel_cache.active2totdoc[i]=i;
		kernel_cache.totdoc2active[i]=i;
	}

	kernel_cache.time=0;
}
237 
// NOTE(review): the signature head (orig. line 238, presumably
// "void CKernel::get_kernel_row(") was elided in this extraction; the
// parameter tail is visible below. Fills 'buffer' with kernel row
// 'docnum', either the full row or only active columns; cached values
// are reused, everything else is recomputed via kernel().
	int32_t docnum, int32_t *active2dnum, float64_t *buffer, bool full_line)
{
	int32_t i,j;
	KERNELCACHE_IDX start;

	// indices >= num_vectors are the regression-hack mirror rows
	int32_t num_vectors = get_num_vec_lhs();
	if (docnum>=num_vectors)
		docnum=2*num_vectors-1-docnum;

	/* is cached? */
	if(kernel_cache.index[docnum] != -1)
	{
		kernel_cache.lru[kernel_cache.index[docnum]]=kernel_cache.time; /* lru */
		// start offset of this row's slot in the flat buffer
		start=((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[docnum];

		if (full_line)
		{
			for(j=0;j<get_num_vec_lhs();j++)
			{
				// inactive columns are not in the cache line; recompute
				if(kernel_cache.totdoc2active[j] >= 0)
					buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
				else
					buffer[j]=(float64_t) kernel(docnum, j);
			}
		}
		else
		{
			// active2dnum is a -1 terminated list of active indices
			for(i=0;(j=active2dnum[i])>=0;i++)
			{
				if(kernel_cache.totdoc2active[j] >= 0)
					buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
				else
				{
					// map mirror index back into the real range
					int32_t k=j;
					if (k>=num_vectors)
						k=2*num_vectors-1-k;
					buffer[j]=(float64_t) kernel(docnum, k);
				}
			}
		}
	}
	else
	{
		// row not cached: compute everything directly
		if (full_line)
		{
			for(j=0;j<get_num_vec_lhs();j++)
				buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, j);
		}
		else
		{
			for(i=0;(j=active2dnum[i])>=0;i++)
			{
				int32_t k=j;
				if (k>=num_vectors)
					k=2*num_vectors-1-k;
				buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, k);
			}
		}
	}
}
299 
300 
301 // Fills cache for the row m
// NOTE(review): signature (orig. line 302, presumably
// "void CKernel::cache_kernel_row(int32_t m)") was elided in this
// extraction. Fills the cache line for row m, copying symmetric values
// from already-cached rows where possible and computing the rest.
{
	register int32_t j,k,l;
	register KERNELCACHE_ELEM *cache;

	int32_t num_vectors = get_num_vec_lhs();

	// indices >= num_vectors are regression-hack mirror rows
	if (m>=num_vectors)
		m=2*num_vectors-1-m;

	if(!kernel_cache_check(m)) // not cached yet
	{
		cache = kernel_cache_clean_and_malloc(m);
		if(cache) {
			// column position of m within the active set (-1 if inactive)
			l=kernel_cache.totdoc2active[m];

			for(j=0;j<kernel_cache.activenum;j++) // fill cache
			{
				k=kernel_cache.active2totdoc[j];

				// exploit symmetry: K(m,k) == K(k,m), read from row k's line
				if((kernel_cache.index[k] != -1) && (l != -1) && (k != m)) {
					cache[j]=kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)
					*kernel_cache.index[k]+l];
				}
				else
				{
					if (k>=num_vectors)
						k=2*num_vectors-1-k;

					cache[j]=kernel(m, k);
				}
			}
		}
		else
			perror("Error: Kernel cache full! => increase cache size");
	}
}
339 
340 
/** Thread worker: fills the cache lines for rows
 *  params->uncached_rows[params->start .. params->end) (pthread entry;
 *  'p' is an S_KTHREAD_PARAM*). Values are copied from already-cached
 *  rows via symmetry when that row is not itself pending computation,
 *  otherwise computed with kernel(). Returns NULL. */
void* CKernel::cache_multiple_kernel_row_helper(void* p)
{
	int32_t j,k,l;
	S_KTHREAD_PARAM* params = (S_KTHREAD_PARAM*) p;

	for (int32_t i=params->start; i<params->end; i++)
	{
		KERNELCACHE_ELEM* cache=params->cache[i];
		int32_t m = params->uncached_rows[i];
		// column position of m within the active set (-1 if inactive)
		l=params->kernel_cache->totdoc2active[m];

		for(j=0;j<params->kernel_cache->activenum;j++) // fill cache
		{
			k=params->kernel_cache->active2totdoc[j];

			// symmetry copy is only safe if row k is cached AND not
			// concurrently being (re)computed by another thread
			if((params->kernel_cache->index[k] != -1) && (l != -1) && (!params->needs_computation[k])) {
				cache[j]=params->kernel_cache->buffer[((KERNELCACHE_IDX) params->kernel_cache->activenum)
				*params->kernel_cache->index[k]+l];
			}
			else
			{
				// map regression-hack mirror index into the real range
				if (k>=params->num_vectors)
					k=2*params->num_vectors-1-k;

				cache[j]=params->kernel->kernel(m, k);
			}
		}

		//now line m is cached
		params->needs_computation[m]=0;
	}
	return NULL;
}
374 
375 // Fills cache for the rows in key
/** Fill cache lines for all rows in 'rows'. Single-threaded fallback
 *  calls cache_kernel_row() per row; with pthreads, uncached rows are
 *  allocated up front and partitioned across worker threads, with the
 *  calling thread processing the final chunk itself. */
void CKernel::cache_multiple_kernel_rows(int32_t* rows, int32_t num_rows)
{
#ifdef HAVE_PTHREAD
	int32_t nthreads=parallel->get_num_threads();

	if (nthreads<2)
	{
#endif
		// serial path (also used when pthreads are unavailable)
		for(int32_t i=0;i<num_rows;i++)
			cache_kernel_row(rows[i]);
#ifdef HAVE_PTHREAD
	}
	else
	{
		// fill up kernel cache
		int32_t* uncached_rows = SG_MALLOC(int32_t, num_rows);
		KERNELCACHE_ELEM** cache = SG_MALLOC(KERNELCACHE_ELEM*, num_rows);
		pthread_t* threads = SG_MALLOC(pthread_t, nthreads-1);
		S_KTHREAD_PARAM* params = SG_MALLOC(S_KTHREAD_PARAM, nthreads-1);
		int32_t num_threads=nthreads-1;
		int32_t num_vec=get_num_vec_lhs();
		ASSERT(num_vec>0)
		// per-row flag so workers don't symmetry-copy from rows that
		// are themselves still being computed
		uint8_t* needs_computation=SG_CALLOC(uint8_t, num_vec);

		int32_t step=0;
		int32_t num=0;
		int32_t end=0;

		// allocate cachelines if necessary
		for (int32_t i=0; i<num_rows; i++)
		{
			int32_t idx=rows[i];
			// map regression-hack mirror index into the real range
			if (idx>=num_vec)
				idx=2*num_vec-1-idx;

			if (kernel_cache_check(idx))
				continue;

			needs_computation[idx]=1;
			uncached_rows[num]=idx;
			cache[num]= kernel_cache_clean_and_malloc(idx);

			if (!cache[num])
				SG_ERROR("Kernel cache full! => increase cache size\n")

			num++;
		}

		if (num>0)
		{
			// chunk size per thread; at least one row each
			step= num/nthreads;

			if (step<1)
			{
				num_threads=num-1;
				step=1;
			}

			for (int32_t t=0; t<num_threads; t++)
			{
				params[t].kernel = this;
				params[t].kernel_cache = &kernel_cache;
				params[t].cache = cache;
				params[t].uncached_rows = uncached_rows;
				params[t].needs_computation = needs_computation;
				params[t].num_uncached = num;
				params[t].start = t*step;
				params[t].end = (t+1)*step;
				params[t].num_vectors = get_num_vec_lhs();
				end=params[t].end;

				int code=pthread_create(&threads[t], NULL,
				CKernel::cache_multiple_kernel_row_helper, (void*)&params[t]);

				if (code != 0)
				{
					// degrade gracefully: run the remainder in this thread
					SG_WARNING("Thread creation failed (thread %d of %d) "
					"with error:'%s'\n",t, num_threads, strerror(code));
					num_threads=t;
					end=t*step;
					break;
				}
			}
		}
		else
			num_threads=-1;


		// the calling thread handles rows [end, num) itself
		S_KTHREAD_PARAM last_param;
		last_param.kernel = this;
		last_param.kernel_cache = &kernel_cache;
		last_param.cache = cache;
		last_param.uncached_rows = uncached_rows;
		last_param.needs_computation = needs_computation;
		last_param.start = end;
		last_param.num_uncached = num;
		last_param.end = num;
		last_param.num_vectors = get_num_vec_lhs();

		cache_multiple_kernel_row_helper(&last_param);


		for (int32_t t=0; t<num_threads; t++)
		{
			if (pthread_join(threads[t], NULL) != 0)
				SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads)
		}

		SG_FREE(needs_computation);
		SG_FREE(params);
		SG_FREE(threads);
		SG_FREE(cache);
		SG_FREE(uncached_rows);
	}
#endif
}
492 
493 // remove numshrink columns in the cache
494 // which correspond to examples marked
// NOTE(review): the signature head (orig. line 495, presumably
// "void CKernel::kernel_cache_shrink(") was elided in this extraction.
// Removes up to 'numshrink' columns (rows with after[j]==0) from every
// cached line by compacting the flat buffer, then rebuilds the
// active<->totdoc permutations.
	int32_t totdoc, int32_t numshrink, int32_t *after)
{
	ASSERT(totdoc > 0);
	register int32_t i,j,jj,scount; // 0 in after.
	KERNELCACHE_IDX from=0,to=0;
	int32_t *keep;

	// keep[j]==1 => column j survives the shrink
	keep=SG_MALLOC(int32_t, totdoc);
	for(j=0;j<totdoc;j++) {
		keep[j]=1;
	}
	scount=0;
	// mark at most numshrink inactive (after[j]==0) columns for removal
	for(jj=0;(jj<kernel_cache.activenum) && (scount<numshrink);jj++) {
		j=kernel_cache.active2totdoc[jj];
		if(!after[j]) {
			scount++;
			keep[j]=0;
		}
	}

	// compact every cached line in place, skipping dropped columns
	for(i=0;i<kernel_cache.max_elems;i++) {
		for(jj=0;jj<kernel_cache.activenum;jj++) {
			j=kernel_cache.active2totdoc[jj];
			if(!keep[j]) {
				from++;
			}
			else {
				kernel_cache.buffer[to]=kernel_cache.buffer[from];
				to++;
				from++;
			}
		}
	}

	// rebuild the active set and both permutation maps
	kernel_cache.activenum=0;
	for(j=0;j<totdoc;j++) {
		if((keep[j]) && (kernel_cache.totdoc2active[j] != -1)) {
			kernel_cache.active2totdoc[kernel_cache.activenum]=j;
			kernel_cache.totdoc2active[j]=kernel_cache.activenum;
			kernel_cache.activenum++;
		}
		else {
			kernel_cache.totdoc2active[j]=-1;
		}
	}

	// shorter lines => more lines fit in the same buffer
	kernel_cache.max_elems= (int32_t) kernel_cache.buffsize;

	if (kernel_cache.activenum>0)
		kernel_cache.buffsize/=kernel_cache.activenum;

	if(kernel_cache.max_elems>totdoc)
		kernel_cache.max_elems=totdoc;

	SG_FREE(keep);

}
553 
// NOTE(review): signature (orig. line 554, presumably
// "void CKernel::kernel_cache_reset_lru()") was elided in this
// extraction. Rebases all LRU stamps by subtracting the current
// maximum, preventing counter overflow while preserving ordering.
{
	int32_t maxlru=0,k;

	for(k=0;k<kernel_cache.max_elems;k++) {
		if(maxlru < kernel_cache.lru[k])
			maxlru=kernel_cache.lru[k];
	}
	for(k=0;k<kernel_cache.max_elems;k++) {
		kernel_cache.lru[k]-=maxlru;
	}
}
566 
// NOTE(review): signature (orig. line 567, presumably
// "void CKernel::kernel_cache_cleanup()") was elided in this
// extraction. Frees all cache arrays and zeroes the bookkeeping struct
// so a later kernel_cache_init() starts from a clean state.
{
	SG_FREE(kernel_cache.index);
	SG_FREE(kernel_cache.occu);
	SG_FREE(kernel_cache.lru);
	SG_FREE(kernel_cache.invindex);
	SG_FREE(kernel_cache.active2totdoc);
	SG_FREE(kernel_cache.totdoc2active);
	SG_FREE(kernel_cache.buffer);
	memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
}
578 
/** Claim the first unoccupied cache slot and return its index, or -1
 *  when every slot is taken.
 *  NOTE(review): orig. line 583 (presumably
 *  "if(kernel_cache_space_available()) {") was elided in this
 *  extraction — the apparently unmatched '}' before the final return
 *  belongs to that dropped 'if'. */
int32_t CKernel::kernel_cache_malloc()
{
	int32_t i;

	for(i=0;i<kernel_cache.max_elems;i++) {
		if(!kernel_cache.occu[i]) {
			kernel_cache.occu[i]=1;
			kernel_cache.elems++;
			return(i);
		}
	}
	}
	return(-1);
}
594 
595 void CKernel::kernel_cache_free(int32_t cacheidx)
596 {
597  kernel_cache.occu[cacheidx]=0;
598  kernel_cache.elems--;
599 }
600 
601 // remove least recently used cache
602 // element
603 int32_t CKernel::kernel_cache_free_lru()
604 {
605  register int32_t k,least_elem=-1,least_time;
606 
607  least_time=kernel_cache.time+1;
608  for(k=0;k<kernel_cache.max_elems;k++) {
609  if(kernel_cache.invindex[k] != -1) {
610  if(kernel_cache.lru[k]<least_time) {
611  least_time=kernel_cache.lru[k];
612  least_elem=k;
613  }
614  }
615  }
616 
617  if(least_elem != -1) {
618  kernel_cache_free(least_elem);
619  kernel_cache.index[kernel_cache.invindex[least_elem]]=-1;
620  kernel_cache.invindex[least_elem]=-1;
621  return(1);
622  }
623  return(0);
624 }
625 
626 // Get a free cache entry. In case cache is full, the lru
627 // element is removed.
628 KERNELCACHE_ELEM* CKernel::kernel_cache_clean_and_malloc(int32_t cacheidx)
629 {
630  int32_t result;
631  if((result = kernel_cache_malloc()) == -1) {
632  if(kernel_cache_free_lru()) {
633  result = kernel_cache_malloc();
634  }
635  }
636  kernel_cache.index[cacheidx]=result;
637  if(result == -1) {
638  return(0);
639  }
640  kernel_cache.invindex[result]=cacheidx;
641  kernel_cache.lru[kernel_cache.index[cacheidx]]=kernel_cache.time; // lru
642  return &kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[cacheidx]];
643 }
644 #endif //USE_SVMLIGHT
645 
/** Load kernel state from 'loader'. NOTE(review): the body lines
 *  (orig. 648-649, presumably locale set/reset macros) were elided in
 *  this extraction — the visible body is empty. */
void CKernel::load(CFile* loader)
{
}
651 
/** Write the full kernel matrix (as float64) to 'writer'.
 *  NOTE(review): orig. lines 655/657 (presumably SG_SET_LOCALE_C /
 *  SG_RESET_LOCALE) were elided in this extraction. */
void CKernel::save(CFile* writer)
{
	SGMatrix<float64_t> k_matrix=get_kernel_matrix<float64_t>();
	writer->set_matrix(k_matrix.matrix, k_matrix.num_rows, k_matrix.num_cols);
}
659 
// NOTE(review): signature (orig. line 660, presumably
// "void CKernel::remove_lhs_and_rhs()") was elided in this extraction.
// Detaches both feature sides (unref'ing rhs only when distinct from
// lhs) and resets the SVMlight cache.
{
	SG_DEBUG("entering CKernel::remove_lhs_and_rhs\n")
	if (rhs!=lhs)
		SG_UNREF(rhs);
	rhs = NULL;
	num_rhs=0;

	SG_UNREF(lhs);
	lhs = NULL;
	num_lhs=0;
	lhs_equals_rhs=false;

#ifdef USE_SVMLIGHT
	cache_reset();
#endif //USE_SVMLIGHT
	SG_DEBUG("leaving CKernel::remove_lhs_and_rhs\n")
}
678 
// NOTE(review): signature (orig. line 679, presumably
// "void CKernel::remove_lhs()") was elided in this extraction.
// Detaches only the left hand side; when lhs==rhs the shared pointer
// is cleared from rhs first so it is unref'd exactly once.
{
	if (rhs==lhs)
		rhs=NULL;
	SG_UNREF(lhs);
	lhs = NULL;
	num_lhs=0;
	lhs_equals_rhs=false;
#ifdef USE_SVMLIGHT
	cache_reset();
#endif //USE_SVMLIGHT
}
691 
// NOTE(review): signature (orig. lines 692-693, presumably the doc
// comment and "void CKernel::remove_rhs()") was elided in this
// extraction. Detaches only the right hand side, unref'ing it only
// when it is distinct from lhs.
{
	if (rhs!=lhs)
		SG_UNREF(rhs);
	rhs = NULL;
	num_rhs=0;
	lhs_equals_rhs=false;

#ifdef USE_SVMLIGHT
	cache_reset();
#endif //USE_SVMLIGHT
}
705 
706 #define ENUM_CASE(n) case n: SG_INFO(#n " ") break;
707 
// NOTE(review): signature (orig. line 708, presumably
// "void CKernel::list_kernel()") was elided in this extraction, along
// with nearly all ENUM_CASE lines of the three switches (orig.
// 717-777, 782-803, 808-821). Prints a one-line summary of the kernel:
// name, weight, optimization type, then its kernel type, feature class
// and feature type via ENUM_CASE.
{
	SG_INFO("%p - \"%s\" weight=%1.2f OPT:%s", this, get_name(),
	get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" :
	"SLOWBUTMEMEFFICIENT");

	switch (get_kernel_type())
	{
	}

	switch (get_feature_class())
	{
	ENUM_CASE(C_WD)
	}

	switch (get_feature_type())
	{
	}
	SG_INFO("\n")
}
825 #undef ENUM_CASE
826 
// NOTE(review): the signature head (orig. line 827, presumably
// "bool CKernel::init_optimization(") was elided in this extraction.
// Base-class stub: linadd optimization is unsupported unless a derived
// kernel overrides this.
	int32_t count, int32_t *IDX, float64_t * weights)
{
	SG_ERROR("kernel does not support linadd optimization\n")
	return false ;
}
833 
// NOTE(review): signature (orig. line 834, presumably
// "bool CKernel::delete_optimization()") was elided in this
// extraction. Base-class stub: always errors.
{
	SG_ERROR("kernel does not support linadd optimization\n")
	return false;
}
839 
// NOTE(review): signature (orig. line 840, presumably
// "float64_t CKernel::compute_optimized(int32_t vector_idx)") was
// elided in this extraction. Base-class stub: errors, returns 0.
{
	SG_ERROR("kernel does not support linadd optimization\n")
	return 0;
}
845 
// NOTE(review): the signature head (orig. line 846, presumably
// "void CKernel::compute_batch(") was elided in this extraction.
// Base-class stub: batch computation unsupported.
	int32_t num_vec, int32_t* vec_idx, float64_t* target, int32_t num_suppvec,
	int32_t* IDX, float64_t* weights, float64_t factor)
{
	SG_ERROR("kernel does not support batch computation\n")
}
852 
/** Base-class stub: adding to the normal vector requires linadd
 *  support in a derived kernel; always errors here. */
void CKernel::add_to_normal(int32_t vector_idx, float64_t weight)
{
	SG_ERROR("kernel does not support linadd optimization, add_to_normal not implemented\n")
}
857 
// NOTE(review): signature (orig. line 858, presumably
// "void CKernel::clear_normal()") was elided in this extraction.
// Base-class stub: always errors.
{
	SG_ERROR("kernel does not support linadd optimization, clear_normal not implemented\n")
}
862 
// NOTE(review): signature (orig. line 863, presumably
// "int32_t CKernel::get_num_subkernels()") was elided in this
// extraction. A plain (non-combined) kernel has exactly one subkernel.
{
	return 1;
}
867 
// NOTE(review): the signature head (orig. line 868, presumably
// "void CKernel::compute_by_subkernel(") was elided in this
// extraction. Base-class stub: always errors.
	int32_t vector_idx, float64_t * subkernel_contrib)
{
	SG_ERROR("kernel compute_by_subkernel not implemented\n")
}
873 
874 const float64_t* CKernel::get_subkernel_weights(int32_t &num_weights)
875 {
876  num_weights=1 ;
877  return &combined_kernel_weight ;
878 }
879 
// NOTE(review): signature (orig. line 880, presumably
// "SGVector<float64_t> CKernel::get_subkernel_weights()") was elided
// in this extraction. Wraps the pointer variant's single weight in a
// non-owning SGVector of length 1.
{
	int num_weights = 1;
	const float64_t* weight = get_subkernel_weights(num_weights);
	return SGVector<float64_t>(const_cast<float64_t*>(weight),1,false);
}
886 
// NOTE(review): signature (orig. line 887, presumably
// "void CKernel::set_subkernel_weights(SGVector<float64_t> weights)")
// was elided in this extraction. A plain kernel accepts exactly one
// weight, stored as combined_kernel_weight.
{
	ASSERT(weights.vector)
	if (weights.vlen!=1)
		SG_ERROR("number of subkernel weights should be one ...\n")

	combined_kernel_weight = weights.vector[0] ;
}
895 
// NOTE(review): signature (orig. line 896, presumably
// "CKernel* CKernel::obtain_from_generic(CSGObject* kernel)") was
// elided in this extraction. Checked downcast from CSGObject to
// CKernel; NULL input passes through as NULL.
{
	if (kernel)
	{
		CKernel* casted=dynamic_cast<CKernel*>(kernel);
		REQUIRE(casted, "CKernel::obtain_from_generic(): Error, provided object"
		" of class \"%s\" is not a subclass of CKernel!\n",
		kernel->get_name());
		return casted;
	}
	else
		return NULL;
}
909 
// NOTE(review): signature (orig. line 910, presumably
// "bool CKernel::init_optimization_svm(CSVM* svm)") was elided in this
// extraction. Collects the SVM's support-vector indices and alphas
// into temporary arrays and forwards them to init_optimization().
{
	int32_t num_suppvec=svm->get_num_support_vectors();
	int32_t* sv_idx=SG_MALLOC(int32_t, num_suppvec);
	float64_t* sv_weight=SG_MALLOC(float64_t, num_suppvec);

	for (int32_t i=0; i<num_suppvec; i++)
	{
		sv_idx[i] = svm->get_support_vector(i);
		sv_weight[i] = svm->get_alpha(i);
	}
	bool ret = init_optimization(num_suppvec, sv_idx, sv_weight);

	SG_FREE(sv_idx);
	SG_FREE(sv_weight);
	return ret;
}
927 
// NOTE(review): signature (orig. line 928, presumably
// "void CKernel::load_serializable_post() throw (ShogunException)")
// and orig. line 930 (presumably the base-class call) were elided in
// this extraction. Restores the rhs==lhs aliasing after deserialization.
{
	if (lhs_equals_rhs)
		rhs=lhs;
}
934 
// NOTE(review): signature (orig. line 935) and orig. line 937
// (presumably the base-class call) were elided in this extraction.
// Breaks the rhs==lhs aliasing before serialization so the shared
// features are not written twice.
{

	if (lhs_equals_rhs)
		rhs=NULL;
}
942 
// NOTE(review): signature (orig. line 943) and orig. line 945
// (presumably the base-class call) were elided in this extraction.
// Restores the rhs==lhs aliasing undone by save_serializable_pre().
{

	if (lhs_equals_rhs)
		rhs=lhs;
}
950 
// NOTE(review): the signature (orig. line 951, presumably
// "void CKernel::register_params()" plus '{') and orig. lines 961/963
// (the "MS_NOT_AVAILABLE);" argument tails of the num_lhs/num_rhs
// registrations) were elided in this extraction. Registers all
// serializable members with the parameter framework.
	SG_ADD(&cache_size, "cache_size",
	"Cache size in MB.", MS_NOT_AVAILABLE);
	SG_ADD((CSGObject**) &lhs, "lhs",
	"Feature vectors to occur on left hand side.", MS_NOT_AVAILABLE);
	SG_ADD((CSGObject**) &rhs, "rhs",
	"Feature vectors to occur on right hand side.", MS_NOT_AVAILABLE);
	SG_ADD(&lhs_equals_rhs, "lhs_equals_rhs",
	"If features on lhs are the same as on rhs.", MS_NOT_AVAILABLE);
	SG_ADD(&num_lhs, "num_lhs", "Number of feature vectors on left hand side.",
	SG_ADD(&num_rhs, "num_rhs", "Number of feature vectors on right hand side.",
	SG_ADD(&combined_kernel_weight, "combined_kernel_weight",
	"Combined kernel weight.", MS_AVAILABLE);
	SG_ADD(&optimization_initialized, "optimization_initialized",
	"Optimization is initialized.", MS_NOT_AVAILABLE);
	SG_ADD((machine_int_t*) &opt_type, "opt_type",
	"Optimization type.", MS_NOT_AVAILABLE);
	SG_ADD(&properties, "properties", "Kernel properties.", MS_NOT_AVAILABLE);
	SG_ADD((CSGObject**) &normalizer, "normalizer", "Normalize the kernel.",
	MS_AVAILABLE);
}
974 
975 
/** Set all members to their defaults (shared by every constructor).
 *  NOTE(review): orig. lines 985-988 and 995 were elided in this
 *  extraction (presumably combined_kernel_weight / optimization /
 *  properties defaults and a set_normalizer(...) call). */
void CKernel::init()
{
	cache_size=10;
	kernel_matrix=NULL;
	lhs=NULL;
	rhs=NULL;
	num_lhs=0;
	num_rhs=0;
	lhs_equals_rhs=false;
	normalizer=NULL;

#ifdef USE_SVMLIGHT
	// zero the SVMlight cache bookkeeping so cleanup is always safe
	memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
#endif //USE_SVMLIGHT

}
997 
namespace shogun
{
/** Per-thread parameter bundle for get_kernel_matrix_helper().
 *  NOTE(review): several member declarations were elided in this
 *  extraction (orig. 1003-1021 — presumably the CKernel* kernel,
 *  T* result and bool symmetric members the helper dereferences). */
template <class T> struct K_THREAD_PARAM
{
	// first row this thread computes
	int32_t start;
	// one past the last row this thread computes
	int32_t end;
	// global element counters, used only for progress reporting
	int64_t total_start;
	int64_t total_end;
	// matrix dimensions: m rows (lhs) by n cols (rhs)
	int32_t m;
	int32_t n;
	// whether this thread reports progress
	bool verbose;
};
}
1025 
// NOTE(review): the signature head (orig. line 1026, presumably
// "float64_t CKernel::sum_symmetric_block(index_t block_begin,
// index_t block_size,") was elided in this extraction; only the final
// parameter remains visible. Sums a symmetric square block, computing
// only the upper triangle and doubling, with the diagonal added
// separately unless no_diag is set.
	bool no_diag)
{
	SG_DEBUG("Entering\n");

	REQUIRE(has_features(), "No features assigned to kernel\n")
	REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
	REQUIRE(block_begin>=0 && block_begin<num_rhs,
	"Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
	REQUIRE(block_begin+block_size<=num_rhs,
	"Invalid block size (%d) at starting index (%d, %d)! "
	"Please use smaller blocks!", block_size, block_begin, block_begin)
	REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

	float64_t sum=0.0;

	// since the block is symmetric with main diagonal inside, we can save half
	// the computation with using only the upper triangular part.
	// this can be done in parallel
#pragma omp parallel for
	for (index_t i=0; i<block_size; ++i)
	{
		// compute the kernel values on the upper triangular part of the kernel
		// matrix and compute sum on the fly
		for (index_t j=i+1; j<block_size; ++j)
		{
			float64_t k=kernel(i+block_begin, j+block_begin);
#pragma omp atomic
			sum+=k;
		}
	}

	// the actual sum would be twice of what we computed
	sum*=2;

	// add the diagonal elements if required - keeping this check
	// outside of the loop to save cycles
	if (!no_diag)
	{
#pragma omp parallel for
		for (index_t i=0; i<block_size; ++i)
		{
			float64_t diag=kernel(i+block_begin, i+block_begin);
#pragma omp atomic
			sum+=diag;
		}
	}

	SG_DEBUG("Leaving\n");

	return sum;
}
1078 
1079 float64_t CKernel::sum_block(index_t block_begin_row, index_t block_begin_col,
1080  index_t block_size_row, index_t block_size_col, bool no_diag)
1081 {
1082  SG_DEBUG("Entering\n");
1083 
1084  REQUIRE(has_features(), "No features assigned to kernel\n")
1085  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
1086  block_begin_col>=0 && block_begin_col<num_rhs,
1087  "Invalid block begin index (%d, %d)!\n",
1088  block_begin_row, block_begin_col)
1089  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
1090  block_begin_col+block_size_col<=num_rhs,
1091  "Invalid block size (%d, %d) at starting index (%d, %d)! "
1092  "Please use smaller blocks!", block_size_row, block_size_col,
1093  block_begin_row, block_begin_col)
1094  REQUIRE(block_size_row>=1 && block_size_col>=1,
1095  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
1096 
1097  // check if removal of diagonal is required/valid
1098  if (no_diag && block_size_row!=block_size_col)
1099  {
1100  SG_WARNING("Not removing the main diagonal since block is not square!\n");
1101  no_diag=false;
1102  }
1103 
1104  float64_t sum=0.0;
1105 
1106  // this can be done in parallel for the rows/cols
1107 #pragma omp parallel for
1108  for (index_t i=0; i<block_size_row; ++i)
1109  {
1110  // compute the kernel values and compute sum on the fly
1111  for (index_t j=0; j<block_size_col; ++j)
1112  {
1113  float64_t k=no_diag && i==j ? 0 :
1114  kernel(i+block_begin_row, j+block_begin_col);
1115 #pragma omp atomic
1116  sum+=k;
1117  }
1118  }
1119 
1120  SG_DEBUG("Leaving\n");
1121 
1122  return sum;
1123 }
1124 
// NOTE(review): the signature head (orig. line 1125, presumably
// "SGVector<float64_t> CKernel::row_wise_sum_symmetric_block(index_t")
// was elided in this extraction; the parameter tail is visible below.
// Computes per-row sums of a symmetric square block, covering only
// the upper triangle and crediting each value to both its row and
// column accumulator.
	index_t block_size, bool no_diag)
{
	SG_DEBUG("Entering\n");

	REQUIRE(has_features(), "No features assigned to kernel\n")
	REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
	REQUIRE(block_begin>=0 && block_begin<num_rhs,
	"Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
	REQUIRE(block_begin+block_size<=num_rhs,
	"Invalid block size (%d) at starting index (%d, %d)! "
	"Please use smaller blocks!", block_size, block_begin, block_begin)
	REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

	// initialize the vector that accumulates the row/col-wise sum on the go
	SGVector<float64_t> row_sum(block_size);
	row_sum.set_const(0.0);

	// since the block is symmetric with main diagonal inside, we can save half
	// the computation with using only the upper triangular part.
	// this can be done in parallel for the rows/cols
#pragma omp parallel for
	for (index_t i=0; i<block_size; ++i)
	{
		// compute the kernel values on the upper triangular part of the kernel
		// matrix and compute row-wise sum on the fly
		for (index_t j=i+1; j<block_size; ++j)
		{
			float64_t k=kernel(i+block_begin, j+block_begin);
			// both accumulators may be touched by other threads
#pragma omp critical
			{
				row_sum[i]+=k;
				row_sum[j]+=k;
			}
		}
	}

	// add the diagonal elements if required - keeping this check
	// outside of the loop to save cycles
	if (!no_diag)
	{
#pragma omp parallel for
		for (index_t i=0; i<block_size; ++i)
		{
			float64_t diag=kernel(i+block_begin, i+block_begin);
			row_sum[i]+=diag;
		}
	}

	SG_DEBUG("Leaving\n");

	return row_sum;
}
1178 
// NOTE(review): the signature head (orig. line 1179, presumably
// "SGMatrix<float64_t> CKernel::row_wise_sum_squared_sum_symmetric_block(index_t")
// was elided in this extraction; the parameter tail is visible below.
// Like the row-wise sum above, but additionally accumulates squared
// kernel values: column 0 holds sums, column 1 holds sums of squares.
	block_begin, index_t block_size, bool no_diag)
{
	SG_DEBUG("Entering\n");

	REQUIRE(has_features(), "No features assigned to kernel\n")
	REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
	REQUIRE(block_begin>=0 && block_begin<num_rhs,
	"Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
	REQUIRE(block_begin+block_size<=num_rhs,
	"Invalid block size (%d) at starting index (%d, %d)! "
	"Please use smaller blocks!", block_size, block_begin, block_begin)
	REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

	// initialize the matrix that accumulates the row/col-wise sum on the go
	// the first column stores the sum of kernel values
	// the second column stores the sum of squared kernel values
	SGMatrix<float64_t> row_sum(block_size, 2);
	row_sum.set_const(0.0);

	// since the block is symmetric with main diagonal inside, we can save half
	// the computation with using only the upper triangular part
	// this can be done in parallel for the rows/cols
#pragma omp parallel for
	for (index_t i=0; i<block_size; ++i)
	{
		// compute the kernel values on the upper triangular part of the kernel
		// matrix and compute row-wise sum and squared sum on the fly
		for (index_t j=i+1; j<block_size; ++j)
		{
			float64_t k=kernel(i+block_begin, j+block_begin);
			// four accumulators may be touched by other threads
#pragma omp critical
			{
				row_sum(i, 0)+=k;
				row_sum(j, 0)+=k;
				row_sum(i, 1)+=k*k;
				row_sum(j, 1)+=k*k;
			}
		}
	}

	// add the diagonal elements if required - keeping this check
	// outside of the loop to save cycles
	if (!no_diag)
	{
#pragma omp parallel for
		for (index_t i=0; i<block_size; ++i)
		{
			float64_t diag=kernel(i+block_begin, i+block_begin);
			row_sum(i, 0)+=diag;
			row_sum(i, 1)+=diag*diag;
		}
	}

	SG_DEBUG("Leaving\n");

	return row_sum;
}
1237 
// NOTE(review): the signature head (orig. line 1238, presumably
// "SGVector<float64_t> CKernel::row_col_wise_sum_block(index_t block_begin_row,")
// was elided in this extraction; the parameter tail is visible below.
// Computes row-wise sums (first block_size_row entries) and col-wise
// sums (next block_size_col entries) of a rectangular block.
	index_t block_begin_col, index_t block_size_row,
	index_t block_size_col, bool no_diag)
{
	SG_DEBUG("Entering\n");

	REQUIRE(has_features(), "No features assigned to kernel\n")
	REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
	block_begin_col>=0 && block_begin_col<num_rhs,
	"Invalid block begin index (%d, %d)!\n",
	block_begin_row, block_begin_col)
	REQUIRE(block_begin_row+block_size_row<=num_lhs &&
	block_begin_col+block_size_col<=num_rhs,
	"Invalid block size (%d, %d) at starting index (%d, %d)! "
	"Please use smaller blocks!", block_size_row, block_size_col,
	block_begin_row, block_begin_col)
	REQUIRE(block_size_row>=1 && block_size_col>=1,
	"Invalid block size (%d, %d)!\n", block_size_row, block_size_col)

	// check if removal of diagonal is required/valid
	if (no_diag && block_size_row!=block_size_col)
	{
		SG_WARNING("Not removing the main diagonal since block is not square!\n");
		no_diag=false;
	}

	// initialize the vector that accumulates the row/col-wise sum on the go
	// the first block_size_row entries store the row-wise sum of kernel values
	// the nextt block_size_col entries store the col-wise sum of kernel values
	SGVector<float64_t> sum(block_size_row+block_size_col);
	sum.set_const(0.0);

	// this can be done in parallel for the rows/cols
#pragma omp parallel for
	for (index_t i=0; i<block_size_row; ++i)
	{
		// compute the kernel values and compute sum on the fly
		for (index_t j=0; j<block_size_col; ++j)
		{
			float64_t k=no_diag && i==j ? 0 :
			kernel(i+block_begin_row, j+block_begin_col);
			// the col accumulator may be touched by other threads
#pragma omp critical
			{
				sum[i]+=k;
				sum[j+block_size_row]+=k;
			}
		}
	}

	SG_DEBUG("Leaving\n");

	return sum;
}
1291 
/** Thread worker for get_kernel_matrix(): fills rows
 *  [start, end) of the column-major result array, mirroring values
 *  across the diagonal when the matrix is symmetric.
 *  NOTE(review): orig. line 1332 was elided in this extraction —
 *  presumably a cancellation check (e.g. "if (CSignal::
 *  cancel_computations())") guarding the bare 'break;' below. */
template <class T> void* CKernel::get_kernel_matrix_helper(void* p)
{
	K_THREAD_PARAM<T>* params= (K_THREAD_PARAM<T>*) p;
	int32_t i_start=params->start;
	int32_t i_end=params->end;
	CKernel* k=params->kernel;
	T* result=params->result;
	bool symmetric=params->symmetric;
	int32_t n=params->n;
	int32_t m=params->m;
	bool verbose=params->verbose;
	int64_t total_start=params->total_start;
	int64_t total_end=params->total_end;
	int64_t total=total_start;

	for (int32_t i=i_start; i<i_end; i++)
	{
		int32_t j_start=0;

		// symmetric case: only compute the upper triangle
		if (symmetric)
			j_start=i;

		for (int32_t j=j_start; j<n; j++)
		{
			float64_t v=k->kernel(i,j);
			// column-major layout: element (i,j) at i + j*m
			result[i+j*m]=v;

			if (symmetric && i!=j)
				result[j+i*m]=v;

			if (verbose)
			{
				total++;

				if (symmetric && i!=j)
					total++;

				if (total%100 == 0)
					SG_OBJ_PROGRESS(k, total, total_start, total_end)

				break;
			}
		}

	}

	return NULL;
}
1341 
// NOTE(review): the signature line (orig. 1343, presumably
// "SGMatrix<T> CKernel::get_kernel_matrix()") was elided in this
// extraction; only the template header survives. Computes the full
// m x n kernel matrix, split row-wise across pthreads (with the
// calling thread computing the final chunk and reporting progress),
// and returns it as an owning SGMatrix.
template <class T>
{
	T* result = NULL;

	REQUIRE(has_features(), "no features assigned to kernel\n")

	int32_t m=get_num_vec_lhs();
	int32_t n=get_num_vec_rhs();

	int64_t total_num = int64_t(m)*n;

	// if lhs == rhs and sizes match assume k(i,j)=k(j,i)
	bool symmetric= (lhs && lhs==rhs && m==n);

	SG_DEBUG("returning kernel matrix of size %dx%d\n", m, n)

	result=SG_MALLOC(T, total_num);

	int32_t num_threads=parallel->get_num_threads();
	if (num_threads < 2)
	{
		// serial path: one param block covering all rows
		K_THREAD_PARAM<T> params;
		params.kernel=this;
		params.result=result;
		params.start=0;
		params.end=m;
		params.total_start=0;
		params.total_end=total_num;
		params.n=n;
		params.m=m;
		params.symmetric=symmetric;
		params.verbose=true;
		get_kernel_matrix_helper<T>((void*) &params);
	}
	else
	{
		pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
		K_THREAD_PARAM<T>* params = SG_MALLOC(K_THREAD_PARAM<T>, num_threads);
		// partition by element count; compute_row_start maps an element
		// offset back to a row boundary (accounts for symmetry)
		int64_t step= total_num/num_threads;

		int32_t t;

		num_threads--;
		for (t=0; t<num_threads; t++)
		{
			params[t].kernel = this;
			params[t].result = result;
			params[t].start = compute_row_start(t*step, n, symmetric);
			params[t].end = compute_row_start((t+1)*step, n, symmetric);
			params[t].total_start=t*step;
			params[t].total_end=(t+1)*step;
			params[t].n=n;
			params[t].m=m;
			params[t].symmetric=symmetric;
			params[t].verbose=false;

			int code=pthread_create(&threads[t], NULL,
			CKernel::get_kernel_matrix_helper<T>, (void*)&params[t]);

			if (code != 0)
			{
				// degrade gracefully: remaining rows run in this thread
				SG_WARNING("Thread creation failed (thread %d of %d) "
				"with error:'%s'\n",t, num_threads, strerror(code));
				num_threads=t;
				break;
			}
		}

		// calling thread computes the last chunk and shows progress
		params[t].kernel = this;
		params[t].result = result;
		params[t].start = compute_row_start(t*step, n, symmetric);
		params[t].end = m;
		params[t].total_start=t*step;
		params[t].total_end=total_num;
		params[t].n=n;
		params[t].m=m;
		params[t].symmetric=symmetric;
		params[t].verbose=true;
		get_kernel_matrix_helper<T>(&params[t]);

		for (t=0; t<num_threads; t++)
		{
			if (pthread_join(threads[t], NULL) != 0)
				SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads)
		}

		SG_FREE(params);
		SG_FREE(threads);
	}

	SG_DONE()

	// SGMatrix takes ownership of 'result'
	return SGMatrix<T>(result,m,n,true);
}
1437 
1438 
1439 template SGMatrix<float64_t> CKernel::get_kernel_matrix<float64_t>();
1440 template SGMatrix<float32_t> CKernel::get_kernel_matrix<float32_t>();
1441 
1442 template void* CKernel::get_kernel_matrix_helper<float64_t>(void* p);
1443 template void* CKernel::get_kernel_matrix_helper<float32_t>(void* p);
1444 
virtual void clear_normal()
Definition: Kernel.cpp:858
virtual const char * get_name() const =0
virtual void load_serializable_post()
Definition: Kernel.cpp:928
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:98
virtual bool support_compatible_class() const
Definition: Features.h:323
int32_t compute_row_start(int64_t offs, int32_t n, bool symmetric)
Definition: Kernel.h:919
#define SG_INFO(...)
Definition: SGIO.h:118
virtual void cleanup()
Definition: Kernel.cpp:173
#define SG_RESET_LOCALE
Definition: SGIO.h:86
#define SG_DONE()
Definition: SGIO.h:157
virtual void set_matrix(const bool *matrix, int32_t num_feat, int32_t num_vec)
Definition: File.cpp:126
virtual void compute_by_subkernel(int32_t vector_idx, float64_t *subkernel_contrib)
Definition: Kernel.cpp:868
void cache_multiple_kernel_rows(int32_t *key, int32_t varnum)
Definition: Kernel.cpp:376
virtual bool get_feature_class_compatibility(EFeatureClass rhs) const
Definition: Features.cpp:355
int32_t get_num_threads() const
Definition: Parallel.cpp:64
int32_t index_t
Definition: common.h:62
int32_t num_rhs
number of feature vectors on right hand side
Definition: Kernel.h:1069
static void * get_kernel_matrix_helper(void *p)
Definition: Kernel.cpp:1292
Class ShogunException defines an exception which is thrown whenever an error inside of shogun occurs...
virtual bool set_normalizer(CKernelNormalizer *normalizer)
Definition: Kernel.cpp:150
virtual float64_t sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
Definition: Kernel.cpp:1079
virtual int32_t get_num_vectors() const =0
virtual void save_serializable_pre()
Definition: SGObject.cpp:1068
#define SG_ERROR(...)
Definition: SGIO.h:129
void cache_reset()
Definition: Kernel.h:602
#define REQUIRE(x,...)
Definition: SGIO.h:206
virtual bool delete_optimization()
Definition: Kernel.cpp:834
int64_t KERNELCACHE_IDX
Definition: Kernel.h:46
int32_t kernel_cache_space_available()
Definition: Kernel.h:698
index_t num_cols
Definition: SGMatrix.h:378
float64_t kernel(int32_t idx_a, int32_t idx_b)
Definition: Kernel.h:206
#define ENUM_CASE(n)
Definition: Kernel.cpp:706
uint64_t properties
Definition: Kernel.h:1082
Parallel * parallel
Definition: SGObject.h:499
virtual void remove_rhs()
takes all necessary steps if the rhs is removed from kernel
Definition: Kernel.cpp:693
virtual int32_t get_num_vec_lhs()
Definition: Kernel.h:516
SGMatrix< float64_t > get_kernel_matrix()
Definition: Kernel.h:219
#define SG_REF(x)
Definition: SGObject.h:51
#define SG_SET_LOCALE_C
Definition: SGIO.h:85
int32_t cache_size
cache_size in MB
Definition: Kernel.h:1047
index_t num_rows
Definition: SGMatrix.h:376
void kernel_cache_shrink(int32_t totdoc, int32_t num_shrink, int32_t *after)
Definition: Kernel.cpp:495
bool get_is_initialized()
Definition: Kernel.h:753
virtual SGMatrix< float64_t > row_wise_sum_squared_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:1179
float64_t combined_kernel_weight
Definition: Kernel.h:1072
virtual void register_params()
Definition: Kernel.cpp:951
void save(CFile *writer)
Definition: Kernel.cpp:652
virtual void remove_lhs_and_rhs()
Definition: Kernel.cpp:660
index_t vlen
Definition: SGVector.h:494
virtual CKernelNormalizer * get_normalizer()
Definition: Kernel.cpp:162
#define ASSERT(x)
Definition: SGIO.h:201
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:112
virtual SGVector< float64_t > row_col_wise_sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
Definition: Kernel.cpp:1238
void cache_kernel_row(int32_t x)
Definition: Kernel.cpp:302
#define SG_OBJ_PROGRESS(o,...)
Definition: SGIO.h:147
virtual float64_t sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:1026
virtual SGVector< float64_t > get_subkernel_weights()
Definition: Kernel.cpp:880
double float64_t
Definition: common.h:50
KERNEL_CACHE kernel_cache
kernel cache
Definition: Kernel.h:1051
virtual EFeatureType get_feature_type()=0
KERNELCACHE_ELEM * kernel_matrix
Definition: Kernel.h:1056
A File access base class.
Definition: File.h:34
virtual void save_serializable_post()
Definition: Kernel.cpp:943
virtual float64_t compute_optimized(int32_t vector_idx)
Definition: Kernel.cpp:840
EOptimizationType get_optimization_type()
Definition: Kernel.h:741
virtual void save_serializable_post()
Definition: SGObject.cpp:1073
void list_kernel()
Definition: Kernel.cpp:708
float64_t get_alpha(int32_t idx)
float64_t get_combined_kernel_weight()
Definition: Kernel.h:802
virtual SGVector< float64_t > row_wise_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:1125
virtual EFeatureClass get_feature_class() const =0
Identity Kernel Normalization, i.e. no normalization is applied.
int32_t num_lhs
number of feature vectors on left hand side
Definition: Kernel.h:1067
The class Kernel Normalizer defines a function to post-process kernel values.
int32_t get_support_vector(int32_t idx)
static bool cancel_computations()
Definition: Signal.h:86
virtual int32_t get_num_vec_rhs()
Definition: Kernel.h:525
virtual void set_subkernel_weights(SGVector< float64_t > weights)
Definition: Kernel.cpp:887
virtual bool init_normalizer()
Definition: Kernel.cpp:168
bool optimization_initialized
Definition: Kernel.h:1075
EOptimizationType opt_type
Definition: Kernel.h:1079
void load(CFile *loader)
Definition: Kernel.cpp:646
virtual void load_serializable_post()
Definition: SGObject.cpp:1063
CFeatures * rhs
feature vectors to occur on right hand side
Definition: Kernel.h:1061
static CKernel * obtain_from_generic(CSGObject *kernel)
Definition: Kernel.cpp:896
#define SG_UNREF(x)
Definition: SGObject.h:52
#define SG_DEBUG(...)
Definition: SGIO.h:107
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual bool init(CKernel *k)=0
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
Definition: Kernel.cpp:846
bool lhs_equals_rhs
lhs
Definition: Kernel.h:1064
int machine_int_t
Definition: common.h:59
virtual EKernelType get_kernel_type()=0
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
Definition: Kernel.cpp:827
CFeatures * lhs
feature vectors to occur on left hand side
Definition: Kernel.h:1059
The class Features is the base class of all feature objects.
Definition: Features.h:68
virtual void save_serializable_pre()
Definition: Kernel.cpp:935
void kernel_cache_cleanup()
Definition: Kernel.cpp:567
virtual void remove_lhs()
Definition: Kernel.cpp:679
int32_t kernel_cache_check(int32_t cacheidx)
Definition: Kernel.h:689
virtual int32_t get_num_subkernels()
Definition: Kernel.cpp:863
bool init_optimization_svm(CSVM *svm)
Definition: Kernel.cpp:910
A generic Support Vector Machine Interface.
Definition: SVM.h:49
void kernel_cache_reset_lru()
Definition: Kernel.cpp:554
The Kernel base class.
Definition: Kernel.h:158
CKernelNormalizer * normalizer
Definition: Kernel.h:1086
virtual SGVector< float64_t > get_kernel_row(int32_t i)
Definition: Kernel.h:279
#define SG_WARNING(...)
Definition: SGIO.h:128
#define SG_ADD(...)
Definition: SGObject.h:81
virtual bool has_features()
Definition: Kernel.h:534
void kernel_cache_init(int32_t size, bool regression_hack=false)
Definition: Kernel.cpp:181
virtual ~CKernel()
Definition: Kernel.cpp:73
virtual void add_to_normal(int32_t vector_idx, float64_t weight)
Definition: Kernel.cpp:853
void set_const(T const_elem)
Definition: SGMatrix.cpp:133
float64_t KERNELCACHE_ELEM
Definition: Kernel.h:35
void set_const(T const_elem)
Definition: SGVector.cpp:152
void resize_kernel_cache(KERNELCACHE_IDX size, bool regression_hack=false)
Definition: Kernel.cpp:85
virtual EFeatureType get_feature_type() const =0
virtual EFeatureClass get_feature_class()=0

SHOGUN Machine Learning Toolbox - Documentation