SHOGUN 4.1.0
Kernel.cpp
1 /*
2  * EXCEPT FOR THE KERNEL CACHING FUNCTIONS WHICH ARE (W) THORSTEN JOACHIMS
3  * COPYRIGHT (C) 1999 UNIVERSITAET DORTMUND - ALL RIGHTS RESERVED
4  *
5  * this program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 3 of the License, or
8  * (at your option) any later version.
9  *
10  * Written (W) 1999-2009 Soeren Sonnenburg
11  * Written (W) 1999-2008 Gunnar Raetsch
12  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
13  */
14 
15 #include <shogun/lib/config.h>
16 #include <shogun/lib/common.h>
17 #include <shogun/io/SGIO.h>
18 #include <shogun/io/File.h>
19 #include <shogun/lib/Time.h>
20 #include <shogun/lib/Signal.h>
21 
22 #include <shogun/base/Parallel.h>
23 
24 #include <shogun/kernel/Kernel.h>
25 #include <shogun/kernel/normalizer/IdentityKernelNormalizer.h>
26 #include <shogun/features/Features.h>
27 #include <shogun/base/Parameter.h>
28 
29 #include <shogun/classifier/svm/SVM.h>
30 
31 #include <string.h>
32 #include <unistd.h>
34 
35 #ifdef HAVE_PTHREAD
36 #include <pthread.h>
37 #endif
38 
39 using namespace shogun;
40 
41 CKernel::CKernel() : CSGObject()
42 {
43  init();
44  register_params();
45 }
46 
47 CKernel::CKernel(int32_t size) : CSGObject()
48 {
49  init();
50 
51  if (size<10)
52  size=10;
53 
54  cache_size=size;
55  register_params();
56 }
57 
58 
59 CKernel::CKernel(CFeatures* p_lhs, CFeatures* p_rhs, int32_t size) : CSGObject()
60 {
61  init();
62 
63  if (size<10)
64  size=10;
65 
66  cache_size=size;
67 
68  set_normalizer(new CIdentityKernelNormalizer());
69  init(p_lhs, p_rhs);
70  register_params();
71 }
72 
73 CKernel::~CKernel()
74 {
75  if (get_is_initialized())
76  SG_ERROR("Kernel still initialized on destruction.\n")
77 
78  remove_lhs_and_rhs();
79  SG_UNREF(normalizer);
80 
81  SG_INFO("Kernel deleted (%p).\n", this)
82 }
83 
84 #ifdef USE_SVMLIGHT
85 void CKernel::resize_kernel_cache(KERNELCACHE_IDX size, bool regression_hack)
86 {
87  if (size<10)
88  size=10;
89 
91  cache_size=size;
92 
93  if (has_features() && get_num_vec_lhs())
94  kernel_cache_init(cache_size, regression_hack);
95 }
96 #endif //USE_SVMLIGHT
97 
98 bool CKernel::init(CFeatures* l, CFeatures* r)
99 {
100  /* make sure that features are not deleted if same ones are used */
101  SG_REF(l);
102  SG_REF(r);
103 
104  //make sure features were indeed supplied
105  REQUIRE(l, "CKernel::init(%p, %p): Left hand side features required!\n", l, r)
106  REQUIRE(r, "CKernel::init(%p, %p): Right hand side features required!\n", l, r)
107 
108  //make sure features are compatible
109  ASSERT(l->get_feature_class()==r->get_feature_class())
110  ASSERT(l->get_feature_type()==r->get_feature_type())
111 
112  //remove references to previous features
113  remove_lhs_and_rhs();
114 
115  //increase reference counts
116  SG_REF(l);
117  if (l==r)
118  lhs_equals_rhs=true;
119  else // l!=r
120  SG_REF(r);
121 
122  lhs=l;
123  rhs=r;
124 
125  ASSERT(!num_lhs || num_lhs==l->get_num_vectors())
126  ASSERT(!num_rhs || num_rhs==r->get_num_vectors())
127  num_lhs=l->get_num_vectors();
128  num_rhs=r->get_num_vectors();
129 
130 
131  /* unref "safety" refs from beginning */
132  SG_UNREF(r);
133  SG_UNREF(l);
134 
135  SG_DEBUG("leaving CKernel::init(%p, %p)\n", l, r)
136  return true;
137 }
138 
139 bool CKernel::set_normalizer(CKernelNormalizer* n)
140 {
141  SG_REF(n);
142  if (lhs && rhs)
143  n->init(this);
144 
145  SG_UNREF(normalizer);
146  normalizer=n;
147 
148  return (normalizer!=NULL);
149 }
150 
151 CKernelNormalizer* CKernel::get_normalizer()
152 {
153  SG_REF(normalizer)
154  return normalizer;
155 }
156 
157 bool CKernel::init_normalizer()
158 {
159  return normalizer->init(this);
160 }
161 
162 void CKernel::cleanup()
163 {
164  remove_lhs_and_rhs();
165 }
166 
167 #ifdef USE_SVMLIGHT
168 /****************************** Cache handling *******************************/
169 
170 void CKernel::kernel_cache_init(int32_t buffsize, bool regression_hack)
171 {
172  int32_t totdoc=get_num_vec_lhs();
173  if (totdoc<=0)
174  {
175  SG_ERROR("kernel has zero rows: num_lhs=%d num_rhs=%d\n",
176  get_num_vec_lhs(), get_num_vec_rhs());
177  }
178  uint64_t buffer_size=0;
179  int32_t i;
180 
181  //in regression the additional constraints are made by doubling the training data
182  if (regression_hack)
183  totdoc*=2;
184 
185  buffer_size=((uint64_t) buffsize)*1024*1024/sizeof(KERNELCACHE_ELEM);
186  if (buffer_size>((uint64_t) totdoc)*totdoc)
187  buffer_size=((uint64_t) totdoc)*totdoc;
188 
189  SG_INFO("using a kernel cache of size %lld MB (%lld bytes) for %s Kernel\n", buffer_size*sizeof(KERNELCACHE_ELEM)/1024/1024, buffer_size*sizeof(KERNELCACHE_ELEM), get_name())
190 
191  //make sure it fits in the *signed* KERNELCACHE_IDX type
192  ASSERT(buffer_size < (((uint64_t) 1) << (sizeof(KERNELCACHE_IDX)*8-1)))
193 
194  kernel_cache.index = SG_MALLOC(int32_t, totdoc);
195  kernel_cache.occu = SG_MALLOC(int32_t, totdoc);
196  kernel_cache.lru = SG_MALLOC(int32_t, totdoc);
197  kernel_cache.invindex = SG_MALLOC(int32_t, totdoc);
198  kernel_cache.active2totdoc = SG_MALLOC(int32_t, totdoc);
199  kernel_cache.totdoc2active = SG_MALLOC(int32_t, totdoc);
200  kernel_cache.buffer = SG_MALLOC(KERNELCACHE_ELEM, buffer_size);
201  kernel_cache.buffsize=buffer_size;
202  kernel_cache.max_elems=(int32_t) (kernel_cache.buffsize/totdoc);
203 
204  if(kernel_cache.max_elems>totdoc) {
205  kernel_cache.max_elems=totdoc;
206  }
207 
208  kernel_cache.elems=0; // initialize cache
209  for(i=0;i<totdoc;i++) {
210  kernel_cache.index[i]=-1;
211  kernel_cache.lru[i]=0;
212  }
213  for(i=0;i<totdoc;i++) {
214  kernel_cache.occu[i]=0;
215  kernel_cache.invindex[i]=-1;
216  }
217 
218  kernel_cache.activenum=totdoc;
219  for(i=0;i<totdoc;i++) {
220  kernel_cache.active2totdoc[i]=i;
221  kernel_cache.totdoc2active[i]=i;
222  }
223 
224  kernel_cache.time=0;
225 }
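// --- Editor's note (not part of the original Kernel.cpp): a worked example of
// the sizing arithmetic in kernel_cache_init() above, assuming KERNELCACHE_ELEM
// is float64_t (8 bytes). With buffsize=10 (MB) and totdoc=2000 vectors:
//   buffer_size = 10*1024*1024/8 = 1310720 cache elements (not capped, since
//                 totdoc*totdoc = 4000000 is larger)
//   max_elems   = 1310720/2000   = 655 full kernel rows fit in the cache
// max_elems is additionally capped at totdoc when the whole matrix would fit.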
226 
227 void CKernel::get_kernel_row(
228  int32_t docnum, int32_t *active2dnum, float64_t *buffer, bool full_line)
229 {
230  int32_t i,j;
231  KERNELCACHE_IDX start;
232 
233  int32_t num_vectors = get_num_vec_lhs();
234  if (docnum>=num_vectors)
235  docnum=2*num_vectors-1-docnum;
236 
237  /* is cached? */
238  if(kernel_cache.index[docnum] != -1)
239  {
240  kernel_cache.lru[kernel_cache.index[docnum]]=kernel_cache.time; /* lru */
241  start=((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[docnum];
242 
243  if (full_line)
244  {
245  for(j=0;j<get_num_vec_lhs();j++)
246  {
247  if(kernel_cache.totdoc2active[j] >= 0)
248  buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
249  else
250  buffer[j]=(float64_t) kernel(docnum, j);
251  }
252  }
253  else
254  {
255  for(i=0;(j=active2dnum[i])>=0;i++)
256  {
257  if(kernel_cache.totdoc2active[j] >= 0)
258  buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
259  else
260  {
261  int32_t k=j;
262  if (k>=num_vectors)
263  k=2*num_vectors-1-k;
264  buffer[j]=(float64_t) kernel(docnum, k);
265  }
266  }
267  }
268  }
269  else
270  {
271  if (full_line)
272  {
273  for(j=0;j<get_num_vec_lhs();j++)
274  buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, j);
275  }
276  else
277  {
278  for(i=0;(j=active2dnum[i])>=0;i++)
279  {
280  int32_t k=j;
281  if (k>=num_vectors)
282  k=2*num_vectors-1-k;
283  buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, k);
284  }
285  }
286  }
287 }
288 
289 
290 // Fills cache for the row m
291 void CKernel::cache_kernel_row(int32_t m)
292 {
293  register int32_t j,k,l;
294  register KERNELCACHE_ELEM *cache;
295 
296  int32_t num_vectors = get_num_vec_lhs();
297 
298  if (m>=num_vectors)
299  m=2*num_vectors-1-m;
300 
301  if(!kernel_cache_check(m)) // not cached yet
302  {
303  cache = kernel_cache_clean_and_malloc(m);
304  if(cache) {
305  l=kernel_cache.totdoc2active[m];
306 
307  for(j=0;j<kernel_cache.activenum;j++) // fill cache
308  {
309  k=kernel_cache.active2totdoc[j];
310 
311  if((kernel_cache.index[k] != -1) && (l != -1) && (k != m)) {
312  cache[j]=kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)
313  *kernel_cache.index[k]+l];
314  }
315  else
316  {
317  if (k>=num_vectors)
318  k=2*num_vectors-1-k;
319 
320  cache[j]=kernel(m, k);
321  }
322  }
323  }
324  else
325  perror("Error: Kernel cache full! => increase cache size");
326  }
327 }
328 
329 
330 void* CKernel::cache_multiple_kernel_row_helper(void* p)
331 {
332  int32_t j,k,l;
333  S_KTHREAD_PARAM* params = (S_KTHREAD_PARAM*) p;
334 
335  for (int32_t i=params->start; i<params->end; i++)
336  {
337  KERNELCACHE_ELEM* cache=params->cache[i];
338  int32_t m = params->uncached_rows[i];
339  l=params->kernel_cache->totdoc2active[m];
340 
341  for(j=0;j<params->kernel_cache->activenum;j++) // fill cache
342  {
343  k=params->kernel_cache->active2totdoc[j];
344 
345  if((params->kernel_cache->index[k] != -1) && (l != -1) && (!params->needs_computation[k])) {
346  cache[j]=params->kernel_cache->buffer[((KERNELCACHE_IDX) params->kernel_cache->activenum)
347  *params->kernel_cache->index[k]+l];
348  }
349  else
350  {
351  if (k>=params->num_vectors)
352  k=2*params->num_vectors-1-k;
353 
354  cache[j]=params->kernel->kernel(m, k);
355  }
356  }
357 
358  //now line m is cached
359  params->needs_computation[m]=0;
360  }
361  return NULL;
362 }
363 
364 // Fills cache for the rows in key
365 void CKernel::cache_multiple_kernel_rows(int32_t* rows, int32_t num_rows)
366 {
367 #ifdef HAVE_PTHREAD
368  int32_t nthreads=parallel->get_num_threads();
369 
370  if (nthreads<2)
371  {
372 #endif
373  for(int32_t i=0;i<num_rows;i++)
374  cache_kernel_row(rows[i]);
375 #ifdef HAVE_PTHREAD
376  }
377  else
378  {
379  // fill up kernel cache
380  int32_t* uncached_rows = SG_MALLOC(int32_t, num_rows);
381  KERNELCACHE_ELEM** cache = SG_MALLOC(KERNELCACHE_ELEM*, num_rows);
382  pthread_t* threads = SG_MALLOC(pthread_t, nthreads-1);
383  S_KTHREAD_PARAM* params = SG_MALLOC(S_KTHREAD_PARAM, nthreads-1);
384  int32_t num_threads=nthreads-1;
385  int32_t num_vec=get_num_vec_lhs();
386  ASSERT(num_vec>0)
387  uint8_t* needs_computation=SG_CALLOC(uint8_t, num_vec);
388 
389  int32_t step=0;
390  int32_t num=0;
391  int32_t end=0;
392 
393  // allocate cachelines if necessary
394  for (int32_t i=0; i<num_rows; i++)
395  {
396  int32_t idx=rows[i];
397  if (idx>=num_vec)
398  idx=2*num_vec-1-idx;
399 
400  if (kernel_cache_check(idx))
401  continue;
402 
403  needs_computation[idx]=1;
404  uncached_rows[num]=idx;
405  cache[num]= kernel_cache_clean_and_malloc(idx);
406 
407  if (!cache[num])
408  SG_ERROR("Kernel cache full! => increase cache size\n")
409 
410  num++;
411  }
412 
413  if (num>0)
414  {
415  step= num/nthreads;
416 
417  if (step<1)
418  {
419  num_threads=num-1;
420  step=1;
421  }
422 
423  for (int32_t t=0; t<num_threads; t++)
424  {
425  params[t].kernel = this;
426  params[t].kernel_cache = &kernel_cache;
427  params[t].cache = cache;
428  params[t].uncached_rows = uncached_rows;
429  params[t].needs_computation = needs_computation;
430  params[t].num_uncached = num;
431  params[t].start = t*step;
432  params[t].end = (t+1)*step;
433  params[t].num_vectors = get_num_vec_lhs();
434  end=params[t].end;
435 
436  int code=pthread_create(&threads[t], NULL,
437  CKernel::cache_multiple_kernel_row_helper, (void*)&params[t]);
438 
439  if (code != 0)
440  {
441  SG_WARNING("Thread creation failed (thread %d of %d) "
442  "with error:'%s'\n",t, num_threads, strerror(code));
443  num_threads=t;
444  end=t*step;
445  break;
446  }
447  }
448  }
449  else
450  num_threads=-1;
451 
452 
453  S_KTHREAD_PARAM last_param;
454  last_param.kernel = this;
455  last_param.kernel_cache = &kernel_cache;
456  last_param.cache = cache;
457  last_param.uncached_rows = uncached_rows;
458  last_param.needs_computation = needs_computation;
459  last_param.start = end;
460  last_param.num_uncached = num;
461  last_param.end = num;
462  last_param.num_vectors = get_num_vec_lhs();
463 
464  cache_multiple_kernel_row_helper(&last_param);
465 
466 
467  for (int32_t t=0; t<num_threads; t++)
468  {
469  if (pthread_join(threads[t], NULL) != 0)
470  SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads)
471  }
472 
473  SG_FREE(needs_computation);
474  SG_FREE(params);
475  SG_FREE(threads);
476  SG_FREE(cache);
477  SG_FREE(uncached_rows);
478  }
479 #endif
480 }
481 
482 // remove numshrink columns in the cache
483 // which correspond to examples marked
484 void CKernel::kernel_cache_shrink(
485  int32_t totdoc, int32_t numshrink, int32_t *after)
486 {
487  ASSERT(totdoc > 0);
488  register int32_t i,j,jj,scount; // 0 in after.
489  KERNELCACHE_IDX from=0,to=0;
490  int32_t *keep;
491 
492  keep=SG_MALLOC(int32_t, totdoc);
493  for(j=0;j<totdoc;j++) {
494  keep[j]=1;
495  }
496  scount=0;
497  for(jj=0;(jj<kernel_cache.activenum) && (scount<numshrink);jj++) {
498  j=kernel_cache.active2totdoc[jj];
499  if(!after[j]) {
500  scount++;
501  keep[j]=0;
502  }
503  }
504 
505  for(i=0;i<kernel_cache.max_elems;i++) {
506  for(jj=0;jj<kernel_cache.activenum;jj++) {
507  j=kernel_cache.active2totdoc[jj];
508  if(!keep[j]) {
509  from++;
510  }
511  else {
512  kernel_cache.buffer[to]=kernel_cache.buffer[from];
513  to++;
514  from++;
515  }
516  }
517  }
518 
519  kernel_cache.activenum=0;
520  for(j=0;j<totdoc;j++) {
521  if((keep[j]) && (kernel_cache.totdoc2active[j] != -1)) {
522  kernel_cache.active2totdoc[kernel_cache.activenum]=j;
523  kernel_cache.totdoc2active[j]=kernel_cache.activenum;
524  kernel_cache.activenum++;
525  }
526  else {
527  kernel_cache.totdoc2active[j]=-1;
528  }
529  }
530 
531  kernel_cache.max_elems= (int32_t) kernel_cache.buffsize;
532 
533  if (kernel_cache.activenum>0)
534  kernel_cache.buffsize/=kernel_cache.activenum;
535 
536  if(kernel_cache.max_elems>totdoc)
537  kernel_cache.max_elems=totdoc;
538 
539  SG_FREE(keep);
540 
541 }
542 
543 void CKernel::kernel_cache_reset_lru()
544 {
545  int32_t maxlru=0,k;
546 
547  for(k=0;k<kernel_cache.max_elems;k++) {
548  if(maxlru < kernel_cache.lru[k])
549  maxlru=kernel_cache.lru[k];
550  }
551  for(k=0;k<kernel_cache.max_elems;k++) {
552  kernel_cache.lru[k]-=maxlru;
553  }
554 }
555 
556 void CKernel::kernel_cache_cleanup()
557 {
558  SG_FREE(kernel_cache.index);
559  SG_FREE(kernel_cache.occu);
560  SG_FREE(kernel_cache.lru);
561  SG_FREE(kernel_cache.invindex);
562  SG_FREE(kernel_cache.active2totdoc);
563  SG_FREE(kernel_cache.totdoc2active);
564  SG_FREE(kernel_cache.buffer);
565  memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
566 }
567 
568 int32_t CKernel::kernel_cache_malloc()
569 {
570  int32_t i;
571 
572  if(kernel_cache_space_available()) {
573  for(i=0;i<kernel_cache.max_elems;i++) {
574  if(!kernel_cache.occu[i]) {
575  kernel_cache.occu[i]=1;
576  kernel_cache.elems++;
577  return(i);
578  }
579  }
580  }
581  return(-1);
582 }
583 
584 void CKernel::kernel_cache_free(int32_t cacheidx)
585 {
586  kernel_cache.occu[cacheidx]=0;
587  kernel_cache.elems--;
588 }
589 
590 // remove least recently used cache
591 // element
592 int32_t CKernel::kernel_cache_free_lru()
593 {
594  register int32_t k,least_elem=-1,least_time;
595 
596  least_time=kernel_cache.time+1;
597  for(k=0;k<kernel_cache.max_elems;k++) {
598  if(kernel_cache.invindex[k] != -1) {
599  if(kernel_cache.lru[k]<least_time) {
600  least_time=kernel_cache.lru[k];
601  least_elem=k;
602  }
603  }
604  }
605 
606  if(least_elem != -1) {
607  kernel_cache_free(least_elem);
608  kernel_cache.index[kernel_cache.invindex[least_elem]]=-1;
609  kernel_cache.invindex[least_elem]=-1;
610  return(1);
611  }
612  return(0);
613 }
614 
615 // Get a free cache entry. In case cache is full, the lru
616 // element is removed.
617 KERNELCACHE_ELEM* CKernel::kernel_cache_clean_and_malloc(int32_t cacheidx)
618 {
619  int32_t result;
620  if((result = kernel_cache_malloc()) == -1) {
621  if(kernel_cache_free_lru()) {
622  result = kernel_cache_malloc();
623  }
624  }
625  kernel_cache.index[cacheidx]=result;
626  if(result == -1) {
627  return(0);
628  }
629  kernel_cache.invindex[result]=cacheidx;
630  kernel_cache.lru[kernel_cache.index[cacheidx]]=kernel_cache.time; // lru
631  return &kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[cacheidx]];
632 }
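// --- Editor's note (not part of the original Kernel.cpp): summary of the cache
// bookkeeping used by the functions above. index[docnum] holds the cache slot
// of a training example (-1 if the row is not cached), invindex[slot] maps a
// slot back to its example, and lru[slot] records the "time" of the last
// access. kernel_cache_malloc() hands out a free slot; if none is available,
// kernel_cache_clean_and_malloc() evicts the slot with the smallest lru value
// via kernel_cache_free_lru() and retries.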
633 #endif //USE_SVMLIGHT
634 
635 void CKernel::load(CFile* loader)
636 {
637  SG_SET_LOCALE_C;
638  SG_RESET_LOCALE;
639 }
640 
641 void CKernel::save(CFile* writer)
642 {
643  SGMatrix<float64_t> k_matrix=get_kernel_matrix<float64_t>();
644  SG_SET_LOCALE_C;
645  writer->set_matrix(k_matrix.matrix, k_matrix.num_rows, k_matrix.num_cols);
646  SG_RESET_LOCALE;
647 }
648 
649 void CKernel::remove_lhs_and_rhs()
650 {
651  SG_DEBUG("entering CKernel::remove_lhs_and_rhs\n")
652  if (rhs!=lhs)
653  SG_UNREF(rhs);
654  rhs = NULL;
655  num_rhs=0;
656 
657  SG_UNREF(lhs);
658  lhs = NULL;
659  num_lhs=0;
660  lhs_equals_rhs=false;
661 
662 #ifdef USE_SVMLIGHT
663  cache_reset();
664 #endif //USE_SVMLIGHT
665  SG_DEBUG("leaving CKernel::remove_lhs_and_rhs\n")
666 }
667 
668 void CKernel::remove_lhs()
669 {
670  if (rhs==lhs)
671  rhs=NULL;
672  SG_UNREF(lhs);
673  lhs = NULL;
674  num_lhs=0;
675  lhs_equals_rhs=false;
676 #ifdef USE_SVMLIGHT
677  cache_reset();
678 #endif //USE_SVMLIGHT
679 }
680 
681 /// takes all necessary steps if the rhs is removed from kernel
682 void CKernel::remove_rhs()
683 {
684  if (rhs!=lhs)
685  SG_UNREF(rhs);
686  rhs = NULL;
687  num_rhs=0;
688  lhs_equals_rhs=false;
689 
690 #ifdef USE_SVMLIGHT
691  cache_reset();
692 #endif //USE_SVMLIGHT
693 }
694 
695 #define ENUM_CASE(n) case n: SG_INFO(#n " ") break;
696 
697 void CKernel::list_kernel()
698 {
699  SG_INFO("%p - \"%s\" weight=%1.2f OPT:%s", this, get_name(),
700  get_combined_kernel_weight(),
701  get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" :
702  "SLOWBUTMEMEFFICIENT");
703 
704  switch (get_kernel_type())
705  {
767  }
768 
769  switch (get_feature_class())
770  {
781  ENUM_CASE(C_WD)
793  }
794 
795  switch (get_feature_type())
796  {
811  }
812  SG_INFO("\n")
813 }
814 #undef ENUM_CASE
815 
816 bool CKernel::init_optimization(
817  int32_t count, int32_t *IDX, float64_t * weights)
818 {
819  SG_ERROR("kernel does not support linadd optimization\n")
820  return false ;
821 }
822 
823 bool CKernel::delete_optimization()
824 {
825  SG_ERROR("kernel does not support linadd optimization\n")
826  return false;
827 }
828 
829 float64_t CKernel::compute_optimized(int32_t vector_idx)
830 {
831  SG_ERROR("kernel does not support linadd optimization\n")
832  return 0;
833 }
834 
835 void CKernel::compute_batch(
836  int32_t num_vec, int32_t* vec_idx, float64_t* target, int32_t num_suppvec,
837  int32_t* IDX, float64_t* weights, float64_t factor)
838 {
839  SG_ERROR("kernel does not support batch computation\n")
840 }
841 
842 void CKernel::add_to_normal(int32_t vector_idx, float64_t weight)
843 {
844  SG_ERROR("kernel does not support linadd optimization, add_to_normal not implemented\n")
845 }
846 
847 void CKernel::clear_normal()
848 {
849  SG_ERROR("kernel does not support linadd optimization, clear_normal not implemented\n")
850 }
851 
852 int32_t CKernel::get_num_subkernels()
853 {
854  return 1;
855 }
856 
857 void CKernel::compute_by_subkernel(
858  int32_t vector_idx, float64_t * subkernel_contrib)
859 {
860  SG_ERROR("kernel compute_by_subkernel not implemented\n")
861 }
862 
863 const float64_t* CKernel::get_subkernel_weights(int32_t &num_weights)
864 {
865  num_weights=1 ;
866  return &combined_kernel_weight ;
867 }
868 
869 SGVector<float64_t> CKernel::get_subkernel_weights()
870 {
871  int num_weights = 1;
872  const float64_t* weight = get_subkernel_weights(num_weights);
873  return SGVector<float64_t>(const_cast<float64_t*>(weight),1,false);
874 }
875 
876 void CKernel::set_subkernel_weights(SGVector<float64_t> weights)
877 {
878  ASSERT(weights.vector)
879  if (weights.vlen!=1)
880  SG_ERROR("number of subkernel weights should be one ...\n")
881 
882  combined_kernel_weight = weights.vector[0] ;
883 }
884 
885 CKernel* CKernel::obtain_from_generic(CSGObject* kernel)
886 {
887  if (kernel)
888  {
889  CKernel* casted=dynamic_cast<CKernel*>(kernel);
890  REQUIRE(casted, "CKernel::obtain_from_generic(): Error, provided object"
891  " of class \"%s\" is not a subclass of CKernel!\n",
892  kernel->get_name());
893  return casted;
894  }
895  else
896  return NULL;
897 }
898 
899 bool CKernel::init_optimization_svm(CSVM* svm)
900 {
901  int32_t num_suppvec=svm->get_num_support_vectors();
902  int32_t* sv_idx=SG_MALLOC(int32_t, num_suppvec);
903  float64_t* sv_weight=SG_MALLOC(float64_t, num_suppvec);
904 
905  for (int32_t i=0; i<num_suppvec; i++)
906  {
907  sv_idx[i] = svm->get_support_vector(i);
908  sv_weight[i] = svm->get_alpha(i);
909  }
910  bool ret = init_optimization(num_suppvec, sv_idx, sv_weight);
911 
912  SG_FREE(sv_idx);
913  SG_FREE(sv_weight);
914  return ret;
915 }
916 
917 void CKernel::load_serializable_post() throw (ShogunException)
918 {
919  CSGObject::load_serializable_post();
920  if (lhs_equals_rhs)
921  rhs=lhs;
922 }
923 
924 void CKernel::save_serializable_pre() throw (ShogunException)
925 {
926  CSGObject::save_serializable_pre();
927 
928  if (lhs_equals_rhs)
929  rhs=NULL;
930 }
931 
932 void CKernel::save_serializable_post() throw (ShogunException)
933 {
934  CSGObject::save_serializable_post();
935 
936  if (lhs_equals_rhs)
937  rhs=lhs;
938 }
939 
940 void CKernel::register_params() {
941  SG_ADD(&cache_size, "cache_size",
942  "Cache size in MB.", MS_NOT_AVAILABLE);
943  SG_ADD((CSGObject**) &lhs, "lhs",
944  "Feature vectors to occur on left hand side.", MS_NOT_AVAILABLE);
945  SG_ADD((CSGObject**) &rhs, "rhs",
946  "Feature vectors to occur on right hand side.", MS_NOT_AVAILABLE);
947  SG_ADD(&lhs_equals_rhs, "lhs_equals_rhs",
948  "If features on lhs are the same as on rhs.", MS_NOT_AVAILABLE);
949  SG_ADD(&num_lhs, "num_lhs", "Number of feature vectors on left hand side.",
950  MS_NOT_AVAILABLE);
951  SG_ADD(&num_rhs, "num_rhs", "Number of feature vectors on right hand side.",
952  MS_NOT_AVAILABLE);
953  SG_ADD(&combined_kernel_weight, "combined_kernel_weight",
954  "Combined kernel weight.", MS_AVAILABLE);
955  SG_ADD(&optimization_initialized, "optimization_initialized",
956  "Optimization is initialized.", MS_NOT_AVAILABLE);
957  SG_ADD((machine_int_t*) &opt_type, "opt_type",
958  "Optimization type.", MS_NOT_AVAILABLE);
959  SG_ADD(&properties, "properties", "Kernel properties.", MS_NOT_AVAILABLE);
960  SG_ADD((CSGObject**) &normalizer, "normalizer", "Normalize the kernel.",
961  MS_AVAILABLE);
962 }
963 
964 
965 void CKernel::init()
966 {
967  cache_size=10;
968  kernel_matrix=NULL;
969  lhs=NULL;
970  rhs=NULL;
971  num_lhs=0;
972  num_rhs=0;
973  lhs_equals_rhs=false;
974  combined_kernel_weight=1;
975  optimization_initialized=false;
976  opt_type=FASTBUTMEMHUNGRY;
977  properties=KP_NONE;
978  normalizer=NULL;
979 
980 #ifdef USE_SVMLIGHT
981  memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
982 #endif //USE_SVMLIGHT
983 
984  set_normalizer(new CIdentityKernelNormalizer());
985 }
986 
987 namespace shogun
988 {
990 template <class T> struct K_THREAD_PARAM
991 {
992  /** kernel */
993  CKernel* kernel;
994  /** start (unit row) */
995  int32_t start;
996  /** end (unit row) */
997  int32_t end;
998  /** start (unit number of elements) */
999  int64_t total_start;
1000  /** end (unit number of elements) */
1001  int64_t total_end;
1002  /** m */
1003  int32_t m;
1004  /** n */
1005  int32_t n;
1006  /** result */
1007  T* result;
1008  /** whether the matrix is symmetric, i.e. k(i,j)=k(j,i) */
1009  bool symmetric;
1010  /** output progress */
1011  bool verbose;
1012 };
1013 }
1014 
1015 float64_t CKernel::sum_symmetric_block(index_t block_begin, index_t block_size,
1016  bool no_diag)
1017 {
1018  SG_DEBUG("Entering\n");
1019 
1020  REQUIRE(has_features(), "No features assigned to kernel\n")
1021  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
1022  REQUIRE(block_begin>=0 && block_begin<num_rhs,
1023  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
1024  REQUIRE(block_begin+block_size<=num_rhs,
1025  "Invalid block size (%d) at starting index (%d, %d)! "
1026  "Please use smaller blocks!", block_size, block_begin, block_begin)
1027  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
1028 
1029  float64_t sum=0.0;
1030 
1031  // since the block is symmetric with main diagonal inside, we can save half
1032  // the computation by using only the upper triangular part.
1033  // this can be done in parallel
1034 #pragma omp parallel for
1035  for (index_t i=0; i<block_size; ++i)
1036  {
1037  // compute the kernel values on the upper triangular part of the kernel
1038  // matrix and compute sum on the fly
1039  for (index_t j=i+1; j<block_size; ++j)
1040  {
1041  float64_t k=kernel(i+block_begin, j+block_begin);
1042 #pragma omp atomic
1043  sum+=k;
1044  }
1045  }
1046 
1047  // the actual sum would be twice of what we computed
1048  sum*=2;
1049 
1050  // add the diagonal elements if required - keeping this check
1051  // outside of the loop to save cycles
1052  if (!no_diag)
1053  {
1054 #pragma omp parallel for
1055  for (index_t i=0; i<block_size; ++i)
1056  {
1057  float64_t diag=kernel(i+block_begin, i+block_begin);
1058 #pragma omp atomic
1059  sum+=diag;
1060  }
1061  }
1062 
1063  SG_DEBUG("Leaving\n");
1064 
1065  return sum;
1066 }
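// --- Editor's note (not part of the original Kernel.cpp): in formula form,
// for a symmetric block starting at index b with size s, sum_symmetric_block()
// returns
//   sum = 2*\sum_{0<=i<j<s} k(b+i, b+j) + (no_diag ? 0 : \sum_{0<=i<s} k(b+i, b+i)),
// i.e. only the upper triangle is evaluated and doubled, and the diagonal is
// added in a separate pass when requested.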
1067 
1068 float64_t CKernel::sum_block(index_t block_begin_row, index_t block_begin_col,
1069  index_t block_size_row, index_t block_size_col, bool no_diag)
1070 {
1071  SG_DEBUG("Entering\n");
1072 
1073  REQUIRE(has_features(), "No features assigned to kernel\n")
1074  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
1075  block_begin_col>=0 && block_begin_col<num_rhs,
1076  "Invalid block begin index (%d, %d)!\n",
1077  block_begin_row, block_begin_col)
1078  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
1079  block_begin_col+block_size_col<=num_rhs,
1080  "Invalid block size (%d, %d) at starting index (%d, %d)! "
1081  "Please use smaller blocks!", block_size_row, block_size_col,
1082  block_begin_row, block_begin_col)
1083  REQUIRE(block_size_row>=1 && block_size_col>=1,
1084  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
1085 
1086  // check if removal of diagonal is required/valid
1087  if (no_diag && block_size_row!=block_size_col)
1088  {
1089  SG_WARNING("Not removing the main diagonal since block is not square!\n");
1090  no_diag=false;
1091  }
1092 
1093  float64_t sum=0.0;
1094 
1095  // this can be done in parallel for the rows/cols
1096 #pragma omp parallel for
1097  for (index_t i=0; i<block_size_row; ++i)
1098  {
1099  // compute the kernel values and compute sum on the fly
1100  for (index_t j=0; j<block_size_col; ++j)
1101  {
1102  float64_t k=no_diag && i==j ? 0 :
1103  kernel(i+block_begin_row, j+block_begin_col);
1104 #pragma omp atomic
1105  sum+=k;
1106  }
1107  }
1108 
1109  SG_DEBUG("Leaving\n");
1110 
1111  return sum;
1112 }
1113 
1114 SGVector<float64_t> CKernel::row_wise_sum_symmetric_block(index_t block_begin,
1115  index_t block_size, bool no_diag)
1116 {
1117  SG_DEBUG("Entering\n");
1118 
1119  REQUIRE(has_features(), "No features assigned to kernel\n")
1120  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
1121  REQUIRE(block_begin>=0 && block_begin<num_rhs,
1122  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
1123  REQUIRE(block_begin+block_size<=num_rhs,
1124  "Invalid block size (%d) at starting index (%d, %d)! "
1125  "Please use smaller blocks!", block_size, block_begin, block_begin)
1126  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
1127 
1128  // initialize the vector that accumulates the row/col-wise sum on the go
1129  SGVector<float64_t> row_sum(block_size);
1130  row_sum.set_const(0.0);
1131 
1132  // since the block is symmetric with main diagonal inside, we can save half
1133  // the computation by using only the upper triangular part.
1134  // this can be done in parallel for the rows/cols
1135 #pragma omp parallel for
1136  for (index_t i=0; i<block_size; ++i)
1137  {
1138  // compute the kernel values on the upper triangular part of the kernel
1139  // matrix and compute row-wise sum on the fly
1140  for (index_t j=i+1; j<block_size; ++j)
1141  {
1142  float64_t k=kernel(i+block_begin, j+block_begin);
1143 #pragma omp critical
1144  {
1145  row_sum[i]+=k;
1146  row_sum[j]+=k;
1147  }
1148  }
1149  }
1150 
1151  // add the diagonal elements if required - keeping this check
1152  // outside of the loop to save cycles
1153  if (!no_diag)
1154  {
1155 #pragma omp parallel for
1156  for (index_t i=0; i<block_size; ++i)
1157  {
1158  float64_t diag=kernel(i+block_begin, i+block_begin);
1159  row_sum[i]+=diag;
1160  }
1161  }
1162 
1163  SG_DEBUG("Leaving\n");
1164 
1165  return row_sum;
1166 }
1167 
1168 SGMatrix<float64_t> CKernel::row_wise_sum_squared_sum_symmetric_block(index_t
1169  block_begin, index_t block_size, bool no_diag)
1170 {
1171  SG_DEBUG("Entering\n");
1172 
1173  REQUIRE(has_features(), "No features assigned to kernel\n")
1174  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
1175  REQUIRE(block_begin>=0 && block_begin<num_rhs,
1176  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
1177  REQUIRE(block_begin+block_size<=num_rhs,
1178  "Invalid block size (%d) at starting index (%d, %d)! "
1179  "Please use smaller blocks!", block_size, block_begin, block_begin)
1180  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
1181 
1182  // initialize the matrix that accumulates the row/col-wise sum on the go
1183  // the first column stores the sum of kernel values
1184  // the second column stores the sum of squared kernel values
1185  SGMatrix<float64_t> row_sum(block_size, 2);
1186  row_sum.set_const(0.0);
1187 
1188  // since the block is symmetric with main diagonal inside, we can save half
1189  // the computation by using only the upper triangular part
1190  // this can be done in parallel for the rows/cols
1191 #pragma omp parallel for
1192  for (index_t i=0; i<block_size; ++i)
1193  {
1194  // compute the kernel values on the upper triangular part of the kernel
1195  // matrix and compute row-wise sum and squared sum on the fly
1196  for (index_t j=i+1; j<block_size; ++j)
1197  {
1198  float64_t k=kernel(i+block_begin, j+block_begin);
1199 #pragma omp critical
1200  {
1201  row_sum(i, 0)+=k;
1202  row_sum(j, 0)+=k;
1203  row_sum(i, 1)+=k*k;
1204  row_sum(j, 1)+=k*k;
1205  }
1206  }
1207  }
1208 
1209  // add the diagonal elements if required - keeping this check
1210  // outside of the loop to save cycles
1211  if (!no_diag)
1212  {
1213 #pragma omp parallel for
1214  for (index_t i=0; i<block_size; ++i)
1215  {
1216  float64_t diag=kernel(i+block_begin, i+block_begin);
1217  row_sum(i, 0)+=diag;
1218  row_sum(i, 1)+=diag*diag;
1219  }
1220  }
1221 
1222  SG_DEBUG("Leaving\n");
1223 
1224  return row_sum;
1225 }
1226 
1227 SGVector<float64_t> CKernel::row_col_wise_sum_block(index_t block_begin_row,
1228  index_t block_begin_col, index_t block_size_row,
1229  index_t block_size_col, bool no_diag)
1230 {
1231  SG_DEBUG("Entering\n");
1232 
1233  REQUIRE(has_features(), "No features assigned to kernel\n")
1234  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
1235  block_begin_col>=0 && block_begin_col<num_rhs,
1236  "Invalid block begin index (%d, %d)!\n",
1237  block_begin_row, block_begin_col)
1238  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
1239  block_begin_col+block_size_col<=num_rhs,
1240  "Invalid block size (%d, %d) at starting index (%d, %d)! "
1241  "Please use smaller blocks!", block_size_row, block_size_col,
1242  block_begin_row, block_begin_col)
1243  REQUIRE(block_size_row>=1 && block_size_col>=1,
1244  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
1245 
1246  // check if removal of diagonal is required/valid
1247  if (no_diag && block_size_row!=block_size_col)
1248  {
1249  SG_WARNING("Not removing the main diagonal since block is not square!\n");
1250  no_diag=false;
1251  }
1252 
1253  // initialize the vector that accumulates the row/col-wise sum on the go
1254  // the first block_size_row entries store the row-wise sum of kernel values
1255  // the next block_size_col entries store the col-wise sum of kernel values
1256  SGVector<float64_t> sum(block_size_row+block_size_col);
1257  sum.set_const(0.0);
1258 
1259  // this can be done in parallel for the rows/cols
1260 #pragma omp parallel for
1261  for (index_t i=0; i<block_size_row; ++i)
1262  {
1263  // compute the kernel values and compute sum on the fly
1264  for (index_t j=0; j<block_size_col; ++j)
1265  {
1266  float64_t k=no_diag && i==j ? 0 :
1267  kernel(i+block_begin_row, j+block_begin_col);
1268 #pragma omp critical
1269  {
1270  sum[i]+=k;
1271  sum[j+block_size_row]+=k;
1272  }
1273  }
1274  }
1275 
1276  SG_DEBUG("Leaving\n");
1277 
1278  return sum;
1279 }
1280 
1281 template <class T> void* CKernel::get_kernel_matrix_helper(void* p)
1282 {
1283  K_THREAD_PARAM<T>* params= (K_THREAD_PARAM<T>*) p;
1284  int32_t i_start=params->start;
1285  int32_t i_end=params->end;
1286  CKernel* k=params->kernel;
1287  T* result=params->result;
1288  bool symmetric=params->symmetric;
1289  int32_t n=params->n;
1290  int32_t m=params->m;
1291  bool verbose=params->verbose;
1292  int64_t total_start=params->total_start;
1293  int64_t total_end=params->total_end;
1294  int64_t total=total_start;
1295 
1296  for (int32_t i=i_start; i<i_end; i++)
1297  {
1298  int32_t j_start=0;
1299 
1300  if (symmetric)
1301  j_start=i;
1302 
1303  for (int32_t j=j_start; j<n; j++)
1304  {
1305  float64_t v=k->kernel(i,j);
1306  result[i+j*m]=v;
1307 
1308  if (symmetric && i!=j)
1309  result[j+i*m]=v;
1310 
1311  if (verbose)
1312  {
1313  total++;
1314 
1315  if (symmetric && i!=j)
1316  total++;
1317 
1318  if (total%100 == 0)
1319  SG_OBJ_PROGRESS(k, total, total_start, total_end)
1320 
1321  if (CSignal::cancel_computations())
1322  break;
1323  }
1324  }
1325 
1326  }
1327 
1328  return NULL;
1329 }
1330 
1331 template <class T>
1332 SGMatrix<T> CKernel::get_kernel_matrix()
1333 {
1334  T* result = NULL;
1335 
1336  REQUIRE(has_features(), "no features assigned to kernel\n")
1337 
1338  int32_t m=get_num_vec_lhs();
1339  int32_t n=get_num_vec_rhs();
1340 
1341  int64_t total_num = int64_t(m)*n;
1342 
1343  // if lhs == rhs and sizes match assume k(i,j)=k(j,i)
1344  bool symmetric= (lhs && lhs==rhs && m==n);
1345 
1346  SG_DEBUG("returning kernel matrix of size %dx%d\n", m, n)
1347 
1348  result=SG_MALLOC(T, total_num);
1349 
1350  int32_t num_threads=parallel->get_num_threads();
1351  if (num_threads < 2)
1352  {
1353  K_THREAD_PARAM<T> params;
1354  params.kernel=this;
1355  params.result=result;
1356  params.start=0;
1357  params.end=m;
1358  params.total_start=0;
1359  params.total_end=total_num;
1360  params.n=n;
1361  params.m=m;
1362  params.symmetric=symmetric;
1363  params.verbose=true;
1364  get_kernel_matrix_helper<T>((void*) &params);
1365  }
1366  else
1367  {
1368  pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
1369  K_THREAD_PARAM<T>* params = SG_MALLOC(K_THREAD_PARAM<T>, num_threads);
1370  int64_t step= total_num/num_threads;
1371 
1372  int32_t t;
1373 
1374  num_threads--;
1375  for (t=0; t<num_threads; t++)
1376  {
1377  params[t].kernel = this;
1378  params[t].result = result;
1379  params[t].start = compute_row_start(t*step, n, symmetric);
1380  params[t].end = compute_row_start((t+1)*step, n, symmetric);
1381  params[t].total_start=t*step;
1382  params[t].total_end=(t+1)*step;
1383  params[t].n=n;
1384  params[t].m=m;
1385  params[t].symmetric=symmetric;
1386  params[t].verbose=false;
1387 
1388  int code=pthread_create(&threads[t], NULL,
1389  CKernel::get_kernel_matrix_helper<T>, (void*)&params[t]);
1390 
1391  if (code != 0)
1392  {
1393  SG_WARNING("Thread creation failed (thread %d of %d) "
1394  "with error:'%s'\n",t, num_threads, strerror(code));
1395  num_threads=t;
1396  break;
1397  }
1398  }
1399 
1400  params[t].kernel = this;
1401  params[t].result = result;
1402  params[t].start = compute_row_start(t*step, n, symmetric);
1403  params[t].end = m;
1404  params[t].total_start=t*step;
1405  params[t].total_end=total_num;
1406  params[t].n=n;
1407  params[t].m=m;
1408  params[t].symmetric=symmetric;
1409  params[t].verbose=true;
1410  get_kernel_matrix_helper<T>(&params[t]);
1411 
1412  for (t=0; t<num_threads; t++)
1413  {
1414  if (pthread_join(threads[t], NULL) != 0)
1415  SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads)
1416  }
1417 
1418  SG_FREE(params);
1419  SG_FREE(threads);
1420  }
1421 
1422  SG_DONE()
1423 
1424  return SGMatrix<T>(result,m,n,true);
1425 }
1426 
1427 
1428 template SGMatrix<float64_t> CKernel::get_kernel_matrix<float64_t>();
1429 template SGMatrix<float32_t> CKernel::get_kernel_matrix<float32_t>();
1430 
1431 template void* CKernel::get_kernel_matrix_helper<float64_t>(void* p);
1432 template void* CKernel::get_kernel_matrix_helper<float32_t>(void* p);
1433 