SHOGUN  4.1.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
Kernel.cpp
Go to the documentation of this file.
1 /*
2  * EXCEPT FOR THE KERNEL CACHING FUNCTIONS WHICH ARE (W) THORSTEN JOACHIMS
3  * COPYRIGHT (C) 1999 UNIVERSITAET DORTMUND - ALL RIGHTS RESERVED
4  *
5  * this program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 3 of the License, or
8  * (at your option) any later version.
9  *
10  * Written (W) 1999-2009 Soeren Sonnenburg
11  * Written (W) 1999-2008 Gunnar Raetsch
12  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
13  */
14 
15 #include <shogun/lib/config.h>
16 #include <shogun/lib/common.h>
17 #include <shogun/io/SGIO.h>
18 #include <shogun/io/File.h>
19 #include <shogun/lib/Time.h>
20 #include <shogun/lib/Signal.h>
21 
22 #include <shogun/base/Parallel.h>
23 
24 #include <shogun/kernel/Kernel.h>
27 #include <shogun/base/Parameter.h>
28 
30 
31 #include <string.h>
32 #include <unistd.h>
34 
35 #ifdef HAVE_PTHREAD
36 #include <pthread.h>
37 #endif
38 
39 using namespace shogun;
40 
42 {
43  init();
45 }
46 
47 CKernel::CKernel(int32_t size) : CSGObject()
48 {
49  init();
50 
51  if (size<10)
52  size=10;
53 
54  cache_size=size;
56 }
57 
58 
59 CKernel::CKernel(CFeatures* p_lhs, CFeatures* p_rhs, int32_t size) : CSGObject()
60 {
61  init();
62 
63  if (size<10)
64  size=10;
65 
66  cache_size=size;
67 
69  init(p_lhs, p_rhs);
71 }
72 
74 {
75  if (get_is_initialized())
76  SG_ERROR("Kernel still initialized on destruction.\n")
77 
80 
81  SG_INFO("Kernel deleted (%p).\n", this)
82 }
83 
84 #ifdef USE_SVMLIGHT
85 void CKernel::resize_kernel_cache(KERNELCACHE_IDX size, bool regression_hack)
86 {
87  if (size<10)
88  size=10;
89 
91  cache_size=size;
92 
93  if (has_features() && get_num_vec_lhs())
94  kernel_cache_init(cache_size, regression_hack);
95 }
96 #endif //USE_SVMLIGHT
97 
98 bool CKernel::init(CFeatures* l, CFeatures* r)
99 {
100  /* make sure that features are not deleted if same ones are used */
101  SG_REF(l);
102  SG_REF(r);
103 
104  //make sure features were indeed supplied
105  REQUIRE(l, "CKernel::init(%p, %p): Left hand side features required!\n", l, r)
106  REQUIRE(r, "CKernel::init(%p, %p): Right hand side features required!\n", l, r)
107 
108  //make sure features are compatible
111 
112  //remove references to previous features
114 
115  //increase reference counts
116  SG_REF(l);
117  if (l==r)
118  lhs_equals_rhs=true;
119  else // l!=r
120  SG_REF(r);
121 
122  lhs=l;
123  rhs=r;
124 
127 
130 
131  /* unref "safety" refs from beginning */
132  SG_UNREF(r);
133  SG_UNREF(l);
134 
135  SG_DEBUG("leaving CKernel::init(%p, %p)\n", l, r)
136  return true;
137 }
138 
140 {
141  SG_REF(n);
142  if (lhs && rhs)
143  n->init(this);
144 
146  normalizer=n;
147 
148  return (normalizer!=NULL);
149 }
150 
152 {
154  return normalizer;
155 }
156 
158 {
159  return normalizer->init(this);
160 }
161 
163 {
165 }
166 
167 #ifdef USE_SVMLIGHT
168 /****************************** Cache handling *******************************/
169 
170 void CKernel::kernel_cache_init(int32_t buffsize, bool regression_hack)
171 {
172  int32_t totdoc=get_num_vec_lhs();
173  if (totdoc<=0)
174  {
175  SG_ERROR("kernel has zero rows: num_lhs=%d num_rhs=%d\n",
177  }
178  uint64_t buffer_size=0;
179  int32_t i;
180 
181  //in regression the additional constraints are made by doubling the training data
182  if (regression_hack)
183  totdoc*=2;
184 
185  buffer_size=((uint64_t) buffsize)*1024*1024/sizeof(KERNELCACHE_ELEM);
186  if (buffer_size>((uint64_t) totdoc)*totdoc)
187  buffer_size=((uint64_t) totdoc)*totdoc;
188 
189  SG_INFO("using a kernel cache of size %lld MB (%lld bytes) for %s Kernel\n", buffer_size*sizeof(KERNELCACHE_ELEM)/1024/1024, buffer_size*sizeof(KERNELCACHE_ELEM), get_name())
190 
191  //make sure it fits in the *signed* KERNELCACHE_IDX type
192  ASSERT(buffer_size < (((uint64_t) 1) << (sizeof(KERNELCACHE_IDX)*8-1)))
193 
194  kernel_cache.index = SG_MALLOC(int32_t, totdoc);
195  kernel_cache.occu = SG_MALLOC(int32_t, totdoc);
196  kernel_cache.lru = SG_MALLOC(int32_t, totdoc);
197  kernel_cache.invindex = SG_MALLOC(int32_t, totdoc);
198  kernel_cache.active2totdoc = SG_MALLOC(int32_t, totdoc);
199  kernel_cache.totdoc2active = SG_MALLOC(int32_t, totdoc);
200  kernel_cache.buffer = SG_MALLOC(KERNELCACHE_ELEM, buffer_size);
201  kernel_cache.buffsize=buffer_size;
202  kernel_cache.max_elems=(int32_t) (kernel_cache.buffsize/totdoc);
203 
204  if(kernel_cache.max_elems>totdoc) {
205  kernel_cache.max_elems=totdoc;
206  }
207 
208  kernel_cache.elems=0; // initialize cache
209  for(i=0;i<totdoc;i++) {
210  kernel_cache.index[i]=-1;
211  kernel_cache.lru[i]=0;
212  }
213  for(i=0;i<totdoc;i++) {
214  kernel_cache.occu[i]=0;
215  kernel_cache.invindex[i]=-1;
216  }
217 
218  kernel_cache.activenum=totdoc;;
219  for(i=0;i<totdoc;i++) {
220  kernel_cache.active2totdoc[i]=i;
221  kernel_cache.totdoc2active[i]=i;
222  }
223 
224  kernel_cache.time=0;
225 }
226 
228  int32_t docnum, int32_t *active2dnum, float64_t *buffer, bool full_line)
229 {
230  int32_t i,j;
231  KERNELCACHE_IDX start;
232 
233  int32_t num_vectors = get_num_vec_lhs();
234  if (docnum>=num_vectors)
235  docnum=2*num_vectors-1-docnum;
236 
237  /* is cached? */
238  if(kernel_cache.index[docnum] != -1)
239  {
240  kernel_cache.lru[kernel_cache.index[docnum]]=kernel_cache.time; /* lru */
241  start=((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[docnum];
242 
243  if (full_line)
244  {
245  for(j=0;j<get_num_vec_lhs();j++)
246  {
247  if(kernel_cache.totdoc2active[j] >= 0)
248  buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
249  else
250  buffer[j]=(float64_t) kernel(docnum, j);
251  }
252  }
253  else
254  {
255  for(i=0;(j=active2dnum[i])>=0;i++)
256  {
257  if(kernel_cache.totdoc2active[j] >= 0)
258  buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
259  else
260  {
261  int32_t k=j;
262  if (k>=num_vectors)
263  k=2*num_vectors-1-k;
264  buffer[j]=(float64_t) kernel(docnum, k);
265  }
266  }
267  }
268  }
269  else
270  {
271  if (full_line)
272  {
273  for(j=0;j<get_num_vec_lhs();j++)
274  buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, j);
275  }
276  else
277  {
278  for(i=0;(j=active2dnum[i])>=0;i++)
279  {
280  int32_t k=j;
281  if (k>=num_vectors)
282  k=2*num_vectors-1-k;
283  buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, k);
284  }
285  }
286  }
287 }
288 
289 
290 // Fills cache for the row m
292 {
293  register int32_t j,k,l;
294  register KERNELCACHE_ELEM *cache;
295 
296  int32_t num_vectors = get_num_vec_lhs();
297 
298  if (m>=num_vectors)
299  m=2*num_vectors-1-m;
300 
301  if(!kernel_cache_check(m)) // not cached yet
302  {
303  cache = kernel_cache_clean_and_malloc(m);
304  if(cache) {
305  l=kernel_cache.totdoc2active[m];
306 
307  for(j=0;j<kernel_cache.activenum;j++) // fill cache
308  {
309  k=kernel_cache.active2totdoc[j];
310 
311  if((kernel_cache.index[k] != -1) && (l != -1) && (k != m)) {
312  cache[j]=kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)
313  *kernel_cache.index[k]+l];
314  }
315  else
316  {
317  if (k>=num_vectors)
318  k=2*num_vectors-1-k;
319 
320  cache[j]=kernel(m, k);
321  }
322  }
323  }
324  else
325  perror("Error: Kernel cache full! => increase cache size");
326  }
327 }
328 
329 
330 void* CKernel::cache_multiple_kernel_row_helper(void* p)
331 {
332  int32_t j,k,l;
333  S_KTHREAD_PARAM* params = (S_KTHREAD_PARAM*) p;
334 
335  for (int32_t i=params->start; i<params->end; i++)
336  {
337  KERNELCACHE_ELEM* cache=params->cache[i];
338  int32_t m = params->uncached_rows[i];
339  l=params->kernel_cache->totdoc2active[m];
340 
341  for(j=0;j<params->kernel_cache->activenum;j++) // fill cache
342  {
343  k=params->kernel_cache->active2totdoc[j];
344 
345  if((params->kernel_cache->index[k] != -1) && (l != -1) && (!params->needs_computation[k])) {
346  cache[j]=params->kernel_cache->buffer[((KERNELCACHE_IDX) params->kernel_cache->activenum)
347  *params->kernel_cache->index[k]+l];
348  }
349  else
350  {
351  if (k>=params->num_vectors)
352  k=2*params->num_vectors-1-k;
353 
354  cache[j]=params->kernel->kernel(m, k);
355  }
356  }
357 
358  //now line m is cached
359  params->needs_computation[m]=0;
360  }
361  return NULL;
362 }
363 
364 // Fills cache for the rows in key
// Fills cache for the rows in key
/** Caches several kernel rows at once. With fewer than two configured
 * threads (or without pthread support) the rows are cached sequentially;
 * otherwise the uncached rows are partitioned across worker threads plus
 * the calling thread, which all run cache_multiple_kernel_row_helper().
 */
void CKernel::cache_multiple_kernel_rows(int32_t* rows, int32_t num_rows)
{
#ifdef HAVE_PTHREAD
	int32_t nthreads=parallel->get_num_threads();

	if (nthreads<2)
	{
#endif
		// sequential fallback: cache each requested row in turn
		for(int32_t i=0;i<num_rows;i++)
			cache_kernel_row(rows[i]);
#ifdef HAVE_PTHREAD
	}
	else
	{
		// fill up kernel cache
		int32_t* uncached_rows = SG_MALLOC(int32_t, num_rows);
		KERNELCACHE_ELEM** cache = SG_MALLOC(KERNELCACHE_ELEM*, num_rows);
		pthread_t* threads = SG_MALLOC(pthread_t, nthreads-1);
		S_KTHREAD_PARAM* params = SG_MALLOC(S_KTHREAD_PARAM, nthreads-1);
		int32_t num_threads=nthreads-1;
		int32_t num_vec=get_num_vec_lhs();
		ASSERT(num_vec>0)
		// per-row flag: 1 while a row still awaits computation (lets workers
		// know which cached rows are safe to copy from)
		uint8_t* needs_computation=SG_CALLOC(uint8_t, num_vec);

		int32_t step=0;
		int32_t num=0;
		int32_t end=0;

		// allocate cachelines if necessary
		for (int32_t i=0; i<num_rows; i++)
		{
			int32_t idx=rows[i];
			// fold mirrored indices back into [0, num_vec)
			// (presumably the SVMlight regression doubling — TODO confirm)
			if (idx>=num_vec)
				idx=2*num_vec-1-idx;

			// skip rows that are already resident in the cache
			if (kernel_cache_check(idx))
				continue;

			needs_computation[idx]=1;
			uncached_rows[num]=idx;
			cache[num]= kernel_cache_clean_and_malloc(idx);

			if (!cache[num])
				SG_ERROR("Kernel cache full! => increase cache size\n")

			num++;
		}

		if (num>0)
		{
			// split the uncached rows into nthreads chunks; the last chunk
			// is handled below by the calling thread itself
			step= num/nthreads;

			if (step<1)
			{
				// fewer rows than threads: one row per worker
				num_threads=num-1;
				step=1;
			}

			for (int32_t t=0; t<num_threads; t++)
			{
				params[t].kernel = this;
				params[t].kernel_cache = &kernel_cache;
				params[t].cache = cache;
				params[t].uncached_rows = uncached_rows;
				params[t].needs_computation = needs_computation;
				params[t].num_uncached = num;
				params[t].start = t*step;
				params[t].end = (t+1)*step;
				params[t].num_vectors = get_num_vec_lhs();
				// remember where the calling thread's share will begin
				end=params[t].end;

				int code=pthread_create(&threads[t], NULL,
						CKernel::cache_multiple_kernel_row_helper, (void*)&params[t]);

				if (code != 0)
				{
					// degrade gracefully: keep the threads that did start and
					// let the calling thread pick up the remainder
					SG_WARNING("Thread creation failed (thread %d of %d) "
							"with error:'%s'\n",t, num_threads, strerror(code));
					num_threads=t;
					end=t*step;
					break;
				}
			}
		}
		else
			num_threads=-1;


		// the calling thread processes the tail chunk [end, num) itself
		S_KTHREAD_PARAM last_param;
		last_param.kernel = this;
		last_param.kernel_cache = &kernel_cache;
		last_param.cache = cache;
		last_param.uncached_rows = uncached_rows;
		last_param.needs_computation = needs_computation;
		last_param.start = end;
		last_param.num_uncached = num;
		last_param.end = num;
		last_param.num_vectors = get_num_vec_lhs();

		cache_multiple_kernel_row_helper(&last_param);


		for (int32_t t=0; t<num_threads; t++)
		{
			if (pthread_join(threads[t], NULL) != 0)
				SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads)
		}

		SG_FREE(needs_computation);
		SG_FREE(params);
		SG_FREE(threads);
		SG_FREE(cache);
		SG_FREE(uncached_rows);
	}
#endif
}
481 
482 // remove numshrink columns in the cache
483 // which correspond to examples marked
485  int32_t totdoc, int32_t numshrink, int32_t *after)
486 {
487  ASSERT(totdoc > 0);
488  register int32_t i,j,jj,scount; // 0 in after.
489  KERNELCACHE_IDX from=0,to=0;
490  int32_t *keep;
491 
492  keep=SG_MALLOC(int32_t, totdoc);
493  for(j=0;j<totdoc;j++) {
494  keep[j]=1;
495  }
496  scount=0;
497  for(jj=0;(jj<kernel_cache.activenum) && (scount<numshrink);jj++) {
498  j=kernel_cache.active2totdoc[jj];
499  if(!after[j]) {
500  scount++;
501  keep[j]=0;
502  }
503  }
504 
505  for(i=0;i<kernel_cache.max_elems;i++) {
506  for(jj=0;jj<kernel_cache.activenum;jj++) {
507  j=kernel_cache.active2totdoc[jj];
508  if(!keep[j]) {
509  from++;
510  }
511  else {
512  kernel_cache.buffer[to]=kernel_cache.buffer[from];
513  to++;
514  from++;
515  }
516  }
517  }
518 
519  kernel_cache.activenum=0;
520  for(j=0;j<totdoc;j++) {
521  if((keep[j]) && (kernel_cache.totdoc2active[j] != -1)) {
522  kernel_cache.active2totdoc[kernel_cache.activenum]=j;
523  kernel_cache.totdoc2active[j]=kernel_cache.activenum;
524  kernel_cache.activenum++;
525  }
526  else {
527  kernel_cache.totdoc2active[j]=-1;
528  }
529  }
530 
531  kernel_cache.max_elems= (int32_t) kernel_cache.buffsize;
532 
533  if (kernel_cache.activenum>0)
534  kernel_cache.buffsize/=kernel_cache.activenum;
535 
536  if(kernel_cache.max_elems>totdoc)
537  kernel_cache.max_elems=totdoc;
538 
539  SG_FREE(keep);
540 
541 }
542 
544 {
545  int32_t maxlru=0,k;
546 
547  for(k=0;k<kernel_cache.max_elems;k++) {
548  if(maxlru < kernel_cache.lru[k])
549  maxlru=kernel_cache.lru[k];
550  }
551  for(k=0;k<kernel_cache.max_elems;k++) {
552  kernel_cache.lru[k]-=maxlru;
553  }
554 }
555 
557 {
558  SG_FREE(kernel_cache.index);
559  SG_FREE(kernel_cache.occu);
560  SG_FREE(kernel_cache.lru);
561  SG_FREE(kernel_cache.invindex);
562  SG_FREE(kernel_cache.active2totdoc);
563  SG_FREE(kernel_cache.totdoc2active);
564  SG_FREE(kernel_cache.buffer);
565  memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
566 }
567 
568 int32_t CKernel::kernel_cache_malloc()
569 {
570  int32_t i;
571 
573  for(i=0;i<kernel_cache.max_elems;i++) {
574  if(!kernel_cache.occu[i]) {
575  kernel_cache.occu[i]=1;
576  kernel_cache.elems++;
577  return(i);
578  }
579  }
580  }
581  return(-1);
582 }
583 
584 void CKernel::kernel_cache_free(int32_t cacheidx)
585 {
586  kernel_cache.occu[cacheidx]=0;
587  kernel_cache.elems--;
588 }
589 
590 // remove least recently used cache
591 // element
592 int32_t CKernel::kernel_cache_free_lru()
593 {
594  register int32_t k,least_elem=-1,least_time;
595 
596  least_time=kernel_cache.time+1;
597  for(k=0;k<kernel_cache.max_elems;k++) {
598  if(kernel_cache.invindex[k] != -1) {
599  if(kernel_cache.lru[k]<least_time) {
600  least_time=kernel_cache.lru[k];
601  least_elem=k;
602  }
603  }
604  }
605 
606  if(least_elem != -1) {
607  kernel_cache_free(least_elem);
608  kernel_cache.index[kernel_cache.invindex[least_elem]]=-1;
609  kernel_cache.invindex[least_elem]=-1;
610  return(1);
611  }
612  return(0);
613 }
614 
615 // Get a free cache entry. In case cache is full, the lru
616 // element is removed.
617 KERNELCACHE_ELEM* CKernel::kernel_cache_clean_and_malloc(int32_t cacheidx)
618 {
619  int32_t result;
620  if((result = kernel_cache_malloc()) == -1) {
621  if(kernel_cache_free_lru()) {
622  result = kernel_cache_malloc();
623  }
624  }
625  kernel_cache.index[cacheidx]=result;
626  if(result == -1) {
627  return(0);
628  }
629  kernel_cache.invindex[result]=cacheidx;
630  kernel_cache.lru[kernel_cache.index[cacheidx]]=kernel_cache.time; // lru
631  return &kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[cacheidx]];
632 }
633 #endif //USE_SVMLIGHT
634 
635 void CKernel::load(CFile* loader)
636 {
639 }
640 
641 void CKernel::save(CFile* writer)
642 {
643  SGMatrix<float64_t> k_matrix=get_kernel_matrix<float64_t>();
645  writer->set_matrix(k_matrix.matrix, k_matrix.num_rows, k_matrix.num_cols);
647 }
648 
650 {
651  SG_DEBUG("entering CKernel::remove_lhs_and_rhs\n")
652  if (rhs!=lhs)
653  SG_UNREF(rhs);
654  rhs = NULL;
655  num_rhs=0;
656 
657  SG_UNREF(lhs);
658  lhs = NULL;
659  num_lhs=0;
660  lhs_equals_rhs=false;
661 
662 #ifdef USE_SVMLIGHT
663  cache_reset();
664 #endif //USE_SVMLIGHT
665  SG_DEBUG("leaving CKernel::remove_lhs_and_rhs\n")
666 }
667 
669 {
670  if (rhs==lhs)
671  rhs=NULL;
672  SG_UNREF(lhs);
673  lhs = NULL;
674  num_lhs=0;
675  lhs_equals_rhs=false;
676 #ifdef USE_SVMLIGHT
677  cache_reset();
678 #endif //USE_SVMLIGHT
679 }
680 
683 {
684  if (rhs!=lhs)
685  SG_UNREF(rhs);
686  rhs = NULL;
687  num_rhs=0;
688  lhs_equals_rhs=false;
689 
690 #ifdef USE_SVMLIGHT
691  cache_reset();
692 #endif //USE_SVMLIGHT
693 }
694 
695 #define ENUM_CASE(n) case n: SG_INFO(#n " ") break;
696 
698 {
699  SG_INFO("%p - \"%s\" weight=%1.2f OPT:%s", this, get_name(),
701  get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" :
702  "SLOWBUTMEMEFFICIENT");
703 
704  switch (get_kernel_type())
705  {
767  }
768 
769  switch (get_feature_class())
770  {
781  ENUM_CASE(C_WD)
792  }
793 
794  switch (get_feature_type())
795  {
810  }
811  SG_INFO("\n")
812 }
813 #undef ENUM_CASE
814 
816  int32_t count, int32_t *IDX, float64_t * weights)
817 {
818  SG_ERROR("kernel does not support linadd optimization\n")
819  return false ;
820 }
821 
823 {
824  SG_ERROR("kernel does not support linadd optimization\n")
825  return false;
826 }
827 
829 {
830  SG_ERROR("kernel does not support linadd optimization\n")
831  return 0;
832 }
833 
835  int32_t num_vec, int32_t* vec_idx, float64_t* target, int32_t num_suppvec,
836  int32_t* IDX, float64_t* weights, float64_t factor)
837 {
838  SG_ERROR("kernel does not support batch computation\n")
839 }
840 
/** Base-class stub for the linadd optimization: kernels that support it
 * override this to fold vector vector_idx (scaled by weight) into their
 * internal normal vector. Here it only raises an error.
 */
void CKernel::add_to_normal(int32_t vector_idx, float64_t weight)
{
	SG_ERROR("kernel does not support linadd optimization, add_to_normal not implemented\n")
}
845 
847 {
848  SG_ERROR("kernel does not support linadd optimization, clear_normal not implemented\n")
849 }
850 
852 {
853  return 1;
854 }
855 
857  int32_t vector_idx, float64_t * subkernel_contrib)
858 {
859  SG_ERROR("kernel compute_by_subkernel not implemented\n")
860 }
861 
/** Returns the subkernel weights. The base kernel has exactly one
 * subkernel, so num_weights is set to 1 and a pointer to the single
 * combined_kernel_weight member is returned.
 */
const float64_t* CKernel::get_subkernel_weights(int32_t &num_weights)
{
	num_weights=1 ;
	return &combined_kernel_weight ;
}
867 
869 {
870  int num_weights = 1;
871  const float64_t* weight = get_subkernel_weights(num_weights);
872  return SGVector<float64_t>(const_cast<float64_t*>(weight),1,false);
873 }
874 
876 {
877  ASSERT(weights.vector)
878  if (weights.vlen!=1)
879  SG_ERROR("number of subkernel weights should be one ...\n")
880 
881  combined_kernel_weight = weights.vector[0] ;
882 }
883 
885 {
886  if (kernel)
887  {
888  CKernel* casted=dynamic_cast<CKernel*>(kernel);
889  REQUIRE(casted, "CKernel::obtain_from_generic(): Error, provided object"
890  " of class \"%s\" is not a subclass of CKernel!\n",
891  kernel->get_name());
892  return casted;
893  }
894  else
895  return NULL;
896 }
897 
899 {
900  int32_t num_suppvec=svm->get_num_support_vectors();
901  int32_t* sv_idx=SG_MALLOC(int32_t, num_suppvec);
902  float64_t* sv_weight=SG_MALLOC(float64_t, num_suppvec);
903 
904  for (int32_t i=0; i<num_suppvec; i++)
905  {
906  sv_idx[i] = svm->get_support_vector(i);
907  sv_weight[i] = svm->get_alpha(i);
908  }
909  bool ret = init_optimization(num_suppvec, sv_idx, sv_weight);
910 
911  SG_FREE(sv_idx);
912  SG_FREE(sv_weight);
913  return ret;
914 }
915 
917 {
919  if (lhs_equals_rhs)
920  rhs=lhs;
921 }
922 
924 {
926 
927  if (lhs_equals_rhs)
928  rhs=NULL;
929 }
930 
932 {
934 
935  if (lhs_equals_rhs)
936  rhs=lhs;
937 }
938 
940  SG_ADD(&cache_size, "cache_size",
941  "Cache size in MB.", MS_NOT_AVAILABLE);
942  SG_ADD((CSGObject**) &lhs, "lhs",
943  "Feature vectors to occur on left hand side.", MS_NOT_AVAILABLE);
944  SG_ADD((CSGObject**) &rhs, "rhs",
945  "Feature vectors to occur on right hand side.", MS_NOT_AVAILABLE);
946  SG_ADD(&lhs_equals_rhs, "lhs_equals_rhs",
947  "If features on lhs are the same as on rhs.", MS_NOT_AVAILABLE);
948  SG_ADD(&num_lhs, "num_lhs", "Number of feature vectors on left hand side.",
950  SG_ADD(&num_rhs, "num_rhs", "Number of feature vectors on right hand side.",
952  SG_ADD(&combined_kernel_weight, "combined_kernel_weight",
953  "Combined kernel weight.", MS_AVAILABLE);
954  SG_ADD(&optimization_initialized, "optimization_initialized",
955  "Optimization is initialized.", MS_NOT_AVAILABLE);
956  SG_ADD((machine_int_t*) &opt_type, "opt_type",
957  "Optimization type.", MS_NOT_AVAILABLE);
958  SG_ADD(&properties, "properties", "Kernel properties.", MS_NOT_AVAILABLE);
959  SG_ADD((CSGObject**) &normalizer, "normalizer", "Normalize the kernel.",
960  MS_AVAILABLE);
961 }
962 
963 
964 void CKernel::init()
965 {
966  cache_size=10;
967  kernel_matrix=NULL;
968  lhs=NULL;
969  rhs=NULL;
970  num_lhs=0;
971  num_rhs=0;
972  lhs_equals_rhs=false;
977  normalizer=NULL;
978 
979 #ifdef USE_SVMLIGHT
980  memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
981 #endif //USE_SVMLIGHT
982 
984 }
985 
986 namespace shogun
987 {
989 template <class T> struct K_THREAD_PARAM
990 {
994  int32_t start;
996  int32_t end;
998  int64_t total_start;
1000  int64_t total_end;
1002  int32_t m;
1004  int32_t n;
1010  bool verbose;
1011 };
1012 }
1013 
1015  bool no_diag)
1016 {
1017  SG_DEBUG("Entering\n");
1018 
1019  REQUIRE(has_features(), "No features assigned to kernel\n")
1020  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
1021  REQUIRE(block_begin>=0 && block_begin<num_rhs,
1022  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
1023  REQUIRE(block_begin+block_size<=num_rhs,
1024  "Invalid block size (%d) at starting index (%d, %d)! "
1025  "Please use smaller blocks!", block_size, block_begin, block_begin)
1026  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
1027 
1028  float64_t sum=0.0;
1029 
1030  // since the block is symmetric with main diagonal inside, we can save half
1031  // the computation with using only the upper triangular part.
1032  // this can be done in parallel
1033 #pragma omp parallel for
1034  for (index_t i=0; i<block_size; ++i)
1035  {
1036  // compute the kernel values on the upper triangular part of the kernel
1037  // matrix and compute sum on the fly
1038  for (index_t j=i+1; j<block_size; ++j)
1039  {
1040  float64_t k=kernel(i+block_begin, j+block_begin);
1041 #pragma omp atomic
1042  sum+=k;
1043  }
1044  }
1045 
1046  // the actual sum would be twice of what we computed
1047  sum*=2;
1048 
1049  // add the diagonal elements if required - keeping this check
1050  // outside of the loop to save cycles
1051  if (!no_diag)
1052  {
1053 #pragma omp parallel for
1054  for (index_t i=0; i<block_size; ++i)
1055  {
1056  float64_t diag=kernel(i+block_begin, i+block_begin);
1057 #pragma omp atomic
1058  sum+=diag;
1059  }
1060  }
1061 
1062  SG_DEBUG("Leaving\n");
1063 
1064  return sum;
1065 }
1066 
1067 float64_t CKernel::sum_block(index_t block_begin_row, index_t block_begin_col,
1068  index_t block_size_row, index_t block_size_col, bool no_diag)
1069 {
1070  SG_DEBUG("Entering\n");
1071 
1072  REQUIRE(has_features(), "No features assigned to kernel\n")
1073  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
1074  block_begin_col>=0 && block_begin_col<num_rhs,
1075  "Invalid block begin index (%d, %d)!\n",
1076  block_begin_row, block_begin_col)
1077  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
1078  block_begin_col+block_size_col<=num_rhs,
1079  "Invalid block size (%d, %d) at starting index (%d, %d)! "
1080  "Please use smaller blocks!", block_size_row, block_size_col,
1081  block_begin_row, block_begin_col)
1082  REQUIRE(block_size_row>=1 && block_size_col>=1,
1083  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
1084 
1085  // check if removal of diagonal is required/valid
1086  if (no_diag && block_size_row!=block_size_col)
1087  {
1088  SG_WARNING("Not removing the main diagonal since block is not square!\n");
1089  no_diag=false;
1090  }
1091 
1092  float64_t sum=0.0;
1093 
1094  // this can be done in parallel for the rows/cols
1095 #pragma omp parallel for
1096  for (index_t i=0; i<block_size_row; ++i)
1097  {
1098  // compute the kernel values and compute sum on the fly
1099  for (index_t j=0; j<block_size_col; ++j)
1100  {
1101  float64_t k=no_diag && i==j ? 0 :
1102  kernel(i+block_begin_row, j+block_begin_col);
1103 #pragma omp atomic
1104  sum+=k;
1105  }
1106  }
1107 
1108  SG_DEBUG("Leaving\n");
1109 
1110  return sum;
1111 }
1112 
1114  index_t block_size, bool no_diag)
1115 {
1116  SG_DEBUG("Entering\n");
1117 
1118  REQUIRE(has_features(), "No features assigned to kernel\n")
1119  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
1120  REQUIRE(block_begin>=0 && block_begin<num_rhs,
1121  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
1122  REQUIRE(block_begin+block_size<=num_rhs,
1123  "Invalid block size (%d) at starting index (%d, %d)! "
1124  "Please use smaller blocks!", block_size, block_begin, block_begin)
1125  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
1126 
1127  // initialize the vector that accumulates the row/col-wise sum on the go
1128  SGVector<float64_t> row_sum(block_size);
1129  row_sum.set_const(0.0);
1130 
1131  // since the block is symmetric with main diagonal inside, we can save half
1132  // the computation with using only the upper triangular part.
1133  // this can be done in parallel for the rows/cols
1134 #pragma omp parallel for
1135  for (index_t i=0; i<block_size; ++i)
1136  {
1137  // compute the kernel values on the upper triangular part of the kernel
1138  // matrix and compute row-wise sum on the fly
1139  for (index_t j=i+1; j<block_size; ++j)
1140  {
1141  float64_t k=kernel(i+block_begin, j+block_begin);
1142 #pragma omp critical
1143  {
1144  row_sum[i]+=k;
1145  row_sum[j]+=k;
1146  }
1147  }
1148  }
1149 
1150  // add the diagonal elements if required - keeping this check
1151  // outside of the loop to save cycles
1152  if (!no_diag)
1153  {
1154 #pragma omp parallel for
1155  for (index_t i=0; i<block_size; ++i)
1156  {
1157  float64_t diag=kernel(i+block_begin, i+block_begin);
1158  row_sum[i]+=diag;
1159  }
1160  }
1161 
1162  SG_DEBUG("Leaving\n");
1163 
1164  return row_sum;
1165 }
1166 
1168  block_begin, index_t block_size, bool no_diag)
1169 {
1170  SG_DEBUG("Entering\n");
1171 
1172  REQUIRE(has_features(), "No features assigned to kernel\n")
1173  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
1174  REQUIRE(block_begin>=0 && block_begin<num_rhs,
1175  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
1176  REQUIRE(block_begin+block_size<=num_rhs,
1177  "Invalid block size (%d) at starting index (%d, %d)! "
1178  "Please use smaller blocks!", block_size, block_begin, block_begin)
1179  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
1180 
1181  // initialize the matrix that accumulates the row/col-wise sum on the go
1182  // the first column stores the sum of kernel values
1183  // the second column stores the sum of squared kernel values
1184  SGMatrix<float64_t> row_sum(block_size, 2);
1185  row_sum.set_const(0.0);
1186 
1187  // since the block is symmetric with main diagonal inside, we can save half
1188  // the computation with using only the upper triangular part
1189  // this can be done in parallel for the rows/cols
1190 #pragma omp parallel for
1191  for (index_t i=0; i<block_size; ++i)
1192  {
1193  // compute the kernel values on the upper triangular part of the kernel
1194  // matrix and compute row-wise sum and squared sum on the fly
1195  for (index_t j=i+1; j<block_size; ++j)
1196  {
1197  float64_t k=kernel(i+block_begin, j+block_begin);
1198 #pragma omp critical
1199  {
1200  row_sum(i, 0)+=k;
1201  row_sum(j, 0)+=k;
1202  row_sum(i, 1)+=k*k;
1203  row_sum(j, 1)+=k*k;
1204  }
1205  }
1206  }
1207 
1208  // add the diagonal elements if required - keeping this check
1209  // outside of the loop to save cycles
1210  if (!no_diag)
1211  {
1212 #pragma omp parallel for
1213  for (index_t i=0; i<block_size; ++i)
1214  {
1215  float64_t diag=kernel(i+block_begin, i+block_begin);
1216  row_sum(i, 0)+=diag;
1217  row_sum(i, 1)+=diag*diag;
1218  }
1219  }
1220 
1221  SG_DEBUG("Leaving\n");
1222 
1223  return row_sum;
1224 }
1225 
1227  index_t block_begin_col, index_t block_size_row,
1228  index_t block_size_col, bool no_diag)
1229 {
1230  SG_DEBUG("Entering\n");
1231 
1232  REQUIRE(has_features(), "No features assigned to kernel\n")
1233  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
1234  block_begin_col>=0 && block_begin_col<num_rhs,
1235  "Invalid block begin index (%d, %d)!\n",
1236  block_begin_row, block_begin_col)
1237  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
1238  block_begin_col+block_size_col<=num_rhs,
1239  "Invalid block size (%d, %d) at starting index (%d, %d)! "
1240  "Please use smaller blocks!", block_size_row, block_size_col,
1241  block_begin_row, block_begin_col)
1242  REQUIRE(block_size_row>=1 && block_size_col>=1,
1243  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
1244 
1245  // check if removal of diagonal is required/valid
1246  if (no_diag && block_size_row!=block_size_col)
1247  {
1248  SG_WARNING("Not removing the main diagonal since block is not square!\n");
1249  no_diag=false;
1250  }
1251 
1252  // initialize the vector that accumulates the row/col-wise sum on the go
1253  // the first block_size_row entries store the row-wise sum of kernel values
1254  // the nextt block_size_col entries store the col-wise sum of kernel values
1255  SGVector<float64_t> sum(block_size_row+block_size_col);
1256  sum.set_const(0.0);
1257 
1258  // this can be done in parallel for the rows/cols
1259 #pragma omp parallel for
1260  for (index_t i=0; i<block_size_row; ++i)
1261  {
1262  // compute the kernel values and compute sum on the fly
1263  for (index_t j=0; j<block_size_col; ++j)
1264  {
1265  float64_t k=no_diag && i==j ? 0 :
1266  kernel(i+block_begin_row, j+block_begin_col);
1267 #pragma omp critical
1268  {
1269  sum[i]+=k;
1270  sum[j+block_size_row]+=k;
1271  }
1272  }
1273  }
1274 
1275  SG_DEBUG("Leaving\n");
1276 
1277  return sum;
1278 }
1279 
1280 template <class T> void* CKernel::get_kernel_matrix_helper(void* p)
1281 {
1282  K_THREAD_PARAM<T>* params= (K_THREAD_PARAM<T>*) p;
1283  int32_t i_start=params->start;
1284  int32_t i_end=params->end;
1285  CKernel* k=params->kernel;
1286  T* result=params->result;
1287  bool symmetric=params->symmetric;
1288  int32_t n=params->n;
1289  int32_t m=params->m;
1290  bool verbose=params->verbose;
1291  int64_t total_start=params->total_start;
1292  int64_t total_end=params->total_end;
1293  int64_t total=total_start;
1294 
1295  for (int32_t i=i_start; i<i_end; i++)
1296  {
1297  int32_t j_start=0;
1298 
1299  if (symmetric)
1300  j_start=i;
1301 
1302  for (int32_t j=j_start; j<n; j++)
1303  {
1304  float64_t v=k->kernel(i,j);
1305  result[i+j*m]=v;
1306 
1307  if (symmetric && i!=j)
1308  result[j+i*m]=v;
1309 
1310  if (verbose)
1311  {
1312  total++;
1313 
1314  if (symmetric && i!=j)
1315  total++;
1316 
1317  if (total%100 == 0)
1318  SG_OBJ_PROGRESS(k, total, total_start, total_end)
1319 
1321  break;
1322  }
1323  }
1324 
1325  }
1326 
1327  return NULL;
1328 }
1329 
1330 template <class T>
1332 {
1333  T* result = NULL;
1334 
1335  REQUIRE(has_features(), "no features assigned to kernel\n")
1336 
1337  int32_t m=get_num_vec_lhs();
1338  int32_t n=get_num_vec_rhs();
1339 
1340  int64_t total_num = int64_t(m)*n;
1341 
1342  // if lhs == rhs and sizes match assume k(i,j)=k(j,i)
1343  bool symmetric= (lhs && lhs==rhs && m==n);
1344 
1345  SG_DEBUG("returning kernel matrix of size %dx%d\n", m, n)
1346 
1347  result=SG_MALLOC(T, total_num);
1348 
1349  int32_t num_threads=parallel->get_num_threads();
1350  if (num_threads < 2)
1351  {
1352  K_THREAD_PARAM<T> params;
1353  params.kernel=this;
1354  params.result=result;
1355  params.start=0;
1356  params.end=m;
1357  params.total_start=0;
1358  params.total_end=total_num;
1359  params.n=n;
1360  params.m=m;
1361  params.symmetric=symmetric;
1362  params.verbose=true;
1363  get_kernel_matrix_helper<T>((void*) &params);
1364  }
1365  else
1366  {
1367  pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
1368  K_THREAD_PARAM<T>* params = SG_MALLOC(K_THREAD_PARAM<T>, num_threads);
1369  int64_t step= total_num/num_threads;
1370 
1371  int32_t t;
1372 
1373  num_threads--;
1374  for (t=0; t<num_threads; t++)
1375  {
1376  params[t].kernel = this;
1377  params[t].result = result;
1378  params[t].start = compute_row_start(t*step, n, symmetric);
1379  params[t].end = compute_row_start((t+1)*step, n, symmetric);
1380  params[t].total_start=t*step;
1381  params[t].total_end=(t+1)*step;
1382  params[t].n=n;
1383  params[t].m=m;
1384  params[t].symmetric=symmetric;
1385  params[t].verbose=false;
1386 
1387  int code=pthread_create(&threads[t], NULL,
1388  CKernel::get_kernel_matrix_helper<T>, (void*)&params[t]);
1389 
1390  if (code != 0)
1391  {
1392  SG_WARNING("Thread creation failed (thread %d of %d) "
1393  "with error:'%s'\n",t, num_threads, strerror(code));
1394  num_threads=t;
1395  break;
1396  }
1397  }
1398 
1399  params[t].kernel = this;
1400  params[t].result = result;
1401  params[t].start = compute_row_start(t*step, n, symmetric);
1402  params[t].end = m;
1403  params[t].total_start=t*step;
1404  params[t].total_end=total_num;
1405  params[t].n=n;
1406  params[t].m=m;
1407  params[t].symmetric=symmetric;
1408  params[t].verbose=true;
1409  get_kernel_matrix_helper<T>(&params[t]);
1410 
1411  for (t=0; t<num_threads; t++)
1412  {
1413  if (pthread_join(threads[t], NULL) != 0)
1414  SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads)
1415  }
1416 
1417  SG_FREE(params);
1418  SG_FREE(threads);
1419  }
1420 
1421  SG_DONE()
1422 
1423  return SGMatrix<T>(result,m,n,true);
1424 }
1425 
1426 
1427 template SGMatrix<float64_t> CKernel::get_kernel_matrix<float64_t>();
1428 template SGMatrix<float32_t> CKernel::get_kernel_matrix<float32_t>();
1429 
1430 template void* CKernel::get_kernel_matrix_helper<float64_t>(void* p);
1431 template void* CKernel::get_kernel_matrix_helper<float32_t>(void* p);
1432 
virtual void clear_normal()
Definition: Kernel.cpp:846
virtual const char * get_name() const =0
virtual void load_serializable_post()
Definition: Kernel.cpp:916
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:98
int32_t compute_row_start(int64_t offs, int32_t n, bool symmetric)
Definition: Kernel.h:919
#define SG_INFO(...)
Definition: SGIO.h:118
virtual void cleanup()
Definition: Kernel.cpp:162
#define SG_RESET_LOCALE
Definition: SGIO.h:86
#define SG_DONE()
Definition: SGIO.h:157
virtual void set_matrix(const bool *matrix, int32_t num_feat, int32_t num_vec)
Definition: File.cpp:126
virtual void compute_by_subkernel(int32_t vector_idx, float64_t *subkernel_contrib)
Definition: Kernel.cpp:856
void cache_multiple_kernel_rows(int32_t *key, int32_t varnum)
Definition: Kernel.cpp:365
int32_t get_num_threads() const
Definition: Parallel.cpp:64
int32_t index_t
Definition: common.h:62
int32_t num_rhs
number of feature vectors on right hand side
Definition: Kernel.h:1069
static void * get_kernel_matrix_helper(void *p)
Definition: Kernel.cpp:1280
Class ShogunException defines an exception which is thrown whenever an error inside of shogun occurs...
virtual bool set_normalizer(CKernelNormalizer *normalizer)
Definition: Kernel.cpp:139
virtual float64_t sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
Definition: Kernel.cpp:1067
virtual int32_t get_num_vectors() const =0
virtual void save_serializable_pre()
Definition: SGObject.cpp:1067
#define SG_ERROR(...)
Definition: SGIO.h:129
void cache_reset()
Definition: Kernel.h:602
#define REQUIRE(x,...)
Definition: SGIO.h:206
virtual bool delete_optimization()
Definition: Kernel.cpp:822
int64_t KERNELCACHE_IDX
Definition: Kernel.h:46
int32_t kernel_cache_space_available()
Definition: Kernel.h:698
index_t num_cols
Definition: SGMatrix.h:378
float64_t kernel(int32_t idx_a, int32_t idx_b)
Definition: Kernel.h:206
#define ENUM_CASE(n)
Definition: Kernel.cpp:695
uint64_t properties
Definition: Kernel.h:1082
Parallel * parallel
Definition: SGObject.h:499
virtual void remove_rhs()
takes all necessary steps if the rhs is removed from kernel
Definition: Kernel.cpp:682
virtual int32_t get_num_vec_lhs()
Definition: Kernel.h:516
SGMatrix< float64_t > get_kernel_matrix()
Definition: Kernel.h:219
#define SG_REF(x)
Definition: SGObject.h:51
#define SG_SET_LOCALE_C
Definition: SGIO.h:85
int32_t cache_size
cache_size in MB
Definition: Kernel.h:1047
index_t num_rows
Definition: SGMatrix.h:376
void kernel_cache_shrink(int32_t totdoc, int32_t num_shrink, int32_t *after)
Definition: Kernel.cpp:484
bool get_is_initialized()
Definition: Kernel.h:753
virtual SGMatrix< float64_t > row_wise_sum_squared_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:1167
float64_t combined_kernel_weight
Definition: Kernel.h:1072
virtual void register_params()
Definition: Kernel.cpp:939
void save(CFile *writer)
Definition: Kernel.cpp:641
virtual void remove_lhs_and_rhs()
Definition: Kernel.cpp:649
index_t vlen
Definition: SGVector.h:494
virtual CKernelNormalizer * get_normalizer()
Definition: Kernel.cpp:151
#define ASSERT(x)
Definition: SGIO.h:201
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:112
virtual SGVector< float64_t > row_col_wise_sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
Definition: Kernel.cpp:1226
void cache_kernel_row(int32_t x)
Definition: Kernel.cpp:291
#define SG_OBJ_PROGRESS(o,...)
Definition: SGIO.h:147
virtual float64_t sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:1014
virtual SGVector< float64_t > get_subkernel_weights()
Definition: Kernel.cpp:868
double float64_t
Definition: common.h:50
KERNEL_CACHE kernel_cache
kernel cache
Definition: Kernel.h:1051
virtual EFeatureType get_feature_type()=0
KERNELCACHE_ELEM * kernel_matrix
Definition: Kernel.h:1056
A File access base class.
Definition: File.h:34
virtual void save_serializable_post()
Definition: Kernel.cpp:931
virtual float64_t compute_optimized(int32_t vector_idx)
Definition: Kernel.cpp:828
EOptimizationType get_optimization_type()
Definition: Kernel.h:741
virtual void save_serializable_post()
Definition: SGObject.cpp:1072
void list_kernel()
Definition: Kernel.cpp:697
float64_t get_alpha(int32_t idx)
float64_t get_combined_kernel_weight()
Definition: Kernel.h:802
virtual SGVector< float64_t > row_wise_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:1113
virtual EFeatureClass get_feature_class() const =0
Identity Kernel Normalization, i.e. no normalization is applied.
int32_t num_lhs
number of feature vectors on left hand side
Definition: Kernel.h:1067
The class Kernel Normalizer defines a function to post-process kernel values.
int32_t get_support_vector(int32_t idx)
static bool cancel_computations()
Definition: Signal.h:86
virtual int32_t get_num_vec_rhs()
Definition: Kernel.h:525
virtual void set_subkernel_weights(SGVector< float64_t > weights)
Definition: Kernel.cpp:875
virtual bool init_normalizer()
Definition: Kernel.cpp:157
bool optimization_initialized
Definition: Kernel.h:1075
EOptimizationType opt_type
Definition: Kernel.h:1079
void load(CFile *loader)
Definition: Kernel.cpp:635
virtual void load_serializable_post()
Definition: SGObject.cpp:1062
CFeatures * rhs
feature vectors to occur on right hand side
Definition: Kernel.h:1061
static CKernel * obtain_from_generic(CSGObject *kernel)
Definition: Kernel.cpp:884
#define SG_UNREF(x)
Definition: SGObject.h:52
#define SG_DEBUG(...)
Definition: SGIO.h:107
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual bool init(CKernel *k)=0
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
Definition: Kernel.cpp:834
bool lhs_equals_rhs
lhs
Definition: Kernel.h:1064
int machine_int_t
Definition: common.h:59
virtual EKernelType get_kernel_type()=0
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
Definition: Kernel.cpp:815
CFeatures * lhs
feature vectors to occur on left hand side
Definition: Kernel.h:1059
The class Features is the base class of all feature objects.
Definition: Features.h:68
virtual void save_serializable_pre()
Definition: Kernel.cpp:923
void kernel_cache_cleanup()
Definition: Kernel.cpp:556
virtual void remove_lhs()
Definition: Kernel.cpp:668
int32_t kernel_cache_check(int32_t cacheidx)
Definition: Kernel.h:689
virtual int32_t get_num_subkernels()
Definition: Kernel.cpp:851
bool init_optimization_svm(CSVM *svm)
Definition: Kernel.cpp:898
A generic Support Vector Machine Interface.
Definition: SVM.h:49
void kernel_cache_reset_lru()
Definition: Kernel.cpp:543
The Kernel base class.
Definition: Kernel.h:158
CKernelNormalizer * normalizer
Definition: Kernel.h:1086
virtual SGVector< float64_t > get_kernel_row(int32_t i)
Definition: Kernel.h:279
#define SG_WARNING(...)
Definition: SGIO.h:128
#define SG_ADD(...)
Definition: SGObject.h:81
virtual bool has_features()
Definition: Kernel.h:534
void kernel_cache_init(int32_t size, bool regression_hack=false)
Definition: Kernel.cpp:170
virtual ~CKernel()
Definition: Kernel.cpp:73
virtual void add_to_normal(int32_t vector_idx, float64_t weight)
Definition: Kernel.cpp:841
void set_const(T const_elem)
Definition: SGMatrix.cpp:133
float64_t KERNELCACHE_ELEM
Definition: Kernel.h:35
void set_const(T const_elem)
Definition: SGVector.cpp:152
void resize_kernel_cache(KERNELCACHE_IDX size, bool regression_hack=false)
Definition: Kernel.cpp:85
virtual EFeatureType get_feature_type() const =0
virtual EFeatureClass get_feature_class()=0

SHOGUN Machine Learning Toolbox - Documentation