SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
Kernel.cpp
Go to the documentation of this file.
1 /*
2  * EXCEPT FOR THE KERNEL CACHING FUNCTIONS WHICH ARE (W) THORSTEN JOACHIMS
3  * COPYRIGHT (C) 1999 UNIVERSITAET DORTMUND - ALL RIGHTS RESERVED
4  *
5  * this program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 3 of the License, or
8  * (at your option) any later version.
9  *
10  * Written (W) 1999-2009 Soeren Sonnenburg
11  * Written (W) 1999-2008 Gunnar Raetsch
12  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
13  */
14 
15 #include <shogun/lib/config.h>
16 #include <shogun/lib/common.h>
17 #include <shogun/io/SGIO.h>
18 #include <shogun/io/File.h>
19 #include <shogun/lib/Time.h>
20 #include <shogun/lib/Signal.h>
21 
22 #include <shogun/base/Parallel.h>
23 
24 #include <shogun/kernel/Kernel.h>
27 #include <shogun/base/Parameter.h>
28 
30 
31 #include <string.h>
32 #include <unistd.h>
33 #include <math.h>
34 
35 #ifdef HAVE_PTHREAD
36 #include <pthread.h>
37 #endif
38 
39 using namespace shogun;
40 
42 {
43  init();
45 }
46 
47 CKernel::CKernel(int32_t size) : CSGObject()
48 {
49  init();
50 
51  if (size<10)
52  size=10;
53 
54  cache_size=size;
56 }
57 
58 
59 CKernel::CKernel(CFeatures* p_lhs, CFeatures* p_rhs, int32_t size) : CSGObject()
60 {
61  init();
62 
63  if (size<10)
64  size=10;
65 
66  cache_size=size;
67 
69  init(p_lhs, p_rhs);
71 }
72 
74 {
75  if (get_is_initialized())
76  SG_ERROR("Kernel still initialized on destruction.\n")
77 
80 
81  SG_INFO("Kernel deleted (%p).\n", this)
82 }
83 
84 #ifdef USE_SVMLIGHT
85 void CKernel::resize_kernel_cache(KERNELCACHE_IDX size, bool regression_hack)
86 {
87  if (size<10)
88  size=10;
89 
91  cache_size=size;
92 
93  if (has_features() && get_num_vec_lhs())
94  kernel_cache_init(cache_size, regression_hack);
95 }
96 #endif //USE_SVMLIGHT
97 
// Attaches features l (left-hand side) and r (right-hand side) to this
// kernel.  Both are ref-counted; when l==r only one reference is kept and
// lhs_equals_rhs is set.  Returns true on success (failures raise via
// REQUIRE).
// NOTE(review): the Doxygen extraction this file was recovered from lost
// original lines 109-110, 113 and 125-129 (apparently the feature
// compatibility check, release of the previous lhs/rhs and the
// num_lhs/num_rhs updates) -- consult the shogun repository before editing.
98 bool CKernel::init(CFeatures* l, CFeatures* r)
99 {
100  /* make sure that features are not deleted if same ones are used */
101  SG_REF(l);
102  SG_REF(r);
103 
104  //make sure features were indeed supplied
105  REQUIRE(l, "CKernel::init(%p, %p): Left hand side features required!\n", l, r)
106  REQUIRE(r, "CKernel::init(%p, %p): Right hand side features required!\n", l, r)
107 
108  //make sure features are compatible
111 
112  //remove references to previous features
114 
115  //increase reference counts
116  SG_REF(l);
// keep only a single reference when both sides are the identical object
117  if (l==r)
118  lhs_equals_rhs=true;
119  else // l!=r
120  SG_REF(r);
121 
122  lhs=l;
123  rhs=r;
124 
127 
130 
131  /* unref "safety" refs from beginning */
132  SG_UNREF(r);
133  SG_UNREF(l);
134 
135  SG_DEBUG("leaving CKernel::init(%p, %p)\n", l, r)
136  return true;
137 }
138 
140 {
141  SG_REF(n);
142  if (lhs && rhs)
143  n->init(this);
144 
146  normalizer=n;
147 
148  return (normalizer!=NULL);
149 }
150 
152 {
154  return normalizer;
155 }
156 
158 {
159  return normalizer->init(this);
160 }
161 
163 {
165 }
166 
167 #ifdef USE_SVMLIGHT
168 /****************************** Cache handling *******************************/
169 
// Allocates and initializes the SVMlight-style kernel cache.
// buffsize is given in MB; the cache never exceeds totdoc*totdoc entries
// and each of max_elems cache lines holds one full kernel row.
// NOTE(review): the extraction lost original line 176 (the argument list
// of the SG_ERROR below, presumably num_lhs/num_rhs) -- consult the
// repository source.
170 void CKernel::kernel_cache_init(int32_t buffsize, bool regression_hack)
171 {
172  int32_t totdoc=get_num_vec_lhs();
173  if (totdoc<=0)
174  {
175  SG_ERROR("kernel has zero rows: num_lhs=%d num_rhs=%d\n",
177  }
178  uint64_t buffer_size=0;
179  int32_t i;
180 
181  //in regression the additional constraints are made by doubling the training data
182  if (regression_hack)
183  totdoc*=2;
184 
// convert MB to number of KERNELCACHE_ELEM entries, capped at a full matrix
185  buffer_size=((uint64_t) buffsize)*1024*1024/sizeof(KERNELCACHE_ELEM);
186  if (buffer_size>((uint64_t) totdoc)*totdoc)
187  buffer_size=((uint64_t) totdoc)*totdoc;
188 
189  SG_INFO("using a kernel cache of size %lld MB (%lld bytes) for %s Kernel\n", buffer_size*sizeof(KERNELCACHE_ELEM)/1024/1024, buffer_size*sizeof(KERNELCACHE_ELEM), get_name())
190 
191  //make sure it fits in the *signed* KERNELCACHE_IDX type
192  ASSERT(buffer_size < (((uint64_t) 1) << (sizeof(KERNELCACHE_IDX)*8-1)))
193 
194  kernel_cache.index = SG_MALLOC(int32_t, totdoc);
195  kernel_cache.occu = SG_MALLOC(int32_t, totdoc);
196  kernel_cache.lru = SG_MALLOC(int32_t, totdoc);
197  kernel_cache.invindex = SG_MALLOC(int32_t, totdoc);
198  kernel_cache.active2totdoc = SG_MALLOC(int32_t, totdoc);
199  kernel_cache.totdoc2active = SG_MALLOC(int32_t, totdoc);
200  kernel_cache.buffer = SG_MALLOC(KERNELCACHE_ELEM, buffer_size);
201  kernel_cache.buffsize=buffer_size;
// number of cache lines = buffer entries / row length (one line caches a row)
202  kernel_cache.max_elems=(int32_t) (kernel_cache.buffsize/totdoc);
203 
204  if(kernel_cache.max_elems>totdoc) {
205  kernel_cache.max_elems=totdoc;
206  }
207 
208  kernel_cache.elems=0; // initialize cache
209  for(i=0;i<totdoc;i++) {
210  kernel_cache.index[i]=-1;
211  kernel_cache.lru[i]=0;
212  }
213  for(i=0;i<totdoc;i++) {
214  kernel_cache.occu[i]=0;
215  kernel_cache.invindex[i]=-1;
216  }
217 
// initially every document is active (identity mapping); the stray
// second ';' below is harmless
218  kernel_cache.activenum=totdoc;;
219  for(i=0;i<totdoc;i++) {
220  kernel_cache.active2totdoc[i]=i;
221  kernel_cache.totdoc2active[i]=i;
222  }
223 
224  kernel_cache.time=0;
225 }
226 
// Writes kernel row `docnum` into `buffer`: either the whole row
// (full_line) or only the columns listed in active2dnum (terminated by a
// negative index).  Cached values are copied from the cache line; missing
// entries are recomputed via kernel().  Indices >= num_vectors are
// mirrored (2n-1-i), matching the regression doubling in
// kernel_cache_init.
// NOTE(review): the extraction lost original line 227 (the first line of
// this function's signature, presumably `void CKernel::get_kernel_row(`).
228  int32_t docnum, int32_t *active2dnum, float64_t *buffer, bool full_line)
229 {
230  int32_t i,j;
231  KERNELCACHE_IDX start;
232 
233  int32_t num_vectors = get_num_vec_lhs();
234  if (docnum>=num_vectors)
235  docnum=2*num_vectors-1-docnum;
236 
237  /* is cached? */
238  if(kernel_cache.index[docnum] != -1)
239  {
// touch lru timestamp, then locate this row's cache line in the buffer
240  kernel_cache.lru[kernel_cache.index[docnum]]=kernel_cache.time; /* lru */
241  start=((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[docnum];
242 
243  if (full_line)
244  {
245  for(j=0;j<get_num_vec_lhs();j++)
246  {
// inactive columns are not in the cache line -> recompute
247  if(kernel_cache.totdoc2active[j] >= 0)
248  buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
249  else
250  buffer[j]=(float64_t) kernel(docnum, j);
251  }
252  }
253  else
254  {
255  for(i=0;(j=active2dnum[i])>=0;i++)
256  {
257  if(kernel_cache.totdoc2active[j] >= 0)
258  buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
259  else
260  {
261  int32_t k=j;
// mirror indices beyond num_vectors (regression doubling)
262  if (k>=num_vectors)
263  k=2*num_vectors-1-k;
264  buffer[j]=(float64_t) kernel(docnum, k);
265  }
266  }
267  }
268  }
269  else
270  {
// row not cached at all: compute every requested entry
271  if (full_line)
272  {
273  for(j=0;j<get_num_vec_lhs();j++)
274  buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, j);
275  }
276  else
277  {
278  for(i=0;(j=active2dnum[i])>=0;i++)
279  {
280  int32_t k=j;
281  if (k>=num_vectors)
282  k=2*num_vectors-1-k;
283  buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, k);
284  }
285  }
286  }
287 }
288 
289 
290 // Fills cache for the row m
// If m is not yet cached, a cache line is obtained (evicting the LRU line
// when full) and filled: entries are copied from already-cached columns
// where possible, otherwise recomputed via kernel().  Indices >=
// num_vectors are mirrored (regression doubling).
// NOTE(review): the extraction lost original line 291 (the signature,
// presumably `void CKernel::cache_kernel_row(int32_t m)`).
292 {
293  register int32_t j,k,l;
294  register KERNELCACHE_ELEM *cache;
295 
296  int32_t num_vectors = get_num_vec_lhs();
297 
298  if (m>=num_vectors)
299  m=2*num_vectors-1-m;
300 
301  if(!kernel_cache_check(m)) // not cached yet
302  {
303  cache = kernel_cache_clean_and_malloc(m);
304  if(cache) {
// l = position of row m among the active columns (-1 if inactive)
305  l=kernel_cache.totdoc2active[m];
306 
307  for(j=0;j<kernel_cache.activenum;j++) // fill cache
308  {
309  k=kernel_cache.active2totdoc[j];
310 
// reuse the symmetric entry K(k,m) from column k's cache line if present
311  if((kernel_cache.index[k] != -1) && (l != -1) && (k != m)) {
312  cache[j]=kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)
313  *kernel_cache.index[k]+l];
314  }
315  else
316  {
317  if (k>=num_vectors)
318  k=2*num_vectors-1-k;
319 
320  cache[j]=kernel(m, k);
321  }
322  }
323  }
324  else
325  perror("Error: Kernel cache full! => increase cache size");
326  }
327 }
328 
329 
330 void* CKernel::cache_multiple_kernel_row_helper(void* p)
331 {
332  int32_t j,k,l;
333  S_KTHREAD_PARAM* params = (S_KTHREAD_PARAM*) p;
334 
335  for (int32_t i=params->start; i<params->end; i++)
336  {
337  KERNELCACHE_ELEM* cache=params->cache[i];
338  int32_t m = params->uncached_rows[i];
339  l=params->kernel_cache->totdoc2active[m];
340 
341  for(j=0;j<params->kernel_cache->activenum;j++) // fill cache
342  {
343  k=params->kernel_cache->active2totdoc[j];
344 
345  if((params->kernel_cache->index[k] != -1) && (l != -1) && (!params->needs_computation[k])) {
346  cache[j]=params->kernel_cache->buffer[((KERNELCACHE_IDX) params->kernel_cache->activenum)
347  *params->kernel_cache->index[k]+l];
348  }
349  else
350  {
351  if (k>=params->num_vectors)
352  k=2*params->num_vectors-1-k;
353 
354  cache[j]=params->kernel->kernel(m, k);
355  }
356  }
357 
358  //now line m is cached
359  params->needs_computation[m]=0;
360  }
361  return NULL;
362 }
363 
364 // Fills cache for the rows in key
// Ensures all rows listed in `rows` are present in the kernel cache.
// Without pthreads (or with <2 threads) rows are cached sequentially;
// otherwise the uncached rows are collected, cache lines are allocated
// up front, and nthreads-1 workers plus the calling thread fill them in
// parallel via cache_multiple_kernel_row_helper().
365 void CKernel::cache_multiple_kernel_rows(int32_t* rows, int32_t num_rows)
366 {
367 #ifdef HAVE_PTHREAD
368  int32_t nthreads=parallel->get_num_threads();
369 
370  if (nthreads<2)
371  {
372 #endif
// sequential fallback
373  for(int32_t i=0;i<num_rows;i++)
374  cache_kernel_row(rows[i]);
375 #ifdef HAVE_PTHREAD
376  }
377  else
378  {
379  // fill up kernel cache
380  int32_t* uncached_rows = SG_MALLOC(int32_t, num_rows);
381  KERNELCACHE_ELEM** cache = SG_MALLOC(KERNELCACHE_ELEM*, num_rows);
382  pthread_t* threads = SG_MALLOC(pthread_t, nthreads-1);
383  S_KTHREAD_PARAM* params = SG_MALLOC(S_KTHREAD_PARAM, nthreads-1);
384  int32_t num_threads=nthreads-1;
385  int32_t num_vec=get_num_vec_lhs();
386  ASSERT(num_vec>0)
// needs_computation[i]==1 marks a row whose cache line is still being filled
387  uint8_t* needs_computation=SG_CALLOC(uint8_t, num_vec);
388 
389  int32_t step=0;
390  int32_t num=0;
391  int32_t end=0;
392 
393  // allocate cachelines if necessary
394  for (int32_t i=0; i<num_rows; i++)
395  {
396  int32_t idx=rows[i];
// mirror indices beyond num_vec (regression doubling)
397  if (idx>=num_vec)
398  idx=2*num_vec-1-idx;
399 
400  if (kernel_cache_check(idx))
401  continue;
402 
403  needs_computation[idx]=1;
404  uncached_rows[num]=idx;
405  cache[num]= kernel_cache_clean_and_malloc(idx);
406 
// NOTE(review): if SG_ERROR aborts via exception here, the arrays
// allocated above are not released -- verify SG_ERROR semantics
407  if (!cache[num])
408  SG_ERROR("Kernel cache full! => increase cache size\n")
409 
410  num++;
411  }
412 
413  if (num>0)
414  {
// split the `num` uncached rows into nthreads chunks of size `step`
415  step= num/nthreads;
416 
417  if (step<1)
418  {
419  num_threads=num-1;
420  step=1;
421  }
422 
423  for (int32_t t=0; t<num_threads; t++)
424  {
425  params[t].kernel = this;
426  params[t].kernel_cache = &kernel_cache;
427  params[t].cache = cache;
428  params[t].uncached_rows = uncached_rows;
429  params[t].needs_computation = needs_computation;
430  params[t].num_uncached = num;
431  params[t].start = t*step;
432  params[t].end = (t+1)*step;
433  params[t].num_vectors = get_num_vec_lhs();
434  end=params[t].end;
435 
436  int code=pthread_create(&threads[t], NULL,
437  CKernel::cache_multiple_kernel_row_helper, (void*)&params[t]);
438 
// on failure fall back: remaining work is done by the calling thread below
439  if (code != 0)
440  {
441  SG_WARNING("Thread creation failed (thread %d of %d) "
442  "with error:'%s'\n",t, num_threads, strerror(code));
443  num_threads=t;
444  end=t*step;
445  break;
446  }
447  }
448  }
449  else
// nothing uncached: skip the join loop below
450  num_threads=-1;
451 
452 
// the calling thread processes the remainder [end, num) itself
453  S_KTHREAD_PARAM last_param;
454  last_param.kernel = this;
455  last_param.kernel_cache = &kernel_cache;
456  last_param.cache = cache;
457  last_param.uncached_rows = uncached_rows;
458  last_param.needs_computation = needs_computation;
459  last_param.start = end;
460  last_param.num_uncached = num;
461  last_param.end = num;
462  last_param.num_vectors = get_num_vec_lhs();
463 
464  cache_multiple_kernel_row_helper(&last_param);
465 
466 
467  for (int32_t t=0; t<num_threads; t++)
468  {
469  if (pthread_join(threads[t], NULL) != 0)
470  SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads)
471  }
472 
473  SG_FREE(needs_computation);
474  SG_FREE(params);
475  SG_FREE(threads);
476  SG_FREE(cache);
477  SG_FREE(uncached_rows);
478  }
479 #endif
480 }
481 
482 // remove numshrink columns in the cache
483 // which correspond to examples marked
// Compacts the cache after shrinking the active set: columns whose
// `after` entry is 0 are dropped from every cache line, and the
// active2totdoc / totdoc2active maps are rebuilt.
// NOTE(review): the extraction lost original line 484 (the signature,
// presumably `void CKernel::kernel_cache_shrink(`).
485  int32_t totdoc, int32_t numshrink, int32_t *after)
486 {
487  ASSERT(totdoc > 0);
488  register int32_t i,j,jj,scount; // 0 in after.
489  KERNELCACHE_IDX from=0,to=0;
490  int32_t *keep;
491 
// mark which documents survive the shrink (keep[j]==1)
492  keep=SG_MALLOC(int32_t, totdoc);
493  for(j=0;j<totdoc;j++) {
494  keep[j]=1;
495  }
496  scount=0;
497  for(jj=0;(jj<kernel_cache.activenum) && (scount<numshrink);jj++) {
498  j=kernel_cache.active2totdoc[jj];
499  if(!after[j]) {
500  scount++;
501  keep[j]=0;
502  }
503  }
504 
// compact every cache line in place, skipping dropped columns
505  for(i=0;i<kernel_cache.max_elems;i++) {
506  for(jj=0;jj<kernel_cache.activenum;jj++) {
507  j=kernel_cache.active2totdoc[jj];
508  if(!keep[j]) {
509  from++;
510  }
511  else {
512  kernel_cache.buffer[to]=kernel_cache.buffer[from];
513  to++;
514  from++;
515  }
516  }
517  }
518 
// rebuild the active-set index maps
519  kernel_cache.activenum=0;
520  for(j=0;j<totdoc;j++) {
521  if((keep[j]) && (kernel_cache.totdoc2active[j] != -1)) {
522  kernel_cache.active2totdoc[kernel_cache.activenum]=j;
523  kernel_cache.totdoc2active[j]=kernel_cache.activenum;
524  kernel_cache.activenum++;
525  }
526  else {
527  kernel_cache.totdoc2active[j]=-1;
528  }
529  }
530 
// NOTE(review): this mutates the persistent buffsize member rather than
// computing max_elems = buffsize/activenum; repeated shrinks would keep
// dividing buffsize.  Looks like the intent was
// `max_elems = buffsize/activenum` -- verify against upstream history.
531  kernel_cache.max_elems= (int32_t) kernel_cache.buffsize;
532 
533  if (kernel_cache.activenum>0)
534  kernel_cache.buffsize/=kernel_cache.activenum;
535 
536  if(kernel_cache.max_elems>totdoc)
537  kernel_cache.max_elems=totdoc;
538 
539  SG_FREE(keep);
540 
541 }
542 
// Rebases all lru timestamps by subtracting the current maximum,
// presumably to keep the time counters from growing without bound --
// TODO confirm against the repository source.
// NOTE(review): the extraction lost original line 543 (the signature,
// presumably `void CKernel::kernel_cache_reset_lru()`).
544 {
545  int32_t maxlru=0,k;
546 
547  for(k=0;k<kernel_cache.max_elems;k++) {
548  if(maxlru < kernel_cache.lru[k])
549  maxlru=kernel_cache.lru[k];
550  }
551  for(k=0;k<kernel_cache.max_elems;k++) {
552  kernel_cache.lru[k]-=maxlru;
553  }
554 }
555 
// Releases every array owned by the kernel cache and zeroes the whole
// KERNEL_CACHE struct so a subsequent kernel_cache_init() starts clean.
// NOTE(review): the extraction lost original line 556 (the signature,
// presumably `void CKernel::kernel_cache_cleanup()`).
557 {
558  SG_FREE(kernel_cache.index);
559  SG_FREE(kernel_cache.occu);
560  SG_FREE(kernel_cache.lru);
561  SG_FREE(kernel_cache.invindex);
562  SG_FREE(kernel_cache.active2totdoc);
563  SG_FREE(kernel_cache.totdoc2active);
564  SG_FREE(kernel_cache.buffer);
565  memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
566 }
567 
// Claims the first unoccupied cache slot: marks it occupied, bumps the
// element count and returns its index; returns -1 when no slot is free.
// NOTE(review): the extraction lost original line 572, which evidently
// opened a guard block (likely
// `if(kernel_cache.elems < kernel_cache.max_elems) {`) matching the
// otherwise-unpaired `}` at original line 580 -- consult the repository
// source before editing.
568 int32_t CKernel::kernel_cache_malloc()
569 {
570  int32_t i;
571 
573  for(i=0;i<kernel_cache.max_elems;i++) {
574  if(!kernel_cache.occu[i]) {
575  kernel_cache.occu[i]=1;
576  kernel_cache.elems++;
577  return(i);
578  }
579  }
580  }
581  return(-1);
582 }
583 
584 void CKernel::kernel_cache_free(int32_t cacheidx)
585 {
586  kernel_cache.occu[cacheidx]=0;
587  kernel_cache.elems--;
588 }
589 
590 // remove least recently used cache
591 // element
592 int32_t CKernel::kernel_cache_free_lru()
593 {
594  register int32_t k,least_elem=-1,least_time;
595 
596  least_time=kernel_cache.time+1;
597  for(k=0;k<kernel_cache.max_elems;k++) {
598  if(kernel_cache.invindex[k] != -1) {
599  if(kernel_cache.lru[k]<least_time) {
600  least_time=kernel_cache.lru[k];
601  least_elem=k;
602  }
603  }
604  }
605 
606  if(least_elem != -1) {
607  kernel_cache_free(least_elem);
608  kernel_cache.index[kernel_cache.invindex[least_elem]]=-1;
609  kernel_cache.invindex[least_elem]=-1;
610  return(1);
611  }
612  return(0);
613 }
614 
615 // Get a free cache entry. In case cache is full, the lru
616 // element is removed.
617 KERNELCACHE_ELEM* CKernel::kernel_cache_clean_and_malloc(int32_t cacheidx)
618 {
619  int32_t result;
620  if((result = kernel_cache_malloc()) == -1) {
621  if(kernel_cache_free_lru()) {
622  result = kernel_cache_malloc();
623  }
624  }
625  kernel_cache.index[cacheidx]=result;
626  if(result == -1) {
627  return(0);
628  }
629  kernel_cache.invindex[result]=cacheidx;
630  kernel_cache.lru[kernel_cache.index[cacheidx]]=kernel_cache.time; // lru
631  return &kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[cacheidx]];
632 }
633 #endif //USE_SVMLIGHT
634 
635 void CKernel::load(CFile* loader)
636 {
639 }
640 
641 void CKernel::save(CFile* writer)
642 {
643  SGMatrix<float64_t> k_matrix=get_kernel_matrix<float64_t>();
645  writer->set_matrix(k_matrix.matrix, k_matrix.num_rows, k_matrix.num_cols);
647 }
648 
650 {
651  SG_DEBUG("entering CKernel::remove_lhs_and_rhs\n")
652  if (rhs!=lhs)
653  SG_UNREF(rhs);
654  rhs = NULL;
655  num_rhs=0;
656 
657  SG_UNREF(lhs);
658  lhs = NULL;
659  num_lhs=0;
660  lhs_equals_rhs=false;
661 
662 #ifdef USE_SVMLIGHT
663  cache_reset();
664 #endif //USE_SVMLIGHT
665  SG_DEBUG("leaving CKernel::remove_lhs_and_rhs\n")
666 }
667 
669 {
670  if (rhs==lhs)
671  rhs=NULL;
672  SG_UNREF(lhs);
673  lhs = NULL;
674  num_lhs=0;
675  lhs_equals_rhs=false;
676 #ifdef USE_SVMLIGHT
677  cache_reset();
678 #endif //USE_SVMLIGHT
679 }
680 
683 {
684  if (rhs!=lhs)
685  SG_UNREF(rhs);
686  rhs = NULL;
687  num_rhs=0;
688  lhs_equals_rhs=false;
689 
690 #ifdef USE_SVMLIGHT
691  cache_reset();
692 #endif //USE_SVMLIGHT
693 }
694 
695 #define ENUM_CASE(n) case n: SG_INFO(#n " ") break;
696 
698 {
699  SG_INFO("%p - \"%s\" weight=%1.2f OPT:%s", this, get_name(),
701  get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" :
702  "SLOWBUTMEMEFFICIENT");
703 
704  switch (get_kernel_type())
705  {
765  }
766 
767  switch (get_feature_class())
768  {
779  ENUM_CASE(C_WD)
790  }
791 
792  switch (get_feature_type())
793  {
808  }
809  SG_INFO("\n")
810 }
811 #undef ENUM_CASE
812 
814  int32_t count, int32_t *IDX, float64_t * weights)
815 {
816  SG_ERROR("kernel does not support linadd optimization\n")
817  return false ;
818 }
819 
821 {
822  SG_ERROR("kernel does not support linadd optimization\n")
823  return false;
824 }
825 
827 {
828  SG_ERROR("kernel does not support linadd optimization\n")
829  return 0;
830 }
831 
833  int32_t num_vec, int32_t* vec_idx, float64_t* target, int32_t num_suppvec,
834  int32_t* IDX, float64_t* weights, float64_t factor)
835 {
836  SG_ERROR("kernel does not support batch computation\n")
837 }
838 
// Base-class stub for kernels supporting linadd optimization; always
// raises via SG_ERROR.  Subclasses implementing linadd override this to
// add `weight` * vector `vector_idx` to the normal vector.
839 void CKernel::add_to_normal(int32_t vector_idx, float64_t weight)
840 {
841  SG_ERROR("kernel does not support linadd optimization, add_to_normal not implemented\n")
842 }
843 
845 {
846  SG_ERROR("kernel does not support linadd optimization, clear_normal not implemented\n")
847 }
848 
850 {
851  return 1;
852 }
853 
855  int32_t vector_idx, float64_t * subkernel_contrib)
856 {
857  SG_ERROR("kernel compute_by_subkernel not implemented\n")
858 }
859 
// Base-class implementation: a plain kernel has exactly one subkernel
// weight.  Sets num_weights to 1 and returns a pointer to the member
// combined_kernel_weight (not a copy -- caller must not free it).
860 const float64_t* CKernel::get_subkernel_weights(int32_t &num_weights)
861 {
862  num_weights=1 ;
863  return &combined_kernel_weight ;
864 }
865 
867 {
868  int num_weights = 1;
869  const float64_t* weight = get_subkernel_weights(num_weights);
870  return SGVector<float64_t>(const_cast<float64_t*>(weight),1,false);
871 }
872 
874 {
875  ASSERT(weights.vector)
876  if (weights.vlen!=1)
877  SG_ERROR("number of subkernel weights should be one ...\n")
878 
879  combined_kernel_weight = weights.vector[0] ;
880 }
881 
883 {
884  if (kernel)
885  {
886  CKernel* casted=dynamic_cast<CKernel*>(kernel);
887  REQUIRE(casted, "CKernel::obtain_from_generic(): Error, provided object"
888  " of class \"%s\" is not a subclass of CKernel!\n",
889  kernel->get_name());
890  return casted;
891  }
892  else
893  return NULL;
894 }
895 
897 {
898  int32_t num_suppvec=svm->get_num_support_vectors();
899  int32_t* sv_idx=SG_MALLOC(int32_t, num_suppvec);
900  float64_t* sv_weight=SG_MALLOC(float64_t, num_suppvec);
901 
902  for (int32_t i=0; i<num_suppvec; i++)
903  {
904  sv_idx[i] = svm->get_support_vector(i);
905  sv_weight[i] = svm->get_alpha(i);
906  }
907  bool ret = init_optimization(num_suppvec, sv_idx, sv_weight);
908 
909  SG_FREE(sv_idx);
910  SG_FREE(sv_weight);
911  return ret;
912 }
913 
915 {
917  if (lhs_equals_rhs)
918  rhs=lhs;
919 }
920 
922 {
924 
925  if (lhs_equals_rhs)
926  rhs=NULL;
927 }
928 
930 {
932 
933  if (lhs_equals_rhs)
934  rhs=lhs;
935 }
936 
938  SG_ADD(&cache_size, "cache_size",
939  "Cache size in MB.", MS_NOT_AVAILABLE);
940  SG_ADD((CSGObject**) &lhs, "lhs",
941  "Feature vectors to occur on left hand side.", MS_NOT_AVAILABLE);
942  SG_ADD((CSGObject**) &rhs, "rhs",
943  "Feature vectors to occur on right hand side.", MS_NOT_AVAILABLE);
944  SG_ADD(&lhs_equals_rhs, "lhs_equals_rhs",
945  "If features on lhs are the same as on rhs.", MS_NOT_AVAILABLE);
946  SG_ADD(&num_lhs, "num_lhs", "Number of feature vectors on left hand side.",
948  SG_ADD(&num_rhs, "num_rhs", "Number of feature vectors on right hand side.",
950  SG_ADD(&combined_kernel_weight, "combined_kernel_weight",
951  "Combined kernel weight.", MS_AVAILABLE);
952  SG_ADD(&optimization_initialized, "optimization_initialized",
953  "Optimization is initialized.", MS_NOT_AVAILABLE);
954  SG_ADD((machine_int_t*) &opt_type, "opt_type",
955  "Optimization type.", MS_NOT_AVAILABLE);
956  SG_ADD(&properties, "properties", "Kernel properties.", MS_NOT_AVAILABLE);
957  SG_ADD((CSGObject**) &normalizer, "normalizer", "Normalize the kernel.",
958  MS_AVAILABLE);
959 }
960 
961 
962 void CKernel::init()
963 {
964  cache_size=10;
965  kernel_matrix=NULL;
966  lhs=NULL;
967  rhs=NULL;
968  num_lhs=0;
969  num_rhs=0;
970  lhs_equals_rhs=false;
975  normalizer=NULL;
976 
977 #ifdef USE_SVMLIGHT
978  memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
979 #endif //USE_SVMLIGHT
980 
982 }
983 
984 namespace shogun
985 {
987 template <class T> struct K_THREAD_PARAM
988 {
992  int32_t start;
994  int32_t end;
996  int64_t total_start;
998  int64_t total_end;
1000  int32_t m;
1002  int32_t n;
1008  bool verbose;
1009 };
1010 }
1011 
1012 template <class T> void* CKernel::get_kernel_matrix_helper(void* p)
1013 {
1014  K_THREAD_PARAM<T>* params= (K_THREAD_PARAM<T>*) p;
1015  int32_t i_start=params->start;
1016  int32_t i_end=params->end;
1017  CKernel* k=params->kernel;
1018  T* result=params->result;
1019  bool symmetric=params->symmetric;
1020  int32_t n=params->n;
1021  int32_t m=params->m;
1022  bool verbose=params->verbose;
1023  int64_t total_start=params->total_start;
1024  int64_t total_end=params->total_end;
1025  int64_t total=total_start;
1026 
1027  for (int32_t i=i_start; i<i_end; i++)
1028  {
1029  int32_t j_start=0;
1030 
1031  if (symmetric)
1032  j_start=i;
1033 
1034  for (int32_t j=j_start; j<n; j++)
1035  {
1036  float64_t v=k->kernel(i,j);
1037  result[i+j*m]=v;
1038 
1039  if (symmetric && i!=j)
1040  result[j+i*m]=v;
1041 
1042  if (verbose)
1043  {
1044  total++;
1045 
1046  if (symmetric && i!=j)
1047  total++;
1048 
1049  if (total%100 == 0)
1050  SG_OBJ_PROGRESS(k, total, total_start, total_end)
1051 
1053  break;
1054  }
1055  }
1056 
1057  }
1058 
1059  return NULL;
1060 }
1061 
1062 template <class T>
1064 {
1065  T* result = NULL;
1066 
1067  REQUIRE(has_features(), "no features assigned to kernel\n")
1068 
1069  int32_t m=get_num_vec_lhs();
1070  int32_t n=get_num_vec_rhs();
1071 
1072  int64_t total_num = int64_t(m)*n;
1073 
1074  // if lhs == rhs and sizes match assume k(i,j)=k(j,i)
1075  bool symmetric= (lhs && lhs==rhs && m==n);
1076 
1077  SG_DEBUG("returning kernel matrix of size %dx%d\n", m, n)
1078 
1079  result=SG_MALLOC(T, total_num);
1080 
1081  int32_t num_threads=parallel->get_num_threads();
1082  if (num_threads < 2)
1083  {
1084  K_THREAD_PARAM<T> params;
1085  params.kernel=this;
1086  params.result=result;
1087  params.start=0;
1088  params.end=m;
1089  params.total_start=0;
1090  params.total_end=total_num;
1091  params.n=n;
1092  params.m=m;
1093  params.symmetric=symmetric;
1094  params.verbose=true;
1095  get_kernel_matrix_helper<T>((void*) &params);
1096  }
1097  else
1098  {
1099  pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
1100  K_THREAD_PARAM<T>* params = SG_MALLOC(K_THREAD_PARAM<T>, num_threads);
1101  int64_t step= total_num/num_threads;
1102 
1103  int32_t t;
1104 
1105  num_threads--;
1106  for (t=0; t<num_threads; t++)
1107  {
1108  params[t].kernel = this;
1109  params[t].result = result;
1110  params[t].start = compute_row_start(t*step, n, symmetric);
1111  params[t].end = compute_row_start((t+1)*step, n, symmetric);
1112  params[t].total_start=t*step;
1113  params[t].total_end=(t+1)*step;
1114  params[t].n=n;
1115  params[t].m=m;
1116  params[t].symmetric=symmetric;
1117  params[t].verbose=false;
1118 
1119  int code=pthread_create(&threads[t], NULL,
1120  CKernel::get_kernel_matrix_helper<T>, (void*)&params[t]);
1121 
1122  if (code != 0)
1123  {
1124  SG_WARNING("Thread creation failed (thread %d of %d) "
1125  "with error:'%s'\n",t, num_threads, strerror(code));
1126  num_threads=t;
1127  break;
1128  }
1129  }
1130 
1131  params[t].kernel = this;
1132  params[t].result = result;
1133  params[t].start = compute_row_start(t*step, n, symmetric);
1134  params[t].end = m;
1135  params[t].total_start=t*step;
1136  params[t].total_end=total_num;
1137  params[t].n=n;
1138  params[t].m=m;
1139  params[t].symmetric=symmetric;
1140  params[t].verbose=true;
1141  get_kernel_matrix_helper<T>(&params[t]);
1142 
1143  for (t=0; t<num_threads; t++)
1144  {
1145  if (pthread_join(threads[t], NULL) != 0)
1146  SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads)
1147  }
1148 
1149  SG_FREE(params);
1150  SG_FREE(threads);
1151  }
1152 
1153  SG_DONE()
1154 
1155  return SGMatrix<T>(result,m,n,true);
1156 }
1157 
1160 
1161 template void* CKernel::get_kernel_matrix_helper<float64_t>(void* p);
1162 template void* CKernel::get_kernel_matrix_helper<float32_t>(void* p);

SHOGUN Machine Learning Toolbox - Documentation