SHOGUN  4.2.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
QuadraticTimeMMD.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) The Shogun Machine Learning Toolbox
3  * Written (w) 2012-2013 Heiko Strathmann
4  * Written (w) 2014 Soumyajit De
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice, this
11  * list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  * The views and conclusions contained in the software and documentation are those
28  * of the authors and should not be interpreted as representing official policies,
29  * either expressed or implied, of the Shogun Development Team.
30  */
31 
#include <shogun/statistics/QuadraticTimeMMD.h>
#include <shogun/features/Features.h>
#include <shogun/kernel/Kernel.h>
#include <shogun/kernel/CombinedKernel.h>
#include <shogun/kernel/CustomKernel.h>
#include <shogun/mathematics/Statistics.h>
#include <shogun/mathematics/eigen3.h>

#include <algorithm>

using namespace shogun;

using namespace Eigen;
45 
47 {
48  init();
49 }
50 
52  index_t m) :
53  CKernelTwoSampleTest(kernel, p_and_q, m)
54 {
55  init();
56 }
57 
59  CFeatures* q) : CKernelTwoSampleTest(kernel, p, q)
60 {
61  init();
62 }
63 
65  CKernelTwoSampleTest(custom_kernel, NULL, m)
66 {
67  init();
68 }
69 
71 {
72 }
73 
74 void CQuadraticTimeMMD::init()
75 {
76  SG_ADD(&m_num_samples_spectrum, "num_samples_spectrum", "Number of samples"
77  " for spectrum method null-distribution approximation",
79  SG_ADD(&m_num_eigenvalues_spectrum, "num_eigenvalues_spectrum", "Number of "
80  " Eigenvalues for spectrum method null-distribution approximation",
82  SG_ADD((machine_int_t*)&m_statistic_type, "statistic_type",
83  "Biased or unbiased MMD", MS_NOT_AVAILABLE);
84 
88 }
89 
91  int m, int n)
92 {
93  SG_DEBUG("Entering!\n");
94 
95  /* init kernel with features. NULL check is handled in compute_statistic */
97 
98  /* computing kernel values and their sums on the fly that are used both in
99  computing statistic and variance */
100 
101  /* the following matrix stores row-wise sum of kernel values k(X,X') in
102  the first column and row-wise squared sum of kernel values k^2(X,X')
103  in the second column. m entries in both column */
104  SGMatrix<float64_t> xx_sum_sq_sum_rowwise=m_kernel->
105  row_wise_sum_squared_sum_symmetric_block(0, m);
106 
107  /* row-wise sum of kernel values k(Y,Y'), n entries */
108  SGVector<float64_t> yy_sum_rowwise=m_kernel->
109  row_wise_sum_symmetric_block(m, n);
110 
111  /* row-wise and col-wise sum of kernel values k(X,Y), m+n entries
112  first m entries are row-wise sum, rest n entries are col-wise sum */
113  SGVector<float64_t> xy_sum_rowcolwise=m_kernel->
114  row_col_wise_sum_block(0, m, m, n);
115 
116  /* computing overall sum and squared sum from above for convenience */
117 
118  SGVector<float64_t> xx_sum_rowwise(m);
119  std::copy(xx_sum_sq_sum_rowwise.matrix, xx_sum_sq_sum_rowwise.matrix+m,
120  xx_sum_rowwise.vector);
121 
122  SGVector<float64_t> xy_sum_rowwise(m);
123  std::copy(xy_sum_rowcolwise.vector, xy_sum_rowcolwise.vector+m,
124  xy_sum_rowwise.vector);
125 
126  SGVector<float64_t> xy_sum_colwise(n);
127  std::copy(xy_sum_rowcolwise.vector+m, xy_sum_rowcolwise.vector+m+n,
128  xy_sum_colwise.vector);
129 
130  float64_t xx_sq_sum=0.0;
131  for (index_t i=0; i<xx_sum_sq_sum_rowwise.num_rows; i++)
132  xx_sq_sum+=xx_sum_sq_sum_rowwise(i, 1);
133 
134  float64_t xx_sum=0.0;
135  for (index_t i=0; i<xx_sum_rowwise.vlen; i++)
136  xx_sum+=xx_sum_rowwise[i];
137 
138  float64_t yy_sum=0.0;
139  for (index_t i=0; i<yy_sum_rowwise.vlen; i++)
140  yy_sum+=yy_sum_rowwise[i];
141 
142  float64_t xy_sum=0.0;
143  for (index_t i=0; i<xy_sum_rowwise.vlen; i++)
144  xy_sum+=xy_sum_rowwise[i];
145 
146  /* compute statistic */
147 
148  /* split computations into three terms from JLMR paper (see documentation )*/
149 
150  /* first term */
151  float64_t first=xx_sum/m/(m-1);
152 
153  /* second term */
154  float64_t second=yy_sum/n/(n-1);
155 
156  /* third term */
157  float64_t third=2.0*xy_sum/m/n;
158 
159  float64_t statistic=first+second-third;
160 
161  SG_INFO("Computed statistic!\n");
162  SG_DEBUG("first=%f, second=%f, third=%f\n", first, second, third);
163 
164  /* compute variance under null */
165 
166  /* split computations into three terms (see documentation) */
167 
168  /* first term */
169  float64_t kappa_0=CMath::sq(xx_sum/m/(m-1));
170 
171  /* second term */
172  float64_t kappa_1=0.0;
173  for (index_t i=0; i<m; ++i)
174  kappa_1+=CMath::sq(xx_sum_rowwise[i]/(m-1));
175  kappa_1/=m;
176 
177  /* third term */
178  float64_t kappa_2=xx_sq_sum/m/(m-1);
179 
180  float64_t var_null=2*(kappa_0-2*kappa_1+kappa_2);
181 
182  SG_INFO("Computed variance under null!\n");
183  SG_DEBUG("kappa_0=%f, kappa_1=%f, kappa_2=%f\n", kappa_0, kappa_1, kappa_2);
184 
185  /* compute variance under alternative */
186 
187  /* split computations into four terms (see documentation) */
188 
189  /* first term */
190  float64_t alt_var_first=0.0;
191  for (index_t i=0; i<m; ++i)
192  {
193  // use row-wise sum from k(X,X') and k(X,Y) blocks
194  float64_t term=xx_sum_rowwise[i]/(m-1)-xy_sum_rowwise[i]/n;
195  alt_var_first+=CMath::sq(term);
196  }
197  alt_var_first/=m;
198 
199  /* second term */
200  float64_t alt_var_second=CMath::sq(xx_sum/m/(m-1)-xy_sum/m/n);
201 
202  /* third term */
203  float64_t alt_var_third=0.0;
204  for (index_t i=0; i<n; ++i)
205  {
206  // use row-wise sum from k(Y,Y') and col-wise sum from k(X,Y)
207  // blocks to simulate row-wise sum from k(Y,X) blocks
208  float64_t term=yy_sum_rowwise[i]/(n-1)-xy_sum_colwise[i]/m;
209  alt_var_third+=CMath::sq(term);
210  }
211  alt_var_third/=n;
212 
213  /* fourth term */
214  float64_t alt_var_fourth=CMath::sq(yy_sum/n/(n-1)-xy_sum/m/n);
215 
216  /* finally computing variance */
217  float64_t rho_x=float64_t(m)/(m+n);
218  float64_t rho_y=float64_t(n)/(m+n);
219 
220  float64_t var_alt=4*rho_x*(alt_var_first-alt_var_second)+
221  4*rho_y*(alt_var_third-alt_var_fourth);
222 
223  SG_INFO("Computed variance under alternative!\n");
224  SG_DEBUG("first=%f, second=%f, third=%f, fourth=%f\n", alt_var_first,
225  alt_var_second, alt_var_third, alt_var_fourth);
226 
227  SGVector<float64_t> results(3);
228  results[0]=statistic;
229  results[1]=var_null;
230  results[2]=var_alt;
231 
232  SG_DEBUG("Leaving!\n");
233 
234  return results;
235 }
236 
238 {
239  SG_DEBUG("Entering!\n");
240 
241  /* init kernel with features. NULL check is handled in compute_statistic */
243 
244  /* computing kernel values and their sums on the fly that are used both in
245  computing statistic and variance */
246 
247  /* the following matrix stores row-wise sum of kernel values k(X,X') in
248  the first column and row-wise squared sum of kernel values k^2(X,X')
249  in the second column. m entries in both column */
250  SGMatrix<float64_t> xx_sum_sq_sum_rowwise=m_kernel->
251  row_wise_sum_squared_sum_symmetric_block(0, m, false);
252 
253  /* row-wise sum of kernel values k(Y,Y'), n entries */
254  SGVector<float64_t> yy_sum_rowwise=m_kernel->
255  row_wise_sum_symmetric_block(m, n, false);
256 
257  /* row-wise and col-wise sum of kernel values k(X,Y), m+n entries
258  first m entries are row-wise sum, rest n entries are col-wise sum */
259  SGVector<float64_t> xy_sum_rowcolwise=m_kernel->
260  row_col_wise_sum_block(0, m, m, n);
261 
262  /* computing overall sum and squared sum from above for convenience */
263 
264  SGVector<float64_t> xx_sum_rowwise(m);
265  std::copy(xx_sum_sq_sum_rowwise.matrix, xx_sum_sq_sum_rowwise.matrix+m,
266  xx_sum_rowwise.vector);
267 
268  SGVector<float64_t> xy_sum_rowwise(m);
269  std::copy(xy_sum_rowcolwise.vector, xy_sum_rowcolwise.vector+m,
270  xy_sum_rowwise.vector);
271 
272  SGVector<float64_t> xy_sum_colwise(n);
273  std::copy(xy_sum_rowcolwise.vector+m, xy_sum_rowcolwise.vector+m+n,
274  xy_sum_colwise.vector);
275 
276  float64_t xx_sq_sum=0.0;
277  for (index_t i=0; i<xx_sum_sq_sum_rowwise.num_rows; i++)
278  xx_sq_sum+=xx_sum_sq_sum_rowwise(i, 1);
279 
280  float64_t xx_sum=0.0;
281  for (index_t i=0; i<xx_sum_rowwise.vlen; i++)
282  xx_sum+=xx_sum_rowwise[i];
283 
284  float64_t yy_sum=0.0;
285  for (index_t i=0; i<yy_sum_rowwise.vlen; i++)
286  yy_sum+=yy_sum_rowwise[i];
287 
288  float64_t xy_sum=0.0;
289  for (index_t i=0; i<xy_sum_rowwise.vlen; i++)
290  xy_sum+=xy_sum_rowwise[i];
291 
292  /* compute statistic */
293 
294  /* split computations into three terms from JLMR paper (see documentation )*/
295 
296  /* first term */
297  float64_t first=xx_sum/m/m;
298 
299  /* second term */
300  float64_t second=yy_sum/n/n;
301 
302  /* third term */
303  float64_t third=2.0*xy_sum/m/n;
304 
305  float64_t statistic=first+second-third;
306 
307  SG_INFO("Computed statistic!\n");
308  SG_DEBUG("first=%f, second=%f, third=%f\n", first, second, third);
309 
310  /* compute variance under null */
311 
312  /* split computations into three terms (see documentation) */
313 
314  /* first term */
315  float64_t kappa_0=CMath::sq(xx_sum/m/m);
316 
317  /* second term */
318  float64_t kappa_1=0.0;
319  for (index_t i=0; i<m; ++i)
320  kappa_1+=CMath::sq(xx_sum_rowwise[i]/m);
321  kappa_1/=m;
322 
323  /* third term */
324  float64_t kappa_2=xx_sq_sum/m/m;
325 
326  float64_t var_null=2*(kappa_0-2*kappa_1+kappa_2);
327 
328  SG_INFO("Computed variance under null!\n");
329  SG_DEBUG("kappa_0=%f, kappa_1=%f, kappa_2=%f\n", kappa_0, kappa_1, kappa_2);
330 
331  /* compute variance under alternative */
332 
333  /* split computations into four terms (see documentation) */
334 
335  /* first term */
336  float64_t alt_var_first=0.0;
337  for (index_t i=0; i<m; ++i)
338  {
339  // use row-wise sum from k(X,X') and k(X,Y) blocks
340  float64_t term=xx_sum_rowwise[i]/m-xy_sum_rowwise[i]/n;
341  alt_var_first+=CMath::sq(term);
342  }
343  alt_var_first/=m;
344 
345  /* second term */
346  float64_t alt_var_second=CMath::sq(xx_sum/m/m-xy_sum/m/n);
347 
348  /* third term */
349  float64_t alt_var_third=0.0;
350  for (index_t i=0; i<n; ++i)
351  {
352  // use row-wise sum from k(Y,Y') and col-wise sum from k(X,Y)
353  // blocks to simulate row-wise sum from k(Y,X) blocks
354  float64_t term=yy_sum_rowwise[i]/n-xy_sum_colwise[i]/m;
355  alt_var_third+=CMath::sq(term);
356  }
357  alt_var_third/=n;
358 
359  /* fourth term */
360  float64_t alt_var_fourth=CMath::sq(yy_sum/n/n-xy_sum/m/n);
361 
362  /* finally computing variance */
363  float64_t rho_x=float64_t(m)/(m+n);
364  float64_t rho_y=float64_t(n)/(m+n);
365 
366  float64_t var_alt=4*rho_x*(alt_var_first-alt_var_second)+
367  4*rho_y*(alt_var_third-alt_var_fourth);
368 
369  SG_INFO("Computed variance under alternative!\n");
370  SG_DEBUG("first=%f, second=%f, third=%f, fourth=%f\n", alt_var_first,
371  alt_var_second, alt_var_third, alt_var_fourth);
372 
373  SGVector<float64_t> results(3);
374  results[0]=statistic;
375  results[1]=var_null;
376  results[2]=var_alt;
377 
378  SG_DEBUG("Leaving!\n");
379 
380  return results;
381 }
382 
384 {
385  SG_DEBUG("Entering!\n");
386 
387  /* init kernel with features. NULL check is handled in compute_statistic */
389 
390  /* computing kernel values and their sums on the fly that are used both in
391  computing statistic and variance */
392 
393  /* the following matrix stores row-wise sum of kernel values k(X,X') in
394  the first column and row-wise squared sum of kernel values k^2(X,X')
395  in the second column. n entries in both column */
396  SGMatrix<float64_t> xx_sum_sq_sum_rowwise=m_kernel->
397  row_wise_sum_squared_sum_symmetric_block(0, n);
398 
399  /* row-wise sum of kernel values k(Y,Y'), n entries */
400  SGVector<float64_t> yy_sum_rowwise=m_kernel->
401  row_wise_sum_symmetric_block(n, n);
402 
403  /* row-wise and col-wise sum of kernel values k(X,Y), 2n entries
404  first n entries are row-wise sum, rest n entries are col-wise sum */
405  SGVector<float64_t> xy_sum_rowcolwise=m_kernel->
406  row_col_wise_sum_block(0, n, n, n, true);
407 
408  /* computing overall sum and squared sum from above for convenience */
409 
410  SGVector<float64_t> xx_sum_rowwise(n);
411  std::copy(xx_sum_sq_sum_rowwise.matrix, xx_sum_sq_sum_rowwise.matrix+n,
412  xx_sum_rowwise.vector);
413 
414  SGVector<float64_t> xy_sum_rowwise(n);
415  std::copy(xy_sum_rowcolwise.vector, xy_sum_rowcolwise.vector+n,
416  xy_sum_rowwise.vector);
417 
418  SGVector<float64_t> xy_sum_colwise(n);
419  std::copy(xy_sum_rowcolwise.vector+n, xy_sum_rowcolwise.vector+2*n,
420  xy_sum_colwise.vector);
421 
422  float64_t xx_sq_sum=0.0;
423  for (index_t i=0; i<xx_sum_sq_sum_rowwise.num_rows; i++)
424  xx_sq_sum+=xx_sum_sq_sum_rowwise(i, 1);
425 
426  float64_t xx_sum=0.0;
427  for (index_t i=0; i<xx_sum_rowwise.vlen; i++)
428  xx_sum+=xx_sum_rowwise[i];
429 
430  float64_t yy_sum=0.0;
431  for (index_t i=0; i<yy_sum_rowwise.vlen; i++)
432  yy_sum+=yy_sum_rowwise[i];
433 
434  float64_t xy_sum=0.0;
435  for (index_t i=0; i<xy_sum_rowwise.vlen; i++)
436  xy_sum+=xy_sum_rowwise[i];
437 
438  /* compute statistic */
439 
440  /* split computations into three terms from JLMR paper (see documentation )*/
441 
442  /* first term */
443  float64_t first=xx_sum/n/(n-1);
444 
445  /* second term */
446  float64_t second=yy_sum/n/(n-1);
447 
448  /* third term */
449  float64_t third=2.0*xy_sum/n/(n-1);
450 
451  float64_t statistic=first+second-third;
452 
453  SG_INFO("Computed statistic!\n");
454  SG_DEBUG("first=%f, second=%f, third=%f\n", first, second, third);
455 
456  /* compute variance under null */
457 
458  /* split computations into three terms (see documentation) */
459 
460  /* first term */
461  float64_t kappa_0=CMath::sq(xx_sum/n/(n-1));
462 
463  /* second term */
464  float64_t kappa_1=0.0;
465  for (index_t i=0; i<n; ++i)
466  kappa_1+=CMath::sq(xx_sum_rowwise[i]/(n-1));
467  kappa_1/=n;
468 
469  /* third term */
470  float64_t kappa_2=xx_sq_sum/n/(n-1);
471 
472  float64_t var_null=2*(kappa_0-2*kappa_1+kappa_2);
473 
474  SG_INFO("Computed variance under null!\n");
475  SG_DEBUG("kappa_0=%f, kappa_1=%f, kappa_2=%f\n", kappa_0, kappa_1, kappa_2);
476 
477  /* compute variance under alternative */
478 
479  /* split computations into four terms (see documentation) */
480 
481  /* first term */
482  float64_t alt_var_first=0.0;
483  for (index_t i=0; i<n; ++i)
484  {
485  // use row-wise sum from k(X,X') and k(X,Y) blocks
486  float64_t term=(xx_sum_rowwise[i]-xy_sum_rowwise[i])/(n-1);
487  alt_var_first+=CMath::sq(term);
488  }
489  alt_var_first/=n;
490 
491  /* second term */
492  float64_t alt_var_second=CMath::sq(xx_sum/n/(n-1)-xy_sum/n/(n-1));
493 
494  /* third term */
495  float64_t alt_var_third=0.0;
496  for (index_t i=0; i<n; ++i)
497  {
498  // use row-wise sum from k(Y,Y') and col-wise sum from k(X,Y)
499  // blocks to simulate row-wise sum from k(Y,X) blocks
500  float64_t term=(yy_sum_rowwise[i]-xy_sum_colwise[i])/(n-1);
501  alt_var_third+=CMath::sq(term);
502  }
503  alt_var_third/=n;
504 
505  /* fourth term */
506  float64_t alt_var_fourth=CMath::sq(yy_sum/n/(n-1)-xy_sum/n/(n-1));
507 
508  /* finally computing variance */
509  float64_t rho_x=0.5;
510  float64_t rho_y=0.5;
511 
512  float64_t var_alt=4*rho_x*(alt_var_first-alt_var_second)+
513  4*rho_y*(alt_var_third-alt_var_fourth);
514 
515  SG_INFO("Computed variance under alternative!\n");
516  SG_DEBUG("first=%f, second=%f, third=%f, fourth=%f\n", alt_var_first,
517  alt_var_second, alt_var_third, alt_var_fourth);
518 
519  SGVector<float64_t> results(3);
520  results[0]=statistic;
521  results[1]=var_null;
522  results[2]=var_alt;
523 
524  SG_DEBUG("Leaving!\n");
525 
526  return results;
527 }
528 
530 {
531  return compute_unbiased_statistic_variance(m, n)[0];
532 }
533 
535 {
536  return compute_biased_statistic_variance(m, n)[0];
537 }
538 
540 {
542 }
543 
545 {
546  REQUIRE(m_kernel, "No kernel specified!\n")
547 
548  index_t m=m_m;
549  index_t n=0;
550 
551  /* check if kernel is precomputed (custom kernel) */
553  n=m_kernel->get_num_vec_lhs()-m;
554  else
555  {
556  REQUIRE(m_p_and_q, "The samples are not initialized!\n");
557  n=m_p_and_q->get_num_vectors()-m;
558  }
559 
560  SG_DEBUG("Computing MMD with %d samples from p and %d samples from q!\n",
561  m, n);
562 
563  float64_t result=0;
564  switch (m_statistic_type)
565  {
566  case UNBIASED:
567  result=compute_unbiased_statistic(m, n);
568  result*=m*n/float64_t(m+n);
569  break;
570  case UNBIASED_DEPRECATED:
571  result=compute_unbiased_statistic(m, n);
572  result*=m==n ? m : (m+n);
573  break;
574  case BIASED:
575  result=compute_biased_statistic(m, n);
576  result*=m*n/float64_t(m+n);
577  break;
578  case BIASED_DEPRECATED:
579  result=compute_biased_statistic(m, n);
580  result*=m==n? m : (m+n);
581  break;
582  case INCOMPLETE:
583  REQUIRE(m==n, "Only possible with equal number of samples from both"
584  "distribution!\n")
586  result*=n/2;
587  break;
588  default:
589  SG_ERROR("Unknown statistic type!\n");
590  break;
591  }
592 
593  return result;
594 }
595 
597 {
598  REQUIRE(m_kernel, "No kernel specified!\n")
599 
600  index_t m=m_m;
601  index_t n=0;
602 
603  /* check if kernel is precomputed (custom kernel) */
605  n=m_kernel->get_num_vec_lhs()-m;
606  else
607  {
608  REQUIRE(m_p_and_q, "The samples are not initialized!\n");
609  n=m_p_and_q->get_num_vectors()-m;
610  }
611 
612  SG_DEBUG("Computing MMD with %d samples from p and %d samples from q!\n",
613  m, n);
614 
615  SGVector<float64_t> result(2);
616  switch (m_statistic_type)
617  {
618  case UNBIASED:
619  case UNBIASED_DEPRECATED:
620  {
622  result[0]=res[1];
623  result[1]=res[2];
624  break;
625  }
626  case BIASED:
627  case BIASED_DEPRECATED:
628  {
630  result[0]=res[1];
631  result[1]=res[2];
632  break;
633  }
634  case INCOMPLETE:
635  {
636  REQUIRE(m==n, "Only possible with equal number of samples from both"
637  "distribution!\n")
639  result[0]=res[1];
640  result[1]=res[2];
641  break;
642  }
643  default:
644  SG_ERROR("Unknown statistic type!\n");
645  break;
646  }
647 
648  return result;
649 }
650 
652 {
653  return compute_variance()[0];
654 }
655 
657 {
658  return compute_variance()[1];
659 }
660 
662 {
663  SGVector<float64_t> mmds;
664  if (!multiple_kernels)
665  {
666  /* just one mmd result */
667  mmds=SGVector<float64_t>(1);
668  mmds[0]=compute_statistic();
669  }
670  else
671  {
672  REQUIRE(m_kernel, "No kernel specified!\n")
674  "multiple kernels specified, but underlying kernel is not of type "
675  "K_COMBINED\n");
676 
677  /* cast and allocate memory for results */
679  SG_REF(combined);
680  mmds=SGVector<float64_t>(combined->get_num_subkernels());
681 
682  /* iterate through all kernels and compute statistic */
683  /* TODO this might be done in parallel */
684  for (index_t i=0; i<mmds.vlen; ++i)
685  {
686  CKernel* current=combined->get_kernel(i);
687  /* temporarily replace underlying kernel and compute statistic */
688  m_kernel=current;
689  mmds[i]=compute_statistic();
690 
691  SG_UNREF(current);
692  }
693 
694  /* restore combined kernel */
695  m_kernel=combined;
696  SG_UNREF(combined);
697  }
698 
699  return mmds;
700 }
701 
703 {
704  SGMatrix<float64_t> vars;
705  if (!multiple_kernels)
706  {
707  /* just one mmd result */
708  vars=SGMatrix<float64_t>(1, 2);
710  vars(0, 0)=result[0];
711  vars(0, 1)=result[1];
712  }
713  else
714  {
715  REQUIRE(m_kernel, "No kernel specified!\n")
717  "multiple kernels specified, but underlying kernel is not of type "
718  "K_COMBINED\n");
719 
720  /* cast and allocate memory for results */
722  SG_REF(combined);
723  vars=SGMatrix<float64_t>(combined->get_num_subkernels(), 2);
724 
725  /* iterate through all kernels and compute variance */
726  /* TODO this might be done in parallel */
727  for (index_t i=0; i<vars.num_rows; ++i)
728  {
729  CKernel* current=combined->get_kernel(i);
730  /* temporarily replace underlying kernel and compute variance */
731  m_kernel=current;
733  vars(i, 0)=result[0];
734  vars(i, 1)=result[1];
735 
736  SG_UNREF(current);
737  }
738 
739  /* restore combined kernel */
740  m_kernel=combined;
741  SG_UNREF(combined);
742  }
743 
744  return vars;
745 }
746 
748 {
749  float64_t result=0;
750 
752  {
753  case MMD2_SPECTRUM:
754  {
755  /* get samples from null-distribution and compute p-value of statistic */
758  CMath::qsort(null_samples);
759  index_t pos=null_samples.find_position_to_insert(statistic);
760  result=1.0-((float64_t)pos)/null_samples.vlen;
761  break;
762  }
763 
765  {
766  /* get samples from null-distribution and compute p-value of statistic */
769  CMath::qsort(null_samples);
770  index_t pos=null_samples.find_position_to_insert(statistic);
771  result=1.0-((float64_t)pos)/null_samples.vlen;
772  break;
773  }
774 
775  case MMD2_GAMMA:
776  {
777  /* fit gamma and return cdf at statistic */
779  result=CStatistics::gamma_cdf(statistic, params[0], params[1]);
780  break;
781  }
782 
783  default:
784  result=CKernelTwoSampleTest::compute_p_value(statistic);
785  break;
786  }
787 
788  return result;
789 }
790 
792 {
793  float64_t result=0;
794 
796  {
797  case MMD2_SPECTRUM:
798  {
799  /* get samples from null-distribution and compute threshold */
802  CMath::qsort(null_samples);
803  result=null_samples[index_t(CMath::floor(null_samples.vlen*(1-alpha)))];
804  break;
805  }
806 
808  {
809  /* get samples from null-distribution and compute threshold */
812  CMath::qsort(null_samples);
813  result=null_samples[index_t(CMath::floor(null_samples.vlen*(1-alpha)))];
814  break;
815  }
816 
817  case MMD2_GAMMA:
818  {
819  /* fit gamma and return inverse cdf at alpha */
821  result=CStatistics::gamma_inverse_cdf(alpha, params[0], params[1]);
822  break;
823  }
824 
825  default:
826  /* sampling null is handled here */
828  break;
829  }
830 
831  return result;
832 }
833 
834 
836  index_t num_eigenvalues)
837 {
838  REQUIRE(m_kernel, "(%d, %d): No kernel set!\n", num_samples,
839  num_eigenvalues);
841  "(%d, %d): No features set and no custom kernel in use!\n",
842  num_samples, num_eigenvalues);
843 
844  index_t m=m_m;
845  index_t n=0;
846 
847  /* check if kernel is precomputed (custom kernel) */
849  n=m_kernel->get_num_vec_lhs()-m;
850  else
851  {
852  REQUIRE(m_p_and_q, "The samples are not initialized!\n");
853  n=m_p_and_q->get_num_vectors()-m;
854  }
855 
856  if (num_samples<=2)
857  {
858  SG_ERROR("Number of samples has to be at least 2, "
859  "better in the hundreds");
860  }
861 
862  if (num_eigenvalues>m+n-1)
863  SG_ERROR("Number of Eigenvalues too large\n");
864 
865  if (num_eigenvalues<1)
866  SG_ERROR("Number of Eigenvalues too small\n");
867 
868  /* imaginary matrix K=[K KL; KL' L] (MATLAB notation)
869  * K is matrix for XX, L is matrix for YY, KL is XY, LK is YX
870  * works since X and Y are concatenated here */
873 
874  /* center matrix K=H*K*H */
875  K.center();
876 
877  /* compute eigenvalues and select num_eigenvalues largest ones */
878  Map<MatrixXd> c_kernel_matrix(K.matrix, K.num_rows, K.num_cols);
879  SelfAdjointEigenSolver<MatrixXd> eigen_solver(c_kernel_matrix);
880  REQUIRE(eigen_solver.info()==Eigen::Success,
881  "Eigendecomposition failed!\n");
882  index_t max_num_eigenvalues=eigen_solver.eigenvalues().rows();
883 
884  /* finally, sample from null distribution */
885  SGVector<float64_t> null_samples(num_samples);
886  for (index_t i=0; i<num_samples; ++i)
887  {
888  null_samples[i]=0;
889  for (index_t j=0; j<num_eigenvalues; ++j)
890  {
892 
893  SG_DEBUG("z_j=%f\n", z_j);
894 
895  float64_t multiple=CMath::sq(z_j);
896 
897  /* take largest EV, scale by 1/(m+n) on the fly and take abs value*/
898  float64_t eigenvalue_estimate=CMath::abs(1.0/(m+n)
899  *eigen_solver.eigenvalues()[max_num_eigenvalues-1-j]);
900 
902  multiple-=1;
903 
904  SG_DEBUG("multiple=%f, eigenvalue=%f\n", multiple,
905  eigenvalue_estimate);
906 
907  null_samples[i]+=eigenvalue_estimate*multiple;
908  }
909  }
910 
911  return null_samples;
912 }
913 
915  index_t num_samples, index_t num_eigenvalues)
916 {
917  REQUIRE(m_kernel, "(%d, %d): No kernel set!\n", num_samples,
918  num_eigenvalues);
920  "(%d, %d): No features set and no custom kernel in use!\n",
921  num_samples, num_eigenvalues);
922 
923  index_t m=m_m;
924  index_t n=0;
925 
926  /* check if kernel is precomputed (custom kernel) */
928  n=m_kernel->get_num_vec_lhs()-m;
929  else
930  {
931  REQUIRE(m_p_and_q, "The samples are not initialized!\n");
932  n=m_p_and_q->get_num_vectors()-m;
933  }
934 
935  if (num_samples<=2)
936  {
937  SG_ERROR("Number of samples has to be at least 2, "
938  "better in the hundreds");
939  }
940 
941  if (num_eigenvalues>m+n-1)
942  SG_ERROR("Number of Eigenvalues too large\n");
943 
944  if (num_eigenvalues<1)
945  SG_ERROR("Number of Eigenvalues too small\n");
946 
947  /* imaginary matrix K=[K KL; KL' L] (MATLAB notation)
948  * K is matrix for XX, L is matrix for YY, KL is XY, LK is YX
949  * works since X and Y are concatenated here */
952 
953  /* center matrix K=H*K*H */
954  K.center();
955 
956  /* compute eigenvalues and select num_eigenvalues largest ones */
957  Map<MatrixXd> c_kernel_matrix(K.matrix, K.num_rows, K.num_cols);
958  SelfAdjointEigenSolver<MatrixXd> eigen_solver(c_kernel_matrix);
959  REQUIRE(eigen_solver.info()==Eigen::Success,
960  "Eigendecomposition failed!\n");
961  index_t max_num_eigenvalues=eigen_solver.eigenvalues().rows();
962 
963  /* precomputing terms with rho_x and rho_y of equation 10 in [1]
964  * (see documentation) */
965  float64_t rho_x=float64_t(m)/(m+n);
966  float64_t rho_y=1-rho_x;
967 
968  /* instead of using two Gaussian rv's ~ N(0,1), we'll use just one rv
969  * ~ N(0, 1/rho_x+1/rho_y) (derived from eq 10 in [1]) */
970  float64_t std_dev=CMath::sqrt(1/rho_x+1/rho_y);
971  float64_t inv_rho_x_y=1/(rho_x*rho_y);
972 
973  SG_DEBUG("Using Gaussian samples ~ N(0,%f)\n", std_dev*std_dev);
974 
975  /* finally, sample from null distribution */
976  SGVector<float64_t> null_samples(num_samples);
977  for (index_t i=0; i<num_samples; ++i)
978  {
979  null_samples[i]=0;
980  for (index_t j=0; j<num_eigenvalues; ++j)
981  {
982  /* compute the right hand multiple of eq. 10 in [1] using one RV.
983  * randn_double() gives a sample from N(0,1), we need samples
984  * from N(0,1/rho_x+1/rho_y) */
985  float64_t z_j=std_dev*CMath::randn_double();
986 
987  SG_DEBUG("z_j=%f\n", z_j);
988 
989  float64_t multiple=CMath::pow(z_j, 2);
990 
991  /* take largest EV, scale by 1/(m+n) on the fly and take abs value*/
992  float64_t eigenvalue_estimate=CMath::abs(1.0/(m+n)
993  *eigen_solver.eigenvalues()[max_num_eigenvalues-1-j]);
994 
996  multiple-=inv_rho_x_y;
997 
998  SG_DEBUG("multiple=%f, eigenvalue=%f\n", multiple,
999  eigenvalue_estimate);
1000 
1001  null_samples[i]+=eigenvalue_estimate*multiple;
1002  }
1003  }
1004 
1005  /* when m=n, return m*MMD^2 instead */
1006  if (m==n)
1007  null_samples.scale(0.5);
1008 
1009  return null_samples;
1010 }
1011 
1013 {
1014  REQUIRE(m_kernel, "No kernel set!\n");
1016  "No features set and no custom kernel in use!\n");
1017 
1018  index_t n=0;
1019 
1020  /* check if kernel is precomputed (custom kernel) */
1023  else
1024  {
1025  REQUIRE(m_p_and_q, "The samples are not initialized!\n");
1027  }
1028  REQUIRE(m_m==n, "Only possible with equal number of samples "
1029  "from both distribution!\n")
1030 
1031  index_t num_data;
1033  num_data=m_kernel->get_num_vec_rhs();
1034  else
1035  num_data=m_p_and_q->get_num_vectors();
1036 
1037  if (m_m!=num_data/2)
1038  SG_ERROR("Currently, only equal sample sizes are supported\n");
1039 
1040  /* evtl. warn user not to use wrong statistic type */
1042  {
1043  SG_WARNING("Note: provided statistic has "
1044  "to be BIASED. Please ensure that! To get rid of warning,"
1045  "call %s::set_statistic_type(BIASED_DEPRECATED)\n", get_name());
1046  }
1047 
1048  /* imaginary matrix K=[K KL; KL' L] (MATLAB notation)
1049  * K is matrix for XX, L is matrix for YY, KL is XY, LK is YX
1050  * works since X and Y are concatenated here */
1052 
1053  /* compute mean under H0 of MMD, which is
1054  * meanMMD = 2/m * ( 1 - 1/m*sum(diag(KL)) );
1055  * in MATLAB.
1056  * Remove diagonals on the fly */
1057  float64_t mean_mmd=0;
1058  for (index_t i=0; i<m_m; ++i)
1059  {
1060  /* virtual KL matrix is in upper right corner of SHOGUN K matrix
1061  * so this sums the diagonal of the matrix between X and Y*/
1062  mean_mmd+=m_kernel->kernel(i, m_m+i);
1063  }
1064  mean_mmd=2.0/m_m*(1.0-1.0/m_m*mean_mmd);
1065 
1066  /* compute variance under H0 of MMD, which is
1067  * varMMD = 2/m/(m-1) * 1/m/(m-1) * sum(sum( (K + L - KL - KL').^2 ));
1068  * in MATLAB, so sum up all elements */
1069  float64_t var_mmd=0;
1070  for (index_t i=0; i<m_m; ++i)
1071  {
1072  for (index_t j=0; j<m_m; ++j)
1073  {
1074  /* dont add diagonal of all pairs of imaginary kernel matrices */
1075  if (i==j || m_m+i==j || m_m+j==i)
1076  continue;
1077 
1078  float64_t to_add=m_kernel->kernel(i, j);
1079  to_add+=m_kernel->kernel(m_m+i, m_m+j);
1080  to_add-=m_kernel->kernel(i, m_m+j);
1081  to_add-=m_kernel->kernel(m_m+i, j);
1082  var_mmd+=CMath::pow(to_add, 2);
1083  }
1084  }
1085  var_mmd*=2.0/m_m/(m_m-1)*1.0/m_m/(m_m-1);
1086 
1087  /* parameters for gamma distribution */
1088  float64_t a=CMath::pow(mean_mmd, 2)/var_mmd;
1089  float64_t b=var_mmd*m_m / mean_mmd;
1090 
1091  SGVector<float64_t> result(2);
1092  result[0]=a;
1093  result[1]=b;
1094 
1095  return result;
1096 }
1097 
1099  num_samples_spectrum)
1100 {
1101  m_num_samples_spectrum=num_samples_spectrum;
1102 }
1103 
1105  index_t num_eigenvalues_spectrum)
1106 {
1107  m_num_eigenvalues_spectrum=num_eigenvalues_spectrum;
1108 }
1109 
1111  statistic_type)
1112 {
1113  m_statistic_type=statistic_type;
1114 }
1115 
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:98
virtual float64_t compute_threshold(float64_t alpha)
#define SG_INFO(...)
Definition: SGIO.h:118
virtual float64_t compute_p_value(float64_t statistic)
EQuadraticMMDType m_statistic_type
virtual const char * get_name() const
float64_t compute_variance_under_alternative()
int32_t index_t
Definition: common.h:62
SGVector< float64_t > compute_biased_statistic_variance(int m, int n)
The Custom Kernel allows for custom user provided kernel matrices.
Definition: CustomKernel.h:36
float64_t compute_unbiased_statistic(int m, int n)
virtual float64_t compute_statistic()
void set_statistic_type(EQuadraticMMDType statistic_type)
static T sq(T x)
Definition: Math.h:450
Definition: SGMatrix.h:20
SGVector< float64_t > fit_null_gamma()
virtual int32_t get_num_vectors() const =0
float64_t compute_incomplete_statistic(int n)
static float64_t randn_double()
Definition: Math.h:1132
#define SG_ERROR(...)
Definition: SGIO.h:129
#define REQUIRE(x,...)
Definition: SGIO.h:206
index_t num_cols
Definition: SGMatrix.h:376
float64_t kernel(int32_t idx_a, int32_t idx_b)
Definition: Kernel.h:207
Kernel two sample test base class. Provides an interface for performing a two-sample test using a ker...
void scale(T alpha)
Scale vector inplace.
Definition: SGVector.cpp:841
static float64_t gamma_cdf(float64_t x, float64_t a, float64_t b)
Definition: Statistics.cpp:395
virtual int32_t get_num_vec_lhs()
Definition: Kernel.h:517
SGMatrix< float64_t > get_kernel_matrix()
Definition: Kernel.h:220
#define SG_REF(x)
Definition: SGObject.h:51
static float64_t floor(float64_t d)
Definition: Math.h:407
index_t num_rows
Definition: SGMatrix.h:374
SGVector< float64_t > sample_null_spectrum_DEPRECATED(index_t num_samples, index_t num_eigenvalues)
static void qsort(T *output, int32_t size)
Definition: Math.h:1313
SGVector< float64_t > sample_null_spectrum(index_t num_samples, index_t num_eigenvalues)
index_t vlen
Definition: SGVector.h:492
void set_num_eigenvalues_spectrum(index_t num_eigenvalues_spectrum)
double float64_t
Definition: common.h:50
SGVector< float64_t > compute_unbiased_statistic_variance(int m, int n)
The Combined kernel is used to combine a number of kernels into a single CombinedKernel object by lin...
static float64_t gamma_inverse_cdf(float64_t p, float64_t a, float64_t b)
Definition: Statistics.cpp:513
void set_num_samples_spectrum(index_t num_samples_spectrum)
virtual int32_t get_num_vec_rhs()
Definition: Kernel.h:526
index_t find_position_to_insert(T element)
Definition: SGVector.cpp:187
#define SG_UNREF(x)
Definition: SGObject.h:52
virtual float64_t compute_p_value(float64_t statistic)
#define SG_DEBUG(...)
Definition: SGIO.h:107
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
int machine_int_t
Definition: common.h:59
virtual EKernelType get_kernel_type()=0
The class Features is the base class of all feature objects.
Definition: Features.h:68
virtual SGVector< float64_t > compute_variance()
ENullApproximationMethod m_null_approximation_method
The Kernel base class.
Definition: Kernel.h:159
SGVector< float64_t > compute_incomplete_statistic_variance(int n)
#define SG_WARNING(...)
Definition: SGIO.h:128
#define SG_ADD(...)
Definition: SGObject.h:81
static float32_t sqrt(float32_t x)
Definition: Math.h:459
virtual float64_t compute_threshold(float64_t alpha)
static int32_t pow(bool x, int32_t n)
Definition: Math.h:535
float64_t compute_biased_statistic(int m, int n)
static T abs(T a)
Definition: Math.h:179

SHOGUN Machine Learning Toolbox - Documentation