SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
RelaxedTree.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2012 Chiyuan Zhang
8  * Copyright (C) 2012 Chiyuan Zhang
9  */
10 
11 #include <limits>
12 #include <queue>
13 #include <algorithm>
14 #include <functional>
15 
21 
22 using namespace shogun;
23 
// CRelaxedTree constructor (the signature line is elided in this extraction).
// Initializes hyper-parameters: A and B are the relaxation parameters of the
// relaxed-hierarchy objective, m_svm_C / m_svm_epsilon configure the per-node
// binary SVMs, and m_max_num_iter bounds the alternating optimization.
// All pointer members start NULL; SG_ADD registers each scalar parameter with
// shogun's serialization / model-selection machinery.
25  :m_max_num_iter(3), m_A(0.5), m_B(5), m_svm_C(1), m_svm_epsilon(0.001),
26  m_kernel(NULL), m_feats(NULL), m_machine_for_confusion_matrix(NULL), m_num_classes(0)
27 {
28  SG_ADD(&m_max_num_iter, "m_max_num_iter", "max number of iterations in alternating optimization", MS_NOT_AVAILABLE);
29  SG_ADD(&m_svm_C, "m_svm_C", "C for svm", MS_AVAILABLE);
30  SG_ADD(&m_A, "m_A", "parameter A", MS_AVAILABLE);
31  SG_ADD(&m_B, "m_B", "parameter B", MS_AVAILABLE);
32  SG_ADD(&m_svm_epsilon, "m_svm_epsilon", "epsilon for svm", MS_AVAILABLE);
33 }
34 
// Destructor. Its signature line and body lines 37-39 are elided in this
// extraction — presumably SG_UNREF cleanup of the owned members
// (m_kernel, m_feats, m_machine_for_confusion_matrix); TODO confirm against
// the full source.
36 {
40 }
41 
// apply_multiclass (signature line elided in this extraction).
// Classifies every example of `data` (or of the stored features when data is
// NULL) by walking the trained relaxed tree, and returns the predictions as
// multiclass labels.
43 {
44  if (data != NULL)
45  {
46  CDenseFeatures<float64_t> *feats = dynamic_cast<CDenseFeatures<float64_t>*>(data);
47  REQUIRE(feats != NULL, ("Require non-NULL dense features of float64_t\n"))
48  set_features(feats);
49  }
50 
51  // init kernels for all sub-machines
52  for (int32_t i=0; i<m_machines->get_num_elements(); i++)
53  {
54  CSVM *machine = (CSVM*)m_machines->get_element(i);
55  CKernel *kernel = machine->get_kernel();
56  CFeatures* lhs = kernel->get_lhs();
// Re-initialize each sub-SVM kernel against the (possibly new) test features.
57  kernel->init(lhs, m_feats);
58  SG_UNREF(machine);
59  SG_UNREF(kernel);
60  SG_UNREF(lhs);
61  }
62 
// The allocation of `lab` (original line 63) is elided in this extraction —
// presumably a new CMulticlassLabels sized to the number of test examples;
// TODO confirm against the full source.
64  SG_REF(lab);
65  for (int32_t i=0; i < lab->get_num_labels(); ++i)
66  {
// Classify example i by descending the tree.
67  lab->set_int_label(i, int32_t(apply_one(i)));
68  }
69 
70  return lab;
71 }
72 
// apply_one (signature line elided in this extraction; takes the example
// index `idx`). Descends the binary tree from the root: at each node the
// node's SVM decides left (negative response) or right (non-negative). At a
// leaf, the predicted class is read off the node's mu vector, where
// mu[i] in {-1,0} marks classes active on the left, mu[i] in {0,+1} on the
// right, and -2 marks classes already pruned away. Returns -1 if no class
// matches.
74 {
75  bnode_t *node = (bnode_t*) m_root;
76  int32_t klass = -1;
77  while (node != NULL)
78  {
79  CSVM *svm = (CSVM *)m_machines->get_element(node->machine());
80  float64_t result = svm->apply_one(idx);
81 
82  if (result < 0)
83  {
84  // go left
85  if (node->left()) // has left subtree
86  {
87  node = node->left();
88  }
89  else // stop here
90  {
// Leaf: first class still active on the left side (-1 or 0, not pruned).
91  for (int32_t i=0; i < node->data.mu.vlen; ++i)
92  {
93  if (node->data.mu[i] <= 0 && node->data.mu[i] > -2)
94  {
95  klass = i;
96  break;
97  }
98  }
99  node = NULL;
100  }
101  }
102  else
103  {
104  // go right
105  if (node->right())
106  {
107  node = node->right();
108  }
109  else
110  {
// Leaf: first class active on the right side (0 or +1).
111  for (int32_t i=0; i <node->data.mu.vlen; ++i)
112  {
113  if (node->data.mu[i] >= 0)
114  {
115  klass = i;
116  break;
117  }
118  }
119  node = NULL;
120  }
121  }
122 
123  SG_UNREF(svm);
124  }
125 
126  return klass;
127 }
128 
// train_machine (signature line elided in this extraction).
// Trains the relaxed tree: estimates a confusion matrix with the auxiliary
// multiclass machine, trains the root node over all classes, then grows the
// tree breadth-first, splitting each node's active class set into a left
// part (mu in {-1,0}, not pruned) and a right part (mu in {0,+1}) until a
// part has fewer than two classes.
130 {
131  if (m_machine_for_confusion_matrix == NULL)
132  SG_ERROR("Call set_machine_for_confusion_matrix before training\n")
133  if (m_kernel == NULL)
134  SG_ERROR("assign a valid kernel before training\n")
135 
136  if (data)
137  {
138  CDenseFeatures<float64_t> *feats = dynamic_cast<CDenseFeatures<float64_t>*>(data);
139  if (feats == NULL)
140  SG_ERROR("Require non-NULL dense features of float64_t\n")
141  set_features(feats);
142  }
143 
144  CMulticlassLabels *lab = dynamic_cast<CMulticlassLabels *>(m_labels);
145 
146  RelaxedTreeUtil util;
// Original line 147 is elided here — presumably the declaration of
// `conf_mat` via util.estimate_confusion_matrix(...) taking the machine,
// features, labels and class count; TODO confirm against the full source.
148  m_feats, lab, m_num_classes);
149 
150  // train root
// Original line 151 is elided here — presumably
// `SGVector<int32_t> classes(m_num_classes);`; TODO confirm.
152 
153  for (int32_t i=0; i < m_num_classes; ++i)
154  classes[i] = i;
155 
156  SG_UNREF(m_root);
157  m_root = train_node(conf_mat, classes);
158 
159  std::queue<bnode_t *> node_q;
160  node_q.push((bnode_t*) m_root);
161 
162  while (node_q.size() != 0)
163  {
164  bnode_t *node = node_q.front();
165 
166  // left node
167  SGVector <int32_t> left_classes(m_num_classes);
168  int32_t k=0;
169  for (int32_t i=0; i < m_num_classes; ++i)
170  {
171  // active classes are labeled as -1 or 0
172  // -2 indicate classes that are already pruned away
173  if (node->data.mu[i] <= 0 && node->data.mu[i] > -2)
174  left_classes[k++] = i;
175  }
176 
// Shrink the view to the k classes actually collected.
177  left_classes.vlen = k;
178 
179  if (left_classes.vlen >= 2)
180  {
181  bnode_t *left_node = train_node(conf_mat, left_classes);
182  node->left(left_node);
183  node_q.push(left_node);
184  }
185 
186  // right node
187  SGVector <int32_t> right_classes(m_num_classes);
188  k=0;
189  for (int32_t i=0; i < m_num_classes; ++i)
190  {
191  // active classes are labeled as 0 or +1
192  if (node->data.mu[i] >= 0)
193  right_classes[k++] = i;
194  }
195 
196  right_classes.vlen = k;
197 
198  if (right_classes.vlen >= 2)
199  {
200  bnode_t *right_node = train_node(conf_mat, right_classes);
201  node->right(right_node);
202  node_q.push(right_node);
203  }
204 
205  node_q.pop();
206  }
207 
208  //m_root->debug_print(RelaxedTreeNodeData::print_data);
209 
210  return true;
211 }
212 
214 {
215  SGVector<int32_t> best_mu;
216  CSVM *best_svm = NULL;
218 
219  std::vector<CRelaxedTree::entry_t> mu_init = init_node(conf_mat, classes);
220  for (std::vector<CRelaxedTree::entry_t>::const_iterator it = mu_init.begin(); it != mu_init.end(); ++it)
221  {
222  CSVM *svm = new CLibSVM();
223  SG_REF(svm);
224  svm->set_store_model_features(true);
225 
226  SGVector<int32_t> mu = train_node_with_initialization(*it, classes, svm);
227  float64_t score = compute_score(mu, svm);
228 
229  if (score < best_score)
230  {
231  best_score = score;
232  best_mu = mu;
233  SG_UNREF(best_svm);
234  best_svm = svm;
235  }
236  else
237  {
238  SG_UNREF(svm);
239  }
240  }
241 
242  bnode_t *node = new bnode_t;
243  SG_REF(node);
244 
245  m_machines->push_back(best_svm);
246  node->machine(m_machines->get_num_elements()-1);
247 
249  std::fill(&long_mu[0], &long_mu[m_num_classes], -2);
250  for (int32_t i=0; i < best_mu.vlen; ++i)
251  {
252  if (best_mu[i] == 1)
253  long_mu[classes[i]] = 1;
254  else if (best_mu[i] == -1)
255  long_mu[classes[i]] = -1;
256  else if (best_mu[i] == 0)
257  long_mu[classes[i]] = 0;
258  }
259 
260  node->data.mu = long_mu;
261  return node;
262 }
263 
265 {
266  float64_t num_pos=0, num_neg=0;
267  for (int32_t i=0; i < mu.vlen; ++i)
268  {
269  if (mu[i] == 1)
270  num_pos++;
271  else if (mu[i] == -1)
272  num_neg++;
273  }
274 
275  int32_t totalSV = svm->get_support_vectors().vlen;
276  float64_t score = num_neg/(num_neg+num_pos) * totalSV/num_pos +
277  num_pos/(num_neg+num_pos)*totalSV/num_neg;
278  return score;
279 }
280 
282 {
283  SGVector<int32_t> mu(classes.vlen), prev_mu(classes.vlen);
284  mu.zero();
285  mu[mu_entry.first.first] = 1;
286  mu[mu_entry.first.second] = -1;
287 
289  svm->set_C(m_svm_C, m_svm_C);
291 
292  for (int32_t iiter=0; iiter < m_max_num_iter; ++iiter)
293  {
294  long_mu.zero();
295  for (int32_t i=0; i < classes.vlen; ++i)
296  {
297  if (mu[i] == 1)
298  long_mu[classes[i]] = 1;
299  else if (mu[i] == -1)
300  long_mu[classes[i]] = -1;
301  }
302 
305  int32_t k=0;
306 
307  CMulticlassLabels *labs = dynamic_cast<CMulticlassLabels *>(m_labels);
308  for (int32_t i=0; i < binlab.vlen; ++i)
309  {
310  int32_t lab = labs->get_int_label(i);
311  binlab[i] = long_mu[lab];
312  if (long_mu[lab] != 0)
313  subset[k++] = i;
314  }
315 
316  subset.vlen = k;
317 
318  CBinaryLabels *binary_labels = new CBinaryLabels(binlab);
319  SG_REF(binary_labels);
320  binary_labels->add_subset(subset);
321  m_feats->add_subset(subset);
322 
323  CKernel *kernel = (CKernel *)m_kernel->shallow_copy();
324  kernel->init(m_feats, m_feats);
325  svm->set_kernel(kernel);
326  svm->set_labels(binary_labels);
327  svm->train();
328 
329  binary_labels->remove_subset();
331  SG_UNREF(binary_labels);
332 
333  std::copy(&mu[0], &mu[mu.vlen], &prev_mu[0]);
334 
335  mu = color_label_space(svm, classes);
336 
337  bool bbreak = true;
338  for (int32_t i=0; i < mu.vlen; ++i)
339  {
340  if (mu[i] != prev_mu[i])
341  {
342  bbreak = false;
343  break;
344  }
345  }
346 
347  if (bbreak)
348  break;
349  }
350 
351  return mu;
352 }
353 
355 {
356  bool operator() (const CRelaxedTree::entry_t& e1, const CRelaxedTree::entry_t& e2)
357  {
358  return e1.second < e2.second;
359  }
360 };
361 std::vector<CRelaxedTree::entry_t> CRelaxedTree::init_node(const SGMatrix<float64_t> &global_conf_mat, SGVector<int32_t> classes)
362 {
363  // local confusion matrix
364  SGMatrix<float64_t> conf_mat(classes.vlen, classes.vlen);
365  for (index_t i=0; i < conf_mat.num_rows; ++i)
366  {
367  for (index_t j=0; j < conf_mat.num_cols; ++j)
368  {
369  conf_mat(i, j) = global_conf_mat(classes[i], classes[j]);
370  }
371  }
372 
373  // make conf matrix symmetry
374  for (index_t i=0; i < conf_mat.num_rows; ++i)
375  {
376  for (index_t j=0; j < conf_mat.num_cols; ++j)
377  {
378  conf_mat(i,j) += conf_mat(j,i);
379  }
380  }
381 
382  std::vector<CRelaxedTree::entry_t> entries;
383  for (index_t i=0; i < classes.vlen; ++i)
384  {
385  for (index_t j=i+1; j < classes.vlen; ++j)
386  {
387  entries.push_back(std::make_pair(std::make_pair(i, j), conf_mat(i,j)));
388  }
389  }
390 
391  std::sort(entries.begin(), entries.end(), EntryComparator());
392 
393  const size_t max_n_samples = 30;
394  int32_t n_samples = std::min(max_n_samples, entries.size());
395 
396  return std::vector<CRelaxedTree::entry_t>(entries.begin(), entries.begin() + n_samples);
397 }
398 
400 {
401  SGVector<int32_t> mu(classes.vlen);
402  CMulticlassLabels *labels = dynamic_cast<CMulticlassLabels *>(m_labels);
403 
405  ASSERT(resp.vlen == labels->get_num_labels())
406 
407  SGVector<float64_t> xi_pos_class(classes.vlen), xi_neg_class(classes.vlen);
408  SGVector<float64_t> delta_pos(classes.vlen), delta_neg(classes.vlen);
409 
410  for (int32_t i=0; i < classes.vlen; ++i)
411  {
412  // find number of instances from this class
413  int32_t ni=0;
414  for (int32_t j=0; j < labels->get_num_labels(); ++j)
415  {
416  if (labels->get_int_label(j) == classes[i])
417  {
418  ni++;
419  }
420  }
421 
422  xi_pos_class[i] = 0;
423  xi_neg_class[i] = 0;
424  for (int32_t j=0; j < resp.vlen; ++j)
425  {
426  if (labels->get_int_label(j) == classes[i])
427  {
428  xi_pos_class[i] += std::max(0.0, 1 - resp[j]);
429  xi_neg_class[i] += std::max(0.0, 1 + resp[j]);
430  }
431  }
432 
433  delta_pos[i] = 1.0/ni * xi_pos_class[i] - float64_t(m_A)/m_svm_C;
434  delta_neg[i] = 1.0/ni * xi_neg_class[i] - float64_t(m_A)/m_svm_C;
435 
436  if (delta_pos[i] > 0 && delta_neg[i] > 0)
437  {
438  mu[i] = 0;
439  }
440  else
441  {
442  if (delta_pos[i] < delta_neg[i])
443  mu[i] = 1;
444  else
445  mu[i] = -1;
446  }
447 
448  }
449 
450  // enforce balance constraints
451  int32_t B_prime = 0;
452  for (int32_t i=0; i < mu.vlen; ++i)
453  B_prime += mu[i];
454 
455  if (B_prime > m_B)
456  {
457  enforce_balance_constraints_upper(mu, delta_neg, delta_pos, B_prime, xi_neg_class);
458  }
459  if (B_prime < -m_B)
460  {
461  enforce_balance_constraints_lower(mu, delta_neg, delta_pos, B_prime, xi_neg_class);
462  }
463 
464  int32_t npos = 0;
465  for (index_t i=0; i < mu.vlen; ++i)
466  {
467  if (mu[i] == 1)
468  npos++;
469  }
470 
471  if (npos == 0)
472  {
473  // no positive class
474  index_t min_idx = CMath::arg_min(xi_pos_class.vector, 1, xi_pos_class.vlen);
475  mu[min_idx] = 1;
476  }
477 
478  int32_t nneg = 0;
479  for (index_t i=0; i < mu.vlen; ++i)
480  {
481  if (mu[i] == -1)
482  nneg++;
483  }
484 
485  if (nneg == 0)
486  {
487  // no negative class
488  index_t min_idx = CMath::arg_min(xi_neg_class.vector, 1, xi_neg_class.vlen);
489  if (mu[min_idx] == 1 && (npos == 0 || npos == 1))
490  {
491  // avoid overwritting the only positive class
492  float64_t min_val = 0;
493  int32_t i, min_i;
494  for (i=0; i < xi_neg_class.vlen; ++i)
495  {
496  if (mu[i] != 1)
497  {
498  min_val = xi_neg_class[i];
499  break;
500  }
501  }
502  min_i = i;
503  for (i=i+1; i < xi_neg_class.vlen; ++i)
504  {
505  if (mu[i] != 1 && xi_neg_class[i] < min_val)
506  {
507  min_val = xi_neg_class[i];
508  min_i = i;
509  }
510  }
511  mu[min_i] = -1;
512  }
513  else
514  {
515  mu[min_idx] = -1;
516  }
517  }
518 
519  return mu;
520 }
521 
// enforce_balance_constraints_upper (the first line of the signature,
// original line 522, is elided in this extraction; the visible second line
// shows it also takes mu and delta_neg by reference — TODO confirm).
// Greedily moves classes from the +1/0 sides toward -1/0, in order of
// increasing cost (S_delta), until the balance sum d reaches
// B_prime - m_B (mu summed <= m_B). Zero classes contribute one candidate
// step; positive classes contribute two (to 0, then to -1), which is why
// they appear twice in class_index.
523  SGVector<float64_t> &delta_pos, int32_t B_prime, SGVector<float64_t>& xi_neg_class)
524 {
525  SGVector<index_t> index_zero = mu.find(0);
// find_if(bind1st(less<>(), 0)) selects entries with 0 < mu[i], i.e. +1.
526  SGVector<index_t> index_pos = mu.find_if(std::bind1st(std::less<int32_t>(), 0));
527 
528  int32_t num_zero = index_zero.vlen;
529  int32_t num_pos = index_pos.vlen;
530 
531  SGVector<index_t> class_index(num_zero+2*num_pos);
532  std::copy(&index_zero[0], &index_zero[num_zero], &class_index[0]);
533  std::copy(&index_pos[0], &index_pos[num_pos], &class_index[num_zero]);
534  std::copy(&index_pos[0], &index_pos[num_pos], &class_index[num_pos+num_zero]);
535 
536  SGVector<int32_t> orig_mu(num_zero + 2*num_pos);
537  orig_mu.zero();
538  std::fill(&orig_mu[num_zero], &orig_mu[orig_mu.vlen], 1);
539 
540  SGVector<int32_t> delta_steps(num_zero+2*num_pos);
541  std::fill(&delta_steps[0], &delta_steps[delta_steps.vlen], 1);
542 
543  SGVector<int32_t> new_mu(num_zero + 2*num_pos);
544  new_mu.zero();
545  std::fill(&new_mu[0], &new_mu[num_zero], -1);
546 
// S_delta holds the cost of each candidate move; candidates are later
// processed in ascending cost order.
547  SGVector<float64_t> S_delta(num_zero + 2*num_pos);
548  S_delta.zero();
549  for (index_t i=0; i < num_zero; ++i)
550  S_delta[i] = delta_neg[index_zero[i]];
551 
552  for (int32_t i=0; i < num_pos; ++i)
553  {
553  // (see original) two candidate moves per positive class
554  float64_t delta_k = delta_neg[index_pos[i]];
555  float64_t delta_k_0 = -delta_pos[index_pos[i]];
556 
557  index_t tmp_index = num_zero + i*2;
558  if (delta_k_0 <= delta_k)
559  {
// Cheaper to go +1 -> 0 first, then 0 -> -1.
560  new_mu[tmp_index] = 0;
561  new_mu[tmp_index+1] = -1;
562 
563  S_delta[tmp_index] = delta_k_0;
564  S_delta[tmp_index+1] = delta_k;
565 
566  delta_steps[tmp_index] = 1;
567  delta_steps[tmp_index+1] = 1;
568  }
569  else
570  {
// Cheaper to jump +1 -> -1 directly (a 2-step move).
571  new_mu[tmp_index] = -1;
572  new_mu[tmp_index+1] = 0;
573 
574  S_delta[tmp_index] = (delta_k_0+delta_k)/2;
575  S_delta[tmp_index+1] = delta_k_0;
576 
577  delta_steps[tmp_index] = 2;
578  delta_steps[tmp_index+1] = 1;
579  }
580  }
581 
// Sort all candidate moves by cost and permute the parallel arrays.
582  SGVector<index_t> sorted_index = S_delta.argsort();
583  SGVector<float64_t> S_delta_sorted(S_delta.vlen);
584  for (index_t i=0; i < sorted_index.vlen; ++i)
585  {
586  S_delta_sorted[i] = S_delta[sorted_index[i]];
587  new_mu[i] = new_mu[sorted_index[i]];
588  orig_mu[i] = orig_mu[sorted_index[i]];
589  class_index[i] = class_index[sorted_index[i]];
590  delta_steps[i] = delta_steps[sorted_index[i]];
591  }
592 
593  SGVector<int32_t> valid_flag(S_delta.vlen);
594  std::fill(&valid_flag[0], &valid_flag[valid_flag.vlen], 1);
595 
596  int32_t d=0;
597  int32_t ctr=0;
598 
599  while (true)
600  {
// Stop once the deficit B_prime - m_B is covered (exactly or by one).
601  if (d == B_prime - m_B || d == B_prime - m_B + 1)
602  break;
603 
604  while (!valid_flag[ctr])
605  ctr++;
606 
607  if (delta_steps[ctr] == 1)
608  {
609  mu[class_index[ctr]] = new_mu[ctr];
610  d++;
611  }
612  else
613  {
614  // this case should happen only when rho >= 1
615  if (d <= B_prime - m_B - 2)
616  {
// Room for a full 2-step move; invalidate the class's other entry.
617  mu[class_index[ctr]] = new_mu[ctr];
618  ASSERT(new_mu[ctr] == -1)
619  d += 2;
620  for (index_t i=0; i < class_index.vlen; ++i)
621  {
622  if (class_index[i] == class_index[ctr])
623  valid_flag[i] = 0;
624  }
625  }
626  else
627  {
628  float64_t Delta_k_minus = 2*S_delta_sorted[ctr];
629 
630  // find the next smallest Delta_j or Delta_{j,0}
// NOTE(review): this loop has no break, so j ends at the LAST valid
// 1-step entry after ctr (the largest cost, since sorted ascending),
// which contradicts the "next smallest" comment — looks like a
// missing break; TODO confirm against the paper/full source.
631  float64_t Delta_j_min=0;
632  int32_t j=0;
633  for (int32_t itr=ctr+1; itr < S_delta_sorted.vlen; ++itr)
634  {
635  if (valid_flag[itr] == 0)
636  continue;
637 
638  if (delta_steps[itr] == 1)
639  {
640  j=itr;
641  Delta_j_min = S_delta_sorted[j];
642  }
643  }
644 
645  // find the largest Delta_i or Delta_{i,0}
// NOTE(review): similarly, iterating ctr-1 down to 0 without break
// leaves i at the SMALLEST valid index (lowest cost), not the
// largest — likely a missing break as well; TODO confirm.
646  float64_t Delta_i_max = 0;
647  int32_t i=-1;
648  for (int32_t itr=ctr-1; itr >= 0; --itr)
649  {
650  if (delta_steps[itr] == 1 && valid_flag[itr] == 1)
651  {
652  i = itr;
653  Delta_i_max = S_delta_sorted[i];
654  }
655  }
656 
657  // find the l with the largest Delta_l_minus - Delta_l_0
// NOTE(review): numeric_limits<float64_t>::min() is the smallest
// POSITIVE double, not the most negative value; a running-max
// should start from numeric_limits<float64_t>::lowest() (or
// -max()). Negative delta_tmp values would all be rejected here.
658  float64_t Delta_l_max = std::numeric_limits<float64_t>::min();
659  int32_t l=-1;
660  for (int32_t itr=ctr-1; itr >= 0; itr--)
661  {
662  if (delta_steps[itr] == 2)
663  {
664  float64_t delta_tmp = xi_neg_class[class_index[itr]];
665  if (delta_tmp > Delta_l_max)
666  {
667  l = itr;
668  Delta_l_max = delta_tmp;
669  }
670  }
671  }
672 
673  // one-step-min = j
674  if (Delta_j_min <= Delta_k_minus - Delta_i_max &&
675  Delta_j_min <= Delta_k_minus - Delta_l_max)
676  {
677  mu[class_index[j]] = new_mu[j];
678  d++;
679  }
680  else
681  {
682  // one-step-min = Delta_k_minus - Delta_i_max
683  if (Delta_k_minus - Delta_i_max <= Delta_j_min &&
684  Delta_k_minus - Delta_i_max <= Delta_k_minus - Delta_l_max)
685  {
686  mu[class_index[ctr]] = -1;
687  if (i > 0)
688  {
689  mu[class_index[i]] = orig_mu[i];
690  d++;
691  }
692  else
693  {
694  d += 2;
695  }
696  }
697  else
698  {
699  ASSERT(l > 0)
700  mu[class_index[l]] = 0;
701  mu[class_index[ctr]] = -1;
702  d++;
703  }
704  }
705 
706  }
707  }
708  }
709 }
710 
// enforce_balance_constraints_lower (the first line of the signature,
// original line 711, is elided in this extraction; mirror image of
// enforce_balance_constraints_upper — TODO confirm). Moves classes from the
// -1/0 sides toward +1/0, cheapest first, until the balance sum d reaches
// -m_B - B_prime (mu summed >= -m_B).
712  SGVector<float64_t> &delta_pos, int32_t B_prime, SGVector<float64_t>& xi_neg_class)
713 {
714  SGVector<index_t> index_zero = mu.find(0);
// find_if(bind1st(greater<>(), 0)) selects entries with 0 > mu[i], i.e. -1.
715  SGVector<index_t> index_neg = mu.find_if(std::bind1st(std::greater<int32_t>(), 0));
716 
717  int32_t num_zero = index_zero.vlen;
718  int32_t num_neg = index_neg.vlen;
719 
720  SGVector<index_t> class_index(num_zero+2*num_neg);
721  std::copy(&index_zero[0], &index_zero[num_zero], &class_index[0]);
722  std::copy(&index_neg[0], &index_neg[num_neg], &class_index[num_zero]);
723  std::copy(&index_neg[0], &index_neg[num_neg], &class_index[num_neg+num_zero]);
724 
725  SGVector<int32_t> orig_mu(num_zero + 2*num_neg);
726  orig_mu.zero();
727  std::fill(&orig_mu[num_zero], &orig_mu[orig_mu.vlen], -1);
728 
729  SGVector<int32_t> delta_steps(num_zero+2*num_neg);
730  std::fill(&delta_steps[0], &delta_steps[delta_steps.vlen], 1);
731 
732  SGVector<int32_t> new_mu(num_zero + 2*num_neg);
733  new_mu.zero();
734  std::fill(&new_mu[0], &new_mu[num_zero], 1);
735 
// S_delta holds the cost of each candidate move (ascending order later).
736  SGVector<float64_t> S_delta(num_zero + 2*num_neg);
737  S_delta.zero();
738  for (index_t i=0; i < num_zero; ++i)
739  S_delta[i] = delta_pos[index_zero[i]];
740 
741  for (int32_t i=0; i < num_neg; ++i)
742  {
// Two candidate moves per negative class: -1 -> 0 and (then) 0 -> +1,
// or a direct 2-step -1 -> +1 when that is cheaper.
743  float64_t delta_k = delta_pos[index_neg[i]];
744  float64_t delta_k_0 = -delta_neg[index_neg[i]];
745 
746  index_t tmp_index = num_zero + i*2;
747  if (delta_k_0 <= delta_k)
748  {
749  new_mu[tmp_index] = 0;
750  new_mu[tmp_index+1] = 1;
751 
752  S_delta[tmp_index] = delta_k_0;
753  S_delta[tmp_index+1] = delta_k;
754 
755  delta_steps[tmp_index] = 1;
756  delta_steps[tmp_index+1] = 1;
757  }
758  else
759  {
760  new_mu[tmp_index] = 1;
761  new_mu[tmp_index+1] = 0;
762 
763  S_delta[tmp_index] = (delta_k_0+delta_k)/2;
764  S_delta[tmp_index+1] = delta_k_0;
765 
766  delta_steps[tmp_index] = 2;
767  delta_steps[tmp_index+1] = 1;
768  }
769  }
770 
// Sort candidate moves by cost and permute the parallel arrays in step.
771  SGVector<index_t> sorted_index = S_delta.argsort();
772  SGVector<float64_t> S_delta_sorted(S_delta.vlen);
773  for (index_t i=0; i < sorted_index.vlen; ++i)
774  {
775  S_delta_sorted[i] = S_delta[sorted_index[i]];
776  new_mu[i] = new_mu[sorted_index[i]];
777  orig_mu[i] = orig_mu[sorted_index[i]];
778  class_index[i] = class_index[sorted_index[i]];
779  delta_steps[i] = delta_steps[sorted_index[i]];
780  }
781 
782  SGVector<int32_t> valid_flag(S_delta.vlen);
783  std::fill(&valid_flag[0], &valid_flag[valid_flag.vlen], 1);
784 
785  int32_t d=0;
786  int32_t ctr=0;
787 
788  while (true)
789  {
// Stop once the deficit -m_B - B_prime is covered (exactly or by one).
790  if (d == -m_B - B_prime || d == -m_B - B_prime + 1)
791  break;
792 
793  while (!valid_flag[ctr])
794  ctr++;
795 
796  if (delta_steps[ctr] == 1)
797  {
798  mu[class_index[ctr]] = new_mu[ctr];
799  d++;
800  }
801  else
802  {
803  // this case should happen only when rho >= 1
804  if (d >= -m_B - B_prime - 2)
805  {
// Room for a full 2-step move; invalidate the class's other entry.
806  mu[class_index[ctr]] = new_mu[ctr];
807  ASSERT(new_mu[ctr] == 1)
808  d += 2;
809 
810  for (index_t i=0; i < class_index.vlen; ++i)
811  {
812  if (class_index[i] == class_index[ctr])
813  valid_flag[i] = 0;
814  }
815  }
816  else
817  {
818  float64_t Delta_k_minus = 2*S_delta_sorted[ctr];
819 
820  // find the next smallest Delta_j or Delta_{j,0}
// NOTE(review): no break, so j ends at the LAST valid 1-step entry
// (largest cost under ascending sort), contradicting the comment —
// same suspected missing break as in the upper variant; TODO confirm.
821  float64_t Delta_j_min=0;
822  int32_t j=0;
823  for (int32_t itr=ctr+1; itr < S_delta_sorted.vlen; ++itr)
824  {
825  if (valid_flag[itr] == 0)
826  continue;
827 
828  if (delta_steps[itr] == 1)
829  {
830  j=itr;
831  Delta_j_min = S_delta_sorted[j];
832  }
833  }
834 
835  // find the largest Delta_i or Delta_{i,0}
// NOTE(review): descending loop without break leaves i at the
// smallest index (lowest cost), not the largest; TODO confirm.
836  float64_t Delta_i_max = 0;
837  int32_t i=-1;
838  for (int32_t itr=ctr-1; itr >= 0; --itr)
839  {
840  if (delta_steps[itr] == 1 && valid_flag[itr] == 1)
841  {
842  i = itr;
843  Delta_i_max = S_delta_sorted[i];
844  }
845  }
846 
847  // find the l with the largest Delta_l_minus - Delta_l_0
// NOTE(review): numeric_limits<float64_t>::min() is the smallest
// POSITIVE double; a running-max should start from
// numeric_limits<float64_t>::lowest(). Also note this lower variant
// still reads xi_neg_class — verify against the paper whether the
// positive-side losses were intended here.
848  float64_t Delta_l_max = std::numeric_limits<float64_t>::min();
849  int32_t l=-1;
850  for (int32_t itr=ctr-1; itr >= 0; itr--)
851  {
852  if (delta_steps[itr] == 2)
853  {
854  float64_t delta_tmp = xi_neg_class[class_index[itr]];
855  if (delta_tmp > Delta_l_max)
856  {
857  l = itr;
858  Delta_l_max = delta_tmp;
859  }
860  }
861  }
862 
863  // one-step-min = j
864  if (Delta_j_min <= Delta_k_minus - Delta_i_max &&
865  Delta_j_min <= Delta_k_minus - Delta_l_max)
866  {
867  mu[class_index[j]] = new_mu[j];
868  d++;
869  }
870  else
871  {
872  // one-step-min = Delta_k_minus - Delta_i_max
873  if (Delta_k_minus - Delta_i_max <= Delta_j_min &&
874  Delta_k_minus - Delta_i_max <= Delta_k_minus - Delta_l_max)
875  {
// NOTE(review): these assignments write -1, mirroring the upper
// variant verbatim; in this lower variant +1 would be expected —
// verify against the full source/paper.
876  mu[class_index[ctr]] = -1;
877  if (i > 0)
878  {
879  mu[class_index[i]] = orig_mu[i];
880  d++;
881  }
882  else
883  {
884  d += 2;
885  }
886  }
887  else
888  {
889  ASSERT(l > 0)
890  mu[class_index[l]] = 0;
891  mu[class_index[ctr]] = -1;
892  d++;
893  }
894  }
895 
896  }
897  }
898  }
899 }
900 
902 {
904  SGVector<float64_t> resp(lab->get_num_labels());
905  for (int32_t i=0; i < resp.vlen; ++i)
906  resp[i] = lab->get_label(i) - m_A/m_svm_C;
907  SG_UNREF(lab);
908  return resp;
909 }

SHOGUN Machine Learning Toolbox - Documentation