SHOGUN 3.2.1
NeuralNetwork.cpp
1 /*
2  * Copyright (c) 2014, Shogun Toolbox Foundation
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its
16  * contributors may be used to endorse or promote products derived from this
17  * software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Written (W) 2014 Khaled Nasr
32  */
33 
34 #include <shogun/neuralnets/NeuralNetwork.h>
35 #include <shogun/neuralnets/NeuralLayer.h>
36 #include <shogun/labels/BinaryLabels.h>
37 #include <shogun/labels/MulticlassLabels.h>
38 #include <shogun/labels/RegressionLabels.h>
39 #include <shogun/features/DenseFeatures.h>
40 
41 using namespace shogun;
42 
43 CNeuralNetwork::CNeuralNetwork()
44 : CMachine()
45 {
46  init();
47 }
48 
49 CNeuralNetwork::CNeuralNetwork(CDynamicObjectArray* layers)
50 {
51  init();
52  set_layers(layers);
53 }
54 
55 void CNeuralNetwork::set_layers(CDynamicObjectArray* layers)
56 {
57  SG_UNREF(m_layers);
58  SG_REF(layers);
59  m_layers = layers;
60 
61  m_num_layers = m_layers->get_num_elements();
62  m_adj_matrix = SGMatrix<bool>(m_num_layers, m_num_layers);
63  m_adj_matrix.zero();
64 
65  m_num_inputs = 0;
66  for (int32_t i=0; i<m_num_layers; i++)
67  {
68  if (get_layer(i)->is_input())
69  m_num_inputs += get_layer(i)->get_num_neurons();
70  }
71 }
72 
73 void CNeuralNetwork::connect(int32_t i, int32_t j)
74 {
75  REQUIRE(i<j, "i (%i) must be less than j (%i)\n", i, j);
76  m_adj_matrix(i,j) = true;
77 }
78 
79 void CNeuralNetwork::quick_connect()
80 {
81  m_adj_matrix.zero();
82  for (int32_t i=1; i<m_num_layers; i++)
83  m_adj_matrix(i-1, i) = true;
84 }
85 
86 void CNeuralNetwork::disconnect(int32_t i, int32_t j)
87 {
88  m_adj_matrix(i,j) = false;
89 }
90 
91 void CNeuralNetwork::disconnect_all()
92 {
93  m_adj_matrix.zero();
94 }
95 
96 void CNeuralNetwork::initialize(float64_t sigma)
97 {
98  for (int32_t j=0; j<m_num_layers; j++)
99  {
100  if (!get_layer(j)->is_input())
101  {
102  int32_t num_inputs = 0;
103  for (int32_t i=0; i<m_num_layers; i++)
104  num_inputs += m_adj_matrix(i,j);
105 
106  SGVector<int32_t> input_indices(num_inputs);
107 
108  int32_t k = 0;
109  for (int i=0; i<m_num_layers; i++)
110  {
111  if (m_adj_matrix(i,j))
112  {
113  input_indices[k] = i;
114  k++;
115  }
116  }
117 
118  get_layer(j)->initialize(m_layers, input_indices);
119  }
120  }
121 
122  m_index_offsets = SGVector<int32_t>(m_num_layers);
123 
124  m_total_num_parameters = get_layer(0)->get_num_parameters();
125  m_index_offsets[0] = 0;
126  for (int32_t i=1; i<m_num_layers; i++)
127  {
129  m_total_num_parameters += get_layer(i)->get_num_parameters();
130  }
131 
132  m_params = SGVector<float64_t>(m_total_num_parameters);
133  m_param_regularizable = SGVector<bool>(m_total_num_parameters);
134 
135  m_params.zero();
136  m_param_regularizable.set_const(true);
137 
138  for (int32_t i=0; i<m_num_layers; i++)
139  {
140  SGVector<float64_t> layer_param = get_section(m_params, i);
141  SGVector<bool> layer_param_regularizable =
142  get_section(m_param_regularizable, i);
143 
144  get_layer(i)->initialize_parameters(layer_param,
145  layer_param_regularizable, sigma);
146 
147  get_layer(i)->set_batch_size(m_batch_size);
148  }
149 }
150 
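The construction flow above (set_layers(), then connect()/quick_connect(), then initialize()) is driven from user code. A minimal sketch, assuming the CNeuralInputLayer and CNeuralLogisticLayer layer classes shipped with this version of the toolbox, and omitting includes:

    // 4 inputs -> 8 logistic hidden units -> 2 logistic outputs
    CDynamicObjectArray* layers = new CDynamicObjectArray();
    layers->append_element(new CNeuralInputLayer(4));
    layers->append_element(new CNeuralLogisticLayer(8));
    layers->append_element(new CNeuralLogisticLayer(2));

    CNeuralNetwork* net = new CNeuralNetwork(layers);
    net->quick_connect();   // connect each layer i-1 to layer i
    net->initialize(0.01);  // allocate parameters, initialize with sigma = 0.01

quick_connect() can be replaced by explicit connect(i, j) calls (with i < j) to build other acyclic topologies.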
151 CNeuralNetwork::~CNeuralNetwork()
152 {
153  SG_UNREF(m_layers);
154 }
155 
156 CBinaryLabels* CNeuralNetwork::apply_binary(CFeatures* data)
157 {
158  SGMatrix<float64_t> output_activations = forward_propagate(data);
159  SGVector<float64_t> labels_vec(m_batch_size);
160 
161  for (int32_t i=0; i<m_batch_size; i++)
162  {
163  if (get_num_outputs()==1)
164  {
165  if (output_activations[i]>0.5) labels_vec[i] = 1;
166  else labels_vec[i] = -1;
167  }
168  else if (get_num_outputs()==2)
169  {
170  if (output_activations[2*i]>output_activations[2*i+1])
171  labels_vec[i] = 1;
172  else labels_vec[i] = -1;
173  }
174  }
175 
176  return new CBinaryLabels(labels_vec);
177 }
178 
179 CRegressionLabels* CNeuralNetwork::apply_regression(CFeatures* data)
180 {
181  SGMatrix<float64_t> output_activations = forward_propagate(data);
182  SGVector<float64_t> labels_vec(m_batch_size);
183 
184  for (int32_t i=0; i<m_batch_size; i++)
185  labels_vec[i] = output_activations[i];
186 
187  return new CRegressionLabels(labels_vec);
188 }
189 
190 
191 CMulticlassLabels* CNeuralNetwork::apply_multiclass(CFeatures* data)
192 {
193  SGMatrix<float64_t> output_activations = forward_propagate(data);
194  SGVector<float64_t> labels_vec(m_batch_size);
195 
196  for (int32_t i=0; i<m_batch_size; i++)
197  {
198  labels_vec[i] = SGVector<float64_t>::arg_max(
199  output_activations.matrix+i*get_num_outputs(), 1, get_num_outputs());
200  }
201 
202  return new CMulticlassLabels(labels_vec);
203 }
204 
205 CDenseFeatures<float64_t>* CNeuralNetwork::transform(
206  CDenseFeatures<float64_t>* data)
207 {
208  SGMatrix<float64_t> output_activations = forward_propagate(data);
209  return new CDenseFeatures<float64_t>(output_activations);
210 }
211 
212 bool CNeuralNetwork::train_machine(CFeatures* data)
213 {
214  REQUIRE(max_num_epochs>=0,
215  "Maximum number of epochs (%i) must be >= 0\n", max_num_epochs);
216 
217  SGMatrix<float64_t> inputs = features_to_matrix(data);
218  SGMatrix<float64_t> targets = labels_to_matrix(m_labels);
219 
220  for (int32_t i=0; i<m_num_layers-1; i++)
221  {
222  get_layer(i)->dropout_prop =
223  get_layer(i)->is_input() ? dropout_input : dropout_hidden;
224  }
225  get_layer(m_num_layers-1)->dropout_prop = 0.0;
226 
227  m_is_training = true;
228  for (int32_t i=0; i<m_num_layers; i++)
229  get_layer(i)->is_training = true;
230 
231  bool result = false;
232  if (optimization_method==NNOM_GRADIENT_DESCENT)
233  result = train_gradient_descent(inputs, targets);
234  else if (optimization_method==NNOM_LBFGS)
235  result = train_lbfgs(inputs, targets);
236 
237  for (int32_t i=0; i<m_num_layers; i++)
238  get_layer(i)->is_training = false;
239  m_is_training = false;
240 
241  return result;
242 }
243 
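train_machine() above is reached through CMachine::train(). A hedged usage sketch, continuing the construction sketch earlier: the feature and label classes are the standard dense ones already used in this file, the NNOM_* constants are the optimization methods switched on above, and data_matrix/label_vector are hypothetical placeholders for user data:

    // one column per example, m_num_inputs rows
    SGMatrix<float64_t> data_matrix(4, 100);   // filled elsewhere (hypothetical)
    SGVector<float64_t> label_vector(100);     // class indices (hypothetical)

    CDenseFeatures<float64_t>* feats = new CDenseFeatures<float64_t>(data_matrix);
    net->set_labels(new CMulticlassLabels(label_vector));

    net->optimization_method = NNOM_LBFGS;     // or NNOM_GRADIENT_DESCENT
    net->max_num_epochs = 100;                 // 0 = iterate until convergence
    net->epsilon = 1e-5;                       // relative-improvement threshold
    net->train(feats);

    CMulticlassLabels* pred = net->apply_multiclass(feats);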
244 bool CNeuralNetwork::train_gradient_descent(SGMatrix<float64_t> inputs,
245  SGMatrix<float64_t> targets)
246 {
247  REQUIRE(gd_learning_rate>0,
248  "Gradient descent learning rate (%f) must be > 0\n", gd_learning_rate);
249  REQUIRE(gd_momentum>=0,
250  "Gradient descent momentum (%f) must be >= 0\n", gd_momentum);
251 
252  int32_t training_set_size = inputs.num_cols;
253  if (gd_mini_batch_size==0) gd_mini_batch_size = training_set_size;
254  set_batch_size(gd_mini_batch_size);
255 
256  int32_t n_param = get_num_parameters();
257  SGVector<float64_t> gradients(n_param);
258 
259  // needed for momentum
260  SGVector<float64_t> param_updates(n_param);
261  param_updates.zero();
262 
263  float64_t error_last_time = -1.0, error = 0;
264 
265  float64_t c = gd_error_damping_coeff;
266  if (c==-1.0)
267  c = 0.99*(float64_t)gd_mini_batch_size/training_set_size + 1e-2;
268 
269  bool continue_training = true;
270  float64_t alpha = gd_learning_rate;
271 
272  for (int32_t i=0; continue_training; i++)
273  {
274  if (max_num_epochs!=0)
275  if (i>max_num_epochs) break;
276 
277  for (int32_t j=0; j < training_set_size; j += gd_mini_batch_size)
278  {
279  alpha = gd_learning_rate_decay*alpha;
280 
281  if (j+gd_mini_batch_size>training_set_size)
282  j = training_set_size-gd_mini_batch_size;
283 
284  SGMatrix<float64_t> targets_batch(targets.matrix+j*get_num_outputs(),
285  get_num_outputs(), gd_mini_batch_size, false);
286 
287  SGMatrix<float64_t> inputs_batch(inputs.matrix+j*m_num_inputs,
288  m_num_inputs, gd_mini_batch_size, false);
289 
290  for (int32_t k=0; k<n_param; k++)
291  m_params[k] += gd_momentum*param_updates[k];
292 
293  // filter the errors
294  error = (1.0-c) * error +
295  c*compute_gradients(inputs_batch, targets_batch, gradients);
296 
297  for (int32_t k=0; k<n_param; k++)
298  {
299  param_updates[k] = gd_momentum*param_updates[k]
300  -alpha*gradients[k];
301 
302  m_params[k] -= alpha*gradients[k];
303  }
304 
305  if (error_last_time!=-1.0)
306  {
307  float64_t error_change = (error_last_time-error)/error;
308  if (error_change< epsilon && error_change>=0)
309  {
310  SG_INFO("Gradient Descent Optimization Converged\n");
311  continue_training = false;
312  break;
313  }
314 
315  SG_INFO("Epoch %i: Error = %f\n",i, error);
316  }
317  error_last_time = error;
318  }
319  }
320 
321  return true;
322 }
323 
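A note on the update order in the loop above: the momentum step (m_params[k] += gd_momentum*param_updates[k]) is applied before the gradient is evaluated, and alpha*gradients[k] is then subtracted from both the parameters and the running update. Read as update equations (mu = gd_momentum, alpha = the decayed learning rate, E = the filtered mini-batch error), this is the Nesterov-style "look-ahead" form of momentum rather than classical momentum:

    v_{t+1}      = \mu v_t - \alpha \nabla E(\theta_t + \mu v_t)
    \theta_{t+1} = \theta_t + v_{t+1}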
324 bool CNeuralNetwork::train_lbfgs(const SGMatrix<float64_t> inputs,
325  const SGMatrix<float64_t> targets)
326 {
327  int32_t training_set_size = inputs.num_cols;
328  set_batch_size(training_set_size);
329 
330  lbfgs_parameter_t lbfgs_param;
331  lbfgs_parameter_init(&lbfgs_param);
332  lbfgs_param.max_iterations = max_num_epochs;
333  lbfgs_param.epsilon = 0;
334  lbfgs_param.past = 1;
335  lbfgs_param.delta = epsilon;
336 
337  m_lbfgs_temp_inputs = &inputs;
338  m_lbfgs_temp_targets = &targets;
339 
340  int32_t result = lbfgs(m_total_num_parameters,
341  m_params,
342  NULL,
343  &CNeuralNetwork::lbfgs_evaluate,
344  &CNeuralNetwork::lbfgs_progress,
345  this,
346  &lbfgs_param);
347 
348  m_lbfgs_temp_inputs = NULL;
349  m_lbfgs_temp_targets = NULL;
350 
351  if (result==LBFGS_SUCCESS)
352  {
353  SG_INFO("L-BFGS Optimization Converged\n");
354  }
355  else if (result==LBFGSERR_MAXIMUMITERATION)
356  {
357  SG_INFO("L-BFGS Max Number of Epochs reached\n");
358  }
359  else
360  {
361  SG_INFO("L-BFGS optimization ended with return code %i\n",result);
362  }
363  return true;
364 }
365 
366 float64_t CNeuralNetwork::lbfgs_evaluate(void* userdata,
367  const float64_t* W,
368  float64_t* grad,
369  const int32_t n,
370  const float64_t step)
371 {
372  CNeuralNetwork* network = static_cast<CNeuralNetwork*>(userdata);
373 
374  SGVector<float64_t> grad_vector(grad, network->get_num_parameters(), false);
375 
376  return network->compute_gradients(*network->m_lbfgs_temp_inputs,
377  *network->m_lbfgs_temp_targets, grad_vector);
378 }
379 
380 int CNeuralNetwork::lbfgs_progress(void* instance,
381  const float64_t* x,
382  const float64_t* g,
383  const float64_t fx,
384  const float64_t xnorm,
385  const float64_t gnorm,
386  const float64_t step,
387  int n, int k, int ls)
388 {
389  SG_SINFO("Epoch %i: Error = %f\n",k, fx);
390  return 0;
391 }
392 
393 SGMatrix<float64_t> CNeuralNetwork::forward_propagate(CFeatures* data, int32_t j)
394 {
395  SGMatrix<float64_t> inputs = features_to_matrix(data);
396  set_batch_size(inputs.num_cols);
397  return forward_propagate(inputs, j);
398 }
399 
400 SGMatrix<float64_t> CNeuralNetwork::forward_propagate(
401  SGMatrix<float64_t> inputs, int32_t j)
402 {
403  if (j==-1)
404  j = m_num_layers-1;
405 
406  for (int32_t i=0; i<=j; i++)
407  {
408  CNeuralLayer* layer = get_layer(i);
409 
410  if (layer->is_input())
411  layer->compute_activations(inputs);
412  else
413  layer->compute_activations(get_section(m_params, i), m_layers);
414 
415  layer->dropout_activations();
416  }
417 
418  return get_layer(j)->get_activations();
419 }
420 
421 float64_t CNeuralNetwork::compute_gradients(SGMatrix<float64_t> inputs,
422  SGMatrix<float64_t> targets, SGVector<float64_t> gradients)
423 {
424  forward_propagate(inputs);
425 
426  for (int32_t i=0; i<m_num_layers; i++)
427  get_layer(i)->get_activation_gradients().zero();
428 
429  for (int32_t i=m_num_layers-1; i>=0; i--)
430  {
431  if (i==m_num_layers-1)
432  get_layer(i)->compute_gradients(get_section(m_params,i), targets,
433  m_layers, get_section(gradients,i));
434  else
435  get_layer(i)->compute_gradients(get_section(m_params,i),
436  SGMatrix<float64_t>(), m_layers, get_section(gradients,i));
437  }
438 
439  // L2 regularization
440  if (l2_coefficient != 0.0)
441  {
442  for (int32_t i=0; i<m_total_num_parameters; i++)
443  {
444  if (m_param_regularizable[i])
445  gradients[i] += l2_coefficient*m_params[i];
446  }
447  }
448 
449  // L1 regularization
450  if (l1_coefficient != 0.0)
451  {
452  for (int32_t i=0; i<m_total_num_parameters; i++)
453  {
454  if (m_param_regularizable[i])
455  gradients[i] +=
456  l1_coefficient*CMath::sign<float64_t>(m_params[i]);
457  }
458  }
459 
460  // max-norm regularization
461  if (max_norm != -1.0)
462  {
463  for (int32_t i=0; i<m_num_layers; i++)
464  {
465  SGVector<float64_t> layer_params = get_section(m_params,i);
466  get_layer(i)->enforce_max_norm(layer_params, max_norm);
467  }
468  }
469 
470  return compute_error(targets);
471 }
472 
473 float64_t CNeuralNetwork::compute_error(SGMatrix<float64_t> targets)
474 {
475  float64_t error = get_layer(m_num_layers-1)->compute_error(targets);
476 
477  // L2 regularization
478  if (l2_coefficient != 0.0)
479  {
480  for (int32_t i=0; i<m_total_num_parameters; i++)
481  {
482  if (m_param_regularizable[i])
483  error += 0.5*l2_coefficient*m_params[i]*m_params[i];
484  }
485  }
486 
487  // L1 regularization
488  if (l1_coefficient != 0.0)
489  {
490  for (int32_t i=0; i<m_total_num_parameters; i++)
491  {
492  if (m_param_regularizable[i])
493  error += l1_coefficient*CMath::abs(m_params[i]);
494  }
495  }
496 
497  return error;
498 }
499 
500 float64_t CNeuralNetwork::compute_error(SGMatrix<float64_t> inputs,
501  SGMatrix<float64_t> targets)
502 {
503  forward_propagate(inputs);
504  return compute_error(targets);
505 }
506 
507 
508 float64_t CNeuralNetwork::check_gradients(float64_t approx_epsilon, float64_t s)
509 {
510  // some random inputs and outputs
511  SGMatrix<float64_t> x(m_num_inputs, 1);
512  SGMatrix<float64_t> y(get_num_outputs(), 1);
513 
514  for (int32_t i=0; i<x.num_rows; i++)
515  x[i] = CMath::random(0.0,1.0);
516 
517  // the outputs are set up in the form of a probability distribution (in case
518  // that is required by the output layer, i.e. softmax)
519  for (int32_t i=0; i<y.num_rows; i++)
520  y[i] = CMath::random(0.0,1.0);
521 
522  float64_t y_sum = SGVector<float64_t>::sum(y.matrix, y.num_rows);
523  for (int32_t i=0; i<y.num_rows; i++)
524  y[i] /= y_sum;
525 
526  set_batch_size(1);
527 
528  // numerically compute gradients
529  SGVector<float64_t> gradients_numerical(m_total_num_parameters);
530 
531  for (int32_t i=0; i<m_total_num_parameters; i++)
532  {
533  float64_t c =
534  CMath::max<float64_t>(CMath::abs(approx_epsilon*m_params[i]),s);
535 
536  m_params[i] += c;
537  float64_t error_plus = compute_error(x,y);
538  m_params[i] -= 2*c;
539  float64_t error_minus = compute_error(x,y);
540  m_params[i] += c;
541 
542  gradients_numerical[i] = (error_plus-error_minus)/(2*c);
543  }
544 
545  // compute gradients using backpropagation
546  SGVector<float64_t> gradients_backprop(m_total_num_parameters);
547  compute_gradients(x, y, gradients_backprop);
548 
549  float64_t sum = 0.0;
550  for (int32_t i=0; i<m_total_num_parameters; i++)
551  {
552  sum += CMath::abs(gradients_backprop[i]-gradients_numerical[i]);
553  }
554 
555  return sum/m_total_num_parameters;
556 }
557 
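check_gradients() above returns the mean absolute deviation between the backpropagated gradients and central-difference estimates taken at a single random point. In formula form, with the per-parameter step c_i = max(|approx_epsilon * theta_i|, s):

    \hat{g}_i = \frac{E(\theta + c_i e_i) - E(\theta - c_i e_i)}{2 c_i},
    \qquad \text{return value} = \frac{1}{N}\sum_{i=1}^{N} |g_i - \hat{g}_i|

so a value close to zero indicates that the analytic and numerical gradients agree.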
558 void CNeuralNetwork::set_batch_size(int32_t batch_size)
559 {
560  if (batch_size!=m_batch_size)
561  {
562  m_batch_size = batch_size;
563  for (int32_t i=0; i<m_num_layers; i++)
564  get_layer(i)->set_batch_size(batch_size);
565  }
566 }
567 
568 SGMatrix<float64_t> CNeuralNetwork::features_to_matrix(CFeatures* features)
569 {
570  REQUIRE(features != NULL, "Invalid (NULL) feature pointer\n");
571  REQUIRE(features->get_feature_type() == F_DREAL,
572  "Feature type must be F_DREAL\n");
573  REQUIRE(features->get_feature_class() == C_DENSE,
574  "Feature class must be C_DENSE\n");
575 
576  CDenseFeatures<float64_t>* inputs = (CDenseFeatures<float64_t>*) features;
577  REQUIRE(inputs->get_num_features()==m_num_inputs,
578  "Number of features (%i) must match the network's number of inputs "
579  "(%i)\n", inputs->get_num_features(), get_num_inputs());
580 
581  return inputs->get_feature_matrix();
582 }
583 
584 SGMatrix<float64_t> CNeuralNetwork::labels_to_matrix(CLabels* labs)
585 {
586  REQUIRE(labs != NULL, "Invalid (NULL) labels pointer\n");
587 
588  SGMatrix<float64_t> targets(get_num_outputs(), labs->get_num_labels());
589  targets.zero();
590 
591  if (labs->get_label_type() == LT_MULTICLASS)
592  {
593  CMulticlassLabels* labels_mc = (CMulticlassLabels*) labs;
594  REQUIRE(labels_mc->get_num_classes()==get_num_outputs(),
595  "Number of classes (%i) must match the network's number of "
596  "outputs (%i)\n", labels_mc->get_num_classes(), get_num_outputs());
597 
598  for (int32_t i=0; i<labels_mc->get_num_labels(); i++)
599  targets[((int32_t)labels_mc->get_label(i))+ i*get_num_outputs()]
600  = 1.0;
601  }
602  else if (labs->get_label_type() == LT_BINARY)
603  {
604  CBinaryLabels* labels_bin = (CBinaryLabels*) labs;
605  if (get_num_outputs()==1)
606  {
607  for (int32_t i=0; i<labels_bin->get_num_labels(); i++)
608  targets[i] = (labels_bin->get_label(i)==1);
609  }
610  else if (get_num_outputs()==2)
611  {
612  for (int32_t i=0; i<labels_bin->get_num_labels(); i++)
613  {
614  targets[i*2] = (labels_bin->get_label(i)==1);
615  targets[i*2+1] = (labels_bin->get_label(i)==-1);
616  }
617  }
618  }
619  else if (labs->get_label_type() == LT_REGRESSION)
620  {
621  CRegressionLabels* labels_reg = (CRegressionLabels*) labs;
622  for (int32_t i=0; i<labels_reg->get_num_labels(); i++)
623  targets[i] = labels_reg->get_label(i);
624  }
625 
626  return targets;
627 }
628 
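For concreteness, the target matrix built above is column-major with one column per example and one row per output neuron: multiclass label k puts a 1 in row k of that example's column, binary labels with two outputs map +1 to (1,0) and -1 to (0,1), and regression labels are copied into the single output row. A tiny hypothetical 3-class illustration:

    SGVector<float64_t> lab_vec(3);
    lab_vec[0]=0; lab_vec[1]=2; lab_vec[2]=1;
    CMulticlassLabels* labs = new CMulticlassLabels(lab_vec);
    // the routine above then produces (rows = outputs, columns = examples):
    // 1 0 0
    // 0 0 1
    // 0 1 0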
629 EProblemType CNeuralNetwork::get_machine_problem_type() const
630 {
631  // problem type depends on the type of labels given to the network
632  // if no labels are given yet, just return PT_MULTICLASS
633  if (m_labels==NULL)
634  return PT_MULTICLASS;
635 
636  if (m_labels->get_label_type() == LT_BINARY)
637  return PT_BINARY;
638  else if (m_labels->get_label_type() == LT_REGRESSION)
639  return PT_REGRESSION;
640  else return PT_MULTICLASS;
641 }
642 
643 bool CNeuralNetwork::is_label_valid(CLabels* lab) const
644 {
645  return (lab->get_label_type() == LT_MULTICLASS ||
646  lab->get_label_type() == LT_BINARY ||
647  lab->get_label_type() == LT_REGRESSION);
648 }
649 
650 void CNeuralNetwork::set_labels(CLabels* lab)
651 {
652  if (lab->get_label_type() == LT_BINARY)
653  {
654  REQUIRE(get_num_outputs() <= 2, "Cannot use %s in a neural network "
655  "with more than 2 output neurons\n", lab->get_name());
656  }
657  else if (lab->get_label_type() == LT_REGRESSION)
658  {
659  REQUIRE(get_num_outputs() == 1, "Cannot use %s in a neural network "
660  "with more than 1 output neuron\n", lab->get_name());
661  }
662 
663  CMachine::set_labels(lab);
664 }
665 
666 SGVector<float64_t>* CNeuralNetwork::get_layer_parameters(int32_t i)
667 {
668  REQUIRE(i<m_num_layers && i >= 0, "Layer index (%i) out of range\n", i);
669 
670  int32_t n = get_layer(i)->get_num_parameters();
671  SGVector<float64_t>* p = new SGVector<float64_t>(n);
672 
673  memcpy(p->vector, get_section(m_params, i), n*sizeof(float64_t));
674  return p;
675 }
676 
677 CNeuralLayer* CNeuralNetwork::get_layer(int32_t i)
678 {
679  CNeuralLayer* layer = (CNeuralLayer*)m_layers->element(i);
680  // needed because m_layers->element(i) increases the reference count of
681  // layer i
682  SG_UNREF(layer);
683  return layer;
684 }
685 
686 template <class T>
687 SGVector<T> CNeuralNetwork::get_section(SGVector<T> v, int32_t i)
688 {
689  return SGVector<T>(v.vector+m_index_offsets[i],
690  get_layer(i)->get_num_parameters(), false);
691 }
692 
693 int32_t CNeuralNetwork::get_num_outputs()
694 {
695  return get_layer(m_num_layers-1)->get_num_neurons();
696 }
697 
698 CDynamicObjectArray* CNeuralNetwork::get_layers()
699 {
700  SG_REF(m_layers);
701  return m_layers;
702 }
703 
704 void CNeuralNetwork::init()
705 {
706  optimization_method = NNOM_LBFGS;
707  dropout_hidden = 0.0;
708  dropout_input = 0.0;
709  max_norm = -1.0;
710  l2_coefficient = 0.0;
711  l1_coefficient = 0.0;
712  gd_mini_batch_size = 0;
713  max_num_epochs = 0;
714  gd_learning_rate = 0.1;
715  gd_learning_rate_decay = 1.0;
716  gd_momentum = 0.9;
717  gd_error_damping_coeff = -1.0;
718  epsilon = 1.0e-5;
719  m_num_inputs = 0;
720  m_num_layers = 0;
721  m_layers = NULL;
722  m_total_num_parameters = 0;
723  m_batch_size = 1;
724  m_lbfgs_temp_inputs = NULL;
725  m_lbfgs_temp_targets = NULL;
726  m_is_training = false;
727 
728  SG_ADD((machine_int_t*)&optimization_method, "optimization_method",
729  "Optimization Method", MS_NOT_AVAILABLE);
730  SG_ADD(&gd_mini_batch_size, "gd_mini_batch_size",
731  "Gradient Descent Mini-batch size", MS_NOT_AVAILABLE);
732  SG_ADD(&max_num_epochs, "max_num_epochs",
733  "Max number of Epochs", MS_NOT_AVAILABLE);
734  SG_ADD(&gd_learning_rate, "gd_learning_rate",
735  "Gradient descent learning rate", MS_NOT_AVAILABLE);
736  SG_ADD(&gd_learning_rate_decay, "gd_learning_rate_decay",
737  "Gradient descent learning rate decay", MS_NOT_AVAILABLE);
738  SG_ADD(&gd_momentum, "gd_momentum",
739  "Gradient Descent Momentum", MS_NOT_AVAILABLE);
740  SG_ADD(&gd_error_damping_coeff, "gd_error_damping_coeff",
741  "Gradient Descent Error Damping Coeff", MS_NOT_AVAILABLE);
742  SG_ADD(&epsilon, "epsilon",
743  "Epsilon", MS_NOT_AVAILABLE);
744  SG_ADD(&m_num_inputs, "num_inputs",
745  "Number of Inputs", MS_NOT_AVAILABLE);
746  SG_ADD(&m_num_layers, "num_layers",
747  "Number of Layers", MS_NOT_AVAILABLE);
748  SG_ADD(&m_adj_matrix, "adj_matrix",
749  "Adjacency Matrix", MS_NOT_AVAILABLE);
750  SG_ADD(&l2_coefficient, "l2_coefficient",
751  "L2 regularization coeff", MS_NOT_AVAILABLE);
752  SG_ADD(&l1_coefficient, "l1_coefficient",
753  "L1 regularization coeff", MS_NOT_AVAILABLE);
754  SG_ADD(&dropout_hidden, "dropout_hidden",
755  "Hidden neuron dropout probability", MS_NOT_AVAILABLE);
756  SG_ADD(&dropout_input, "dropout_input",
757  "Input neuron dropout probability", MS_NOT_AVAILABLE);
758  SG_ADD(&max_norm, "max_norm",
759  "Max Norm", MS_NOT_AVAILABLE);
760  SG_ADD(&m_total_num_parameters, "total_num_parameters",
761  "Total number of parameters", MS_NOT_AVAILABLE);
762  SG_ADD(&m_batch_size, "batch_size",
763  "Batch Size", MS_NOT_AVAILABLE);
764  SG_ADD(&m_index_offsets, "index_offsets",
765  "Index Offsets", MS_NOT_AVAILABLE);
766  SG_ADD(&m_params, "params",
767  "Parameters", MS_NOT_AVAILABLE);
768  SG_ADD(&m_param_regularizable, "param_regularizable",
769  "Parameter Regularizable", MS_NOT_AVAILABLE);
770  SG_ADD((CSGObject**)&m_layers, "layers",
771  "DynamicObjectArray of NeuralLayer objects",
772  MS_NOT_AVAILABLE);
773  SG_ADD(&m_is_training, "is_training",
774  "is_training", MS_NOT_AVAILABLE);
775 }
