SHOGUN  3.2.1
NeuralNetwork.cpp
/*
 * Copyright (c) 2014, Shogun Toolbox Foundation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Written (W) 2014 Khaled Nasr
 */

#include <shogun/neuralnets/NeuralNetwork.h>
#include <shogun/neuralnets/NeuralLayer.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/labels/MulticlassLabels.h>
#include <shogun/labels/RegressionLabels.h>
#include <shogun/mathematics/Math.h>
#include <shogun/optimization/lbfgs/lbfgs.h>

using namespace shogun;

CNeuralNetwork::CNeuralNetwork()
: CMachine()
{
    init();
}

CNeuralNetwork::CNeuralNetwork(CDynamicObjectArray* layers)
{
    init();
    set_layers(layers);
}

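// Sets the layers of the network: releases the reference to any previously
// held layer array, takes a reference to the new one, and recomputes the
// layer count, the (cleared) adjacency matrix and the number of input neurons.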
void CNeuralNetwork::set_layers(CDynamicObjectArray* layers)
{
    REQUIRE(layers, "Layers should not be NULL")

    SG_UNREF(m_layers);
    SG_REF(layers);
    m_layers = layers;

    m_num_layers = m_layers->get_num_elements();
    m_adj_matrix = SGMatrix<bool>(m_num_layers, m_num_layers);
    m_adj_matrix.zero();

    m_num_inputs = 0;
    for (int32_t i=0; i<m_num_layers; i++)
    {
        if (get_layer(i)->is_input())
            m_num_inputs += get_layer(i)->get_num_neurons();
    }
}

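// Layer connectivity is stored in m_adj_matrix: m_adj_matrix(i,j)==true means
// that layer i feeds into layer j. Since i must be less than j, the layers
// always form a directed acyclic graph that can be evaluated in index order.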
void CNeuralNetwork::connect(int32_t i, int32_t j)
{
    REQUIRE(i<j, "i (%i) must be less than j (%i)\n", i, j);
    m_adj_matrix(i,j) = true;
}

void CNeuralNetwork::quick_connect()
{
    m_adj_matrix.zero();
    for (int32_t i=1; i<m_num_layers; i++)
        m_adj_matrix(i-1, i) = true;
}

void CNeuralNetwork::disconnect(int32_t i, int32_t j)
{
    m_adj_matrix(i,j) = false;
}

void CNeuralNetwork::disconnect_all()
{
    m_adj_matrix.zero();
}

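// Initializes the network after the connections have been set up: tells each
// non-input layer which layers it takes input from, lays all layer parameters
// out in a single flat vector (m_params) using per-layer offsets, and fills
// them with small random values of standard deviation sigma.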
void CNeuralNetwork::initialize(float64_t sigma)
{
    for (int32_t j=0; j<m_num_layers; j++)
    {
        if (!get_layer(j)->is_input())
        {
            int32_t num_inputs = 0;
            for (int32_t i=0; i<m_num_layers; i++)
                num_inputs += m_adj_matrix(i,j);

            SGVector<int32_t> input_indices(num_inputs);

            int32_t k = 0;
            for (int32_t i=0; i<m_num_layers; i++)
            {
                if (m_adj_matrix(i,j))
                {
                    input_indices[k] = i;
                    k++;
                }
            }

            get_layer(j)->initialize(m_layers, input_indices);
        }
    }

    m_index_offsets = SGVector<int32_t>(m_num_layers);

    m_total_num_parameters = get_layer(0)->get_num_parameters();
    m_index_offsets[0] = 0;
    for (int32_t i=1; i<m_num_layers; i++)
    {
        m_index_offsets[i] = m_index_offsets[i-1] + get_layer(i-1)->get_num_parameters();
        m_total_num_parameters += get_layer(i)->get_num_parameters();
    }

    m_params = SGVector<float64_t>(m_total_num_parameters);
    m_param_regularizable = SGVector<bool>(m_total_num_parameters);

    m_params.zero();
    m_param_regularizable.set_const(true);

    for (int32_t i=0; i<m_num_layers; i++)
    {
        SGVector<float64_t> layer_param = get_section(m_params, i);
        SGVector<bool> layer_param_regularizable =
            get_section(m_param_regularizable, i);

        get_layer(i)->initialize_parameters(layer_param,
            layer_param_regularizable, sigma);

        get_layer(i)->set_batch_size(m_batch_size);
    }
}

CNeuralNetwork::~CNeuralNetwork()
{
    SG_UNREF(m_layers);
}

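// Binary classification uses either one output neuron (activation thresholded
// at 0.5) or two output neurons (the larger activation wins).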
CBinaryLabels* CNeuralNetwork::apply_binary(CFeatures* data)
{
    SGMatrix<float64_t> output_activations = forward_propagate(data);
    SGVector<float64_t> labels_vec(m_batch_size);

    for (int32_t i=0; i<m_batch_size; i++)
    {
        if (get_num_outputs()==1)
        {
            if (output_activations[i]>0.5) labels_vec[i] = 1;
            else labels_vec[i] = -1;
        }
        else if (get_num_outputs()==2)
        {
            if (output_activations[2*i]>output_activations[2*i+1])
                labels_vec[i] = 1;
            else labels_vec[i] = -1;
        }
    }

    return new CBinaryLabels(labels_vec);
}

CRegressionLabels* CNeuralNetwork::apply_regression(CFeatures* data)
{
    SGMatrix<float64_t> output_activations = forward_propagate(data);
    SGVector<float64_t> labels_vec(m_batch_size);

    for (int32_t i=0; i<m_batch_size; i++)
        labels_vec[i] = output_activations[i];

    return new CRegressionLabels(labels_vec);
}


CMulticlassLabels* CNeuralNetwork::apply_multiclass(CFeatures* data)
{
    SGMatrix<float64_t> output_activations = forward_propagate(data);
    SGVector<float64_t> labels_vec(m_batch_size);

    for (int32_t i=0; i<m_batch_size; i++)
    {
        labels_vec[i] = SGVector<float64_t>::arg_max(
            output_activations.matrix+i*get_num_outputs(), 1, get_num_outputs());
    }

    return new CMulticlassLabels(labels_vec);
}

CDenseFeatures<float64_t>* CNeuralNetwork::transform(
    CDenseFeatures<float64_t>* data)
{
    SGMatrix<float64_t> output_activations = forward_propagate(data);
    return new CDenseFeatures<float64_t>(output_activations);
}

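// Called by CMachine::train(): converts the features and labels into matrices,
// sets the per-layer dropout probabilities, and dispatches to gradient descent
// or L-BFGS according to optimization_method.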
bool CNeuralNetwork::train_machine(CFeatures* data)
{
    REQUIRE(max_num_epochs>=0,
        "Maximum number of epochs (%i) must be >= 0\n", max_num_epochs);

    SGMatrix<float64_t> inputs = features_to_matrix(data);
    SGMatrix<float64_t> targets = labels_to_matrix(m_labels);

    for (int32_t i=0; i<m_num_layers-1; i++)
    {
        get_layer(i)->dropout_prop =
            get_layer(i)->is_input() ? dropout_input : dropout_hidden;
    }
    get_layer(m_num_layers-1)->dropout_prop = 0.0;

    m_is_training = true;
    for (int32_t i=0; i<m_num_layers; i++)
        get_layer(i)->is_training = true;

    bool result = false;
    if (optimization_method==NNOM_GRADIENT_DESCENT)
        result = train_gradient_descent(inputs, targets);
    else if (optimization_method==NNOM_LBFGS)
        result = train_lbfgs(inputs, targets);

    for (int32_t i=0; i<m_num_layers; i++)
        get_layer(i)->is_training = false;
    m_is_training = false;

    return result;
}

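// Mini-batch gradient descent with momentum and multiplicative learning rate
// decay. The reported error is an exponentially damped running estimate;
// training stops when its relative decrease falls below epsilon.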
bool CNeuralNetwork::train_gradient_descent(SGMatrix<float64_t> inputs,
    SGMatrix<float64_t> targets)
{
    REQUIRE(gd_learning_rate>0,
        "Gradient descent learning rate (%f) must be > 0\n", gd_learning_rate);
    REQUIRE(gd_momentum>=0,
        "Gradient descent momentum (%f) must be >= 0\n", gd_momentum);

    int32_t training_set_size = inputs.num_cols;
    if (gd_mini_batch_size==0) gd_mini_batch_size = training_set_size;
    set_batch_size(gd_mini_batch_size);

    int32_t n_param = get_num_parameters();
    SGVector<float64_t> gradients(n_param);

    // needed for momentum
    SGVector<float64_t> param_updates(n_param);
    param_updates.zero();

    float64_t error_last_time = -1.0, error = -1.0;

    float64_t c = gd_error_damping_coeff;
    if (c==-1.0)
        c = 0.99*(float64_t)gd_mini_batch_size/training_set_size + 1e-2;

    bool continue_training = true;
    float64_t alpha = gd_learning_rate;

    for (int32_t i=0; continue_training; i++)
    {
        if (max_num_epochs!=0)
            if (i>=max_num_epochs) break;

        for (int32_t j=0; j < training_set_size; j += gd_mini_batch_size)
        {
            alpha = gd_learning_rate_decay*alpha;

            if (j+gd_mini_batch_size>training_set_size)
                j = training_set_size-gd_mini_batch_size;

            SGMatrix<float64_t> targets_batch(targets.matrix+j*get_num_outputs(),
                get_num_outputs(), gd_mini_batch_size, false);

            SGMatrix<float64_t> inputs_batch(inputs.matrix+j*m_num_inputs,
                m_num_inputs, gd_mini_batch_size, false);

            for (int32_t k=0; k<n_param; k++)
                m_params[k] += gd_momentum*param_updates[k];

            float64_t e = compute_gradients(inputs_batch, targets_batch, gradients);

            // filter the errors
            if (error==-1.0)
                error = e;
            else
                error = (1.0-c)*error + c*e;

            for (int32_t k=0; k<n_param; k++)
            {
                param_updates[k] = gd_momentum*param_updates[k]
                    -alpha*gradients[k];

                m_params[k] -= alpha*gradients[k];
            }

            if (error_last_time!=-1.0)
            {
                float64_t error_change = (error_last_time-error)/error;
                if (error_change < epsilon && error_change >= 0)
                {
                    SG_INFO("Gradient Descent Optimization Converged\n");
                    continue_training = false;
                    break;
                }

                SG_INFO("Epoch %i: Error = %f\n", i, error);
            }
            error_last_time = error;
        }
    }

    return true;
}
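
// Full-batch training using the bundled L-BFGS optimizer. The optimizer runs
// for at most max_num_epochs iterations and stops early when the relative
// decrease in error over one iteration (lbfgs_param.delta) falls below epsilon.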
bool CNeuralNetwork::train_lbfgs(const SGMatrix<float64_t> inputs,
    const SGMatrix<float64_t> targets)
{
    int32_t training_set_size = inputs.num_cols;
    set_batch_size(training_set_size);

    lbfgs_parameter_t lbfgs_param;
    lbfgs_parameter_init(&lbfgs_param);
    lbfgs_param.max_iterations = max_num_epochs;
    lbfgs_param.epsilon = 0;
    lbfgs_param.past = 1;
    lbfgs_param.delta = epsilon;

    m_lbfgs_temp_inputs = &inputs;
    m_lbfgs_temp_targets = &targets;

    int32_t result = lbfgs(m_total_num_parameters,
        m_params,
        NULL,
        &CNeuralNetwork::lbfgs_evaluate,
        &CNeuralNetwork::lbfgs_progress,
        this,
        &lbfgs_param);

    m_lbfgs_temp_inputs = NULL;
    m_lbfgs_temp_targets = NULL;

    if (result==LBFGS_SUCCESS)
    {
        SG_INFO("L-BFGS Optimization Converged\n");
    }
    else if (result==LBFGSERR_MAXIMUMITERATION)
    {
        SG_INFO("L-BFGS Max Number of Epochs reached\n");
    }
    else
    {
        SG_INFO("L-BFGS optimization ended with return code %i\n", result);
    }
    return true;
}

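// Static callbacks passed to the L-BFGS optimizer: lbfgs_evaluate computes the
// error and writes the gradient into the optimizer-supplied buffer, while
// lbfgs_progress simply logs the error once per iteration.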
float64_t CNeuralNetwork::lbfgs_evaluate(void* userdata,
    const float64_t* W,
    float64_t* grad,
    const int32_t n,
    const float64_t step)
{
    CNeuralNetwork* network = static_cast<CNeuralNetwork*>(userdata);

    SGVector<float64_t> grad_vector(grad, network->get_num_parameters(), false);

    return network->compute_gradients(*network->m_lbfgs_temp_inputs,
        *network->m_lbfgs_temp_targets, grad_vector);
}

int CNeuralNetwork::lbfgs_progress(void* instance,
    const float64_t* x,
    const float64_t* g,
    const float64_t fx,
    const float64_t xnorm,
    const float64_t gnorm,
    const float64_t step,
    int n, int k, int ls)
{
    SG_SINFO("Epoch %i: Error = %f\n", k, fx);
    return 0;
}

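// Forward propagation: computes the activations of layers 0..j (j==-1 means
// the whole network) and returns the activations of layer j.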
SGMatrix<float64_t> CNeuralNetwork::forward_propagate(CFeatures* data, int32_t j)
{
    SGMatrix<float64_t> inputs = features_to_matrix(data);
    set_batch_size(inputs.num_cols);
    return forward_propagate(inputs, j);
}

SGMatrix<float64_t> CNeuralNetwork::forward_propagate(
    SGMatrix<float64_t> inputs, int32_t j)
{
    if (j==-1)
        j = m_num_layers-1;

    for (int32_t i=0; i<=j; i++)
    {
        CNeuralLayer* layer = get_layer(i);

        if (layer->is_input())
            layer->compute_activations(inputs);
        else
            layer->compute_activations(get_section(m_params, i), m_layers);

        layer->dropout_activations();
    }

    return get_layer(j)->get_activations();
}

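// Backpropagation: computes the gradients of the error with respect to all
// parameters, adds the L1/L2 regularization terms, enforces the max-norm
// constraint if enabled, and returns the error on the given batch.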
float64_t CNeuralNetwork::compute_gradients(SGMatrix<float64_t> inputs,
    SGMatrix<float64_t> targets, SGVector<float64_t> gradients)
{
    forward_propagate(inputs);

    // reset the accumulated activation gradients before backpropagating
    for (int32_t i=0; i<m_num_layers; i++)
        get_layer(i)->get_activation_gradients().zero();

    for (int32_t i=m_num_layers-1; i>=0; i--)
    {
        if (i==m_num_layers-1)
            get_layer(i)->compute_gradients(get_section(m_params,i), targets,
                m_layers, get_section(gradients,i));
        else
            get_layer(i)->compute_gradients(get_section(m_params,i),
                SGMatrix<float64_t>(), m_layers, get_section(gradients,i));
    }

    // L2 regularization
    if (l2_coefficient != 0.0)
    {
        for (int32_t i=0; i<m_total_num_parameters; i++)
        {
            if (m_param_regularizable[i])
                gradients[i] += l2_coefficient*m_params[i];
        }
    }

    // L1 regularization
    if (l1_coefficient != 0.0)
    {
        for (int32_t i=0; i<m_total_num_parameters; i++)
        {
            if (m_param_regularizable[i])
                gradients[i] +=
                    l1_coefficient*CMath::sign<float64_t>(m_params[i]);
        }
    }

    // max-norm regularization
    if (max_norm != -1.0)
    {
        for (int32_t i=0; i<m_num_layers; i++)
        {
            SGVector<float64_t> layer_params = get_section(m_params,i);
            get_layer(i)->enforce_max_norm(layer_params, max_norm);
        }
    }

    return compute_error(targets);
}

float64_t CNeuralNetwork::compute_error(SGMatrix<float64_t> targets)
{
    float64_t error = get_layer(m_num_layers-1)->compute_error(targets);

    // L2 regularization
    if (l2_coefficient != 0.0)
    {
        for (int32_t i=0; i<m_total_num_parameters; i++)
        {
            if (m_param_regularizable[i])
                error += 0.5*l2_coefficient*m_params[i]*m_params[i];
        }
    }

    // L1 regularization
    if (l1_coefficient != 0.0)
    {
        for (int32_t i=0; i<m_total_num_parameters; i++)
        {
            if (m_param_regularizable[i])
                error += l1_coefficient*CMath::abs(m_params[i]);
        }
    }

    return error;
}

float64_t CNeuralNetwork::compute_error(SGMatrix<float64_t> inputs,
    SGMatrix<float64_t> targets)
{
    forward_propagate(inputs);
    return compute_error(targets);
}

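// Gradient checker: compares the backpropagated gradients against central
// finite differences on a random input/target pair and returns the mean
// absolute difference, which should be close to zero.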
float64_t CNeuralNetwork::check_gradients(float64_t approx_epsilon, float64_t s)
{
    // some random inputs and outputs
    SGMatrix<float64_t> x(m_num_inputs, 1);
    SGMatrix<float64_t> y(get_num_outputs(), 1);

    for (int32_t i=0; i<x.num_rows; i++)
        x[i] = CMath::random(0.0,1.0);

    // the outputs are set up in the form of a probability distribution (in case
    // that is required by the output layer, i.e softmax)
    for (int32_t i=0; i<y.num_rows; i++)
        y[i] = CMath::random(0.0,1.0);

    float64_t y_sum = SGVector<float64_t>::sum(y.matrix, y.num_rows);
    for (int32_t i=0; i<y.num_rows; i++)
        y[i] /= y_sum;

    set_batch_size(1);

    // numerically compute gradients
    SGVector<float64_t> gradients_numerical(m_total_num_parameters);

    for (int32_t i=0; i<m_total_num_parameters; i++)
    {
        float64_t c =
            CMath::max<float64_t>(CMath::abs(approx_epsilon*m_params[i]),s);

        m_params[i] += c;
        float64_t error_plus = compute_error(x,y);
        m_params[i] -= 2*c;
        float64_t error_minus = compute_error(x,y);
        m_params[i] += c;

        gradients_numerical[i] = (error_plus-error_minus)/(2*c);
    }

    // compute gradients using backpropagation
    SGVector<float64_t> gradients_backprop(m_total_num_parameters);
    compute_gradients(x, y, gradients_backprop);

    // mean absolute difference between the two gradient estimates
    float64_t sum = 0.0;
    for (int32_t i=0; i<m_total_num_parameters; i++)
    {
        sum += CMath::abs(gradients_backprop[i]-gradients_numerical[i]);
    }

    return sum/m_total_num_parameters;
}

void CNeuralNetwork::set_batch_size(int32_t batch_size)
{
    if (batch_size!=m_batch_size)
    {
        m_batch_size = batch_size;
        for (int32_t i=0; i<m_num_layers; i++)
            get_layer(i)->set_batch_size(batch_size);
    }
}

SGMatrix<float64_t> CNeuralNetwork::features_to_matrix(CFeatures* features)
{
    REQUIRE(features != NULL, "Invalid (NULL) feature pointer\n");
    REQUIRE(features->get_feature_type() == F_DREAL,
        "Feature type must be F_DREAL\n");
    REQUIRE(features->get_feature_class() == C_DENSE,
        "Feature class must be C_DENSE\n");

    CDenseFeatures<float64_t>* inputs = (CDenseFeatures<float64_t>*) features;
    REQUIRE(inputs->get_num_features()==m_num_inputs,
        "Number of features (%i) must match the network's number of inputs "
        "(%i)\n", inputs->get_num_features(), get_num_inputs());

    return inputs->get_feature_matrix();
}

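// Converts labels into the target matrix expected by the output layer: one-hot
// columns for multiclass labels, 0/1 coding for binary labels (with one or two
// output neurons), and raw values for regression labels.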
SGMatrix<float64_t> CNeuralNetwork::labels_to_matrix(CLabels* labs)
{
    REQUIRE(labs != NULL, "Invalid (NULL) labels pointer\n");

    SGMatrix<float64_t> targets(get_num_outputs(), labs->get_num_labels());
    targets.zero();

    if (labs->get_label_type() == LT_MULTICLASS)
    {
        CMulticlassLabels* labels_mc = (CMulticlassLabels*) labs;
        REQUIRE(labels_mc->get_num_classes()==get_num_outputs(),
            "Number of classes (%i) must match the network's number of "
            "outputs (%i)\n", labels_mc->get_num_classes(), get_num_outputs());

        for (int32_t i=0; i<labels_mc->get_num_labels(); i++)
            targets[((int32_t)labels_mc->get_label(i))+ i*get_num_outputs()]
                = 1.0;
    }
    else if (labs->get_label_type() == LT_BINARY)
    {
        CBinaryLabels* labels_bin = (CBinaryLabels*) labs;
        if (get_num_outputs()==1)
        {
            for (int32_t i=0; i<labels_bin->get_num_labels(); i++)
                targets[i] = (labels_bin->get_label(i)==1);
        }
        else if (get_num_outputs()==2)
        {
            for (int32_t i=0; i<labels_bin->get_num_labels(); i++)
            {
                targets[i*2] = (labels_bin->get_label(i)==1);
                targets[i*2+1] = (labels_bin->get_label(i)==-1);
            }
        }
    }
    else if (labs->get_label_type() == LT_REGRESSION)
    {
        CRegressionLabels* labels_reg = (CRegressionLabels*) labs;
        for (int32_t i=0; i<labels_reg->get_num_labels(); i++)
            targets[i] = labels_reg->get_label(i);
    }

    return targets;
}

EProblemType CNeuralNetwork::get_machine_problem_type() const
{
    // problem type depends on the type of labels given to the network
    // if no labels are given yet, just return PT_MULTICLASS
    if (m_labels==NULL)
        return PT_MULTICLASS;

    if (m_labels->get_label_type() == LT_BINARY)
        return PT_BINARY;
    else if (m_labels->get_label_type() == LT_REGRESSION)
        return PT_REGRESSION;
    else return PT_MULTICLASS;
}

bool CNeuralNetwork::is_label_valid(CLabels* lab) const
{
    return (lab->get_label_type() == LT_MULTICLASS ||
        lab->get_label_type() == LT_BINARY ||
        lab->get_label_type() == LT_REGRESSION);
}

void CNeuralNetwork::set_labels(CLabels* lab)
{
    if (lab->get_label_type() == LT_BINARY)
    {
        REQUIRE(get_num_outputs() <= 2, "Cannot use %s in a neural network "
            "with more than 2 output neurons\n", lab->get_name());
    }
    else if (lab->get_label_type() == LT_REGRESSION)
    {
        REQUIRE(get_num_outputs() == 1, "Cannot use %s in a neural network "
            "with more than 1 output neuron\n", lab->get_name());
    }

    CMachine::set_labels(lab);
}

SGVector<float64_t>* CNeuralNetwork::get_layer_parameters(int32_t i)
{
    REQUIRE(i<m_num_layers && i >= 0, "Layer index (%i) out of range\n", i);

    int32_t n = get_layer(i)->get_num_parameters();
    SGVector<float64_t>* p = new SGVector<float64_t>(n);

    memcpy(p->vector, get_section(m_params, i), n*sizeof(float64_t));
    return p;
}

CNeuralLayer* CNeuralNetwork::get_layer(int32_t i)
{
    CNeuralLayer* layer = (CNeuralLayer*)m_layers->element(i);
    // needed because m_layers->element(i) increases the reference count of
    // layer i
    SG_UNREF(layer);
    return layer;
}

template <class T>
SGVector<T> CNeuralNetwork::get_section(SGVector<T> v, int32_t i)
{
    return SGVector<T>(v.vector+m_index_offsets[i],
        get_layer(i)->get_num_parameters(), false);
}

int32_t CNeuralNetwork::get_num_outputs()
{
    return get_layer(m_num_layers-1)->get_num_neurons();
}

CDynamicObjectArray* CNeuralNetwork::get_layers()
{
    SG_REF(m_layers);
    return m_layers;
}

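// Sets the default values of all hyperparameters and registers the network's
// members with SG_ADD so they take part in serialization and parameter I/O.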
void CNeuralNetwork::init()
{
    optimization_method = NNOM_LBFGS;
    dropout_hidden = 0.0;
    dropout_input = 0.0;
    max_norm = -1.0;
    l2_coefficient = 0.0;
    l1_coefficient = 0.0;
    gd_mini_batch_size = 0;
    max_num_epochs = 0;
    gd_learning_rate = 0.1;
    gd_learning_rate_decay = 1.0;
    gd_momentum = 0.9;
    gd_error_damping_coeff = -1.0;
    epsilon = 1.0e-5;
    m_num_inputs = 0;
    m_num_layers = 0;
    m_layers = NULL;
    m_total_num_parameters = 0;
    m_batch_size = 1;
    m_lbfgs_temp_inputs = NULL;
    m_lbfgs_temp_targets = NULL;
    m_is_training = false;

    SG_ADD((machine_int_t*)&optimization_method, "optimization_method",
        "Optimization Method", MS_NOT_AVAILABLE);
    SG_ADD(&gd_mini_batch_size, "gd_mini_batch_size",
        "Gradient Descent Mini-batch size", MS_NOT_AVAILABLE);
    SG_ADD(&max_num_epochs, "max_num_epochs",
        "Max number of Epochs", MS_NOT_AVAILABLE);
    SG_ADD(&gd_learning_rate, "gd_learning_rate",
        "Gradient descent learning rate", MS_NOT_AVAILABLE);
    SG_ADD(&gd_learning_rate_decay, "gd_learning_rate_decay",
        "Gradient descent learning rate decay", MS_NOT_AVAILABLE);
    SG_ADD(&gd_momentum, "gd_momentum",
        "Gradient Descent Momentum", MS_NOT_AVAILABLE);
    SG_ADD(&gd_error_damping_coeff, "gd_error_damping_coeff",
        "Gradient Descent Error Damping Coeff", MS_NOT_AVAILABLE);
    SG_ADD(&epsilon, "epsilon",
        "Epsilon", MS_NOT_AVAILABLE);
    SG_ADD(&m_num_inputs, "num_inputs",
        "Number of Inputs", MS_NOT_AVAILABLE);
    SG_ADD(&m_num_layers, "num_layers",
        "Number of Layers", MS_NOT_AVAILABLE);
    SG_ADD(&m_adj_matrix, "adj_matrix",
        "Adjacency Matrix", MS_NOT_AVAILABLE);
    SG_ADD(&l2_coefficient, "l2_coefficient",
        "L2 regularization coeff", MS_NOT_AVAILABLE);
    SG_ADD(&l1_coefficient, "l1_coefficient",
        "L1 regularization coeff", MS_NOT_AVAILABLE);
    SG_ADD(&dropout_hidden, "dropout_hidden",
        "Hidden neuron dropout probability", MS_NOT_AVAILABLE);
    SG_ADD(&dropout_input, "dropout_input",
        "Input neuron dropout probability", MS_NOT_AVAILABLE);
    SG_ADD(&max_norm, "max_norm",
        "Max Norm", MS_NOT_AVAILABLE);
    SG_ADD(&m_total_num_parameters, "total_num_parameters",
        "Total number of parameters", MS_NOT_AVAILABLE);
    SG_ADD(&m_index_offsets, "index_offsets",
        "Index Offsets", MS_NOT_AVAILABLE);
    SG_ADD(&m_params, "params",
        "Parameters", MS_NOT_AVAILABLE);
    SG_ADD(&m_param_regularizable, "param_regularizable",
        "Parameter Regularizable", MS_NOT_AVAILABLE);
    SG_ADD((CSGObject**)&m_layers, "layers",
        "DynamicObjectArray of NeuralLayer objects", MS_NOT_AVAILABLE);
    SG_ADD(&m_is_training, "is_training",
        "is_training", MS_NOT_AVAILABLE);
}
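
// A minimal usage sketch (not part of the original file). The layer class
// names CNeuralInputLayer and CNeuralLogisticLayer are assumed here and may
// differ between Shogun versions:
//
//     CDynamicObjectArray* layers = new CDynamicObjectArray();
//     layers->append_element(new CNeuralInputLayer(num_features));
//     layers->append_element(new CNeuralLogisticLayer(20));
//     layers->append_element(new CNeuralLogisticLayer(num_classes));
//
//     CNeuralNetwork* network = new CNeuralNetwork(layers);
//     network->quick_connect();            // connect each layer to the next
//     network->initialize(0.01);           // Gaussian init, sigma = 0.01
//
//     network->set_labels(train_labels);   // e.g. CMulticlassLabels*
//     network->train(train_features);      // CDenseFeatures<float64_t>*
//     CMulticlassLabels* pred = network->apply_multiclass(test_features);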
