SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
slep_solver.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2012 Sergey Lisitsyn
8  * Copyright (C) 2010-2012 Jun Liu, Jieping Ye
9  */
10 
18 #include <shogun/lib/Signal.h>
19 
20 namespace shogun
21 {
22 
23 double compute_regularizer(double* w, double lambda, double lambda2, int n_vecs, int n_feats,
24  int n_blocks, const slep_options& options)
25 {
26  double regularizer = 0.0;
27  switch (options.mode)
28  {
29  case MULTITASK_GROUP:
30  {
31  for (int i=0; i<n_feats; i++)
32  {
33  double w_row_norm = 0.0;
34  for (int t=0; t<n_blocks; t++)
35  w_row_norm += CMath::pow(w[i+t*n_feats],options.q);
36  regularizer += CMath::pow(w_row_norm,1.0/options.q);
37  }
38  regularizer *= lambda;
39  }
40  break;
41  case MULTITASK_TREE:
42  {
43  for (int i=0; i<n_feats; i++)
44  {
45  double tree_norm = 0.0;
46 
47  if (options.general)
48  tree_norm = general_treeNorm(w+i, n_blocks, n_blocks, options.G, options.ind_t, options.n_nodes);
49  else
50  tree_norm = treeNorm(w+i, n_blocks, n_blocks, options.ind_t, options.n_nodes);
51 
52  regularizer += tree_norm;
53  }
54  regularizer *= lambda;
55  }
56  break;
57  case FEATURE_GROUP:
58  {
59  for (int t=0; t<n_blocks; t++)
60  {
61  double group_qpow_sum = 0.0;
62  int group_ind_start = options.ind[t];
63  int group_ind_end = options.ind[t+1];
64  for (int i=group_ind_start; i<group_ind_end; i++)
65  group_qpow_sum += CMath::pow(w[i], options.q);
66 
67  regularizer += options.gWeight[t]*CMath::pow(group_qpow_sum, 1.0/options.q);
68  }
69  regularizer *= lambda;
70  }
71  break;
72  case FEATURE_TREE:
73  {
74  if (options.general)
75  regularizer = general_treeNorm(w, 1, n_feats, options.G, options.ind_t, options.n_nodes);
76  else
77  regularizer = treeNorm(w, 1, n_feats, options.ind_t, options.n_nodes);
78 
79  regularizer *= lambda;
80  }
81  break;
82  case PLAIN:
83  {
84  for (int i=0; i<n_feats; i++)
85  regularizer += CMath::abs(w[i]);
86 
87  regularizer *= lambda;
88  }
89  break;
90  case FUSED:
91  {
92  double l1 = 0.0;
93  for (int i=0; i<n_feats; i++)
94  l1 += CMath::abs(w[i]);
95  regularizer += lambda*l1;
96  double fuse = 0.0;
97  for (int i=1; i<n_feats; i++)
98  fuse += CMath::abs(w[i]-w[i-1]);
99  regularizer += lambda2*fuse;
100  }
101  break;
102  }
103  return regularizer;
104 };
105 
	// NOTE(review): the first line of this function's signature (presumably
	// `double compute_lambda(` -- it is invoked as compute_lambda by
	// slep_solver below) lies above this chunk; these are the remaining
	// parameters. Computes z*lambda_max, where lambda_max is the smallest
	// regularization strength that forces the all-zero solution.
	double* ATx,
	double z,
	CDotFeatures* features,
	double* y,
	int n_vecs, int n_feats,
	int n_blocks,
	const slep_options& options)
{
	double lambda_max = 0.0;
	if (z<0 || z>1)
		SG_SERROR("z is not in range [0,1]")

	// q_bar is the conjugate (dual) exponent of options.q, 1/q + 1/q_bar = 1;
	// q==1 maps to infinity and very large q is treated as q_bar = 1
	double q_bar = 0.0;
	if (options.q==1)
		q_bar = CMath::ALMOST_INFTY;
	else if (options.q>1e6)
		q_bar = 1;
	else
		q_bar = options.q/(options.q-1);

	SG_SINFO("q bar = %f \n",q_bar)

	// First pass: accumulate the loss gradient at w=0 into ATx
	// (one n_feats-long slice per block in the multitask modes)
	switch (options.mode)
	{
		case MULTITASK_GROUP:
		case MULTITASK_TREE:
		{
			for (int t=0; t<n_blocks; t++)
			{
				SGVector<index_t> task_idx = options.tasks_indices[t];
				int n_vecs_task = task_idx.vlen;

				switch (options.loss)
				{
					case LOGISTIC:
					{
						// weight each example by the frequency of its class
						// within the task (m1 positives, m2 negatives)
						double b = 0.0;
						int m1 = 0, m2 = 0;
						for (int i=0; i<n_vecs_task; i++)
						{
							if (y[task_idx[i]]>0)
								m1++;
							else
								m2++;
						}
						for (int i=0; i<n_vecs_task; i++)
						{
							if (y[task_idx[i]]>0)
								b = double(m1)/(m1+m2);
							else
								b = -double(m2)/(m1+m2);

							features->add_to_dense_vec(b,task_idx[i],ATx+t*n_feats,n_feats);
						}
					}
					break;
					case LEAST_SQUARES:
					{
						// ATx slice t accumulates A_t^T y_t
						for (int i=0; i<n_vecs_task; i++)
							features->add_to_dense_vec(y[task_idx[i]],task_idx[i],ATx+t*n_feats,n_feats);
					}
				}
			}
		}
		break;
		case FEATURE_GROUP:
		case FEATURE_TREE:
		case PLAIN:
		case FUSED:
		{
			switch (options.loss)
			{
				case LOGISTIC:
				{
					int m1 = 0, m2 = 0;
					double b = 0.0;
					for (int i=0; i<n_vecs; i++)
						y[i]>0 ? m1++ : m2++;

					SG_SDEBUG("# pos = %d , # neg = %d\n",m1,m2)

					// class-balanced weights, normalized by n_vecs^2
					for (int i=0; i<n_vecs; i++)
					{
						y[i]>0 ? b=double(m2) / CMath::sq(n_vecs) : b=-double(m1) / CMath::sq(n_vecs);
						features->add_to_dense_vec(b,i,ATx,n_feats);
					}
				}
				break;
				case LEAST_SQUARES:
				{
					// ATx = A^T y
					for (int i=0; i<n_vecs; i++)
						features->add_to_dense_vec(y[i],i,ATx,n_feats);
				}
				break;
			}
		}
		break;
	}

	// Second pass: lambda_max is the dual norm of ATx matching the chosen
	// regularizer
	switch (options.mode)
	{
		case MULTITASK_GROUP:
		{
			// max over features of the Lq_bar norm across blocks
			for (int i=0; i<n_feats; i++)
			{
				double sum = 0.0;
				for (int t=0; t<n_blocks; t++)
					sum += CMath::pow(fabs(ATx[t*n_feats+i]),q_bar);
				lambda_max =
					CMath::max(lambda_max, CMath::pow(sum,1.0/q_bar));
			}

			if (options.loss==LOGISTIC)
				lambda_max /= n_vecs;
		}
		break;
		case MULTITASK_TREE:
		{
			if (options.general)
				lambda_max = general_findLambdaMax_mt(ATx, n_feats, n_blocks, options.G, options.ind_t, options.n_nodes);
			else
				lambda_max = findLambdaMax_mt(ATx, n_feats, n_blocks, options.ind_t, options.n_nodes);

			lambda_max /= n_vecs*n_blocks;
		}
		break;
		case FEATURE_GROUP:
		{
			// max over groups of the weighted Lq_bar norm of the group's slice
			for (int t=0; t<n_blocks; t++)
			{
				int group_ind_start = options.ind[t];
				int group_ind_end = options.ind[t+1];
				double sum = 0.0;
				for (int i=group_ind_start; i<group_ind_end; i++)
					sum += CMath::pow(fabs(ATx[i]),q_bar);

				sum = CMath::pow(sum, 1.0/q_bar);
				sum /= options.gWeight[t];
				SG_SINFO("sum = %f\n",sum)
				if (sum>lambda_max)
					lambda_max = sum;
			}
		}
		break;
		case FEATURE_TREE:
		{
			if (options.general)
				lambda_max = general_findLambdaMax(ATx, n_feats, options.G, options.ind_t, options.n_nodes);
			else
				lambda_max = findLambdaMax(ATx, n_feats, options.ind_t, options.n_nodes);
		}
		break;
		case PLAIN:
		case FUSED:
		{
			// L1 regularizer: the dual norm is the infinity norm of ATx
			double max = 0.0;
			for (int i=0; i<n_feats; i++)
			{
				if (CMath::abs(ATx[i]) > max)
					max = CMath::abs(ATx[i]);
			}
			lambda_max = max;
		}
		break;
	}

	SG_SINFO("Computed lambda = %f * %f = %f\n",z,lambda_max,z*lambda_max)
	return z*lambda_max;
}
276 
277 void projection(double* w, double* v, int n_feats, int n_blocks, double lambda, double lambda2,
278  double L, double* z, double* z0, const slep_options& options)
279 {
280  switch (options.mode)
281  {
282  case MULTITASK_GROUP:
283  eppMatrix(w, v, n_feats, n_blocks, lambda/L, options.q);
284  break;
285  case MULTITASK_TREE:
286  if (options.general)
287  general_altra_mt(w, v, n_feats, n_blocks, options.G, options.ind_t, options.n_nodes, lambda/L);
288  else
289  altra_mt(w, v, n_feats, n_blocks, options.ind_t, options.n_nodes, lambda/L);
290  break;
291  case FEATURE_GROUP:
292  eppVector(w, v, options.ind, n_blocks, n_feats, options.gWeight, lambda/L, options.q > 1e6 ? 1e6 : options.q);
293  break;
294  case FEATURE_TREE:
295  if (options.general)
296  general_altra(w, v, n_feats, options.G, options.ind_t, options.n_nodes, lambda/L);
297  else
298  altra(w, v, n_feats, options.ind_t, options.n_nodes, lambda/L);
299  break;
300  case PLAIN:
301  for (int i=0; i<n_feats; i++)
302  w[i] = CMath::sign(v[i])*CMath::max(0.0,CMath::abs(v[i])-lambda/L);
303  break;
304  case FUSED:
305  flsa(w,z,NULL,v,z0,lambda/L,lambda2/L,n_feats,1000,1e-8,1,6);
306  for (int i=0; i<n_feats; i++)
307  z0[i] = z[i];
308  break;
309  }
310 
311 }
312 
313 double search_point_gradient_and_objective(CDotFeatures* features, double* ATx, double* As,
314  double* sc, double* y, int n_vecs,
315  int n_feats, int n_tasks,
316  double* g, double* gc,
317  const slep_options& options)
318 {
319  double fun_s = 0.0;
320  //SG_SDEBUG("As=%f\n", SGVector<float64_t>::dot(As,As,n_vecs))
321  //SG_SDEBUG("sc=%f\n", SGVector<float64_t>::dot(sc,sc,n_tasks))
322  switch (options.mode)
323  {
324  case MULTITASK_GROUP:
325  case MULTITASK_TREE:
326  for (int t=0; t<n_tasks; t++)
327  {
328  SGVector<index_t> task_idx = options.tasks_indices[t];
329  int n_vecs_task = task_idx.vlen;
330  switch (options.loss)
331  {
332  case LOGISTIC:
333  gc[t] = 0.0;
334  for (int i=0; i<n_vecs_task; i++)
335  {
336  double aa = -y[task_idx[i]]*(As[task_idx[i]]+sc[t]);
337  double bb = CMath::max(aa,0.0);
338  fun_s += (CMath::log(CMath::exp(-bb) + CMath::exp(aa-bb)) + bb)/ n_vecs;
339  double prob = 1.0/(1.0+CMath::exp(aa));
340  double b = -y[task_idx[i]]*(1.0-prob) / n_vecs;
341  gc[t] += b;
342  features->add_to_dense_vec(b,task_idx[i],g+t*n_feats,n_feats);
343  }
344  break;
345  case LEAST_SQUARES:
346  for (int i=0; i<n_feats*n_tasks; i++)
347  g[i] = -ATx[i];
348  for (int i=0; i<n_vecs_task; i++)
349  features->add_to_dense_vec(As[task_idx[i]],task_idx[i],g+t*n_feats,n_feats);
350  break;
351  }
352  }
353  break;
354  case FEATURE_GROUP:
355  case FEATURE_TREE:
356  case PLAIN:
357  case FUSED:
358  switch (options.loss)
359  {
360  case LOGISTIC:
361  gc[0] = 0.0;
362 
363  for (int i=0; i<n_vecs; i++)
364  {
365  double aa = -y[i]*(As[i]+sc[0]);
366  double bb = CMath::max(aa,0.0);
367  fun_s += (CMath::log(CMath::exp(-bb) + CMath::exp(aa-bb)) + bb);
368  /*
369  if (y[i]>0)
370  fun_s += (CMath::log(CMath::exp(-bb) + CMath::exp(aa-bb)) + bb)*pos_weight;
371  else
372  fun_s += (CMath::log(CMath::exp(-bb) + CMath::exp(aa-bb)) + bb)*neg_weight;
373  */
374  double prob = 1.0/(1.0+CMath::exp(aa));
375  //double b = 0;
376  double b = -y[i]*(1.0-prob)/n_vecs;
377  /*
378  if (y[i]>0)
379  b = -y[i]*(1.0-prob)*pos_weight;
380  else
381  b = -y[i]*(1.0-prob)*neg_weight;
382  */
383  gc[0] += b;
384  features->add_to_dense_vec(b,i,g,n_feats);
385  }
386  fun_s /= n_vecs;
387  break;
388  case LEAST_SQUARES:
389  for (int i=0; i<n_feats; i++)
390  g[i] = -ATx[i];
391  for (int i=0; i<n_vecs; i++)
392  features->add_to_dense_vec(As[i],i,g,n_feats);
393  break;
394  }
395  break;
396  }
397  SG_SDEBUG("G=%f\n", SGVector<float64_t>::dot(g,g,n_feats*n_tasks))
398 
399  return fun_s;
400 }
401 
/** Solves the SLEP sparse regularized problem selected by options.mode with an
 * accelerated proximal gradient loop (FISTA-style: momentum extrapolation,
 * line search on the Lipschitz estimate L, projection step).
 *
 * @param features feature dot-product interface (data matrix A)
 * @param y labels/targets
 * @param z regularization parameter; if options.regularization!=0 it is a
 *        fraction in [0,1] of lambda_max, otherwise the lambda value itself
 * @param options solver options (mode, loss, termination, tolerance, ...)
 * @return result holding the weight matrix w (n_feats x n_tasks) and biases c
 */
slep_result_t slep_solver(
	CDotFeatures* features,
	double* y,
	double z,
	const slep_options& options)
{
	int i,t;
	int n_feats = features->get_dim_feature_space();
	int n_vecs = features->get_num_vectors();
	double lambda, beta;
	double funcp = 0.0, func = 0.0;

	int n_blocks = 0;
	int n_tasks = 0;

	// derive block/task counts from the problem mode
	switch (options.mode)
	{
		case MULTITASK_GROUP:
		case MULTITASK_TREE:
			n_tasks = options.n_tasks;
			n_blocks = options.n_tasks;
			break;
		case FEATURE_GROUP:
		case FEATURE_TREE:
			n_tasks = 1;
			n_blocks = options.n_feature_blocks;
			break;
		case PLAIN:
		case FUSED:
			n_tasks = 1;
			n_blocks = 1;
			break;
	}
	SG_SDEBUG("n_tasks = %d, n_blocks = %d\n",n_tasks,n_blocks)
	SG_SDEBUG("n_nodes = %d\n",options.n_nodes)

	int iter = 1;
	bool done = false;
	bool gradient_break = false;

	double rsL2 = options.rsL2;

	// With regularization enabled, translate the fraction z into an absolute
	// lambda via lambda_max; compute_lambda also fills ATx with the loss
	// gradient at w=0, reused below for least-squares gradients
	double* ATx = SG_CALLOC(double, n_feats*n_tasks);
	if (options.regularization!=0)
	{
		lambda = compute_lambda(ATx, z, features, y, n_vecs, n_feats, n_blocks, options);
		rsL2*= lambda;
	}
	else
		lambda = z;

	double lambda2 = 0.0;

	SGMatrix<double> w(n_feats,n_tasks);
	w.zero();
	SGVector<double> c(n_tasks);
	c.zero();

	// warm start from a previous solution if one was supplied
	if (options.last_result)
	{
		w = options.last_result->w;
		c = options.last_result->c;
	}

	double* s = SG_CALLOC(double, n_feats*n_tasks);  // search point (extrapolated w)
	double* sc = SG_CALLOC(double, n_tasks);         // search point biases
	double* g = SG_CALLOC(double, n_feats*n_tasks);  // gradient at the search point
	double* v = SG_CALLOC(double, n_feats*n_tasks);  // scratch: prox input, then w-s
	double* z_flsa = SG_CALLOC(double, n_feats);     // flsa state (FUSED mode)
	double* z0_flsa = SG_CALLOC(double, n_feats);    // flsa warm-start state

	// Aw = A*w for the initial w
	double* Aw = SG_CALLOC(double, n_vecs);
	switch (options.mode)
	{
		case MULTITASK_GROUP:
		case MULTITASK_TREE:
		{
			for (t=0; t<n_blocks; t++)
			{
				SGVector<index_t> task_idx = options.tasks_indices[t];
				//task_idx.display_vector("task");
				int n_vecs_task = task_idx.vlen;
				for (i=0; i<n_vecs_task; i++)
					Aw[task_idx[i]] = features->dense_dot(task_idx[i],w.matrix+t*n_feats,n_feats);
			}
		}
		break;
		case FEATURE_GROUP:
		case FEATURE_TREE:
		case PLAIN:
		case FUSED:
		{
			for (i=0; i<n_vecs; i++)
				Aw[i] = features->dense_dot(i,w.matrix,n_feats);
		}
		break;
	}

	// NOTE(review): Av is allocated and freed but never used in this function
	double* Av = SG_MALLOC(double, n_vecs);
	double* As = SG_MALLOC(double, n_vecs);

	// initial Lipschitz-constant estimate, grown by the line search below
	double L = 1.0/n_vecs;

	if (options.mode==FUSED)
		L += rsL2;

	// previous-iterate copies (wp, cp, Awp) and differences (wwp, ccp) used
	// for the momentum term and termination checks
	double* wp = SG_CALLOC(double, n_feats*n_tasks);
	for (i=0; i<n_feats*n_tasks; i++)
		wp[i] = w[i];
	double* Awp = SG_MALLOC(double, n_vecs);
	for (i=0; i<n_vecs; i++)
		Awp[i] = Aw[i];
	double* wwp = SG_CALLOC(double, n_feats*n_tasks);

	double* cp = SG_MALLOC(double, n_tasks);
	for (t=0; t<n_tasks; t++)
		cp[t] = c[t];
	double* ccp = SG_CALLOC(double, n_tasks);

	double* gc = SG_MALLOC(double, n_tasks);
	double alphap = 0.0, alpha = 1.0;  // FISTA momentum coefficients
	double fun_x = 0.0;

	while (!done && iter <= options.max_iter && !CSignal::cancel_computations())
	{
		// momentum weight from the alpha recurrence
		beta = (alphap-1.0)/alpha;

		// search point s = w + beta*(w - wp), same for biases and A*s
		for (i=0; i<n_feats*n_tasks; i++)
			s[i] = w[i] + beta*wwp[i];
		for (t=0; t<n_tasks; t++)
			sc[t] = c[t] + beta*ccp[t];
		for (i=0; i<n_vecs; i++)
			As[i] = Aw[i] + beta*(Aw[i]-Awp[i]);
		for (i=0; i<n_tasks*n_feats; i++)
			g[i] = 0.0;

		double fun_s = search_point_gradient_and_objective(features, ATx, As, sc, y, n_vecs, n_feats, n_tasks, g, gc, options);

		//SG_SDEBUG("fun_s = %f\n", fun_s)

		// optional ridge (rsL2) term on top of the loss
		if (options.mode==PLAIN || options.mode==FUSED)
			fun_s += rsL2/2 * SGVector<float64_t>::dot(w.matrix,w.matrix,n_feats);

		// remember the previous iterate before the prox step overwrites w
		for (i=0; i<n_feats*n_tasks; i++)
			wp[i] = w[i];
		for (t=0; t<n_tasks; t++)
			cp[t] = c[t];
		for (i=0; i<n_vecs; i++)
			Awp[i] = Aw[i];

		// line search: grow L until the quadratic upper bound at s holds
		int inner_iter = 1;
		while (inner_iter <= 1000)
		{
			// gradient step from s with step size 1/L
			for (i=0; i<n_feats*n_tasks; i++)
				v[i] = s[i] - g[i]*(1.0/L);

			for (t=0; t<n_tasks; t++)
				c[t] = sc[t] - gc[t]*(1.0/L);

			// proximal projection onto the regularizer -> new w
			projection(w.matrix,v,n_feats,n_blocks,lambda,lambda2,L,z_flsa,z0_flsa,options);

			// v now holds the step w - s used in the sufficient-decrease test
			for (i=0; i<n_feats*n_tasks; i++)
				v[i] = w[i] - s[i];

			// evaluate the loss at the new w (and refresh Aw)
			fun_x = 0.0;
			switch (options.mode)
			{
				case MULTITASK_GROUP:
				case MULTITASK_TREE:
					for (t=0; t<n_blocks; t++)
					{
						SGVector<index_t> task_idx = options.tasks_indices[t];
						int n_vecs_task = task_idx.vlen;
						for (i=0; i<n_vecs_task; i++)
						{
							Aw[task_idx[i]] = features->dense_dot(task_idx[i],w.matrix+t*n_feats,n_feats);
							if (options.loss==LOGISTIC)
							{
								// stable log(1+exp(aa))
								double aa = -y[task_idx[i]]*(Aw[task_idx[i]]+c[t]);
								double bb = CMath::max(aa,0.0);
								fun_x += (CMath::log(CMath::exp(-bb) + CMath::exp(aa-bb)) + bb);
							}
						}
					}
					break;
				case FEATURE_GROUP:
				case FEATURE_TREE:
				case PLAIN:
				case FUSED:
					for (i=0; i<n_vecs; i++)
					{
						Aw[i] = features->dense_dot(i, w.matrix, n_feats);
						if (options.loss==LOGISTIC)
						{
							double aa = -y[i]*(Aw[i]+c[0]);
							double bb = CMath::max(aa,0.0);
							if (y[i]>0)
								fun_x += (CMath::log(CMath::exp(-bb) + CMath::exp(aa-bb)) + bb);//*pos_weight;
							else
								fun_x += (CMath::log(CMath::exp(-bb) + CMath::exp(aa-bb)) + bb);//*neg_weight;
						}
					}
					break;
			}
			if (options.loss==LOGISTIC)
				fun_x /= n_vecs;
			if (options.mode==PLAIN || options.mode==FUSED)
				fun_x += rsL2/2 * SGVector<float64_t>::dot(w.matrix,w.matrix,n_feats);

			// sufficient-decrease test: accept when
			// fun_x <= fun_s + <g, w-s> + (L/2)*||w-s||^2
			double l_sum = 0.0, r_sum = 0.0;
			switch (options.loss)
			{
				case LOGISTIC:
					r_sum = SGVector<float64_t>::dot(v,v,n_feats*n_tasks);
					l_sum = fun_x - fun_s - SGVector<float64_t>::dot(v,g,n_feats*n_tasks);
					for (t=0; t<n_tasks; t++)
					{
						r_sum += CMath::sq(c[t] - sc[t]);
						l_sum -= (c[t] - sc[t])*gc[t];
					}
					r_sum /= 2.0;
					break;
				case LEAST_SQUARES:
					r_sum = SGVector<float64_t>::dot(v,v,n_feats*n_tasks);
					for (i=0; i<n_vecs; i++)
						l_sum += CMath::sq(Aw[i]-As[i]);
					break;
			}

			// step is essentially zero -> stop the whole solver
			if (r_sum <= 1e-20)
			{
				gradient_break = true;
				break;
			}

			if (l_sum <= r_sum*L)
				break;
			else
				L = CMath::max(2*L, l_sum/r_sum);
			inner_iter++;
		}

		// FISTA alpha update and iterate differences for the next momentum step
		alphap = alpha;
		alpha = 0.5*(1+CMath::sqrt(4*alpha*alpha+1));
		for (i=0; i<n_feats*n_tasks; i++)
			wwp[i] = w[i] - wp[i];
		for (t=0; t<n_tasks; t++)
			ccp[t] = c[t] - cp[t];
		double regularizer = compute_regularizer(w.matrix, lambda, lambda2, n_vecs, n_feats, n_blocks, options);
		funcp = func;

		if (options.loss==LOGISTIC)
		{
			func = fun_x + regularizer;
		}
		if (options.loss==LEAST_SQUARES)
		{
			func = regularizer;
			for (i=0; i<n_vecs; i++)
				func += CMath::sq(Aw[i] - y[i]);
		}
		SG_SDEBUG("Obj = %f + %f = %f \n",fun_x, regularizer, func)

		if (gradient_break)
		{
			SG_SINFO("Gradient norm is less than 1e-20\n")
			break;
		}

		// termination criteria (selected by options.termination)
		double norm_wp, norm_wwp;
		double step;
		switch (options.termination)
		{
			case 0:
				// absolute change of the objective
				if (iter>=2)
				{
					step = CMath::abs(func-funcp);
					if (step <= options.tolerance)
					{
						SG_SINFO("Objective changes less than tolerance\n")
						done = true;
					}
				}
				break;
			case 1:
				// NOTE(review): `step <= step*options.tolerance` compares step
				// against itself scaled, so it only fires when step==0 or
				// tolerance>=1; presumably the relative test was meant against
				// funcp (e.g. step <= CMath::abs(funcp)*tolerance) -- confirm
				if (iter>=2)
				{
					step = CMath::abs(func-funcp);
					if (step <= step*options.tolerance)
					{
						SG_SINFO("Objective changes relatively less than tolerance\n")
						done = true;
					}
				}
				break;
			case 2:
				// objective value itself below tolerance
				if (func <= options.tolerance)
				{
					SG_SINFO("Objective is less than tolerance\n")
					done = true;
				}
				break;
			case 3:
				// absolute norm of the iterate change
				norm_wwp = CMath::sqrt(SGVector<float64_t>::dot(wwp,wwp,n_feats*n_tasks));
				if (norm_wwp <= options.tolerance)
					done = true;
				break;
			case 4:
				// iterate change relative to the iterate norm
				norm_wp = CMath::sqrt(SGVector<float64_t>::dot(wp,wp,n_feats*n_tasks));
				norm_wwp = CMath::sqrt(SGVector<float64_t>::dot(wwp,wwp,n_feats*n_tasks));
				if (norm_wwp <= options.tolerance*CMath::max(norm_wp,1.0))
					done = true;
				break;
			default:
				done = true;
		}

		iter++;
	}
	SG_SINFO("Finished %d iterations, objective = %f\n", iter, func)

	SG_FREE(ATx);
	SG_FREE(wp);
	SG_FREE(wwp);
	SG_FREE(s);
	SG_FREE(sc);
	SG_FREE(cp);
	SG_FREE(ccp);
	SG_FREE(g);
	SG_FREE(v);
	SG_FREE(Aw);
	SG_FREE(Awp);
	SG_FREE(Av);
	SG_FREE(As);
	SG_FREE(gc);
	SG_FREE(z_flsa);
	SG_FREE(z0_flsa);

	return slep_result_t(w,c);
};
742 };

SHOGUN Machine Learning Toolbox - Documentation