SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
SVMOcas.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2007-2008 Vojtech Franc
8  * Written (W) 2007-2009 Soeren Sonnenburg
9  * Copyright (C) 2007-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
/* NOTE(review): this Doxygen-extracted listing lost several #include lines
 * (own header, libocas solver, DotFeatures, BinaryLabels, Math); restored
 * from the upstream shogun 3.2 sources -- verify against the repository. */
#include <shogun/classifier/svm/SVMOcas.h>
#include <shogun/classifier/svm/libocas.h>
#include <shogun/features/DotFeatures.h>
#include <shogun/labels/Labels.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/lib/Time.h>
#include <shogun/mathematics/Math.h>
#include <shogun/base/Parameter.h>
#include <shogun/base/Parallel.h>
23 using namespace shogun;
24 
27 {
28  init();
29 }
30 
31 CSVMOcas::CSVMOcas(E_SVM_TYPE type)
33 {
34  init();
35  method=type;
36 }
37 
39  float64_t C, CDotFeatures* traindat, CLabels* trainlab)
41 {
42  init();
43  C1=C;
44  C2=C;
45 
46  set_features(traindat);
47  set_labels(trainlab);
48 }
49 
50 
52 {
53 }
54 
56 {
57  SG_INFO("C=%f, epsilon=%f, bufsize=%d\n", get_C1(), get_epsilon(), bufsize)
58  SG_DEBUG("use_bias = %i\n", get_bias_enabled())
59 
62  if (data)
63  {
64  if (!data->has_property(FP_DOT))
65  SG_ERROR("Specified features are not of type CDotFeatures\n")
66  set_features((CDotFeatures*) data);
67  }
69 
70  int32_t num_vec=features->get_num_vectors();
71  lab = SGVector<float64_t>(num_vec);
72  for (int32_t i=0; i<num_vec; i++)
73  lab[i] = ((CBinaryLabels*)m_labels)->get_label(i);
74 
76  w.zero();
77 
78  if (num_vec!=lab.vlen || num_vec<=0)
79  SG_ERROR("num_vec=%d num_train_labels=%d\n", num_vec, lab.vlen)
80 
81  SG_FREE(old_w);
82  old_w=SG_CALLOC(float64_t, w.vlen);
83  bias=0;
84  old_bias=0;
85 
86  tmp_a_buf=SG_CALLOC(float64_t, w.vlen);
87  cp_value=SG_CALLOC(float64_t*, bufsize);
88  cp_index=SG_CALLOC(uint32_t*, bufsize);
89  cp_nz_dims=SG_CALLOC(uint32_t, bufsize);
90  cp_bias=SG_CALLOC(float64_t, bufsize);
91 
92  float64_t TolAbs=0;
93  float64_t QPBound=0;
94  int32_t Method=0;
95  if (method == SVM_OCAS)
96  Method = 1;
97  ocas_return_value_T result = svm_ocas_solver( get_C1(), num_vec, get_epsilon(),
98  TolAbs, QPBound, get_max_train_time(), bufsize, Method,
105  this);
106 
107  SG_INFO("Ocas Converged after %d iterations\n"
108  "==================================\n"
109  "timing statistics:\n"
110  "output_time: %f s\n"
111  "sort_time: %f s\n"
112  "add_time: %f s\n"
113  "w_time: %f s\n"
114  "solver_time %f s\n"
115  "ocas_time %f s\n\n", result.nIter, result.output_time, result.sort_time,
116  result.add_time, result.w_time, result.qp_solver_time, result.ocas_time);
117 
118  SG_FREE(tmp_a_buf);
119 
120  primal_objective = result.Q_P;
121 
122  uint32_t num_cut_planes = result.nCutPlanes;
123 
124  SG_DEBUG("num_cut_planes=%d\n", num_cut_planes)
125  for (uint32_t i=0; i<num_cut_planes; i++)
126  {
127  SG_DEBUG("cp_value[%d]=%p\n", i, cp_value)
128  SG_FREE(cp_value[i]);
129  SG_DEBUG("cp_index[%d]=%p\n", i, cp_index)
130  SG_FREE(cp_index[i]);
131  }
132 
133  SG_FREE(cp_value);
134  cp_value=NULL;
135  SG_FREE(cp_index);
136  cp_index=NULL;
137  SG_FREE(cp_nz_dims);
138  cp_nz_dims=NULL;
139  SG_FREE(cp_bias);
140  cp_bias=NULL;
141 
142  SG_FREE(old_w);
143  old_w=NULL;
144 
145  return true;
146 }
147 
148 /*----------------------------------------------------------------------------------
149  sq_norm_W = sparse_update_W( t ) does the following:
150 
151  W = oldW*(1-t) + t*W;
152  sq_norm_W = W'*W;
153 
154  ---------------------------------------------------------------------------------*/
156 {
157  float64_t sq_norm_W = 0;
158  CSVMOcas* o = (CSVMOcas*) ptr;
159  uint32_t nDim = (uint32_t) o->w.vlen;
160  float64_t* W=o->w.vector;
161  float64_t* oldW=o->old_w;
162 
163  for(uint32_t j=0; j <nDim; j++)
164  {
165  W[j] = oldW[j]*(1-t) + t*W[j];
166  sq_norm_W += W[j]*W[j];
167  }
168  o->bias=o->old_bias*(1-t) + t*o->bias;
169  sq_norm_W += CMath::sq(o->bias);
170 
171  return( sq_norm_W );
172 }
173 
174 /*----------------------------------------------------------------------------------
175  sparse_add_new_cut( new_col_H, new_cut, cut_length, nSel ) does the following:
176 
177  new_a = sum(data_X(:,find(new_cut ~=0 )),2);
178  new_col_H = [sparse_A(:,1:nSel)'*new_a ; new_a'*new_a];
179  sparse_A(:,nSel+1) = new_a;
180 
181  ---------------------------------------------------------------------------------*/
183  float64_t *new_col_H, uint32_t *new_cut, uint32_t cut_length,
184  uint32_t nSel, void* ptr)
185 {
186  CSVMOcas* o = (CSVMOcas*) ptr;
187  CDotFeatures* f = o->features;
188  uint32_t nDim=(uint32_t) o->w.vlen;
189  float64_t* y = o->lab.vector;
190 
191  float64_t** c_val = o->cp_value;
192  uint32_t** c_idx = o->cp_index;
193  uint32_t* c_nzd = o->cp_nz_dims;
194  float64_t* c_bias = o->cp_bias;
195 
196  float64_t sq_norm_a;
197  uint32_t i, j, nz_dims;
198 
199  /* temporary vector */
200  float64_t* new_a = o->tmp_a_buf;
201  memset(new_a, 0, sizeof(float64_t)*nDim);
202 
203  for(i=0; i < cut_length; i++)
204  {
205  f->add_to_dense_vec(y[new_cut[i]], new_cut[i], new_a, nDim);
206 
207  if (o->use_bias)
208  c_bias[nSel]+=y[new_cut[i]];
209  }
210 
211  /* compute new_a'*new_a and count number of non-zerou dimensions */
212  nz_dims = 0;
213  sq_norm_a = CMath::sq(c_bias[nSel]);
214  for(j=0; j < nDim; j++ ) {
215  if(new_a[j] != 0) {
216  nz_dims++;
217  sq_norm_a += new_a[j]*new_a[j];
218  }
219  }
220 
221  /* sparsify new_a and insert it to the last column of sparse_A */
222  c_nzd[nSel] = nz_dims;
223  c_idx[nSel]=NULL;
224  c_val[nSel]=NULL;
225 
226  if(nz_dims > 0)
227  {
228  c_idx[nSel]=SG_MALLOC(uint32_t, nz_dims);
229  c_val[nSel]=SG_MALLOC(float64_t, nz_dims);
230 
231  uint32_t idx=0;
232  for(j=0; j < nDim; j++ )
233  {
234  if(new_a[j] != 0)
235  {
236  c_idx[nSel][idx] = j;
237  c_val[nSel][idx++] = new_a[j];
238  }
239  }
240  }
241 
242  new_col_H[nSel] = sq_norm_a;
243 
244  for(i=0; i < nSel; i++)
245  {
246  float64_t tmp = c_bias[nSel]*c_bias[i];
247  for(j=0; j < c_nzd[i]; j++)
248  tmp += new_a[c_idx[i][j]]*c_val[i][j];
249 
250  new_col_H[i] = tmp;
251  }
252  //CMath::display_vector(new_col_H, nSel+1, "new_col_H");
253  //CMath::display_vector((int32_t*) c_idx[nSel], (int32_t) nz_dims, "c_idx");
254  //CMath::display_vector((float64_t*) c_val[nSel], nz_dims, "c_val");
255  return 0;
256 }
257 
/* Sort callback handed to the libocas solver: delegates to
 * CMath::qsort_index, which sorts `vals` and applies the same permutation
 * to `data`. Always returns 0 (solver success convention). */
int CSVMOcas::sort(float64_t* vals, float64_t* data, uint32_t size)
{
	CMath::qsort_index(vals, data, size);
	return 0;
}
263 
264 /*----------------------------------------------------------------------
265  sparse_compute_output( output ) does the follwing:
266 
267  output = data_X'*W;
268  ----------------------------------------------------------------------*/
269 int CSVMOcas::compute_output(float64_t *output, void* ptr)
270 {
271  CSVMOcas* o = (CSVMOcas*) ptr;
272  CDotFeatures* f=o->features;
273  int32_t nData=f->get_num_vectors();
274 
275  float64_t* y = o->lab.vector;
276 
277  f->dense_dot_range(output, 0, nData, y, o->w.vector, o->w.vlen, 0.0);
278 
279  for (int32_t i=0; i<nData; i++)
280  output[i]+=y[i]*o->bias;
281  //CMath::display_vector(o->w, o->w.vlen, "w");
282  //CMath::display_vector(output, nData, "out");
283  return 0;
284 }
285 
286 /*----------------------------------------------------------------------
287  sq_norm_W = compute_W( alpha, nSel ) does the following:
288 
289  oldW = W;
290  W = sparse_A(:,1:nSel)'*alpha;
291  sq_norm_W = W'*W;
292  dp_WoldW = W'*oldW';
293 
294  ----------------------------------------------------------------------*/
296  float64_t *sq_norm_W, float64_t *dp_WoldW, float64_t *alpha, uint32_t nSel,
297  void* ptr )
298 {
299  CSVMOcas* o = (CSVMOcas*) ptr;
300  uint32_t nDim= (uint32_t) o->w.vlen;
301  CMath::swap(o->w.vector, o->old_w);
302  float64_t* W=o->w.vector;
303  float64_t* oldW=o->old_w;
304  memset(W, 0, sizeof(float64_t)*nDim);
306  float64_t bias=0;
307 
308  float64_t** c_val = o->cp_value;
309  uint32_t** c_idx = o->cp_index;
310  uint32_t* c_nzd = o->cp_nz_dims;
311  float64_t* c_bias = o->cp_bias;
312 
313  for(uint32_t i=0; i<nSel; i++)
314  {
315  uint32_t nz_dims = c_nzd[i];
316 
317  if(nz_dims > 0 && alpha[i] > 0)
318  {
319  for(uint32_t j=0; j < nz_dims; j++)
320  W[c_idx[i][j]] += alpha[i]*c_val[i][j];
321  }
322  bias += c_bias[i]*alpha[i];
323  }
324 
325  *sq_norm_W = SGVector<float64_t>::dot(W,W, nDim) + CMath::sq(bias);
326  *dp_WoldW = SGVector<float64_t>::dot(W,oldW, nDim) + bias*old_bias;
327  //SG_PRINT("nSel=%d sq_norm_W=%f dp_WoldW=%f\n", nSel, *sq_norm_W, *dp_WoldW)
328 
329  o->bias = bias;
330  o->old_bias = old_bias;
331 }
332 
/** Set all members to their defaults and register the model parameters
 *  for serialization.
 *
 *  NOTE(review): the m_parameters->add() registration order is preserved
 *  as-is -- presumably serialized models depend on it; confirm before
 *  reordering.
 */
void CSVMOcas::init()
{
	use_bias=true;       /* include a bias term by default */
	bufsize=3000;        /* maximum number of cutting planes kept */
	C1=1;                /* cost constant 1 (see parameter description below) */
	C2=1;                /* cost constant 2 */

	epsilon=1e-3;        /* convergence precision */
	method=SVM_OCAS;     /* default solver variant */
	/* solver buffers: allocated in train_machine(), NULL when idle */
	old_w=NULL;
	tmp_a_buf=NULL;
	cp_value=NULL;
	cp_index=NULL;
	cp_nz_dims=NULL;
	cp_bias=NULL;

	primal_objective = 0.0;

	m_parameters->add(&C1, "C1", "Cost constant 1.");
	m_parameters->add(&C2, "C2", "Cost constant 2.");
	m_parameters->add(&use_bias, "use_bias",
			"Indicates if bias is used.");
	m_parameters->add(&epsilon, "epsilon", "Convergence precision.");
	m_parameters->add(&bufsize, "bufsize", "Maximum number of cutting planes.");
	m_parameters->add((machine_int_t*) &method, "method",
			"SVMOcas solver type.");
}
360 
362 {
363  return primal_objective;
364 }
365 

SHOGUN Machine Learning Toolbox - Documentation