SHOGUN  4.1.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
LinearARDKernel.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2015 Wu Lin
8  * Written (W) 2012 Jacob Walker
9  *
10  * Adapted from WeightedDegreeRBFKernel.cpp
11  */
12 
14 
15 #ifdef HAVE_LINALG_LIB
17 #endif
18 
19 using namespace shogun;
20 
// NOTE(review): the constructor signature line (doc line 21) was lost in
// extraction — presumably the default constructor CLinearARDKernel::CLinearARDKernel();
// confirm against the repository copy.
{
	initialize();
}
25 
// NOTE(review): the signature line (doc line 26) was lost in extraction —
// presumably the destructor CLinearARDKernel::~CLinearARDKernel(); body at
// doc line 28 (if any) was stripped as well. Confirm against the repository copy.
{
}
30 
// Shared constructor helper: sets up default weights and registers parameters.
void CLinearARDKernel::initialize()
{
	// NOTE(review): doc lines 33-34 were lost in extraction — presumably they
	// set m_ARD_type and allocate m_weights as a 1x1 matrix; confirm against
	// the repository copy.
	// Start from uniform unit weights.
	m_weights.set_const(1.0);
	// Register members for serialization / model selection.
	// NOTE(review): the SG_ADD continuation (doc line 37, presumably the
	// gradient-availability argument and closing paren) was lost in extraction.
	SG_ADD(&m_weights, "weights", "Feature weights", MS_AVAILABLE,
	SG_ADD((int *)(&m_ARD_type), "type", "ARD kernel type", MS_NOT_AVAILABLE);
}
40 
41 #ifdef HAVE_LINALG_LIB
// NOTE(review): the signature line (doc line 42) was lost in extraction —
// presumably CLinearARDKernel::CLinearARDKernel(int32_t size) : CDotKernel(size);
// confirm against the repository copy.
{
	initialize();
}
46 
48  CDotFeatures* r, int32_t size) : CDotKernel(size)
49 {
50  initialize();
51  init(l,r);
52 }
53 
54 bool CLinearARDKernel::init(CFeatures* l, CFeatures* r)
55 {
56  cleanup();
57  CDotKernel::init(l, r);
58  int32_t dim=((CDotFeatures*) l)->get_dim_feature_space();
59  if (m_ARD_type==KT_FULL)
60  {
61  REQUIRE(m_weights.num_cols==dim, "Dimension mismatch between features (%d) and weights (%d)\n",
62  dim, m_weights.num_cols);
63  }
64  else if (m_ARD_type==KT_DIAG)
65  {
66  REQUIRE(m_weights.num_rows==dim, "Dimension mismatch between features (%d) and weights (%d)\n",
67  dim, m_weights.num_rows);
68  }
69  return init_normalizer();
70 }
71 
72 
// Builds the weighted right operand used by compute_helper():
//  - KT_SCALAR: wraps right_vec as a non-owning (vlen x 1) view and folds
//    m_weights[0] into scalar_weight instead of scaling the matrix;
//  - KT_DIAG:   element-wise product of m_weights with right_vec;
//  - KT_FULL:   matrix product m_weights * right_vec.
// NOTE(review): a line allocating `right` for the non-scalar branches
// (doc line 85) was lost in extraction — confirm against the repository copy.
SGMatrix<float64_t> CLinearARDKernel::compute_right_product(SGVector<float64_t>right_vec,
	float64_t & scalar_weight)
{
	SGMatrix<float64_t> right;

	if (m_ARD_type==KT_SCALAR)
	{
		// Non-owning view (last ctor arg false): no copy of right_vec's buffer.
		right=SGMatrix<float64_t>(right_vec.vector,right_vec.vlen,1,false);
		scalar_weight*=m_weights[0];
	}
	else
	{

		// Non-owning (vlen x 1) column view over right_vec.
		SGMatrix<float64_t> rtmp(right_vec.vector,right_vec.vlen,1,false);

		if(m_ARD_type==KT_DIAG)
			linalg::elementwise_product(m_weights, rtmp, right);
		else if(m_ARD_type==KT_FULL)
			linalg::matrix_product(m_weights, rtmp, right);
		else
			SG_ERROR("Unsupported ARD type\n");
	}
	return right;
}
98 
// Computes the weighted inner product between avec and bvec: both vectors are
// mapped through the ARD weights (shape of the weighting depends on
// m_ARD_type) and then multiplied; the scalar case is handled by a plain
// multiplier instead of a matrix operation.
// NOTE(review): a line allocating `left` for the non-scalar branches (doc
// line 111) was lost in extraction — confirm against the repository copy.
float64_t CLinearARDKernel::compute_helper(SGVector<float64_t> avec, SGVector<float64_t>bvec)
{
	SGMatrix<float64_t> left;

	float64_t scalar_weight=1.0;
	if (m_ARD_type==KT_SCALAR)
	{
		// Non-owning (1 x vlen) row view over avec; the weight is applied via
		// scalar_weight at the end instead of scaling the matrix.
		left=SGMatrix<float64_t>(avec.vector,1,avec.vlen,false);
		scalar_weight=m_weights[0];
	}
	else
	{

		// Non-owning (vlen x 1) column view over avec.
		SGMatrix<float64_t> ltmp(avec.vector,avec.vlen,1,false);

		// View over left's own buffer so the weighted column is written
		// directly into `left`, effectively landing transposed (as a row).
		SGMatrix<float64_t> left_transpose(left.matrix,left.num_cols,1,false);
		if(m_ARD_type==KT_DIAG)
			linalg::elementwise_product(m_weights, ltmp, left_transpose);
		else if(m_ARD_type==KT_FULL)
			linalg::matrix_product(m_weights, ltmp, left_transpose);
		else
			SG_ERROR("Unsupported ARD type\n");
	}

	// res is 1x1: (weighted avec as row) * (weighted bvec as column),
	// scaled by any scalar weight accumulated above / in compute_right_product.
	SGMatrix<float64_t> res(1,1);
	SGMatrix<float64_t> right=compute_right_product(bvec, scalar_weight);
	linalg::matrix_product(left, right, res);
	return res[0]*scalar_weight;
}
129 
130 float64_t CLinearARDKernel::compute(int32_t idx_a, int32_t idx_b)
131 {
132  REQUIRE(lhs && rhs, "Features not set!\n")
133 
134  SGVector<float64_t> avec=((CDotFeatures *)lhs)->get_computed_dot_feature_vector(idx_a);
135  SGVector<float64_t> bvec=((CDotFeatures *)rhs)->get_computed_dot_feature_vector(idx_b);
136 
137  return compute_helper(avec, bvec);
138 }
139 
140 float64_t CLinearARDKernel::compute_gradient_helper(SGVector<float64_t> avec,
141  SGVector<float64_t> bvec, float64_t scale, index_t index)
142 {
143  float64_t result;
144 
145  if(m_ARD_type==KT_DIAG)
146  {
147  result=2.0*avec[index]*bvec[index]*m_weights[index];
148  }
149  else
150  {
151  SGMatrix<float64_t> left(avec.vector,1,avec.vlen,false);
152  SGMatrix<float64_t> right(bvec.vector,bvec.vlen,1,false);
153  SGMatrix<float64_t> res(1,1);
154 
155  if (m_ARD_type==KT_SCALAR)
156  {
157  linalg::matrix_product(left, right, res);
158  result=2.0*res[0]*m_weights[0];
159  }
160  else if(m_ARD_type==KT_FULL)
161  {
162  int32_t row_index=index%m_weights.num_rows;
163  int32_t col_index=index/m_weights.num_rows;
164  //index is a linearized index of m_weights (column-major)
165  //m_weights is a d-by-p matrix, where p is #dimension of features
166  SGVector<float64_t> row_vec=m_weights.get_row_vector(row_index);
167  SGMatrix<float64_t> row_vec_r(row_vec.vector,row_vec.vlen,1,false);
168 
169  linalg::matrix_product(left, row_vec_r, res);
170  result=res[0]*bvec[col_index];
171 
172  SGMatrix<float64_t> row_vec_l(row_vec.vector,1,row_vec.vlen,false);
173  linalg::matrix_product(row_vec_l, right, res);
174  result+=res[0]*avec[col_index];
175 
176  }
177  else
178  {
179  SG_ERROR("Unsupported ARD type\n");
180  }
181 
182  }
183  return result*scale;
184 }
185 
186 
// NOTE(review): the leading signature line (doc line 187, presumably
// "SGMatrix<float64_t> CLinearARDKernel::get_parameter_gradient(") was lost
// in extraction — confirm against the repository copy.
// Gradient of the full num_lhs-by-num_rhs kernel matrix with respect to the
// single "weights" entry selected by `index`.
	const TParameter* param, index_t index)
{
	REQUIRE(lhs && rhs, "Features not set!\n");

	// Validate `index` against the weight-matrix shape for non-scalar types.
	int32_t row_index, col_index;
	if (m_ARD_type!=KT_SCALAR)
	{
		REQUIRE(index>=0, "Index (%d) must be non-negative\n",index);
		if (m_ARD_type==KT_DIAG)
		{
			REQUIRE(index<m_weights.num_rows, "Index (%d) must be within #dimension of weights (%d)\n",
				index, m_weights.num_rows);
		}
		else if(m_ARD_type==KT_FULL)
		{
			// Decode the column-major linearized index; used only for bounds
			// checks here (compute_gradient_helper re-derives the same split).
			row_index=index%m_weights.num_rows;
			col_index=index/m_weights.num_rows;
			REQUIRE(row_index<m_weights.num_rows,
				"Row index (%d) must be within #row of weights (%d)\n",
				row_index, m_weights.num_rows);
			REQUIRE(col_index<m_weights.num_cols,
				"Column index (%d) must be within #column of weights (%d)\n",
				col_index, m_weights.num_cols);
		}
	}
	if (!strcmp(param->m_name, "weights"))
	{
		// Entry (j,k) holds d k(x_j, x_k) / d w[index].
		SGMatrix<float64_t> derivative(num_lhs, num_rhs);

		for (index_t j=0; j<num_lhs; j++)
		{
			SGVector<float64_t> avec=((CDotFeatures *)lhs)->get_computed_dot_feature_vector(j);
			for (index_t k=0; k<num_rhs; k++)
			{
				SGVector<float64_t> bvec=((CDotFeatures *)rhs)->get_computed_dot_feature_vector(k);
				derivative(j,k)=compute_gradient_helper(avec, bvec, 1.0, index);
			}
		}
		return derivative;
	}
	else
	{
		// Only the "weights" parameter is differentiable here.
		SG_ERROR("Can't compute derivative wrt %s parameter\n", param->m_name);
		return SGMatrix<float64_t>();
	}
}
234 
// Accessor for the ARD weight matrix.
SGMatrix<float64_t> CLinearARDKernel::get_weights()
{
	// NOTE(review): the return statement (doc line 237) was lost in
	// extraction — presumably it returns (a copy of) m_weights; confirm
	// against the repository copy.
}
239 
// Validates and installs a new weight matrix; the branches below appear to
// select the ARD type from the matrix shape, but the assignment lines were
// lost in extraction (see notes).
void CLinearARDKernel::set_weights(SGMatrix<float64_t> weights)
{
	REQUIRE(weights.num_cols>0 && weights.num_rows>0,
		"Weight Matrix (%d-by-%d) must not be empty\n",
		weights.num_rows, weights.num_cols);
	if (weights.num_cols>1)
	{
		// NOTE(review): doc line 247 lost in extraction — presumably
		// m_ARD_type=KT_FULL; confirm against the repository copy.
	}
	else
	{
		if (weights.num_rows==1)
		{
			// NOTE(review): doc line 253 lost — presumably m_ARD_type=KT_SCALAR.
		}
		else
		{
			// NOTE(review): doc line 257 lost — presumably m_ARD_type=KT_DIAG.
		}
	}
	m_weights=weights;
}
262 
263 void CLinearARDKernel::set_scalar_weights(float64_t weight)
264 {
265  SGMatrix<float64_t> weights(1,1);
266  weights(0,0)=weight;
267  set_weights(weights);
268 }
269 
270 void CLinearARDKernel::set_vector_weights(SGVector<float64_t> weights)
271 {
272  SGMatrix<float64_t> weights_mat(weights.vlen,1);
273  std::copy(weights.vector, weights.vector+weights.vlen, weights_mat.matrix);
274  set_weights(weights_mat);
275 }
276 
// Convenience setter: installs a full weight matrix by delegating to
// set_weights() (which also performs shape validation).
void CLinearARDKernel::set_matrix_weights(SGMatrix<float64_t> weights)
{
	set_weights(weights);
}
281 #endif //HAVE_LINALG_LIB

SHOGUN Machine Learning Toolbox - Documentation