SHOGUN  v2.0.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
HMM.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 1999-2008 Gunnar Raetsch
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
13 #include <shogun/io/SGIO.h>
14 #include <shogun/lib/config.h>
15 #include <shogun/lib/Signal.h>
16 #include <shogun/base/Parallel.h>
19 
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <time.h>
23 #include <ctype.h>
24 
// VAL_MACRO: log of `default_value`, or of a uniform random draw in
// [MIN_RAND, MAX_RAND] when default_value==0. Relies on a variable named
// `default_value` being in scope at every expansion site.
25 #define VAL_MACRO log((default_value == 0) ? (CMath::random(MIN_RAND, MAX_RAND)) : default_value)
// Size of the learn_*/const_* index arrays in Model.
// NOTE(review): 65336 looks like a typo for 65536 (2^16) -- confirm intended size.
26 #define ARRAY_SIZE 65336
27 
28 using namespace shogun;
29 
31 // Construction/Destruction
33 
// Bit flags recording which sections have been successfully read while
// parsing a model definition file (sizes N/M, order O, transition matrix a,
// emission matrix b, start distribution p, end distribution q).
34 const int32_t CHMM::GOTN= (1<<1);
35 const int32_t CHMM::GOTM= (1<<2);
36 const int32_t CHMM::GOTO= (1<<3);
37 const int32_t CHMM::GOTa= (1<<4);
38 const int32_t CHMM::GOTb= (1<<5);
39 const int32_t CHMM::GOTp= (1<<6);
40 const int32_t CHMM::GOTq= (1<<7);
41 
// Same idea for the "learn"/"const" sections of a model-definition file
// (which parameters may be trained vs. are held fixed).
42 const int32_t CHMM::GOTlearn_a= (1<<1);
43 const int32_t CHMM::GOTlearn_b= (1<<2);
44 const int32_t CHMM::GOTlearn_p= (1<<3);
45 const int32_t CHMM::GOTlearn_q= (1<<4);
46 const int32_t CHMM::GOTconst_a= (1<<5);
47 const int32_t CHMM::GOTconst_b= (1<<6);
48 const int32_t CHMM::GOTconst_p= (1<<7);
49 const int32_t CHMM::GOTconst_q= (1<<8);
50 
// Parser state machine states for reading model files.
// (The enumerator list, original lines 53-70, is elided in this listing.)
51 enum E_STATE
52 {
71 };
72 
73 
74 #ifdef FIX_POS
// Per-position state-fixing markers: a state can be explicitly disallowed,
// explicitly allowed, or left at the default (unconstrained). Transitions
// into disallowed states are punished with DISALLOWED_PENALTY (log domain,
// effectively -infinity).
75 const char Model::FIX_DISALLOWED=0 ;
76 const char Model::FIX_ALLOWED=1 ;
77 const char Model::FIX_DEFAULT=-1 ;
78 const float64_t Model::DISALLOWED_PENALTY=CMath::ALMOST_NEG_INFTY ;
79 #endif
80 
// Constructor body (the signature, presumably Model::Model(), and the
// SG_MALLOC allocations of the const_*/learn_* arrays -- original lines
// 81, 83-90 and 93-96 -- are elided in this listing).
82 {
91 
92 
97 
98 #ifdef FIX_POS
99  fix_pos_state = SG_MALLOC(char, ARRAY_SIZE);
100 #endif
// Initialize every slot: -1 marks "unused entry" in the index arrays,
// 1.0 is the neutral value for the const_*_val probability overrides.
101  for (int32_t i=0; i<ARRAY_SIZE; i++)
102  {
103  const_a[i]=-1 ;
104  const_b[i]=-1 ;
105  const_p[i]=-1 ;
106  const_q[i]=-1 ;
107  const_a_val[i]=1.0 ;
108  const_b_val[i]=1.0 ;
109  const_p_val[i]=1.0 ;
110  const_q_val[i]=1.0 ;
111  learn_a[i]=-1 ;
112  learn_b[i]=-1 ;
113  learn_p[i]=-1 ;
114  learn_q[i]=-1 ;
115 #ifdef FIX_POS
116  fix_pos_state[i] = FIX_DEFAULT ;
117 #endif
118  } ;
119 }
120 
// Destructor body (signature elided; frees the arrays allocated in the
// constructor. The SG_FREE calls for the const_*_val arrays -- original
// lines 127-130 -- are elided in this listing).
122 {
123  SG_FREE(const_a);
124  SG_FREE(const_b);
125  SG_FREE(const_p);
126  SG_FREE(const_q);
131 
132  SG_FREE(learn_a);
133  SG_FREE(learn_b);
134  SG_FREE(learn_p);
135  SG_FREE(learn_q);
136 
137 #ifdef FIX_POS
138  SG_FREE(fix_pos_state);
139 #endif
140 
141 }
142 
// Presumably the default CHMM constructor body (signature on original
// line 143 elided): zero-size model, no Model description, not initialized.
144 {
145  N=0;
146  M=0;
147  model=NULL;
148  status=false;
149 }
150 
// Copy-style constructor (signature on original line 151 elided; takes a
// CHMM* `h`): clones dimensions and parameters from an existing HMM.
152 : CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
153 {
154 #ifdef USE_HMMPARALLEL_STRUCTURES
155  SG_INFO( "hmm is using %i separate tables\n", parallel->get_num_threads()) ;
156 #endif
157 
158  this->N=h->get_N();
159  this->M=h->get_M();
// Allocate arrays/caches first, then copy the actual parameter values.
160  status=initialize(NULL, h->get_pseudo());
161  this->copy_model(h);
163 }
164 
165 CHMM::CHMM(int32_t p_N, int32_t p_M, Model* p_model, float64_t p_PSEUDO)
166 : CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
167 {
168  this->N=p_N;
169  this->M=p_M;
170  model=NULL ;
171 
172 #ifdef USE_HMMPARALLEL_STRUCTURES
173  SG_INFO( "hmm is using %i separate tables\n", parallel->get_num_threads()) ;
174 #endif
175 
176  status=initialize(p_model, p_PSEUDO);
177 }
178 
// Constructor from word-string observations (signature start on original
// line 179 elided).
180  CStringFeatures<uint16_t>* obs, int32_t p_N, int32_t p_M,
181  float64_t p_PSEUDO)
182 : CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
183 {
184  this->N=p_N;
185  this->M=p_M;
186  model=NULL ;
187 
188 #ifdef USE_HMMPARALLEL_STRUCTURES
189  SG_INFO( "hmm is using %i separate tables\n", parallel->get_num_threads()) ;
190 #endif
191 
// NOTE(review): passes the member `model` (just set to NULL) rather than a
// parameter, and discards initialize()'s return value, so `status` is never
// set here -- unlike the sibling constructors. Confirm this is intentional.
192  initialize(model, p_PSEUDO);
193  set_observations(obs);
194 }
195 
// Construct an HMM directly from raw start (p), end (q) and transition (a)
// parameter arrays. Several member assignments (original lines 238, 240-241)
// are elided in this listing.
196 CHMM::CHMM(int32_t p_N, float64_t* p, float64_t* q, float64_t* a)
197 : CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
198 {
199  this->N=p_N;
200  this->M=0;
201  model=NULL ;
202 
// Sparse transition lists are unused in this construction path.
203  trans_list_forward = NULL ;
204  trans_list_forward_cnt = NULL ;
205  trans_list_forward_val = NULL ;
206  trans_list_backward = NULL ;
207  trans_list_backward_cnt = NULL ;
208  trans_list_len = 0 ;
209  mem_initialized = false ;
210 
211  this->transition_matrix_a=NULL;
212  this->observation_matrix_b=NULL;
213  this->initial_state_distribution_p=NULL;
214  this->end_state_distribution_q=NULL;
// NOTE(review): `this->PSEUDO= PSEUDO;` assigns the member to itself (there
// is no PSEUDO parameter), leaving it effectively uninitialized here; same
// for `this->model= model;` after model=NULL above. Confirm upstream fix.
215  this->PSEUDO= PSEUDO;
216  this->model= model;
217  this->p_observations=NULL;
218  this->reused_caches=false;
219 
220 #ifdef USE_HMMPARALLEL_STRUCTURES
221  this->alpha_cache=NULL;
222  this->beta_cache=NULL;
223 #else
224  this->alpha_cache.table=NULL;
225  this->beta_cache.table=NULL;
226  this->alpha_cache.dimension=0;
227  this->beta_cache.dimension=0;
228 #endif
229 
230  this->states_per_observation_psi=NULL ;
231  this->path=NULL;
232  arrayN1=NULL ;
233  arrayN2=NULL ;
234 
235  this->loglikelihood=false;
236  mem_initialized = true ;
237 
239  observation_matrix_b=NULL ;
242  transition_matrix_A=NULL ;
243  observation_matrix_B=NULL ;
244 
245 // this->invalidate_model();
246 }
247 
// Constructor from an explicit sparse transition list (signature start on
// original line 248 elided). `a_trans` is laid out as num_trans x 3 in
// column-major order: column 0 = destination state, column 1 = source
// state, column 2 = transition value; rows must be sorted by source state.
249  int32_t p_N, float64_t* p, float64_t* q, int32_t num_trans,
250  float64_t* a_trans)
251 : CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
252 {
253  model=NULL ;
254 
255  this->N=p_N;
256  this->M=0;
257 
258  trans_list_forward = NULL ;
259  trans_list_forward_cnt = NULL ;
260  trans_list_forward_val = NULL ;
261  trans_list_backward = NULL ;
262  trans_list_backward_cnt = NULL ;
263  trans_list_len = 0 ;
264  mem_initialized = false ;
265 
266  this->transition_matrix_a=NULL;
267  this->observation_matrix_b=NULL;
268  this->initial_state_distribution_p=NULL;
269  this->end_state_distribution_q=NULL;
// NOTE(review): self-assignment -- there is no PSEUDO parameter here, so the
// member keeps its previous (indeterminate) value. Same for `model` below.
270  this->PSEUDO= PSEUDO;
271  this->model= model;
272  this->p_observations=NULL;
273  this->reused_caches=false;
274 
275 #ifdef USE_HMMPARALLEL_STRUCTURES
276  this->alpha_cache=NULL;
277  this->beta_cache=NULL;
278 #else
279  this->alpha_cache.table=NULL;
280  this->beta_cache.table=NULL;
281  this->alpha_cache.dimension=0;
282  this->beta_cache.dimension=0;
283 #endif
284 
285  this->states_per_observation_psi=NULL ;
286  this->path=NULL;
287  arrayN1=NULL ;
288  arrayN2=NULL ;
289 
290  this->loglikelihood=false;
291  mem_initialized = true ;
292 
// Build the per-state forward adjacency lists from a_trans.
293  trans_list_forward_cnt=NULL ;
294  trans_list_len = N ;
295  trans_list_forward = SG_MALLOC(T_STATES*, N);
296  trans_list_forward_val = SG_MALLOC(float64_t*, N);
297  trans_list_forward_cnt = SG_MALLOC(T_STATES, N);
298 
// First pass: count how many transitions leave each state j (rows are
// grouped by source state, column 1), allocating exactly-sized lists.
299  int32_t start_idx=0;
300  for (int32_t j=0; j<N; j++)
301  {
302  int32_t old_start_idx=start_idx;
303 
304  while (start_idx<num_trans && a_trans[start_idx+num_trans]==j)
305  {
306  start_idx++;
307 
// Sanity-check that the source-state column is sorted ascending.
308  if (start_idx>1 && start_idx<num_trans)
309  ASSERT(a_trans[start_idx+num_trans-1]<=
310  a_trans[start_idx+num_trans]);
311  }
312 
313  if (start_idx>1 && start_idx<num_trans)
314  ASSERT(a_trans[start_idx+num_trans-1]<=
315  a_trans[start_idx+num_trans]);
316 
317  int32_t len=start_idx-old_start_idx;
318  ASSERT(len>=0);
319 
320  trans_list_forward_cnt[j] = 0 ;
321 
322  if (len>0)
323  {
324  trans_list_forward[j] = SG_MALLOC(T_STATES, len);
325  trans_list_forward_val[j] = SG_MALLOC(float64_t, len);
326  }
327  else
328  {
329  trans_list_forward[j] = NULL;
330  trans_list_forward_val[j] = NULL;
331  }
332  }
333 
// Second pass: fill the lists, using trans_list_forward_cnt as a cursor.
334  for (int32_t i=0; i<num_trans; i++)
335  {
336  int32_t from = (int32_t)a_trans[i+num_trans] ;
337  int32_t to = (int32_t)a_trans[i] ;
338  float64_t val = a_trans[i+num_trans*2] ;
339 
340  ASSERT(from>=0 && from<N);
341  ASSERT(to>=0 && to<N);
342 
343  trans_list_forward[from][trans_list_forward_cnt[from]]=to ;
344  trans_list_forward_val[from][trans_list_forward_cnt[from]]=val ;
345  trans_list_forward_cnt[from]++ ;
346  //ASSERT(trans_list_forward_cnt[from]<3000);
347  } ;
348 
// Dense matrices are not used in this construction path (two assignments,
// original lines 351-352, are elided in this listing).
349  transition_matrix_a=NULL ;
350  observation_matrix_b=NULL ;
353  transition_matrix_A=NULL ;
354  observation_matrix_B=NULL ;
355 
356 // this->invalidate_model();
357 }
358 
359 
360 CHMM::CHMM(FILE* model_file, float64_t p_PSEUDO)
361 : CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
362 {
363 #ifdef USE_HMMPARALLEL_STRUCTURES
364  SG_INFO( "hmm is using %i separate tables\n", parallel->get_num_threads()) ;
365 #endif
366 
367  status=initialize(NULL, p_PSEUDO, model_file);
368 }
369 
// Destructor body (signature CHMM::~CHMM() on original line 370 elided;
// several statements -- original lines 372, 400, 413-414, 424-425,
// 443-444 -- are also elided in this listing). Releases the sparse
// transition lists, the alpha/beta caches and the Viterbi path buffers.
371 {
373 
374  if (trans_list_forward_cnt)
375  SG_FREE(trans_list_forward_cnt);
376  if (trans_list_backward_cnt)
377  SG_FREE(trans_list_backward_cnt);
378  if (trans_list_forward)
379  {
380  for (int32_t i=0; i<trans_list_len; i++)
381  if (trans_list_forward[i])
382  SG_FREE(trans_list_forward[i]);
383  SG_FREE(trans_list_forward);
384  }
385  if (trans_list_forward_val)
386  {
387  for (int32_t i=0; i<trans_list_len; i++)
388  if (trans_list_forward_val[i])
389  SG_FREE(trans_list_forward_val[i]);
390  SG_FREE(trans_list_forward_val);
391  }
392  if (trans_list_backward)
393  {
394  for (int32_t i=0; i<trans_list_len; i++)
395  if (trans_list_backward[i])
396  SG_FREE(trans_list_backward[i]);
397  SG_FREE(trans_list_backward);
398  } ;
399 
401 
// Caches are only owned (and thus freed) when they were not borrowed from
// another CHMM instance.
402  if (!reused_caches)
403  {
404 #ifdef USE_HMMPARALLEL_STRUCTURES
405  for (int32_t i=0; i<parallel->get_num_threads(); i++)
406  {
407  SG_FREE(alpha_cache[i].table);
408  SG_FREE(beta_cache[i].table);
409  alpha_cache[i].table=NULL;
410  beta_cache[i].table=NULL;
411  }
412 
415  alpha_cache=NULL;
416  beta_cache=NULL;
417 #else // USE_HMMPARALLEL_STRUCTURES
418  SG_FREE(alpha_cache.table);
419  SG_FREE(beta_cache.table);
420  alpha_cache.table=NULL;
421  beta_cache.table=NULL;
422 #endif // USE_HMMPARALLEL_STRUCTURES
423 
426  }
427 
428 #ifdef USE_LOGSUMARRAY
429 #ifdef USE_HMMPARALLEL_STRUCTURES
430  {
431  for (int32_t i=0; i<parallel->get_num_threads(); i++)
432  SG_FREE(arrayS[i]);
433  SG_FREE(arrayS);
434  } ;
435 #else //USE_HMMPARALLEL_STRUCTURES
436  SG_FREE(arrayS);
437 #endif //USE_HMMPARALLEL_STRUCTURES
438 #endif //USE_LOGSUMARRAY
439 
440  if (!reused_caches)
441  {
442 #ifdef USE_HMMPARALLEL_STRUCTURES
445  for (int32_t i=0; i<parallel->get_num_threads(); i++)
446  SG_FREE(path[i]);
447 #endif //USE_HMMPARALLEL_STRUCTURES
448  SG_FREE(path);
449  }
450 }
451 
// Presumably CHMM::train(CFeatures* data) -- the signature (original line
// 452) and two statements (lines 461, 463) are elided in this listing.
// Validates that the supplied features are word-strings before use.
453 {
454  if (data)
455  {
456  if (data->get_feature_class() != C_STRING ||
457  data->get_feature_type() != F_WORD)
458  {
459  SG_ERROR("Expected features of class string type word\n");
460  }
462  }
464 }
465 
// Presumably CHMM::alloc_state_dependend_arrays() -- the signature and a
// number of allocation statements (original lines 466, 469-476, 483-484,
// 487-488, 509, 511-512) are elided in this listing. Allocates the dense
// N x N / N x M working matrices and returns whether all allocations and
// prerequisites succeeded.
467 {
468 
471  {
// Model parameters are kept in log space internally.
477  convert_to_log();
478  }
479 
480 #ifdef USE_HMMPARALLEL_STRUCTURES
481  for (int32_t i=0; i<parallel->get_num_threads(); i++)
482  {
485  }
486 #else //USE_HMMPARALLEL_STRUCTURES
489 #endif //USE_HMMPARALLEL_STRUCTURES
490 
491 #ifdef LOG_SUMARRAY
492 #ifdef USE_HMMPARALLEL_STRUCTURES
493  for (int32_t i=0; i<parallel->get_num_threads(); i++)
494  arrayS[i]=SG_MALLOC(float64_t, (int32_t)(this->N/2+1));
495 #else //USE_HMMPARALLEL_STRUCTURES
496  arrayS=SG_MALLOC(float64_t, (int32_t)(this->N/2+1));
497 #endif //USE_HMMPARALLEL_STRUCTURES
498 #endif //LOG_SUMARRAY
499  transition_matrix_A=SG_MALLOC(float64_t, this->N*this->N);
500  observation_matrix_B=SG_MALLOC(float64_t, this->N*this->M);
501 
502  if (p_observations)
503  {
504 #ifdef USE_HMMPARALLEL_STRUCTURES
505  if (alpha_cache[0].table!=NULL)
506 #else //USE_HMMPARALLEL_STRUCTURES
507  if (alpha_cache.table!=NULL)
508 #endif //USE_HMMPARALLEL_STRUCTURES
510  else
513  }
514 
// Cached probabilities are stale after (re)allocation.
515  this->invalidate_model();
516 
517  return ((transition_matrix_A != NULL) && (observation_matrix_B != NULL) &&
518  (transition_matrix_a != NULL) && (observation_matrix_b != NULL) &&
519  (initial_state_distribution_p != NULL) &&
520  (end_state_distribution_q != NULL));
521 }
522 
// Presumably CHMM::free_state_dependend_arrays() -- signature (original
// line 523) and several SG_FREE statements (lines 540, 542-547, 551,
// 553-555) are elided in this listing. Releases the working arrays that
// depend on the current N/M and nulls the dangling pointers.
524 {
525 #ifdef USE_HMMPARALLEL_STRUCTURES
526  for (int32_t i=0; i<parallel->get_num_threads(); i++)
527  {
528  SG_FREE(arrayN1[i]);
529  SG_FREE(arrayN2[i]);
530 
531  arrayN1[i]=NULL;
532  arrayN2[i]=NULL;
533  }
534 #else
535  SG_FREE(arrayN1);
536  SG_FREE(arrayN2);
537  arrayN1=NULL;
538  arrayN2=NULL;
539 #endif
541  {
548  } ;
549 
550  transition_matrix_A=NULL;
552  transition_matrix_a=NULL;
556 }
557 
// Shared initialization used by every constructor: zeroes all pointer
// members, allocates the alpha/beta caches and Viterbi path buffers, and
// optionally parses a model file. Several allocation statements (original
// lines 583, 606-610, 619-622, 626, 630) are elided in this listing.
//
// @param m         optional Model constraints (may be NULL)
// @param pseudo    pseudo-count stored in this->PSEUDO
// @param modelfile optional open model-definition file to parse
// @return true when file parsing (if any) and all allocations succeeded
558 bool CHMM::initialize(Model* m, float64_t pseudo, FILE* modelfile)
559 {
560  //yes optimistic
561  bool files_ok=true;
562 
563  trans_list_forward = NULL ;
564  trans_list_forward_cnt = NULL ;
565  trans_list_forward_val = NULL ;
566  trans_list_backward = NULL ;
567  trans_list_backward_cnt = NULL ;
568  trans_list_len = 0 ;
569  mem_initialized = false ;
570 
571  this->transition_matrix_a=NULL;
572  this->observation_matrix_b=NULL;
573  this->initial_state_distribution_p=NULL;
574  this->end_state_distribution_q=NULL;
575  this->PSEUDO= pseudo;
576  this->model= m;
577  this->p_observations=NULL;
578  this->reused_caches=false;
579 
580 #ifdef USE_HMMPARALLEL_STRUCTURES
// One alpha/beta cache per worker thread.
581  alpha_cache=SG_MALLOC(T_ALPHA_BETA, parallel->get_num_threads());
582  beta_cache=SG_MALLOC(T_ALPHA_BETA, parallel->get_num_threads());
584 
585  for (int32_t i=0; i<parallel->get_num_threads(); i++)
586  {
587  this->alpha_cache[i].table=NULL;
588  this->beta_cache[i].table=NULL;
589  this->alpha_cache[i].dimension=0;
590  this->beta_cache[i].dimension=0;
591  this->states_per_observation_psi[i]=NULL ;
592  }
593 
594 #else // USE_HMMPARALLEL_STRUCTURES
595  this->alpha_cache.table=NULL;
596  this->beta_cache.table=NULL;
597  this->alpha_cache.dimension=0;
598  this->beta_cache.dimension=0;
599  this->states_per_observation_psi=NULL ;
600 #endif //USE_HMMPARALLEL_STRUCTURES
601 
602  if (modelfile)
603  files_ok= files_ok && load_model(modelfile);
604 
605 #ifdef USE_HMMPARALLEL_STRUCTURES
608 
610 
611  for (int32_t i=0; i<parallel->get_num_threads(); i++)
612  this->path[i]=NULL;
613 
614 #else // USE_HMMPARALLEL_STRUCTURES
615  this->path=NULL;
616 
617 #endif //USE_HMMPARALLEL_STRUCTURES
618 
619 #ifdef USE_HMMPARALLEL_STRUCTURES
622 #endif //USE_HMMPARALLEL_STRUCTURES
623 
624 #ifdef LOG_SUMARRAY
625 #ifdef USE_HMMPARALLEL_STRUCTURES
627 #endif // USE_HMMPARALLEL_STRUCTURES
628 #endif //LOG_SUMARRAY
629 
631 
632  this->loglikelihood=false;
633  mem_initialized = true ;
634  this->invalidate_model();
635 
// Success = file parsed (when given) and every parameter matrix allocated.
636  return ((files_ok) &&
637  (transition_matrix_A != NULL) && (observation_matrix_B != NULL) &&
638  (transition_matrix_a != NULL) && (observation_matrix_b != NULL) && (initial_state_distribution_p != NULL) &&
639  (end_state_distribution_q != NULL));
640 }
641 
642 //------------------------------------------------------------------------------------//
643 
644 //forward algorithm
645 //calculates Pr[O_0,O_1, ..., O_t, q_time=S_i| lambda] for 0<= time <= T-1
646 //Pr[O|lambda] for time > T
// Forward algorithm (log domain) using the sparse forward transition lists.
// Computes log Pr[O_0..O_t, q_time=state | lambda] for 0 <= time <= T-1, or
// the full model log-probability Pr[O|lambda] when time > T-1. When an
// alpha cache exists for this dimension the whole T x N table is filled and
// memoized; otherwise two N-sized scratch rows are ping-ponged.
647 float64_t CHMM::forward_comp(int32_t time, int32_t state, int32_t dimension)
648 {
649  T_ALPHA_BETA_TABLE* alpha_new;
650  T_ALPHA_BETA_TABLE* alpha;
651  T_ALPHA_BETA_TABLE* dummy;
652  if (time<1)
653  time=0;
654 
655 
656  int32_t wanted_time=time;
657 
658  if (ALPHA_CACHE(dimension).table)
659  {
// Cached mode: compute the entire table, then answer from it.
660  alpha=&ALPHA_CACHE(dimension).table[0];
661  alpha_new=&ALPHA_CACHE(dimension).table[N];
662  time=p_observations->get_vector_length(dimension)+1;
663  }
664  else
665  {
// Uncached mode: reuse the two per-dimension scratch arrays.
666  alpha_new=(T_ALPHA_BETA_TABLE*)ARRAYN1(dimension);
667  alpha=(T_ALPHA_BETA_TABLE*)ARRAYN2(dimension);
668  }
669 
670  if (time<1)
671  return get_p(state) + get_b(state, p_observations->get_feature(dimension,0));
672  else
673  {
674  //initialization alpha_1(i)=p_i*b_i(O_1)
675  for (int32_t i=0; i<N; i++)
676  alpha[i] = get_p(i) + get_b(i, p_observations->get_feature(dimension,0)) ;
677 
678  //induction alpha_t+1(j) = (sum_i=1^N alpha_t(i)a_ij) b_j(O_t+1)
679  for (register int32_t t=1; t<time && t < p_observations->get_vector_length(dimension); t++)
680  {
681 
682  for (int32_t j=0; j<N; j++)
683  {
// Only iterate over states with a transition into j (sparse list).
684  register int32_t i, num = trans_list_forward_cnt[j] ;
685  float64_t sum=-CMath::INFTY;
686  for (i=0; i < num; i++)
687  {
688  int32_t ii = trans_list_forward[j][i] ;
689  sum = CMath::logarithmic_sum(sum, alpha[ii] + get_a(ii,j));
690  } ;
691 
692  alpha_new[j]= sum + get_b(j, p_observations->get_feature(dimension,t));
693  }
694 
695  if (!ALPHA_CACHE(dimension).table)
696  {
697  dummy=alpha;
698  alpha=alpha_new;
699  alpha_new=dummy; //switch alpha/alpha_new
700  }
701  else
702  {
// Cached mode: advance one row (N entries) into the table.
703  alpha=alpha_new;
704  alpha_new+=N; //perversely pointer arithmetic
705  }
706  }
707 
708 
709  if (time<p_observations->get_vector_length(dimension))
710  {
// Caller asked for a mid-sequence value: one more induction step for
// the requested state only.
711  register int32_t i, num=trans_list_forward_cnt[state];
712  register float64_t sum=-CMath::INFTY;
713  for (i=0; i<num; i++)
714  {
715  int32_t ii = trans_list_forward[state][i] ;
716  sum= CMath::logarithmic_sum(sum, alpha[ii] + get_a(ii, state));
717  } ;
718 
719  return sum + get_b(state, p_observations->get_feature(dimension,time));
720  }
721  else
722  {
723  // termination
724  register int32_t i ;
725  float64_t sum ;
726  sum=-CMath::INFTY;
727  for (i=0; i<N; i++) //sum over all paths
728  sum=CMath::logarithmic_sum(sum, alpha[i] + get_q(i)); //to get model probability
729 
730  if (!ALPHA_CACHE(dimension).table)
731  return sum;
732  else
733  {
// Mark the freshly-filled cache valid before answering from it.
734  ALPHA_CACHE(dimension).dimension=dimension;
735  ALPHA_CACHE(dimension).updated=true;
736  ALPHA_CACHE(dimension).sum=sum;
737 
738  if (wanted_time<p_observations->get_vector_length(dimension))
739  return ALPHA_CACHE(dimension).table[wanted_time*N+state];
740  else
741  return ALPHA_CACHE(dimension).sum;
742  }
743  }
744  }
745 }
746 
747 
748 //forward algorithm
749 //calculates Pr[O_0,O_1, ..., O_t, q_time=S_i| lambda] for 0<= time <= T-1
750 //Pr[O|lambda] for time > T
// Dense variant of the forward algorithm: identical contract to
// forward_comp() but iterates over all N predecessor states instead of the
// sparse transition lists, optionally pairing terms via logarithmic_sum_array
// when USE_LOGSUMARRAY is defined (for better numerical balance).
751 float64_t CHMM::forward_comp_old(int32_t time, int32_t state, int32_t dimension)
752 {
753  T_ALPHA_BETA_TABLE* alpha_new;
754  T_ALPHA_BETA_TABLE* alpha;
755  T_ALPHA_BETA_TABLE* dummy;
756  if (time<1)
757  time=0;
758 
759  int32_t wanted_time=time;
760 
761  if (ALPHA_CACHE(dimension).table)
762  {
763  alpha=&ALPHA_CACHE(dimension).table[0];
764  alpha_new=&ALPHA_CACHE(dimension).table[N];
765  time=p_observations->get_vector_length(dimension)+1;
766  }
767  else
768  {
769  alpha_new=(T_ALPHA_BETA_TABLE*)ARRAYN1(dimension);
770  alpha=(T_ALPHA_BETA_TABLE*)ARRAYN2(dimension);
771  }
772 
773  if (time<1)
774  return get_p(state) + get_b(state, p_observations->get_feature(dimension,0));
775  else
776  {
777  //initialization alpha_1(i)=p_i*b_i(O_1)
778  for (int32_t i=0; i<N; i++)
779  alpha[i] = get_p(i) + get_b(i, p_observations->get_feature(dimension,0)) ;
780 
781  //induction alpha_t+1(j) = (sum_i=1^N alpha_t(i)a_ij) b_j(O_t+1)
782  for (register int32_t t=1; t<time && t < p_observations->get_vector_length(dimension); t++)
783  {
784 
785  for (int32_t j=0; j<N; j++)
786  {
787  register int32_t i ;
788 #ifdef USE_LOGSUMARRAY
// Pairwise-combine terms, handling the odd leftover element when N is odd.
789  for (i=0; i<(N>>1); i++)
790  ARRAYS(dimension)[i]=CMath::logarithmic_sum(alpha[i<<1] + get_a(i<<1,j),
791  alpha[(i<<1)+1] + get_a((i<<1)+1,j));
792  if (N%2==1)
793  alpha_new[j]=get_b(j, p_observations->get_feature(dimension,t))+
794  CMath::logarithmic_sum(alpha[N-1]+get_a(N-1,j),
795  CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
796  else
797  alpha_new[j]=get_b(j, p_observations->get_feature(dimension,t))+CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
798 #else //USE_LOGSUMARRAY
799  float64_t sum=-CMath::INFTY;
800  for (i=0; i<N; i++)
801  sum= CMath::logarithmic_sum(sum, alpha[i] + get_a(i,j));
802 
803  alpha_new[j]= sum + get_b(j, p_observations->get_feature(dimension,t));
804 #endif //USE_LOGSUMARRAY
805  }
806 
807  if (!ALPHA_CACHE(dimension).table)
808  {
809  dummy=alpha;
810  alpha=alpha_new;
811  alpha_new=dummy; //switch alpha/alpha_new
812  }
813  else
814  {
815  alpha=alpha_new;
816  alpha_new+=N; //perversely pointer arithmetic
817  }
818  }
819 
820 
821  if (time<p_observations->get_vector_length(dimension))
822  {
823  register int32_t i;
824 #ifdef USE_LOGSUMARRAY
825  for (i=0; i<(N>>1); i++)
826  ARRAYS(dimension)[i]=CMath::logarithmic_sum(alpha[i<<1] + get_a(i<<1,state),
827  alpha[(i<<1)+1] + get_a((i<<1)+1,state));
828  if (N%2==1)
829  return get_b(state, p_observations->get_feature(dimension,time))+
830  CMath::logarithmic_sum(alpha[N-1]+get_a(N-1,state),
831  CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
832  else
833  return get_b(state, p_observations->get_feature(dimension,time))+CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
834 #else //USE_LOGSUMARRAY
835  register float64_t sum=-CMath::INFTY;
836  for (i=0; i<N; i++)
837  sum= CMath::logarithmic_sum(sum, alpha[i] + get_a(i, state));
838 
839  return sum + get_b(state, p_observations->get_feature(dimension,time));
840 #endif //USE_LOGSUMARRAY
841  }
842  else
843  {
844  // termination
845  register int32_t i ;
846  float64_t sum ;
847 #ifdef USE_LOGSUMARRAY
848  for (i=0; i<(N>>1); i++)
849  ARRAYS(dimension)[i]=CMath::logarithmic_sum(alpha[i<<1] + get_q(i<<1),
850  alpha[(i<<1)+1] + get_q((i<<1)+1));
851  if (N%2==1)
852  sum=CMath::logarithmic_sum(alpha[N-1]+get_q(N-1),
853  CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
854  else
855  sum=CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
856 #else //USE_LOGSUMARRAY
857  sum=-CMath::INFTY;
858  for (i=0; i<N; i++) //sum over all paths
859  sum=CMath::logarithmic_sum(sum, alpha[i] + get_q(i)); //to get model probability
860 #endif //USE_LOGSUMARRAY
861 
862  if (!ALPHA_CACHE(dimension).table)
863  return sum;
864  else
865  {
866  ALPHA_CACHE(dimension).dimension=dimension;
867  ALPHA_CACHE(dimension).updated=true;
868  ALPHA_CACHE(dimension).sum=sum;
869 
870  if (wanted_time<p_observations->get_vector_length(dimension))
871  return ALPHA_CACHE(dimension).table[wanted_time*N+state];
872  else
873  return ALPHA_CACHE(dimension).sum;
874  }
875  }
876  }
877 }
878 
879 
880 //backward algorithm
881 //calculates Pr[O_t+1,O_t+2, ..., O_T| q_time=S_i, lambda] for 0<= time <= T-1
882 //Pr[O|lambda] for time >= T
// Backward algorithm (log domain) using the sparse backward transition
// lists. Computes log Pr[O_t+1..O_T | q_time=state, lambda] for
// 0 <= time <= T-1, or Pr[O|lambda] when time < 0. When a beta cache
// exists, the table is filled from the end (t=T-1) towards t=0.
883 float64_t CHMM::backward_comp(int32_t time, int32_t state, int32_t dimension)
884 {
885  T_ALPHA_BETA_TABLE* beta_new;
886  T_ALPHA_BETA_TABLE* beta;
887  T_ALPHA_BETA_TABLE* dummy;
888  int32_t wanted_time=time;
889 
890  if (time<0)
// Side effect: make sure the forward pass / caches are up to date first.
891  forward(time, state, dimension);
892 
893  if (BETA_CACHE(dimension).table)
894  {
// Cached mode: start at the last row and walk backwards through the table.
895  beta=&BETA_CACHE(dimension).table[N*(p_observations->get_vector_length(dimension)-1)];
896  beta_new=&BETA_CACHE(dimension).table[N*(p_observations->get_vector_length(dimension)-2)];
897  time=-1;
898  }
899  else
900  {
901  beta_new=(T_ALPHA_BETA_TABLE*)ARRAYN1(dimension);
902  beta=(T_ALPHA_BETA_TABLE*)ARRAYN2(dimension);
903  }
904 
905  if (time>=p_observations->get_vector_length(dimension)-1)
906  // return 0;
907  // else if (time==p_observations->get_vector_length(dimension)-1)
908  return get_q(state);
909  else
910  {
911  //initialization beta_T(i)=q(i)
912  for (register int32_t i=0; i<N; i++)
913  beta[i]=get_q(i);
914 
915  //induction beta_t(i) = (sum_j=1^N a_ij*b_j(O_t+1)*beta_t+1(j)
916  for (register int32_t t=p_observations->get_vector_length(dimension)-1; t>time+1 && t>0; t--)
917  {
918  for (register int32_t i=0; i<N; i++)
919  {
// Only iterate over successor states reachable from i (sparse list).
920  register int32_t j, num=trans_list_backward_cnt[i] ;
921  float64_t sum=-CMath::INFTY;
922  for (j=0; j<num; j++)
923  {
924  int32_t jj = trans_list_backward[i][j] ;
925  sum= CMath::logarithmic_sum(sum, get_a(i, jj) + get_b(jj, p_observations->get_feature(dimension,t)) + beta[jj]);
926  } ;
927  beta_new[i]=sum;
928  }
929 
930  if (!BETA_CACHE(dimension).table)
931  {
932  dummy=beta;
933  beta=beta_new;
934  beta_new=dummy; //switch beta/beta_new
935  }
936  else
937  {
// Cached mode: step one row (N entries) backwards in the table.
938  beta=beta_new;
939  beta_new-=N; //perversely pointer arithmetic
940  }
941  }
942 
943  if (time>=0)
944  {
945  register int32_t j, num=trans_list_backward_cnt[state] ;
946  float64_t sum=-CMath::INFTY;
947  for (j=0; j<num; j++)
948  {
949  int32_t jj = trans_list_backward[state][j] ;
950  sum= CMath::logarithmic_sum(sum, get_a(state, jj) + get_b(jj, p_observations->get_feature(dimension,time+1))+beta[jj]);
951  } ;
952  return sum;
953  }
954  else // time<0
955  {
956  if (BETA_CACHE(dimension).table)
957  {
// Termination: fold in the start distribution and memoize the result.
958  float64_t sum=-CMath::INFTY;
959  for (register int32_t j=0; j<N; j++)
960  sum= CMath::logarithmic_sum(sum, get_p(j) + get_b(j, p_observations->get_feature(dimension,0))+beta[j]);
961  BETA_CACHE(dimension).sum=sum;
962  BETA_CACHE(dimension).dimension=dimension;
963  BETA_CACHE(dimension).updated=true;
964 
965  if (wanted_time<p_observations->get_vector_length(dimension))
966  return BETA_CACHE(dimension).table[wanted_time*N+state];
967  else
968  return BETA_CACHE(dimension).sum;
969  }
970  else
971  {
972  float64_t sum=-CMath::INFTY; // apply LOG_SUM_ARRAY -- no cache ... does not make very sense anyway...
973  for (register int32_t j=0; j<N; j++)
974  sum= CMath::logarithmic_sum(sum, get_p(j) + get_b(j, p_observations->get_feature(dimension,0))+beta[j]);
975  return sum;
976  }
977  }
978  }
979 }
980 
981 
// Dense variant of the backward algorithm (signature start, presumably
// `float64_t CHMM::backward_comp_old(` on original line 982, is elided in
// this listing). Same contract as backward_comp() but sums over all N
// successor states; USE_LOGSUMARRAY enables pairwise log-sum combination.
983  int32_t time, int32_t state, int32_t dimension)
984 {
985  T_ALPHA_BETA_TABLE* beta_new;
986  T_ALPHA_BETA_TABLE* beta;
987  T_ALPHA_BETA_TABLE* dummy;
988  int32_t wanted_time=time;
989 
990  if (time<0)
991  forward(time, state, dimension);
992 
993  if (BETA_CACHE(dimension).table)
994  {
995  beta=&BETA_CACHE(dimension).table[N*(p_observations->get_vector_length(dimension)-1)];
996  beta_new=&BETA_CACHE(dimension).table[N*(p_observations->get_vector_length(dimension)-2)];
997  time=-1;
998  }
999  else
1000  {
1001  beta_new=(T_ALPHA_BETA_TABLE*)ARRAYN1(dimension);
1002  beta=(T_ALPHA_BETA_TABLE*)ARRAYN2(dimension);
1003  }
1004 
1005  if (time>=p_observations->get_vector_length(dimension)-1)
1006  // return 0;
1007  // else if (time==p_observations->get_vector_length(dimension)-1)
1008  return get_q(state);
1009  else
1010  {
1011  //initialization beta_T(i)=q(i)
1012  for (register int32_t i=0; i<N; i++)
1013  beta[i]=get_q(i);
1014 
1015  //induction beta_t(i) = (sum_j=1^N a_ij*b_j(O_t+1)*beta_t+1(j)
1016  for (register int32_t t=p_observations->get_vector_length(dimension)-1; t>time+1 && t>0; t--)
1017  {
1018  for (register int32_t i=0; i<N; i++)
1019  {
1020  register int32_t j ;
1021 #ifdef USE_LOGSUMARRAY
1022  for (j=0; j<(N>>1); j++)
1023  ARRAYS(dimension)[j]=CMath::logarithmic_sum(
1024  get_a(i, j<<1) + get_b(j<<1, p_observations->get_feature(dimension,t)) + beta[j<<1],
1025  get_a(i, (j<<1)+1) + get_b((j<<1)+1, p_observations->get_feature(dimension,t)) + beta[(j<<1)+1]);
1026  if (N%2==1)
1027  beta_new[i]=CMath::logarithmic_sum(get_a(i, N-1) + get_b(N-1, p_observations->get_feature(dimension,t)) + beta[N-1],
1028  CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
1029  else
1030  beta_new[i]=CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
1031 #else //USE_LOGSUMARRAY
1032  float64_t sum=-CMath::INFTY;
1033  for (j=0; j<N; j++)
1034  sum= CMath::logarithmic_sum(sum, get_a(i, j) + get_b(j, p_observations->get_feature(dimension,t)) + beta[j]);
1035 
1036  beta_new[i]=sum;
1037 #endif //USE_LOGSUMARRAY
1038  }
1039 
1040  if (!BETA_CACHE(dimension).table)
1041  {
1042  dummy=beta;
1043  beta=beta_new;
1044  beta_new=dummy; //switch beta/beta_new
1045  }
1046  else
1047  {
1048  beta=beta_new;
1049  beta_new-=N; //perversely pointer arithmetic
1050  }
1051  }
1052 
1053  if (time>=0)
1054  {
1055  register int32_t j ;
1056 #ifdef USE_LOGSUMARRAY
1057  for (j=0; j<(N>>1); j++)
1058  ARRAYS(dimension)[j]=CMath::logarithmic_sum(
1059  get_a(state, j<<1) + get_b(j<<1, p_observations->get_feature(dimension,time+1)) + beta[j<<1],
1060  get_a(state, (j<<1)+1) + get_b((j<<1)+1, p_observations->get_feature(dimension,time+1)) + beta[(j<<1)+1]);
1061  if (N%2==1)
1062  return CMath::logarithmic_sum(get_a(state, N-1) + get_b(N-1, p_observations->get_feature(dimension,time+1)) + beta[N-1],
1063  CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
1064  else
1065  return CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
1066 #else //USE_LOGSUMARRAY
1067  float64_t sum=-CMath::INFTY;
1068  for (j=0; j<N; j++)
1069  sum= CMath::logarithmic_sum(sum, get_a(state, j) + get_b(j, p_observations->get_feature(dimension,time+1))+beta[j]);
1070 
1071  return sum;
1072 #endif //USE_LOGSUMARRAY
1073  }
1074  else // time<0
1075  {
1076  if (BETA_CACHE(dimension).table)
1077  {
1078 #ifdef USE_LOGSUMARRAY//AAA
1079  for (int32_t j=0; j<(N>>1); j++)
1080  ARRAYS(dimension)[j]=CMath::logarithmic_sum(get_p(j<<1) + get_b(j<<1, p_observations->get_feature(dimension,0))+beta[j<<1],
1081  get_p((j<<1)+1) + get_b((j<<1)+1, p_observations->get_feature(dimension,0))+beta[(j<<1)+1]) ;
1082  if (N%2==1)
1083  BETA_CACHE(dimension).sum=CMath::logarithmic_sum(get_p(N-1) + get_b(N-1, p_observations->get_feature(dimension,0))+beta[N-1],
1084  CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
1085  else
1086  BETA_CACHE(dimension).sum=CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
1087 #else //USE_LOGSUMARRAY
1088  float64_t sum=-CMath::INFTY;
1089  for (register int32_t j=0; j<N; j++)
1090  sum= CMath::logarithmic_sum(sum, get_p(j) + get_b(j, p_observations->get_feature(dimension,0))+beta[j]);
1091  BETA_CACHE(dimension).sum=sum;
1092 #endif //USE_LOGSUMARRAY
1093  BETA_CACHE(dimension).dimension=dimension;
1094  BETA_CACHE(dimension).updated=true;
1095 
1096  if (wanted_time<p_observations->get_vector_length(dimension))
1097  return BETA_CACHE(dimension).table[wanted_time*N+state];
1098  else
1099  return BETA_CACHE(dimension).sum;
1100  }
1101  else
1102  {
1103  float64_t sum=-CMath::INFTY; // apply LOG_SUM_ARRAY -- no cache ... does not make very sense anyway...
1104  for (register int32_t j=0; j<N; j++)
1105  sum= CMath::logarithmic_sum(sum, get_p(j) + get_b(j, p_observations->get_feature(dimension,0))+beta[j]);
1106  return sum;
1107  }
1108  }
1109  }
1110 }
1111 
1112 //calculates probability of best path through the model lambda AND path itself
1113 //using viterbi algorithm
// Viterbi algorithm: computes the log-probability of the single best state
// path for observation `dimension` and stores the path itself in
// PATH(dimension) via psi-backtracking. dimension==-1 averages best_path
// over all observation vectors (memoized in all_pat_prob). Returns -1 when
// observations or the psi table are unavailable.
1114 float64_t CHMM::best_path(int32_t dimension)
1115 {
1116  if (!p_observations)
1117  return -1;
1118 
1119  if (dimension==-1)
1120  {
1121  if (!all_path_prob_updated)
1122  {
1123  SG_INFO( "computing full viterbi likelihood\n") ;
1124  float64_t sum = 0 ;
1125  for (int32_t i=0; i<p_observations->get_num_vectors(); i++)
1126  sum+=best_path(i) ;
1127  sum /= p_observations->get_num_vectors() ;
1128  all_pat_prob=sum ;
1129  all_path_prob_updated=true ;
1130  return sum ;
1131  } else
1132  return all_pat_prob ;
1133  } ;
1134 
1135  if (!STATES_PER_OBSERVATION_PSI(dimension))
1136  return -1 ;
1137 
1138  if (dimension >= p_observations->get_num_vectors())
1139  return -1;
1140 
// Reuse the memoized result when this dimension was already solved.
1141  if (PATH_PROB_UPDATED(dimension) && dimension==PATH_PROB_DIMENSION(dimension))
1142  return pat_prob;
1143  else
1144  {
1145  register float64_t* delta= ARRAYN2(dimension);
1146  register float64_t* delta_new= ARRAYN1(dimension);
1147 
1148  { //initialization
1149  for (register int32_t i=0; i<N; i++)
1150  {
1151  delta[i]=get_p(i)+get_b(i, p_observations->get_feature(dimension,0));
1152  set_psi(0, i, 0, dimension);
1153  }
1154  }
1155 
1156 #ifdef USE_PATHDEBUG
1157  float64_t worst=-CMath::INFTY/4 ;
1158 #endif
1159  //recursion
1160  for (register int32_t t=1; t<p_observations->get_vector_length(dimension); t++)
1161  {
1162  register float64_t* dummy;
1163  register int32_t NN=N ;
1164  for (register int32_t j=0; j<NN; j++)
1165  {
// Row j of the transition matrix holds a(i,j) for all predecessors i.
1166  register float64_t * matrix_a=&transition_matrix_a[j*N] ; // sorry for that
1167  register float64_t maxj=delta[0] + matrix_a[0];
1168  register int32_t argmax=0;
1169 
// max/argmax over predecessor states (Viterbi maximization step).
1170  for (register int32_t i=1; i<NN; i++)
1171  {
1172  register float64_t temp = delta[i] + matrix_a[i];
1173 
1174  if (temp>maxj)
1175  {
1176  maxj=temp;
1177  argmax=i;
1178  }
1179  }
1180 #ifdef FIX_POS
1181  if ((!model) || (model->get_fix_pos_state(t,j,NN)!=Model::FIX_DISALLOWED))
1182 #endif
1183  delta_new[j]=maxj + get_b(j,p_observations->get_feature(dimension,t));
1184 #ifdef FIX_POS
1185  else
1186  delta_new[j]=maxj + get_b(j,p_observations->get_feature(dimension,t)) + Model::DISALLOWED_PENALTY;
1187 #endif
1188  set_psi(t, j, argmax, dimension);
1189  }
1190 
1191 #ifdef USE_PATHDEBUG
1192  float64_t best=log(0) ;
1193  for (int32_t jj=0; jj<N; jj++)
1194  if (delta_new[jj]>best)
1195  best=delta_new[jj] ;
1196 
1197  if (best<-CMath::INFTY/2)
1198  {
1199  SG_DEBUG( "worst case at %i: %e:%e\n", t, best, worst) ;
1200  worst=best ;
1201  } ;
1202 #endif
1203 
1204  dummy=delta;
1205  delta=delta_new;
1206  delta_new=dummy; //switch delta/delta_new
1207  }
1208 
1209 
1210  { //termination
// Fold in the end-state distribution and pick the best final state.
1211  register float64_t maxj=delta[0]+get_q(0);
1212  register int32_t argmax=0;
1213 
1214  for (register int32_t i=1; i<N; i++)
1215  {
1216  register float64_t temp=delta[i]+get_q(i);
1217 
1218  if (temp>maxj)
1219  {
1220  maxj=temp;
1221  argmax=i;
1222  }
1223  }
1224  pat_prob=maxj;
1225  PATH(dimension)[p_observations->get_vector_length(dimension)-1]=argmax;
1226  } ;
1227 
1228 
1229  { //state sequence backtracking
1230  for (register int32_t t=p_observations->get_vector_length(dimension)-1; t>0; t--)
1231  {
1232  PATH(dimension)[t-1]=get_psi(t, PATH(dimension)[t], dimension);
1233  }
1234  }
1235  PATH_PROB_UPDATED(dimension)=true;
1236  PATH_PROB_DIMENSION(dimension)=dimension;
1237  return pat_prob ;
1238  }
1239 }
1240 
1241 #ifndef USE_HMMPARALLEL
// NOTE(review): the signature line was lost in the doxygen extraction; from
// the body this is the non-parallel variant of CHMM::model_probability_comp(),
// which caches the total model log-probability over all observation vectors.
{
	//for faster calculation cache model probability
	mod_prob=0 ;
	for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++) //sum in log space
	// NOTE(review): the loop-body statement (presumably
	// mod_prob+=model_probability(dim);) was lost in extraction; as written,
	// the statement below would bind as the loop body. Restore from source.

	mod_prob_updated=true;
	return mod_prob;
}
1252 
1253 #else
1254 
// NOTE(review): the signature line was lost in the doxygen extraction; this
// is the USE_HMMPARALLEL variant of CHMM::model_probability_comp(). It splits
// the observation vectors across parallel->get_num_threads() worker threads
// (bw_dim_prefetch) and sums the per-thread log-likelihoods into mod_prob.
{
	pthread_t *threads=SG_MALLOC(pthread_t, parallel->get_num_threads());
	S_BW_THREAD_PARAM *params=SG_MALLOC(S_BW_THREAD_PARAM, parallel->get_num_threads());

	SG_INFO( "computing full model probablity\n");
	mod_prob=0;

	// one worker per configured thread; each gets a contiguous dim range
	for (int32_t cpu=0; cpu<parallel->get_num_threads(); cpu++)
	{
		params[cpu].hmm=this ;
		params[cpu].dim_start= p_observations->get_num_vectors()*cpu/parallel->get_num_threads();
		params[cpu].dim_stop= p_observations->get_num_vectors()*(cpu+1)/parallel->get_num_threads();
		params[cpu].p_buf=SG_MALLOC(float64_t, N);
		params[cpu].q_buf=SG_MALLOC(float64_t, N);
		params[cpu].a_buf=SG_MALLOC(float64_t, N*N);
		params[cpu].b_buf=SG_MALLOC(float64_t, N*M);
		pthread_create(&threads[cpu], NULL, bw_dim_prefetch, (void*)&params[cpu]);
	}

	// join and accumulate the per-thread partial results
	for (int32_t cpu=0; cpu<parallel->get_num_threads(); cpu++)
	{
		pthread_join(threads[cpu], NULL);
		mod_prob+=params[cpu].ret;
	}

	for (int32_t i=0; i<parallel->get_num_threads(); i++)
	{
		SG_FREE(params[i].p_buf);
		SG_FREE(params[i].q_buf);
		SG_FREE(params[i].a_buf);
		SG_FREE(params[i].b_buf);
	}

	SG_FREE(threads);
	SG_FREE(params);

	mod_prob_updated=true;
	return mod_prob;
}
1295 
1296 void* CHMM::bw_dim_prefetch(void* params)
1297 {
1298  CHMM* hmm=((S_BW_THREAD_PARAM*) params)->hmm;
1299  int32_t start=((S_BW_THREAD_PARAM*) params)->dim_start;
1300  int32_t stop=((S_BW_THREAD_PARAM*) params)->dim_stop;
1301  float64_t* p_buf=((S_BW_THREAD_PARAM*) params)->p_buf;
1302  float64_t* q_buf=((S_BW_THREAD_PARAM*) params)->q_buf;
1303  float64_t* a_buf=((S_BW_THREAD_PARAM*) params)->a_buf;
1304  float64_t* b_buf=((S_BW_THREAD_PARAM*) params)->b_buf;
1305  ((S_BW_THREAD_PARAM*)params)->ret=0;
1306 
1307  for (int32_t dim=start; dim<stop; dim++)
1308  {
1309  hmm->forward_comp(hmm->p_observations->get_vector_length(dim), hmm->N-1, dim) ;
1310  hmm->backward_comp(hmm->p_observations->get_vector_length(dim), hmm->N-1, dim) ;
1311  float64_t modprob=hmm->model_probability(dim) ;
1312  hmm->ab_buf_comp(p_buf, q_buf, a_buf, b_buf, dim) ;
1313  ((S_BW_THREAD_PARAM*)params)->ret+= modprob;
1314  }
1315  return NULL ;
1316 }
1317 
1318 void* CHMM::bw_single_dim_prefetch(void * params)
1319 {
1320  CHMM* hmm=((S_BW_THREAD_PARAM*)params)->hmm ;
1321  int32_t dim=((S_DIM_THREAD_PARAM*)params)->dim ;
1322  ((S_DIM_THREAD_PARAM*)params)->prob_sum = hmm->model_probability(dim);
1323  return NULL ;
1324 }
1325 
1326 void* CHMM::vit_dim_prefetch(void * params)
1327 {
1328  CHMM* hmm=((S_DIM_THREAD_PARAM*)params)->hmm ;
1329  int32_t dim=((S_DIM_THREAD_PARAM*)params)->dim ;
1330  ((S_DIM_THREAD_PARAM*)params)->prob_sum = hmm->best_path(dim);
1331  return NULL ;
1332 }
1333 
1334 #endif //USE_HMMPARALLEL
1335 
1336 
1337 #ifdef USE_HMMPARALLEL
1338 
1339 void CHMM::ab_buf_comp(
1340  float64_t* p_buf, float64_t* q_buf, float64_t *a_buf, float64_t* b_buf,
1341  int32_t dim)
1342 {
1343  int32_t i,j,t ;
1344  float64_t a_sum;
1345  float64_t b_sum;
1346 
1347  float64_t dimmodprob=model_probability(dim);
1348 
1349  for (i=0; i<N; i++)
1350  {
1351  //estimate initial+end state distribution numerator
1352  p_buf[i]=get_p(i)+get_b(i,p_observations->get_feature(dim,0))+backward(0,i,dim) - dimmodprob;
1353  q_buf[i]=forward(p_observations->get_vector_length(dim)-1, i, dim)+get_q(i) - dimmodprob;
1354 
1355  //estimate a
1356  for (j=0; j<N; j++)
1357  {
1358  a_sum=-CMath::INFTY;
1359 
1360  for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
1361  {
1362  a_sum= CMath::logarithmic_sum(a_sum, forward(t,i,dim)+
1363  get_a(i,j)+get_b(j,p_observations->get_feature(dim,t+1))+backward(t+1,j,dim));
1364  }
1365  a_buf[N*i+j]=a_sum-dimmodprob ;
1366  }
1367 
1368  //estimate b
1369  for (j=0; j<M; j++)
1370  {
1371  b_sum=-CMath::INFTY;
1372 
1373  for (t=0; t<p_observations->get_vector_length(dim); t++)
1374  {
1375  if (p_observations->get_feature(dim,t)==j)
1376  b_sum=CMath::logarithmic_sum(b_sum, forward(t,i,dim)+backward(t, i, dim));
1377  }
1378 
1379  b_buf[M*i+j]=b_sum-dimmodprob ;
1380  }
1381  }
1382 }
1383 
1384 //estimates new model lambda out of lambda_train using baum welch algorithm
// NOTE(review): the signature line was lost in the doxygen extraction; from
// the body (parameter `hmm`, USE_HMMPARALLEL branch) this is the threaded
// CHMM::estimate_model_baum_welch(CHMM* hmm): one Baum-Welch reestimation
// step, with per-dimension numerators computed by bw_dim_prefetch workers and
// merged here in log space.
{
	int32_t i,j,cpu;
	float64_t fullmodprob=0; //for all dims

	//clear actual model a,b,p,q are used as numerator
	// (log(PSEUDO) seeds a pseudo-count; -inf entries of the source model are
	// kept so impossible transitions/emissions stay impossible)
	for (i=0; i<N; i++)
	{
		if (hmm->get_p(i)>CMath::ALMOST_NEG_INFTY)
			set_p(i,log(PSEUDO));
		else
			set_p(i,hmm->get_p(i));
		if (hmm->get_q(i)>CMath::ALMOST_NEG_INFTY)
			set_q(i,log(PSEUDO));
		else
			set_q(i,hmm->get_q(i));

		for (j=0; j<N; j++)
			if (hmm->get_a(i,j)>CMath::ALMOST_NEG_INFTY)
				set_a(i,j, log(PSEUDO));
			else
				set_a(i,j,hmm->get_a(i,j));
		for (j=0; j<M; j++)
			if (hmm->get_b(i,j)>CMath::ALMOST_NEG_INFTY)
				set_b(i,j, log(PSEUDO));
			else
				set_b(i,j,hmm->get_b(i,j));
	}
	invalidate_model();

	int32_t num_threads = parallel->get_num_threads();

	pthread_t *threads=SG_MALLOC(pthread_t, num_threads);
	S_BW_THREAD_PARAM *params=SG_MALLOC(S_BW_THREAD_PARAM, num_threads);

	// never spawn more threads than there are observation vectors
	if (p_observations->get_num_vectors()<num_threads)
		num_threads=p_observations->get_num_vectors();

	for (cpu=0; cpu<num_threads; cpu++)
	{
		params[cpu].p_buf=SG_MALLOC(float64_t, N);
		params[cpu].q_buf=SG_MALLOC(float64_t, N);
		params[cpu].a_buf=SG_MALLOC(float64_t, N*N);
		params[cpu].b_buf=SG_MALLOC(float64_t, N*M);

		params[cpu].hmm=hmm;
		int32_t start = p_observations->get_num_vectors()*cpu / num_threads;
		int32_t stop=p_observations->get_num_vectors()*(cpu+1) / num_threads;

		if (cpu == parallel->get_num_threads()-1)
		// NOTE(review): the guarded statement (presumably
		// stop=p_observations->get_num_vectors();) was lost in extraction; as
		// written the ASSERT below would bind as the if-body. Restore from source.

		ASSERT(start<stop);
		params[cpu].dim_start=start;
		params[cpu].dim_stop=stop;

		pthread_create(&threads[cpu], NULL, bw_dim_prefetch, &params[cpu]);
	}

	// join workers and merge their numerator buffers in log space
	for (cpu=0; cpu<num_threads; cpu++)
	{
		pthread_join(threads[cpu], NULL);

		for (i=0; i<N; i++)
		{
			//estimate initial+end state distribution numerator
			set_p(i, CMath::logarithmic_sum(get_p(i), params[cpu].p_buf[i]));
			set_q(i, CMath::logarithmic_sum(get_q(i), params[cpu].q_buf[i]));

			//estimate numerator for a
			for (j=0; j<N; j++)
				set_a(i,j, CMath::logarithmic_sum(get_a(i,j), params[cpu].a_buf[N*i+j]));

			//estimate numerator for b
			for (j=0; j<M; j++)
				set_b(i,j, CMath::logarithmic_sum(get_b(i,j), params[cpu].b_buf[M*i+j]));
		}

		fullmodprob+=params[cpu].ret;

	}

	for (cpu=0; cpu<num_threads; cpu++)
	{
		SG_FREE(params[cpu].p_buf);
		SG_FREE(params[cpu].q_buf);
		SG_FREE(params[cpu].a_buf);
		SG_FREE(params[cpu].b_buf);
	}

	SG_FREE(threads);
	SG_FREE(params);

	//cache hmm model probability
	hmm->mod_prob=fullmodprob;
	hmm->mod_prob_updated=true ;

	//new model probability is unknown
	normalize();
	invalidate_model();
}
1486 
1487 #else // USE_HMMPARALLEL
1488 
1489 //estimates new model lambda out of lambda_estimate using baum welch algorithm
// NOTE(review): the signature line was lost in the doxygen extraction; from
// the body this is the serial CHMM::estimate_model_baum_welch(CHMM* estimate):
// one Baum-Welch reestimation step, restricted to the transitions listed in
// trans_list_backward (sparse transition structure).
{
	int32_t i,j,t,dim;
	float64_t a_sum, b_sum; //numerator
	float64_t dimmodprob=0; //model probability for dim
	float64_t fullmodprob=0; //for all dims

	//clear actual model a,b,p,q are used as numerator
	// (log(PSEUDO) seeds a pseudo-count; -inf entries of the source model are
	// kept so impossible transitions/emissions stay impossible)
	for (i=0; i<N; i++)
	{
		if (estimate->get_p(i)>CMath::ALMOST_NEG_INFTY)
			set_p(i,log(PSEUDO));
		else
			set_p(i,estimate->get_p(i));
		if (estimate->get_q(i)>CMath::ALMOST_NEG_INFTY)
			set_q(i,log(PSEUDO));
		else
			set_q(i,estimate->get_q(i));

		for (j=0; j<N; j++)
			if (estimate->get_a(i,j)>CMath::ALMOST_NEG_INFTY)
				set_a(i,j, log(PSEUDO));
			else
				set_a(i,j,estimate->get_a(i,j));
		for (j=0; j<M; j++)
			if (estimate->get_b(i,j)>CMath::ALMOST_NEG_INFTY)
				set_b(i,j, log(PSEUDO));
			else
				set_b(i,j,estimate->get_b(i,j));
	}
	invalidate_model();

	//change summation order to make use of alpha/beta caches
	for (dim=0; dim<p_observations->get_num_vectors(); dim++)
	{
		dimmodprob=estimate->model_probability(dim);
		fullmodprob+=dimmodprob ;

		for (i=0; i<N; i++)
		{
			//estimate initial+end state distribution numerator
			set_p(i, CMath::logarithmic_sum(get_p(i), estimate->get_p(i)+estimate->get_b(i,p_observations->get_feature(dim,0))+estimate->backward(0,i,dim) - dimmodprob));
			set_q(i, CMath::logarithmic_sum(get_q(i), estimate->forward(p_observations->get_vector_length(dim)-1, i, dim)+estimate->get_q(i) - dimmodprob ));

			// only transitions present in the backward transition list of
			// state i are reestimated
			int32_t num = trans_list_backward_cnt[i] ;

			//estimate a
			for (j=0; j<num; j++)
			{
				int32_t jj = trans_list_backward[i][j] ;
				a_sum=-CMath::INFTY;

				for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
				{
					a_sum= CMath::logarithmic_sum(a_sum, estimate->forward(t,i,dim)+
							estimate->get_a(i,jj)+estimate->get_b(jj,p_observations->get_feature(dim,t+1))+estimate->backward(t+1,jj,dim));
				}
				set_a(i,jj, CMath::logarithmic_sum(get_a(i,jj), a_sum-dimmodprob));
			}

			//estimate b
			for (j=0; j<M; j++)
			{
				b_sum=-CMath::INFTY;

				for (t=0; t<p_observations->get_vector_length(dim); t++)
				{
					if (p_observations->get_feature(dim,t)==j)
						b_sum=CMath::logarithmic_sum(b_sum, estimate->forward(t,i,dim)+estimate->backward(t, i, dim));
				}

				set_b(i,j, CMath::logarithmic_sum(get_b(i,j), b_sum-dimmodprob));
			}
		}
	}

	//cache estimate model probability
	estimate->mod_prob=fullmodprob;
	estimate->mod_prob_updated=true ;

	//new model probability is unknown
	normalize();
	invalidate_model();
}
1574 
1575 //estimates new model lambda out of lambda_estimate using baum welch algorithm
// NOTE(review): the signature line was lost in the doxygen extraction; this
// is the dense Baum-Welch variant (iterates all j in [0,N) instead of the
// sparse transition lists) — presumably
// CHMM::estimate_model_baum_welch_old(CHMM* estimate). Confirm against source.
{
	int32_t i,j,t,dim;
	float64_t a_sum, b_sum; //numerator
	float64_t dimmodprob=0; //model probability for dim
	float64_t fullmodprob=0; //for all dims

	//clear actual model a,b,p,q are used as numerator
	// (log(PSEUDO) seeds a pseudo-count; -inf entries of the source model are
	// kept so impossible transitions/emissions stay impossible)
	for (i=0; i<N; i++)
	{
		if (estimate->get_p(i)>CMath::ALMOST_NEG_INFTY)
			set_p(i,log(PSEUDO));
		else
			set_p(i,estimate->get_p(i));
		if (estimate->get_q(i)>CMath::ALMOST_NEG_INFTY)
			set_q(i,log(PSEUDO));
		else
			set_q(i,estimate->get_q(i));

		for (j=0; j<N; j++)
			if (estimate->get_a(i,j)>CMath::ALMOST_NEG_INFTY)
				set_a(i,j, log(PSEUDO));
			else
				set_a(i,j,estimate->get_a(i,j));
		for (j=0; j<M; j++)
			if (estimate->get_b(i,j)>CMath::ALMOST_NEG_INFTY)
				set_b(i,j, log(PSEUDO));
			else
				set_b(i,j,estimate->get_b(i,j));
	}
	invalidate_model();

	//change summation order to make use of alpha/beta caches
	for (dim=0; dim<p_observations->get_num_vectors(); dim++)
	{
		dimmodprob=estimate->model_probability(dim);
		fullmodprob+=dimmodprob ;

		for (i=0; i<N; i++)
		{
			//estimate initial+end state distribution numerator
			set_p(i, CMath::logarithmic_sum(get_p(i), estimate->get_p(i)+estimate->get_b(i,p_observations->get_feature(dim,0))+estimate->backward(0,i,dim) - dimmodprob));
			set_q(i, CMath::logarithmic_sum(get_q(i), estimate->forward(p_observations->get_vector_length(dim)-1, i, dim)+estimate->get_q(i) - dimmodprob ));

			//estimate a (dense: all target states j)
			for (j=0; j<N; j++)
			{
				a_sum=-CMath::INFTY;

				for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
				{
					a_sum= CMath::logarithmic_sum(a_sum, estimate->forward(t,i,dim)+
							estimate->get_a(i,j)+estimate->get_b(j,p_observations->get_feature(dim,t+1))+estimate->backward(t+1,j,dim));
				}
				set_a(i,j, CMath::logarithmic_sum(get_a(i,j), a_sum-dimmodprob));
			}

			//estimate b
			for (j=0; j<M; j++)
			{
				b_sum=-CMath::INFTY;

				for (t=0; t<p_observations->get_vector_length(dim); t++)
				{
					if (p_observations->get_feature(dim,t)==j)
						b_sum=CMath::logarithmic_sum(b_sum, estimate->forward(t,i,dim)+estimate->backward(t, i, dim));
				}

				set_b(i,j, CMath::logarithmic_sum(get_b(i,j), b_sum-dimmodprob));
			}
		}
	}

	//cache estimate model probability
	estimate->mod_prob=fullmodprob;
	estimate->mod_prob_updated=true ;

	//new model probability is unknown
	normalize();
	invalidate_model();
}
1657 #endif // USE_HMMPARALLEL
1658 
1659 //estimates new model lambda out of lambda_estimate using baum welch algorithm
1660 // optimize only p, q, a but not b
// NOTE(review): the signature line was lost in the doxygen extraction; per
// the preceding comment this variant optimizes only p, q and a but not b —
// presumably CHMM::estimate_model_baum_welch_trans(CHMM* estimate).
{
	int32_t i,j,t,dim;
	float64_t a_sum; //numerator
	float64_t dimmodprob=0; //model probability for dim
	float64_t fullmodprob=0; //for all dims

	//clear actual model a,b,p,q are used as numerator
	// (log(PSEUDO) seeds a pseudo-count; -inf entries of the source model are
	// kept so impossible transitions stay impossible)
	for (i=0; i<N; i++)
	{
		if (estimate->get_p(i)>CMath::ALMOST_NEG_INFTY)
			set_p(i,log(PSEUDO));
		else
			set_p(i,estimate->get_p(i));
		if (estimate->get_q(i)>CMath::ALMOST_NEG_INFTY)
			set_q(i,log(PSEUDO));
		else
			set_q(i,estimate->get_q(i));

		for (j=0; j<N; j++)
			if (estimate->get_a(i,j)>CMath::ALMOST_NEG_INFTY)
				set_a(i,j, log(PSEUDO));
			else
				set_a(i,j,estimate->get_a(i,j));
		// b is not reestimated in this variant — copied over verbatim
		for (j=0; j<M; j++)
			set_b(i,j,estimate->get_b(i,j));
	}
	invalidate_model();

	//change summation order to make use of alpha/beta caches
	for (dim=0; dim<p_observations->get_num_vectors(); dim++)
	{
		dimmodprob=estimate->model_probability(dim);
		fullmodprob+=dimmodprob ;

		for (i=0; i<N; i++)
		{
			//estimate initial+end state distribution numerator
			set_p(i, CMath::logarithmic_sum(get_p(i), estimate->get_p(i)+estimate->get_b(i,p_observations->get_feature(dim,0))+estimate->backward(0,i,dim) - dimmodprob));
			set_q(i, CMath::logarithmic_sum(get_q(i), estimate->forward(p_observations->get_vector_length(dim)-1, i, dim)+estimate->get_q(i) - dimmodprob ));

			// only transitions in the backward transition list of state i
			int32_t num = trans_list_backward_cnt[i] ;
			//estimate a
			for (j=0; j<num; j++)
			{
				int32_t jj = trans_list_backward[i][j] ;
				a_sum=-CMath::INFTY;

				for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
				{
					a_sum= CMath::logarithmic_sum(a_sum, estimate->forward(t,i,dim)+
							estimate->get_a(i,jj)+estimate->get_b(jj,p_observations->get_feature(dim,t+1))+estimate->backward(t+1,jj,dim));
				}
				set_a(i,jj, CMath::logarithmic_sum(get_a(i,jj), a_sum-dimmodprob));
			}
		}
	}

	//cache estimate model probability
	estimate->mod_prob=fullmodprob;
	estimate->mod_prob_updated=true ;

	//new model probability is unknown
	normalize();
	invalidate_model();
}
1727 
1728 
1729 
1730 //estimates new model lambda out of lambda_estimate using baum welch algorithm
// NOTE(review): the signature line was lost in the doxygen extraction; from
// the body (reestimates only parameters listed via model->get_learn_*) this
// is CHMM::estimate_model_baum_welch_defined(CHMM* estimate).
{
	int32_t i,j,old_i,k,t,dim;
	float64_t a_sum_num, b_sum_num; //numerator
	float64_t a_sum_denom, b_sum_denom; //denominator
	float64_t dimmodprob=-CMath::INFTY; //model probability for dim
	float64_t fullmodprob=0; //for all dims
	float64_t* A=ARRAYN1(0);
	float64_t* B=ARRAYN2(0);

	//clear actual model a,b,p,q are used as numerator
	//A,B as denominator for a,b
	for (k=0; (i=model->get_learn_p(k))!=-1; k++)
		set_p(i,log(PSEUDO));

	for (k=0; (i=model->get_learn_q(k))!=-1; k++)
		set_q(i,log(PSEUDO));

	for (k=0; (i=model->get_learn_a(k,0))!=-1; k++)
	{
		j=model->get_learn_a(k,1);
		set_a(i,j, log(PSEUDO));
	}

	for (k=0; (i=model->get_learn_b(k,0))!=-1; k++)
	{
		j=model->get_learn_b(k,1);
		set_b(i,j, log(PSEUDO));
	}

	for (i=0; i<N; i++)
	{
		A[i]=log(PSEUDO);
		B[i]=log(PSEUDO);
	}

#ifdef USE_HMMPARALLEL
	int32_t num_threads = parallel->get_num_threads();
	pthread_t *threads=SG_MALLOC(pthread_t, num_threads);
	S_DIM_THREAD_PARAM *params=SG_MALLOC(S_DIM_THREAD_PARAM, num_threads);

	if (p_observations->get_num_vectors()<num_threads)
		num_threads=p_observations->get_num_vectors();
#endif

	//change summation order to make use of alpha/beta caches
	for (dim=0; dim<p_observations->get_num_vectors(); dim++)
	{
#ifdef USE_HMMPARALLEL
		// prefetch the model probabilities for a batch of num_threads dims
		if (dim%num_threads==0)
		{
			for (i=0; i<num_threads; i++)
			{
				if (dim+i<p_observations->get_num_vectors())
				{
					params[i].hmm=estimate ;
					params[i].dim=dim+i ;
					pthread_create(&threads[i], NULL, bw_single_dim_prefetch, (void*)&params[i]) ;
				}
			}
			for (i=0; i<num_threads; i++)
			{
				if (dim+i<p_observations->get_num_vectors())
				{
					pthread_join(threads[i], NULL);
					dimmodprob = params[i].prob_sum;
				}
			}
		}
#else
		dimmodprob=estimate->model_probability(dim);
#endif // USE_HMMPARALLEL

		//and denominator
		fullmodprob+= dimmodprob;

		//estimate initial+end state distribution numerator
		for (k=0; (i=model->get_learn_p(k))!=-1; k++)
			set_p(i, CMath::logarithmic_sum(get_p(i), estimate->forward(0,i,dim)+estimate->backward(0,i,dim) - dimmodprob ) );

		for (k=0; (i=model->get_learn_q(k))!=-1; k++)
			set_q(i, CMath::logarithmic_sum(get_q(i), estimate->forward(p_observations->get_vector_length(dim)-1, i, dim)+
						estimate->backward(p_observations->get_vector_length(dim)-1, i, dim) - dimmodprob ) );

		//estimate a
		old_i=-1;
		for (k=0; (i=model->get_learn_a(k,0))!=-1; k++)
		{
			//denominator is constant for j
			//therefore calculate it first
			if (old_i!=i)
			{
				old_i=i;
				a_sum_denom=-CMath::INFTY;

				for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
					a_sum_denom= CMath::logarithmic_sum(a_sum_denom, estimate->forward(t,i,dim)+estimate->backward(t,i,dim));

				A[i]= CMath::logarithmic_sum(A[i], a_sum_denom-dimmodprob);
			}

			j=model->get_learn_a(k,1);
			a_sum_num=-CMath::INFTY;
			for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
			{
				a_sum_num= CMath::logarithmic_sum(a_sum_num, estimate->forward(t,i,dim)+
						estimate->get_a(i,j)+estimate->get_b(j,p_observations->get_feature(dim,t+1))+estimate->backward(t+1,j,dim));
			}

			set_a(i,j, CMath::logarithmic_sum(get_a(i,j), a_sum_num-dimmodprob));
		}

		//estimate b
		old_i=-1;
		for (k=0; (i=model->get_learn_b(k,0))!=-1; k++)
		{

			//denominator is constant for j
			//therefore calculate it first
			if (old_i!=i)
			{
				old_i=i;
				b_sum_denom=-CMath::INFTY;

				for (t=0; t<p_observations->get_vector_length(dim); t++)
					b_sum_denom= CMath::logarithmic_sum(b_sum_denom, estimate->forward(t,i,dim)+estimate->backward(t,i,dim));

				B[i]= CMath::logarithmic_sum(B[i], b_sum_denom-dimmodprob);
			}

			j=model->get_learn_b(k,1);
			b_sum_num=-CMath::INFTY;
			for (t=0; t<p_observations->get_vector_length(dim); t++)
			{
				if (p_observations->get_feature(dim,t)==j)
					b_sum_num=CMath::logarithmic_sum(b_sum_num, estimate->forward(t,i,dim)+estimate->backward(t, i, dim));
			}

			set_b(i,j, CMath::logarithmic_sum(get_b(i,j), b_sum_num-dimmodprob));
		}
	}
#ifdef USE_HMMPARALLEL
	SG_FREE(threads);
	SG_FREE(params);
#endif


	//calculate estimates (log-space numerator minus denominator)
	for (k=0; (i=model->get_learn_p(k))!=-1; k++)
		set_p(i, get_p(i)-log(p_observations->get_num_vectors()+N*PSEUDO) );

	for (k=0; (i=model->get_learn_q(k))!=-1; k++)
		set_q(i, get_q(i)-log(p_observations->get_num_vectors()+N*PSEUDO) );

	for (k=0; (i=model->get_learn_a(k,0))!=-1; k++)
	{
		j=model->get_learn_a(k,1);
		set_a(i,j, get_a(i,j) - A[i]);
	}

	for (k=0; (i=model->get_learn_b(k,0))!=-1; k++)
	{
		j=model->get_learn_b(k,1);
		set_b(i,j, get_b(i,j) - B[i]);
	}

	//cache estimate model probability
	estimate->mod_prob=fullmodprob;
	estimate->mod_prob_updated=true ;

	//new model probability is unknown
	normalize();
	invalidate_model();
}
1905 
1906 //estimates new model lambda out of lambda_estimate using viterbi algorithm
// NOTE(review): the signature line was lost in the doxygen extraction; from
// the body this is CHMM::estimate_model_viterbi(CHMM* estimate): one Viterbi
// training step, counting state/emission occurrences along each best path and
// renormalizing the counts into log probabilities.
{
	int32_t i,j,t;
	float64_t sum;
	float64_t* P=ARRAYN1(0);
	float64_t* Q=ARRAYN2(0);

	path_deriv_updated=false ;

	//initialize with pseudocounts
	for (i=0; i<N; i++)
	{
		for (j=0; j<N; j++)
			set_A(i,j, PSEUDO);

		for (j=0; j<M; j++)
			set_B(i,j, PSEUDO);

		P[i]=PSEUDO;
		Q[i]=PSEUDO;
	}

	float64_t allpatprob=0 ;

#ifdef USE_HMMPARALLEL
	int32_t num_threads = parallel->get_num_threads();
	pthread_t *threads=SG_MALLOC(pthread_t, num_threads);
	S_DIM_THREAD_PARAM *params=SG_MALLOC(S_DIM_THREAD_PARAM, num_threads);

	if (p_observations->get_num_vectors()<num_threads)
		num_threads=p_observations->get_num_vectors();
#endif

	for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
	{

#ifdef USE_HMMPARALLEL
		// prefetch best paths for a batch of num_threads dims
		if (dim%num_threads==0)
		{
			for (i=0; i<num_threads; i++)
			{
				if (dim+i<p_observations->get_num_vectors())
				{
					params[i].hmm=estimate ;
					params[i].dim=dim+i ;
					pthread_create(&threads[i], NULL, vit_dim_prefetch, (void*)&params[i]) ;
				}
			}
			for (i=0; i<num_threads; i++)
			{
				if (dim+i<p_observations->get_num_vectors())
				{
					pthread_join(threads[i], NULL);
					allpatprob += params[i].prob_sum;
				}
			}
		}
#else
		//using viterbi to find best path
		allpatprob += estimate->best_path(dim);
#endif // USE_HMMPARALLEL

		//counting occurences for A and B
		for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
		{
			set_A(estimate->PATH(dim)[t], estimate->PATH(dim)[t+1], get_A(estimate->PATH(dim)[t], estimate->PATH(dim)[t+1])+1);
			set_B(estimate->PATH(dim)[t], p_observations->get_feature(dim,t), get_B(estimate->PATH(dim)[t], p_observations->get_feature(dim,t))+1);
		}

		// NOTE(review): one statement was lost in extraction here (presumably
		// the set_B(...) update for the final time step); restore from source.

		P[estimate->PATH(dim)[0]]++;
		Q[estimate->PATH(dim)[p_observations->get_vector_length(dim)-1]]++;
	}

#ifdef USE_HMMPARALLEL
	SG_FREE(threads);
	SG_FREE(params);
#endif

	allpatprob/=p_observations->get_num_vectors() ;
	estimate->all_pat_prob=allpatprob ;
	estimate->all_path_prob_updated=true ;

	//converting A to probability measure a
	for (i=0; i<N; i++)
	{
		sum=0;
		for (j=0; j<N; j++)
			sum+=get_A(i,j);

		for (j=0; j<N; j++)
			set_a(i,j, log(get_A(i,j)/sum));
	}

	//converting B to probability measures b
	for (i=0; i<N; i++)
	{
		sum=0;
		for (j=0; j<M; j++)
			sum+=get_B(i,j);

		for (j=0; j<M; j++)
			set_b(i,j, log(get_B(i, j)/sum));
	}

	//converting P to probability measure p
	sum=0;
	for (i=0; i<N; i++)
		sum+=P[i];

	for (i=0; i<N; i++)
		set_p(i, log(P[i]/sum));

	//converting Q to probability measure q
	sum=0;
	for (i=0; i<N; i++)
		sum+=Q[i];

	for (i=0; i<N; i++)
		set_q(i, log(Q[i]/sum));

	//new model probability is unknown
	invalidate_model();
}
2032 
2033 // estimate parameters listed in learn_x
// NOTE(review): the signature line was lost in the doxygen extraction; from
// the body (reestimates only parameters listed via model->get_learn_*) this
// is CHMM::estimate_model_viterbi_defined(CHMM* estimate).
{
	int32_t i,j,k,t;
	float64_t sum;
	float64_t* P=ARRAYN1(0);
	float64_t* Q=ARRAYN2(0);

	path_deriv_updated=false ;

	//initialize with pseudocounts
	for (i=0; i<N; i++)
	{
		for (j=0; j<N; j++)
			set_A(i,j, PSEUDO);

		for (j=0; j<M; j++)
			set_B(i,j, PSEUDO);

		P[i]=PSEUDO;
		Q[i]=PSEUDO;
	}

#ifdef USE_HMMPARALLEL
	int32_t num_threads = parallel->get_num_threads();
	pthread_t *threads=SG_MALLOC(pthread_t, num_threads);
	S_DIM_THREAD_PARAM *params=SG_MALLOC(S_DIM_THREAD_PARAM, num_threads);
#endif

	float64_t allpatprob=0.0 ;
	for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
	{

#ifdef USE_HMMPARALLEL
		// prefetch best paths for a batch of num_threads dims
		if (dim%num_threads==0)
		{
			for (i=0; i<num_threads; i++)
			{
				if (dim+i<p_observations->get_num_vectors())
				{
					params[i].hmm=estimate ;
					params[i].dim=dim+i ;
					pthread_create(&threads[i], NULL, vit_dim_prefetch, (void*)&params[i]) ;
				}
			}
			for (i=0; i<num_threads; i++)
			{
				if (dim+i<p_observations->get_num_vectors())
				{
					pthread_join(threads[i], NULL);
					allpatprob += params[i].prob_sum;
				}
			}
		}
#else // USE_HMMPARALLEL
		//using viterbi to find best path
		allpatprob += estimate->best_path(dim);
#endif // USE_HMMPARALLEL


		//counting occurences for A and B
		for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
		{
			set_A(estimate->PATH(dim)[t], estimate->PATH(dim)[t+1], get_A(estimate->PATH(dim)[t], estimate->PATH(dim)[t+1])+1);
			set_B(estimate->PATH(dim)[t], p_observations->get_feature(dim,t), get_B(estimate->PATH(dim)[t], p_observations->get_feature(dim,t))+1);
		}

		// NOTE(review): one statement was lost in extraction here (presumably
		// the set_B(...) update for the final time step); restore from source.

		P[estimate->PATH(dim)[0]]++;
		Q[estimate->PATH(dim)[p_observations->get_vector_length(dim)-1]]++;
	}

#ifdef USE_HMMPARALLEL
	SG_FREE(threads);
	SG_FREE(params);
#endif

	//estimate->invalidate_model() ;
	//float64_t q=estimate->best_path(-1) ;

	allpatprob/=p_observations->get_num_vectors() ;
	estimate->all_pat_prob=allpatprob ;
	estimate->all_path_prob_updated=true ;


	//copy old model
	for (i=0; i<N; i++)
	{
		for (j=0; j<N; j++)
			set_a(i,j, estimate->get_a(i,j));

		for (j=0; j<M; j++)
			set_b(i,j, estimate->get_b(i,j));
	}

	//converting A to probability measure a
	// (the learn_a list is grouped by source state; each run of equal source
	// states is normalized by its accumulated sum)
	i=0;
	sum=0;
	j=model->get_learn_a(i,0);
	k=i;
	while (model->get_learn_a(i,0)!=-1 || k<i)
	{
		if (j==model->get_learn_a(i,0))
		{
			sum+=get_A(model->get_learn_a(i,0), model->get_learn_a(i,1));
			i++;
		}
		else
		{
			while (k<i)
			{
				set_a(model->get_learn_a(k,0), model->get_learn_a(k,1), log (get_A(model->get_learn_a(k,0), model->get_learn_a(k,1)) / sum));
				k++;
			}

			sum=0;
			j=model->get_learn_a(i,0);
			k=i;
		}
	}

	//converting B to probability measures b
	// (same run-grouped normalization as for a above)
	i=0;
	sum=0;
	j=model->get_learn_b(i,0);
	k=i;
	while (model->get_learn_b(i,0)!=-1 || k<i)
	{
		if (j==model->get_learn_b(i,0))
		{
			sum+=get_B(model->get_learn_b(i,0),model->get_learn_b(i,1));
			i++;
		}
		else
		{
			while (k<i)
			{
				set_b(model->get_learn_b(k,0),model->get_learn_b(k,1), log (get_B(model->get_learn_b(k,0), model->get_learn_b(k,1)) / sum));
				k++;
			}

			sum=0;
			j=model->get_learn_b(i,0);
			k=i;
		}
	}

	// normalize p over the learned start states
	i=0;
	sum=0;
	while (model->get_learn_p(i)!=-1)
	{
		sum+=P[model->get_learn_p(i)] ;
		i++ ;
	} ;
	i=0 ;
	while (model->get_learn_p(i)!=-1)
	{
		set_p(model->get_learn_p(i), log(P[model->get_learn_p(i)]/sum));
		i++ ;
	} ;

	// normalize q over the learned end states
	i=0;
	sum=0;
	while (model->get_learn_q(i)!=-1)
	{
		sum+=Q[model->get_learn_q(i)] ;
		i++ ;
	} ;
	i=0 ;
	while (model->get_learn_q(i)!=-1)
	{
		set_q(model->get_learn_q(i), log(Q[model->get_learn_q(i)]/sum));
		i++ ;
	} ;


	//new model probability is unknown
	invalidate_model();
}
2213 //------------------------------------------------------------------------------------//
2214 
2215 //to give an idea what the model looks like
// Prints the model to the log: transition matrix a, start distribution p,
// terminal distribution q, and emission distribution b (verbose mode only),
// with a per-row log-sum checksum that should be ~0 for a normalized model.
void CHMM::output_model(bool verbose)
{
	int32_t i,j;
	float64_t checksum;

	//generic info
	SG_INFO( "log(Pr[O|model])=%e, #states: %i, #observationssymbols: %i, #observations: %ix%i\n",
	// NOTE(review): the argument lines and closing of this SG_INFO call were
	// lost in the doxygen extraction; restore them from the original source.

	if (verbose)
	{
		// tranisition matrix a
		SG_INFO( "\ntransition matrix\n");
		for (i=0; i<N; i++)
		{
			// checksum = logsum of row i of a plus q(i); ~0 when normalized
			checksum= get_q(i);
			for (j=0; j<N; j++)
			{
				checksum= CMath::logarithmic_sum(checksum, get_a(i,j));

				SG_INFO( "a(%02i,%02i)=%1.4f ",i,j, (float32_t) exp(get_a(i,j)));

				if (j % 4 == 3)
					SG_PRINT( "\n");
			}
			if (fabs(checksum)>1e-5)
				SG_DEBUG( " checksum % E ******* \n",checksum);
			else
				SG_DEBUG( " checksum % E\n",checksum);
		}

		// distribution of start states p
		SG_INFO( "\ndistribution of start states\n");
		checksum=-CMath::INFTY;
		for (i=0; i<N; i++)
		{
			checksum= CMath::logarithmic_sum(checksum, get_p(i));
			SG_INFO( "p(%02i)=%1.4f ",i, (float32_t) exp(get_p(i)));
			if (i % 4 == 3)
				SG_PRINT( "\n");
		}
		if (fabs(checksum)>1e-5)
			SG_DEBUG( " checksum % E ******* \n",checksum);
		else
			SG_DEBUG( " checksum=% E\n", checksum);

		// distribution of terminal states p
		SG_INFO( "\ndistribution of terminal states\n");
		checksum=-CMath::INFTY;
		for (i=0; i<N; i++)
		{
			checksum= CMath::logarithmic_sum(checksum, get_q(i));
			SG_INFO("q(%02i)=%1.4f ",i, (float32_t) exp(get_q(i)));
			if (i % 4 == 3)
				SG_INFO("\n");
		}
		if (fabs(checksum)>1e-5)
			SG_DEBUG( " checksum % E ******* \n",checksum);
		else
			SG_DEBUG( " checksum=% E\n", checksum);

		// distribution of observations given the state b
		SG_INFO("\ndistribution of observations given the state\n");
		for (i=0; i<N; i++)
		{
			checksum=-CMath::INFTY;
			for (j=0; j<M; j++)
			{
				checksum=CMath::logarithmic_sum(checksum, get_b(i,j));
				SG_INFO("b(%02i,%02i)=%1.4f ",i,j, (float32_t) exp(get_b(i,j)));
				if (j % 4 == 3)
					SG_PRINT("\n");
			}
			if (fabs(checksum)>1e-5)
				SG_DEBUG( " checksum % E ******* \n",checksum);
			else
				SG_DEBUG( " checksum % E\n",checksum);
		}
	}
	SG_PRINT("\n");
}
2298 
2299 //to give an idea what the model looks like
// Same idea as output_model(), but restricted to the parameters that an
// attached Model definition marks as to-be-learned (the -1-terminated
// model->get_learn_a/_b index lists). Does nothing if no model definition
// is attached.
// NOTE(review): original source lines 2308-2309 (the SG_INFO argument list)
// are missing from this extraction, so the first statement below appears
// truncated here.
2300 void CHMM::output_model_defined(bool verbose)
2301 {
2302  int32_t i,j;
2303  if (!model)
2304  return ;
2305 
2306  //generic info
2307  SG_INFO("log(Pr[O|model])=%e, #states: %i, #observationssymbols: %i, #observations: %ix%i\n",
2310 
2311  if (verbose)
2312  {
2313  // transition matrix a
2314  SG_INFO("\ntransition matrix\n");
2315 
2316  //initialize a values that have to be learned
// j tracks the current source state; a newline is emitted whenever the
// learn_a list moves on to the next source state
2317  i=0;
2318  j=model->get_learn_a(i,0);
2319  while (model->get_learn_a(i,0)!=-1)
2320  {
2321  if (j!=model->get_learn_a(i,0))
2322  {
2323  j=model->get_learn_a(i,0);
2324  SG_PRINT("\n");
2325  }
2326 
2327  SG_INFO("a(%02i,%02i)=%1.4f ",model->get_learn_a(i,0), model->get_learn_a(i,1), (float32_t) exp(get_a(model->get_learn_a(i,0), model->get_learn_a(i,1))));
2328  i++;
2329  }
2330 
2331  // distribution of observations given the state b
2332  SG_INFO("\n\ndistribution of observations given the state\n");
2333  i=0;
2334  j=model->get_learn_b(i,0);
2335  while (model->get_learn_b(i,0)!=-1)
2336  {
2337  if (j!=model->get_learn_b(i,0))
2338  {
2339  j=model->get_learn_b(i,0);
2340  SG_PRINT("\n");
2341  }
2342 
2343  SG_INFO("b(%02i,%02i)=%1.4f ",model->get_learn_b(i,0),model->get_learn_b(i,1), (float32_t) exp(get_b(model->get_learn_b(i,0),model->get_learn_b(i,1))));
2344  i++;
2345  }
2346 
2347  SG_PRINT("\n");
2348  }
2349  SG_PRINT("\n");
2350 }
2351 
2352 //------------------------------------------------------------------------------------//
2353 
2354 //convert model to log probabilities
2356 {
2357  int32_t i,j;
2358 
2359  for (i=0; i<N; i++)
2360  {
2361  if (get_p(i)!=0)
2362  set_p(i, log(get_p(i)));
2363  else
2364  set_p(i, -CMath::INFTY);;
2365  }
2366 
2367  for (i=0; i<N; i++)
2368  {
2369  if (get_q(i)!=0)
2370  set_q(i, log(get_q(i)));
2371  else
2372  set_q(i, -CMath::INFTY);;
2373  }
2374 
2375  for (i=0; i<N; i++)
2376  {
2377  for (j=0; j<N; j++)
2378  {
2379  if (get_a(i,j)!=0)
2380  set_a(i,j, log(get_a(i,j)));
2381  else
2382  set_a(i,j, -CMath::INFTY);
2383  }
2384  }
2385 
2386  for (i=0; i<N; i++)
2387  {
2388  for (j=0; j<M; j++)
2389  {
2390  if (get_b(i,j)!=0)
2391  set_b(i,j, log(get_b(i,j)));
2392  else
2393  set_b(i,j, -CMath::INFTY);
2394  }
2395  }
2396  loglikelihood=true;
2397 
2398  invalidate_model();
2399 }
2400 
2401 //init model with random values
2403 {
2404  const float64_t MIN_RAND=23e-3;
2405 
2406  float64_t sum;
2407  int32_t i,j;
2408 
2409  //initialize a with random values
2410  for (i=0; i<N; i++)
2411  {
2412  sum=0;
2413  for (j=0; j<N; j++)
2414  {
2415  set_a(i,j, CMath::random(MIN_RAND, 1.0));
2416 
2417  sum+=get_a(i,j);
2418  }
2419 
2420  for (j=0; j<N; j++)
2421  set_a(i,j, get_a(i,j)/sum);
2422  }
2423 
2424  //initialize pi with random values
2425  sum=0;
2426  for (i=0; i<N; i++)
2427  {
2428  set_p(i, CMath::random(MIN_RAND, 1.0));
2429 
2430  sum+=get_p(i);
2431  }
2432 
2433  for (i=0; i<N; i++)
2434  set_p(i, get_p(i)/sum);
2435 
2436  //initialize q with random values
2437  sum=0;
2438  for (i=0; i<N; i++)
2439  {
2440  set_q(i, CMath::random(MIN_RAND, 1.0));
2441 
2442  sum+=get_q(i);
2443  }
2444 
2445  for (i=0; i<N; i++)
2446  set_q(i, get_q(i)/sum);
2447 
2448  //initialize b with random values
2449  for (i=0; i<N; i++)
2450  {
2451  sum=0;
2452  for (j=0; j<M; j++)
2453  {
2454  set_b(i,j, CMath::random(MIN_RAND, 1.0));
2455 
2456  sum+=get_b(i,j);
2457  }
2458 
2459  for (j=0; j<M; j++)
2460  set_b(i,j, get_b(i,j)/sum);
2461  }
2462 
2463  //initialize pat/mod_prob as not calculated
2464  invalidate_model();
2465 }
2466 
2467 //init model according to const_x
// Initializes the model according to an attached Model definition: all
// parameters are zeroed, the to-be-learned entries (learn_a/b/p/q) receive
// random values normalized per group, and the const_* entries receive their
// fixed values. All caches are invalidated at the end.
// NOTE(review): original source lines 2554, 2562, 2570 and 2582 (the
// statements that apply the const_a/b/p/q values) are missing from this
// extraction — the four "set consts" while-loops below look empty here but
// are not empty in the real file.
2469 {
2470  int32_t i,j,k,r;
2471  float64_t sum;
2472  const float64_t MIN_RAND=23e-3;
2473 
2474  //initialize a with zeros
2475  for (i=0; i<N; i++)
2476  for (j=0; j<N; j++)
2477  set_a(i,j, 0);
2478 
2479  //initialize p with zeros
2480  for (i=0; i<N; i++)
2481  set_p(i, 0);
2482 
2483  //initialize q with zeros
2484  for (i=0; i<N; i++)
2485  set_q(i, 0);
2486 
2487  //initialize b with zeros
2488  for (i=0; i<N; i++)
2489  for (j=0; j<M; j++)
2490  set_b(i,j, 0);
2491 
2492 
2493  //initialize a values that have to be learned
// R holds one random draw per target state; entries belonging to the same
// source state (same learn_a(i,0)) form one group that is normalized by its
// running sum before moving on to the next group
2494  float64_t *R=SG_MALLOC(float64_t, N);
2495  for (r=0; r<N; r++) R[r]=CMath::random(MIN_RAND,1.0);
2496  i=0; sum=0; k=i;
2497  j=model->get_learn_a(i,0);
2498  while (model->get_learn_a(i,0)!=-1 || k<i)
2499  {
2500  if (j==model->get_learn_a(i,0))
2501  {
2502  sum+=R[model->get_learn_a(i,1)] ;
2503  i++;
2504  }
2505  else
2506  {
2507  while (k<i)
2508  {
2509  set_a(model->get_learn_a(k,0), model->get_learn_a(k,1),
2510  R[model->get_learn_a(k,1)]/sum);
2511  k++;
2512  }
2513  j=model->get_learn_a(i,0);
2514  k=i;
2515  sum=0;
2516  for (r=0; r<N; r++) R[r]=CMath::random(MIN_RAND,1.0);
2517  }
2518  }
2519  SG_FREE(R); R=NULL ;
2520 
2521  //initialize b values that have to be learned
// same grouping scheme as for a, with M random draws per group
2522  R=SG_MALLOC(float64_t, M);
2523  for (r=0; r<M; r++) R[r]=CMath::random(MIN_RAND,1.0);
2524  i=0; sum=0; k=0 ;
2525  j=model->get_learn_b(i,0);
2526  while (model->get_learn_b(i,0)!=-1 || k<i)
2527  {
2528  if (j==model->get_learn_b(i,0))
2529  {
2530  sum+=R[model->get_learn_b(i,1)] ;
2531  i++;
2532  }
2533  else
2534  {
2535  while (k<i)
2536  {
2537  set_b(model->get_learn_b(k,0),model->get_learn_b(k,1),
2538  R[model->get_learn_b(k,1)]/sum);
2539  k++;
2540  }
2541 
2542  j=model->get_learn_b(i,0);
2543  k=i;
2544  sum=0;
2545  for (r=0; r<M; r++) R[r]=CMath::random(MIN_RAND,1.0);
2546  }
2547  }
2548  SG_FREE(R); R=NULL ;
2549 
2550  //set consts into a
2551  i=0;
2552  while (model->get_const_a(i,0) != -1)
2553  {
// NOTE(review): original line 2554 (the set_a(...) call) missing here
2555  i++;
2556  }
2557 
2558  //set consts into b
2559  i=0;
2560  while (model->get_const_b(i,0) != -1)
2561  {
// NOTE(review): original line 2562 (the set_b(...) call) missing here
2563  i++;
2564  }
2565 
2566  //set consts into p
2567  i=0;
2568  while (model->get_const_p(i) != -1)
2569  {
// NOTE(review): original line 2570 (the set_p(...) call) missing here
2571  i++;
2572  }
2573 
2574  //initialize q with zeros
2575  for (i=0; i<N; i++)
2576  set_q(i, 0.0);
2577 
2578  //set consts into q
2579  i=0;
2580  while (model->get_const_q(i) != -1)
2581  {
// NOTE(review): original line 2582 (the set_q(...) call) missing here
2583  i++;
2584  }
2585 
2586  // init p
// draw random values for the learned start states and normalize them
2587  i=0;
2588  sum=0;
2589  while (model->get_learn_p(i)!=-1)
2590  {
2591  set_p(model->get_learn_p(i),CMath::random(MIN_RAND,1.0)) ;
2592  sum+=get_p(model->get_learn_p(i)) ;
2593  i++ ;
2594  } ;
2595  i=0 ;
2596  while (model->get_learn_p(i)!=-1)
2597  {
2598  set_p(model->get_learn_p(i), get_p(model->get_learn_p(i))/sum);
2599  i++ ;
2600  } ;
2601 
2602  // initialize q
// draw random values for the learned terminal states and normalize them
2603  i=0;
2604  sum=0;
2605  while (model->get_learn_q(i)!=-1)
2606  {
2607  set_q(model->get_learn_q(i),CMath::random(MIN_RAND,1.0)) ;
2608  sum+=get_q(model->get_learn_q(i)) ;
2609  i++ ;
2610  } ;
2611  i=0 ;
2612  while (model->get_learn_q(i)!=-1)
2613  {
2614  set_q(model->get_learn_q(i), get_q(model->get_learn_q(i))/sum);
2615  i++ ;
2616  } ;
2617 
2618  //initialize pat/mod_prob as not calculated
2619  invalidate_model();
2620 }
2621 
2623 {
2624  int32_t i,j;
2625  for (i=0; i<N; i++)
2626  {
2627  set_p(i, log(PSEUDO));
2628  set_q(i, log(PSEUDO));
2629 
2630  for (j=0; j<N; j++)
2631  set_a(i,j, log(PSEUDO));
2632 
2633  for (j=0; j<M; j++)
2634  set_b(i,j, log(PSEUDO));
2635  }
2636 }
2637 
2639 {
2640  int32_t i,j,k;
2641 
2642  for (i=0; (j=model->get_learn_p(i))!=-1; i++)
2643  set_p(j, log(PSEUDO));
2644 
2645  for (i=0; (j=model->get_learn_q(i))!=-1; i++)
2646  set_q(j, log(PSEUDO));
2647 
2648  for (i=0; (j=model->get_learn_a(i,0))!=-1; i++)
2649  {
2650  k=model->get_learn_a(i,1); // catch (j,k) as indizes to be learned
2651  set_a(j,k, log(PSEUDO));
2652  }
2653 
2654  for (i=0; (j=model->get_learn_b(i,0))!=-1; i++)
2655  {
2656  k=model->get_learn_b(i,1); // catch (j,k) as indizes to be learned
2657  set_b(j,k, log(PSEUDO));
2658  }
2659 }
2660 
2662 {
2663  int32_t i,j;
2664  for (i=0; i<N; i++)
2665  {
2666  set_p(i, l->get_p(i));
2667  set_q(i, l->get_q(i));
2668 
2669  for (j=0; j<N; j++)
2670  set_a(i,j, l->get_a(i,j));
2671 
2672  for (j=0; j<M; j++)
2673  set_b(i,j, l->get_b(i,j));
2674  }
2675 }
2676 
// Marks every cached quantity (model probability, path probabilities,
// derivatives, alpha/beta caches) as stale, and — once memory is
// initialized — rebuilds the forward/backward transition adjacency lists:
// for each state the set of predecessor/successor states whose log
// transition probability exceeds ALMOST_NEG_INFTY.
2678 {
2679  //initialize pat/mod_prob/alpha/beta cache as not calculated
2680  this->mod_prob=0.0;
2681  this->mod_prob_updated=false;
2682 
2683  if (mem_initialized)
2684  {
// free the old adjacency lists (sized by the previous trans_list_len)
// before rebuilding them for the current N
2685  if (trans_list_forward_cnt)
2686  SG_FREE(trans_list_forward_cnt);
2687  trans_list_forward_cnt=NULL ;
2688  if (trans_list_backward_cnt)
2689  SG_FREE(trans_list_backward_cnt);
2690  trans_list_backward_cnt=NULL ;
2691  if (trans_list_forward)
2692  {
2693  for (int32_t i=0; i<trans_list_len; i++)
2694  if (trans_list_forward[i])
2695  SG_FREE(trans_list_forward[i]);
2696  SG_FREE(trans_list_forward);
2697  trans_list_forward=NULL ;
2698  }
2699  if (trans_list_backward)
2700  {
2701  for (int32_t i=0; i<trans_list_len; i++)
2702  if (trans_list_backward[i])
2703  SG_FREE(trans_list_backward[i]);
2704  SG_FREE(trans_list_backward);
2705  trans_list_backward = NULL ;
2706  } ;
2707 
2708  trans_list_len = N ;
2709  trans_list_forward = SG_MALLOC(T_STATES*, N);
2710  trans_list_forward_cnt = SG_MALLOC(T_STATES, N);
2711 
// forward list: for target state j, collect all source states i with a
// non-negligible transition a(i,j)
2712  for (int32_t j=0; j<N; j++)
2713  {
2714  trans_list_forward_cnt[j]= 0 ;
2715  trans_list_forward[j]= SG_MALLOC(T_STATES, N);
2716  for (int32_t i=0; i<N; i++)
2717  if (get_a(i,j)>CMath::ALMOST_NEG_INFTY)
2718  {
2719  trans_list_forward[j][trans_list_forward_cnt[j]]=i ;
2720  trans_list_forward_cnt[j]++ ;
2721  }
2722  } ;
2723 
2724  trans_list_backward = SG_MALLOC(T_STATES*, N);
2725  trans_list_backward_cnt = SG_MALLOC(T_STATES, N);
2726 
// backward list: for source state i, collect all target states j with a
// non-negligible transition a(i,j)
2727  for (int32_t i=0; i<N; i++)
2728  {
2729  trans_list_backward_cnt[i]= 0 ;
2730  trans_list_backward[i]= SG_MALLOC(T_STATES, N);
2731  for (int32_t j=0; j<N; j++)
2732  if (get_a(i,j)>CMath::ALMOST_NEG_INFTY)
2733  {
2734  trans_list_backward[i][trans_list_backward_cnt[i]]=j ;
2735  trans_list_backward_cnt[i]++ ;
2736  }
2737  } ;
2738  } ;
2739  this->all_pat_prob=0.0;
2740  this->pat_prob=0.0;
2741  this->path_deriv_updated=false ;
2742  this->path_deriv_dimension=-1 ;
2743  this->all_path_prob_updated=false;
2744 
// with parallel structures there is one alpha/beta cache and path flag set
// per worker thread; otherwise a single set
2745 #ifdef USE_HMMPARALLEL_STRUCTURES
2746  {
2747  for (int32_t i=0; i<parallel->get_num_threads(); i++)
2748  {
2749  this->alpha_cache[i].updated=false;
2750  this->beta_cache[i].updated=false;
2751  path_prob_updated[i]=false ;
2752  path_prob_dimension[i]=-1 ;
2753  } ;
2754  }
2755 #else // USE_HMMPARALLEL_STRUCTURES
2756  this->alpha_cache.updated=false;
2757  this->beta_cache.updated=false;
2758  this->path_prob_dimension=-1;
2759  this->path_prob_updated=false;
2760 
2761 #endif // USE_HMMPARALLEL_STRUCTURES
2762 }
2763 
2764 void CHMM::open_bracket(FILE* file)
2765 {
2766  int32_t value;
2767  while (((value=fgetc(file)) != EOF) && (value!='[')) //skip possible spaces and end if '[' occurs
2768  {
2769  if (value=='\n')
2770  line++;
2771  }
2772 
2773  if (value==EOF)
2774  error(line, "expected \"[\" in input file");
2775 
2776  while (((value=fgetc(file)) != EOF) && (isspace(value))) //skip possible spaces
2777  {
2778  if (value=='\n')
2779  line++;
2780  }
2781 
2782  ungetc(value, file);
2783 }
2784 
2785 void CHMM::close_bracket(FILE* file)
2786 {
2787  int32_t value;
2788  while (((value=fgetc(file)) != EOF) && (value!=']')) //skip possible spaces and end if ']' occurs
2789  {
2790  if (value=='\n')
2791  line++;
2792  }
2793 
2794  if (value==EOF)
2795  error(line, "expected \"]\" in input file");
2796 }
2797 
2798 bool CHMM::comma_or_space(FILE* file)
2799 {
2800  int32_t value;
2801  while (((value=fgetc(file)) != EOF) && (value!=',') && (value!=';') && (value!=']')) //skip possible spaces and end if ',' or ';' occurs
2802  {
2803  if (value=='\n')
2804  line++;
2805  }
2806  if (value==']')
2807  {
2808  ungetc(value, file);
2809  SG_ERROR( "found ']' instead of ';' or ','\n") ;
2810  return false ;
2811  } ;
2812 
2813  if (value==EOF)
2814  error(line, "expected \";\" or \",\" in input file");
2815 
2816  while (((value=fgetc(file)) != EOF) && (isspace(value))) //skip possible spaces
2817  {
2818  if (value=='\n')
2819  line++;
2820  }
2821  ungetc(value, file);
2822  return true ;
2823 }
2824 
2825 bool CHMM::get_numbuffer(FILE* file, char* buffer, int32_t length)
2826 {
2827  signed char value;
2828 
2829  while (((value=fgetc(file)) != EOF) &&
2830  !isdigit(value) && (value!='A')
2831  && (value!='C') && (value!='G') && (value!='T')
2832  && (value!='N') && (value!='n')
2833  && (value!='.') && (value!='-') && (value!='e') && (value!=']')) //skip possible spaces+crap
2834  {
2835  if (value=='\n')
2836  line++;
2837  }
2838  if (value==']')
2839  {
2840  ungetc(value,file) ;
2841  return false ;
2842  } ;
2843  if (value!=EOF)
2844  {
2845  int32_t i=0;
2846  switch (value)
2847  {
2848  case 'A':
2849  value='0' +CAlphabet::B_A;
2850  break;
2851  case 'C':
2852  value='0' +CAlphabet::B_C;
2853  break;
2854  case 'G':
2855  value='0' +CAlphabet::B_G;
2856  break;
2857  case 'T':
2858  value='0' +CAlphabet::B_T;
2859  break;
2860  };
2861 
2862  buffer[i++]=value;
2863 
2864  while (((value=fgetc(file)) != EOF) &&
2865  (isdigit(value) || (value=='.') || (value=='-') || (value=='e')
2866  || (value=='A') || (value=='C') || (value=='G')|| (value=='T')
2867  || (value=='N') || (value=='n')) && (i<length))
2868  {
2869  switch (value)
2870  {
2871  case 'A':
2872  value='0' +CAlphabet::B_A;
2873  break;
2874  case 'C':
2875  value='0' +CAlphabet::B_C;
2876  break;
2877  case 'G':
2878  value='0' +CAlphabet::B_G;
2879  break;
2880  case 'T':
2881  value='0' +CAlphabet::B_T;
2882  break;
2883  case '1': case '2': case'3': case '4': case'5':
2884  case '6': case '7': case'8': case '9': case '0': break ;
2885  case '.': case 'e': case '-': break ;
2886  default:
2887  SG_ERROR( "found crap: %i %c (pos:%li)\n",i,value,ftell(file)) ;
2888  };
2889  buffer[i++]=value;
2890  }
2891  ungetc(value, file);
2892  buffer[i]='\0';
2893 
2894  return (i<=length) && (i>0);
2895  }
2896  return false;
2897 }
2898 
2899 /*
2900  -format specs: model_file (model.hmm)
2901  % HMM - specification
2902  % N - number of states
2903  % M - number of observation_tokens
2904  % a is state_transition_matrix
2905  % size(a)= [N,N]
2906  %
2907  % b is observation_per_state_matrix
2908  % size(b)= [N,M]
2909  %
2910  % p is initial distribution
2911  % size(p)= [1, N]
2912 
2913  N=<int32_t>;
2914  M=<int32_t>;
2915 
2916  p=[<DOUBLE>,<DOUBLE>...<DOUBLE>];
2917  q=[<DOUBLE>,<DOUBLE>...<DOUBLE>];
2918 
2919  a=[ [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2920  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2921  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2922  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2923  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2924  ];
2925 
2926  b=[ [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2927  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2928  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2929  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2930  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2931  ];
2932  */
2933 
// Parses an HMM model file (format documented in the block comment above)
// with a hand-written state machine: N and M must appear before the p, q,
// a and b arrays are accepted; '%' starts a comment. Returns true only if
// every expected parameter was seen. The loaded values are NOT normalized
// (see the SG_WARNING at the end).
// NOTE(review): original source lines 3042, 3076, 3110, 3133 and 3169 are
// missing from this extraction — several statements below appear truncated.
2934 bool CHMM::load_model(FILE* file)
2935 {
2936  int32_t received_params=0; //a,b,p,N,M,O
2937 
2938  bool result=false;
2939  E_STATE state=INITIAL;
2940  char buffer[1024];
2941 
2942  line=1;
2943  int32_t i,j;
2944 
2945  if (file)
2946  {
2947  while (state!=END)
2948  {
2949  int32_t value=fgetc(file);
2950 
2951  if (value=='\n')
2952  line++;
2953  if (value==EOF)
2954  state=END;
2955 
2956  switch (state)
2957  {
2958  case INITIAL: // in the initial state only N,M initialisations and comments are allowed
2959  if (value=='N')
2960  {
// NOTE(review): error text says "p double defined" for N (and below for M)
// — looks like a copy-paste slip in the message; message left as-is here.
2961  if (received_params & GOTN)
2962  error(line, "in model file: \"p double defined\"");
2963  else
2964  state=GET_N;
2965  }
2966  else if (value=='M')
2967  {
2968  if (received_params & GOTM)
2969  error(line, "in model file: \"p double defined\"");
2970  else
2971  state=GET_M;
2972  }
2973  else if (value=='%')
2974  {
2975  state=COMMENT;
2976  }
// NOTE(review): no break here — INITIAL deliberately(?) falls through into
// ARRAYs; confirm this is intended before touching it.
2977  case ARRAYs: // when n,m, order are known p,a,b arrays are allowed to be read
2978  if (value=='p')
2979  {
2980  if (received_params & GOTp)
2981  error(line, "in model file: \"p double defined\"");
2982  else
2983  state=GET_p;
2984  }
// NOTE(review): plain 'if' here breaks the else-if chain ('q' is checked
// even when value=='p' matched) — harmless as written, but suspicious.
2985  if (value=='q')
2986  {
2987  if (received_params & GOTq)
2988  error(line, "in model file: \"q double defined\"");
2989  else
2990  state=GET_q;
2991  }
2992  else if (value=='a')
2993  {
2994  if (received_params & GOTa)
2995  error(line, "in model file: \"a double defined\"");
2996  else
2997  state=GET_a;
2998  }
2999  else if (value=='b')
3000  {
3001  if (received_params & GOTb)
3002  error(line, "in model file: \"b double defined\"");
3003  else
3004  state=GET_b;
3005  }
3006  else if (value=='%')
3007  {
3008  state=COMMENT;
3009  }
3010  break;
3011  case GET_N:
3012  if (value=='=')
3013  {
3014  if (get_numbuffer(file, buffer, 4)) //get num
3015  {
3016  this->N= atoi(buffer);
3017  received_params|=GOTN;
3018  state= (received_params == (GOTN | GOTM | GOTO)) ? ARRAYs : INITIAL;
3019  }
3020  else
3021  state=END; //end if error
3022  }
3023  break;
3024  case GET_M:
3025  if (value=='=')
3026  {
3027  if (get_numbuffer(file, buffer, 4)) //get num
3028  {
3029  this->M= atoi(buffer);
3030  received_params|=GOTM;
3031  state= (received_params == (GOTN | GOTM | GOTO)) ? ARRAYs : INITIAL;
3032  }
3033  else
3034  state=END; //end if error
3035  }
3036  break;
3037  case GET_a:
3038  if (value=='=')
3039  {
3040  float64_t f;
3041 
// NOTE(review): original line 3042 missing here (likely an allocation call)
3043  open_bracket(file);
3044  for (i=0; i<this->N; i++)
3045  {
3046  open_bracket(file);
3047 
3048  for (j=0; j<this->N ; j++)
3049  {
3050 
3051  if (fscanf( file, "%le", &f ) != 1)
3052  error(line, "float64_t expected");
3053  else
3054  set_a(i,j, f);
3055 
3056  if (j<this->N-1)
3057  comma_or_space(file);
3058  else
3059  close_bracket(file);
3060  }
3061 
3062  if (i<this->N-1)
3063  comma_or_space(file);
3064  else
3065  close_bracket(file);
3066  }
3067  received_params|=GOTa;
3068  }
3069  state= (received_params == (GOTa | GOTb | GOTp | GOTq)) ? END : ARRAYs;
3070  break;
3071  case GET_b:
3072  if (value=='=')
3073  {
3074  float64_t f;
3075 
// NOTE(review): original line 3076 missing here (likely an allocation call)
3077  open_bracket(file);
3078  for (i=0; i<this->N; i++)
3079  {
3080  open_bracket(file);
3081 
3082  for (j=0; j<this->M ; j++)
3083  {
3084 
3085  if (fscanf( file, "%le", &f ) != 1)
3086  error(line, "float64_t expected");
3087  else
3088  set_b(i,j, f);
3089 
3090  if (j<this->M-1)
3091  comma_or_space(file);
3092  else
3093  close_bracket(file);
3094  }
3095 
3096  if (i<this->N-1)
3097  comma_or_space(file);
3098  else
3099  close_bracket(file);
3100  }
3101  received_params|=GOTb;
3102  }
3103  state= ((received_params & (GOTa | GOTb | GOTp | GOTq)) == (GOTa | GOTb | GOTp | GOTq)) ? END : ARRAYs;
3104  break;
3105  case GET_p:
3106  if (value=='=')
3107  {
3108  float64_t f;
3109 
// NOTE(review): original line 3110 missing here (likely an allocation call)
3111  open_bracket(file);
3112  for (i=0; i<this->N ; i++)
3113  {
3114  if (fscanf( file, "%le", &f ) != 1)
3115  error(line, "float64_t expected");
3116  else
3117  set_p(i, f);
3118 
3119  if (i<this->N-1)
3120  comma_or_space(file);
3121  else
3122  close_bracket(file);
3123  }
3124  received_params|=GOTp;
3125  }
3126  state= (received_params == (GOTa | GOTb | GOTp | GOTq)) ? END : ARRAYs;
3127  break;
3128  case GET_q:
3129  if (value=='=')
3130  {
3131  float64_t f;
3132 
// NOTE(review): original line 3133 missing here (likely an allocation call)
3134  open_bracket(file);
3135  for (i=0; i<this->N ; i++)
3136  {
3137  if (fscanf( file, "%le", &f ) != 1)
3138  error(line, "float64_t expected");
3139  else
3140  set_q(i, f);
3141 
3142  if (i<this->N-1)
3143  comma_or_space(file);
3144  else
3145  close_bracket(file);
3146  }
3147  received_params|=GOTq;
3148  }
3149  state= (received_params == (GOTa | GOTb | GOTp | GOTq)) ? END : ARRAYs;
3150  break;
3151  case COMMENT:
3152  if (value==EOF)
3153  state=END;
3154  else if (value=='\n')
3155  {
3156  line++;
3157  state=INITIAL;
3158  }
3159  break;
3160 
3161  default:
3162  break;
3163  }
3164  }
3165  result= (received_params== (GOTa | GOTb | GOTp | GOTq | GOTN | GOTM | GOTO));
3166  }
3167 
3168  SG_WARNING( "not normalizing anymore, call normalize_hmm to make sure the hmm is valid!!\n");
// NOTE(review): original line 3169 missing here
3170  return result;
3171 }
3172 
3173 /*
3174  -format specs: train_file (train.trn)
3175  % HMM-TRAIN - specification
3176  % learn_a - elements in state_transition_matrix to be learned
3177  % learn_b - elements in observation_per_state_matrix to be learned
3178  % note: each line stands for
3179  % <state>, <observation(0)>, observation(1)...observation(NOW)>
3180  % learn_p - elements in initial distribution to be learned
3181  % learn_q - elements in the end-state distribution to be learned
3182  %
3183  % const_x - specifies initial values of elements
3184  % rest is assumed to be 0.0
3185  %
3186  % NOTE: IMPLICIT DEFINES:
3187  % #define A 0
3188  % #define C 1
3189  % #define G 2
3190  % #define T 3
3191  %
3192 
3193  learn_a=[ [<int32_t>,<int32_t>];
3194  [<int32_t>,<int32_t>];
3195  [<int32_t>,<int32_t>];
3196  ........
3197  [<int32_t>,<int32_t>];
3198  [-1,-1];
3199  ];
3200 
3201  learn_b=[ [<int32_t>,<int32_t>];
3202  [<int32_t>,<int32_t>];
3203  [<int32_t>,<int32_t>];
3204  ........
3205  [<int32_t>,<int32_t>];
3206  [-1,-1];
3207  ];
3208 
3209  learn_p= [ <int32_t>, ... , <int32_t>, -1 ];
3210  learn_q= [ <int32_t>, ... , <int32_t>, -1 ];
3211 
3212 
3213  const_a=[ [<int32_t>,<int32_t>,<DOUBLE>];
3214  [<int32_t>,<int32_t>,<DOUBLE>];
3215  [<int32_t>,<int32_t>,<DOUBLE>];
3216  ........
3217  [<int32_t>,<int32_t>,<DOUBLE>];
3218  [-1,-1,-1];
3219  ];
3220 
3221  const_b=[ [<int32_t>,<int32_t>,<DOUBLE>];
3222  [<int32_t>,<int32_t>,<DOUBLE>];
3223  [<int32_t>,<int32_t>,<DOUBLE>];
3224  ........
3225  [<int32_t>,<int32_t>,<DOUBLE>];
3226  [-1,-1];
3227  ];
3228 
3229  const_p[]=[ [<int32_t>, <DOUBLE>], ... , [<int32_t>,<DOUBLE>], [-1,-1] ];
3230  const_q[]=[ [<int32_t>, <DOUBLE>], ... , [<int32_t>,<DOUBLE>], [-1,-1] ];
3231  */
3232 bool CHMM::load_definitions(FILE* file, bool verbose, bool _initialize)
3233 {
3234  if (model)
3235  delete model ;
3236  model=new Model();
3237 
3238  int32_t received_params=0x0000000; //a,b,p,q,N,M,O
3239  char buffer[1024];
3240 
3241  bool result=false;
3242  E_STATE state=INITIAL;
3243 
3244  { // do some useful initializations
3245  model->set_learn_a(0, -1);
3246  model->set_learn_a(1, -1);
3247  model->set_const_a(0, -1);
3248  model->set_const_a(1, -1);
3249  model->set_const_a_val(0, 1.0);
3250  model->set_learn_b(0, -1);
3251  model->set_const_b(0, -1);
3252  model->set_const_b_val(0, 1.0);
3253  model->set_learn_p(0, -1);
3254  model->set_learn_q(0, -1);
3255  model->set_const_p(0, -1);
3256  model->set_const_q(0, -1);
3257  } ;
3258 
3259  line=1;
3260 
3261  if (file)
3262  {
3263  while (state!=END)
3264  {
3265  int32_t value=fgetc(file);
3266 
3267  if (value=='\n')
3268  line++;
3269 
3270  if (value==EOF)
3271  state=END;
3272 
3273  switch (state)
3274  {
3275  case INITIAL:
3276  if (value=='l')
3277  {
3278  if (fgetc(file)=='e' && fgetc(file)=='a' && fgetc(file)=='r' && fgetc(file)=='n' && fgetc(file)=='_')
3279  {
3280  switch(fgetc(file))
3281  {
3282  case 'a':
3283  state=GET_learn_a;
3284  break;
3285  case 'b':
3286  state=GET_learn_b;
3287  break;
3288  case 'p':
3289  state=GET_learn_p;
3290  break;
3291  case 'q':
3292  state=GET_learn_q;
3293  break;
3294  default:
3295  error(line, "a,b,p or q expected in train definition file");
3296  };
3297  }
3298  }
3299  else if (value=='c')
3300  {
3301  if (fgetc(file)=='o' && fgetc(file)=='n' && fgetc(file)=='s'
3302  && fgetc(file)=='t' && fgetc(file)=='_')
3303  {
3304  switch(fgetc(file))
3305  {
3306  case 'a':
3307  state=GET_const_a;
3308  break;
3309  case 'b':
3310  state=GET_const_b;
3311  break;
3312  case 'p':
3313  state=GET_const_p;
3314  break;
3315  case 'q':
3316  state=GET_const_q;
3317  break;
3318  default:
3319  error(line, "a,b,p or q expected in train definition file");
3320  };
3321  }
3322  }
3323  else if (value=='%')
3324  {
3325  state=COMMENT;
3326  }
3327  else if (value==EOF)
3328  {
3329  state=END;
3330  }
3331  break;
3332  case GET_learn_a:
3333  if (value=='=')
3334  {
3335  open_bracket(file);
3336  bool finished=false;
3337  int32_t i=0;
3338 
3339  if (verbose)
3340  SG_DEBUG( "\nlearn for transition matrix: ") ;
3341  while (!finished)
3342  {
3343  open_bracket(file);
3344 
3345  if (get_numbuffer(file, buffer, 4)) //get num
3346  {
3347  value=atoi(buffer);
3348  model->set_learn_a(i++, value);
3349 
3350  if (value<0)
3351  {
3352  finished=true;
3353  break;
3354  }
3355  if (value>=N)
3356  SG_ERROR( "invalid value for learn_a(%i,0): %i\n",i/2,(int)value) ;
3357  }
3358  else
3359  break;
3360 
3361  comma_or_space(file);
3362 
3363  if (get_numbuffer(file, buffer, 4)) //get num
3364  {
3365  value=atoi(buffer);
3366  model->set_learn_a(i++, value);
3367 
3368  if (value<0)
3369  {
3370  finished=true;
3371  break;
3372  }
3373  if (value>=N)
3374  SG_ERROR( "invalid value for learn_a(%i,1): %i\n",i/2-1,(int)value) ;
3375 
3376  }
3377  else
3378  break;
3379  close_bracket(file);
3380  }
3381  close_bracket(file);
3382  if (verbose)
3383  SG_DEBUG( "%i Entries",(int)(i/2)) ;
3384 
3385  if (finished)
3386  {
3387  received_params|=GOTlearn_a;
3388 
3389  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3390  }
3391  else
3392  state=END;
3393  }
3394  break;
3395  case GET_learn_b:
3396  if (value=='=')
3397  {
3398  open_bracket(file);
3399  bool finished=false;
3400  int32_t i=0;
3401 
3402  if (verbose)
3403  SG_DEBUG( "\nlearn for emission matrix: ") ;
3404 
3405  while (!finished)
3406  {
3407  open_bracket(file);
3408 
3409  int32_t combine=0;
3410 
3411  for (int32_t j=0; j<2; j++)
3412  {
3413  if (get_numbuffer(file, buffer, 4)) //get num
3414  {
3415  value=atoi(buffer);
3416 
3417  if (j==0)
3418  {
3419  model->set_learn_b(i++, value);
3420 
3421  if (value<0)
3422  {
3423  finished=true;
3424  break;
3425  }
3426  if (value>=N)
3427  SG_ERROR( "invalid value for learn_b(%i,0): %i\n",i/2,(int)value) ;
3428  }
3429  else
3430  combine=value;
3431  }
3432  else
3433  break;
3434 
3435  if (j<1)
3436  comma_or_space(file);
3437  else
3438  close_bracket(file);
3439  }
3440  model->set_learn_b(i++, combine);
3441  if (combine>=M)
3442 
3443  SG_ERROR("invalid value for learn_b(%i,1): %i\n",i/2-1,(int)value) ;
3444  }
3445  close_bracket(file);
3446  if (verbose)
3447  SG_DEBUG( "%i Entries",(int)(i/2-1)) ;
3448 
3449  if (finished)
3450  {
3451  received_params|=GOTlearn_b;
3452  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3453  }
3454  else
3455  state=END;
3456  }
3457  break;
3458  case GET_learn_p:
3459  if (value=='=')
3460  {
3461  open_bracket(file);
3462  bool finished=false;
3463  int32_t i=0;
3464 
3465  if (verbose)
3466  SG_DEBUG( "\nlearn start states: ") ;
3467  while (!finished)
3468  {
3469  if (get_numbuffer(file, buffer, 4)) //get num
3470  {
3471  value=atoi(buffer);
3472 
3473  model->set_learn_p(i++, value);
3474 
3475  if (value<0)
3476  {
3477  finished=true;
3478  break;
3479  }
3480  if (value>=N)
3481  SG_ERROR( "invalid value for learn_p(%i): %i\n",i-1,(int)value) ;
3482  }
3483  else
3484  break;
3485 
3486  comma_or_space(file);
3487  }
3488 
3489  close_bracket(file);
3490  if (verbose)
3491  SG_DEBUG( "%i Entries",i-1) ;
3492 
3493  if (finished)
3494  {
3495  received_params|=GOTlearn_p;
3496  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3497  }
3498  else
3499  state=END;
3500  }
3501  break;
3502  case GET_learn_q:
3503  if (value=='=')
3504  {
3505  open_bracket(file);
3506  bool finished=false;
3507  int32_t i=0;
3508 
3509  if (verbose)
3510  SG_DEBUG( "\nlearn terminal states: ") ;
3511  while (!finished)
3512  {
3513  if (get_numbuffer(file, buffer, 4)) //get num
3514  {
3515  value=atoi(buffer);
3516  model->set_learn_q(i++, value);
3517 
3518  if (value<0)
3519  {
3520  finished=true;
3521  break;
3522  }
3523  if (value>=N)
3524  SG_ERROR( "invalid value for learn_q(%i): %i\n",i-1,(int)value) ;
3525  }
3526  else
3527  break;
3528 
3529  comma_or_space(file);
3530  }
3531 
3532  close_bracket(file);
3533  if (verbose)
3534  SG_DEBUG( "%i Entries",i-1) ;
3535 
3536  if (finished)
3537  {
3538  received_params|=GOTlearn_q;
3539  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3540  }
3541  else
3542  state=END;
3543  }
3544  break;
3545  case GET_const_a:
3546  if (value=='=')
3547  {
3548  open_bracket(file);
3549  bool finished=false;
3550  int32_t i=0;
3551 
3552  if (verbose)
3553 #ifdef USE_HMMDEBUG
3554  SG_DEBUG( "\nconst for transition matrix: \n") ;
3555 #else
3556  SG_DEBUG( "\nconst for transition matrix: ") ;
3557 #endif
3558  while (!finished)
3559  {
3560  open_bracket(file);
3561 
3562  if (get_numbuffer(file, buffer, 4)) //get num
3563  {
3564  value=atoi(buffer);
3565  model->set_const_a(i++, value);
3566 
3567  if (value<0)
3568  {
3569  finished=true;
3570  model->set_const_a(i++, value);
3571  model->set_const_a_val((int32_t)i/2 - 1, value);
3572  break;
3573  }
3574  if (value>=N)
3575  SG_ERROR( "invalid value for const_a(%i,0): %i\n",i/2,(int)value) ;
3576  }
3577  else
3578  break;
3579 
3580  comma_or_space(file);
3581 
3582  if (get_numbuffer(file, buffer, 4)) //get num
3583  {
3584  value=atoi(buffer);
3585  model->set_const_a(i++, value);
3586 
3587  if (value<0)
3588  {
3589  finished=true;
3590  model->set_const_a_val((int32_t)i/2 - 1, value);
3591  break;
3592  }
3593  if (value>=N)
3594  SG_ERROR( "invalid value for const_a(%i,1): %i\n",i/2-1,(int)value) ;
3595  }
3596  else
3597  break;
3598 
3599  if (!comma_or_space(file))
3600  model->set_const_a_val((int32_t)i/2 - 1, 1.0);
3601  else
3602  if (get_numbuffer(file, buffer, 10)) //get num
3603  {
3604  float64_t dvalue=atof(buffer);
3605  model->set_const_a_val((int32_t)i/2 - 1, dvalue);
3606  if (dvalue<0)
3607  {
3608  finished=true;
3609  break;
3610  }
3611  if ((dvalue>1.0) || (dvalue<0.0))
3612  SG_ERROR( "invalid value for const_a_val(%i): %e\n",(int)i/2-1,dvalue) ;
3613  }
3614  else
3615  model->set_const_a_val((int32_t)i/2 - 1, 1.0);
3616 
3617 #ifdef USE_HMMDEBUG
3618  if (verbose)
3619  SG_ERROR("const_a(%i,%i)=%e\n", model->get_const_a((int32_t)i/2-1,0),model->get_const_a((int32_t)i/2-1,1),model->get_const_a_val((int32_t)i/2-1)) ;
3620 #endif
3621  close_bracket(file);
3622  }
3623  close_bracket(file);
3624  if (verbose)
3625  SG_DEBUG( "%i Entries",(int)i/2-1) ;
3626 
3627  if (finished)
3628  {
3629  received_params|=GOTconst_a;
3630  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3631  }
3632  else
3633  state=END;
3634  }
3635  break;
3636 
3637  case GET_const_b:
3638  if (value=='=')
3639  {
3640  open_bracket(file);
3641  bool finished=false;
3642  int32_t i=0;
3643 
3644  if (verbose)
3645 #ifdef USE_HMMDEBUG
3646  SG_DEBUG( "\nconst for emission matrix: \n") ;
3647 #else
3648  SG_DEBUG( "\nconst for emission matrix: ") ;
3649 #endif
3650  while (!finished)
3651  {
3652  open_bracket(file);
3653  int32_t combine=0;
3654  for (int32_t j=0; j<3; j++)
3655  {
3656  if (get_numbuffer(file, buffer, 10)) //get num
3657  {
3658  if (j==0)
3659  {
3660  value=atoi(buffer);
3661 
3662  model->set_const_b(i++, value);
3663 
3664  if (value<0)
3665  {
3666  finished=true;
3667  //model->set_const_b_val((int32_t)(i-1)/2, value);
3668  break;
3669  }
3670  if (value>=N)
3671  SG_ERROR( "invalid value for const_b(%i,0): %i\n",i/2-1,(int)value) ;
3672  }
3673  else if (j==2)
3674  {
3675  float64_t dvalue=atof(buffer);
3676  model->set_const_b_val((int32_t)(i-1)/2, dvalue);
3677  if (dvalue<0)
3678  {
3679  finished=true;
3680  break;
3681  } ;
3682  if ((dvalue>1.0) || (dvalue<0.0))
3683  SG_ERROR( "invalid value for const_b_val(%i,1): %e\n",i/2-1,dvalue) ;
3684  }
3685  else
3686  {
3687  value=atoi(buffer);
3688  combine= value;
3689  } ;
3690  }
3691  else
3692  {
3693  if (j==2)
3694  model->set_const_b_val((int32_t)(i-1)/2, 1.0);
3695  break;
3696  } ;
3697  if (j<2)
3698  if ((!comma_or_space(file)) && (j==1))
3699  {
3700  model->set_const_b_val((int32_t)(i-1)/2, 1.0) ;
3701  break ;
3702  } ;
3703  }
3704  close_bracket(file);
3705  model->set_const_b(i++, combine);
3706  if (combine>=M)
3707  SG_ERROR("invalid value for const_b(%i,1): %i\n",i/2-1, combine) ;
3708 #ifdef USE_HMMDEBUG
3709  if (verbose && !finished)
3710  SG_ERROR("const_b(%i,%i)=%e\n", model->get_const_b((int32_t)i/2-1,0),model->get_const_b((int32_t)i/2-1,1),model->get_const_b_val((int32_t)i/2-1)) ;
3711 #endif
3712  }
3713  close_bracket(file);
3714  if (verbose)
3715  SG_ERROR( "%i Entries",(int)i/2-1) ;
3716 
3717  if (finished)
3718  {
3719  received_params|=GOTconst_b;
3720  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3721  }
3722  else
3723  state=END;
3724  }
3725  break;
3726  case GET_const_p:
3727  if (value=='=')
3728  {
3729  open_bracket(file);
3730  bool finished=false;
3731  int32_t i=0;
3732 
3733  if (verbose)
3734 #ifdef USE_HMMDEBUG
3735  SG_DEBUG( "\nconst for start states: \n") ;
3736 #else
3737  SG_DEBUG( "\nconst for start states: ") ;
3738 #endif
3739  while (!finished)
3740  {
3741  open_bracket(file);
3742 
3743  if (get_numbuffer(file, buffer, 4)) //get num
3744  {
3745  value=atoi(buffer);
3746  model->set_const_p(i, value);
3747 
3748  if (value<0)
3749  {
3750  finished=true;
3751  model->set_const_p_val(i++, value);
3752  break;
3753  }
3754  if (value>=N)
3755  SG_ERROR( "invalid value for const_p(%i): %i\n",i,(int)value) ;
3756 
3757  }
3758  else
3759  break;
3760 
3761  if (!comma_or_space(file))
3762  model->set_const_p_val(i++, 1.0);
3763  else
3764  if (get_numbuffer(file, buffer, 10)) //get num
3765  {
3766  float64_t dvalue=atof(buffer);
3767  model->set_const_p_val(i++, dvalue);
3768  if (dvalue<0)
3769  {
3770  finished=true;
3771  break;
3772  }
3773  if ((dvalue>1) || (dvalue<0))
3774  SG_ERROR( "invalid value for const_p_val(%i): %e\n",i,dvalue) ;
3775  }
3776  else
3777  model->set_const_p_val(i++, 1.0);
3778 
3779  close_bracket(file);
3780 
3781 #ifdef USE_HMMDEBUG
3782  if (verbose)
3783  SG_DEBUG("const_p(%i)=%e\n", model->get_const_p(i-1),model->get_const_p_val(i-1)) ;
3784 #endif
3785  }
3786  if (verbose)
3787  SG_DEBUG( "%i Entries",i-1) ;
3788 
3789  close_bracket(file);
3790 
3791  if (finished)
3792  {
3793  received_params|=GOTconst_p;
3794  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3795  }
3796  else
3797  state=END;
3798  }
3799  break;
3800  case GET_const_q:
3801  if (value=='=')
3802  {
3803  open_bracket(file);
3804  bool finished=false;
3805  if (verbose)
3806 #ifdef USE_HMMDEBUG
3807  SG_DEBUG( "\nconst for terminal states: \n") ;
3808 #else
3809  SG_DEBUG( "\nconst for terminal states: ") ;
3810 #endif
3811  int32_t i=0;
3812 
3813  while (!finished)
3814  {
3815  open_bracket(file) ;
3816  if (get_numbuffer(file, buffer, 4)) //get num
3817  {
3818  value=atoi(buffer);
3819  model->set_const_q(i, value);
3820  if (value<0)
3821  {
3822  finished=true;
3823  model->set_const_q_val(i++, value);
3824  break;
3825  }
3826  if (value>=N)
3827  SG_ERROR( "invalid value for const_q(%i): %i\n",i,(int)value) ;
3828  }
3829  else
3830  break;
3831 
3832  if (!comma_or_space(file))
3833  model->set_const_q_val(i++, 1.0);
3834  else
3835  if (get_numbuffer(file, buffer, 10)) //get num
3836  {
3837  float64_t dvalue=atof(buffer);
3838  model->set_const_q_val(i++, dvalue);
3839  if (dvalue<0)
3840  {
3841  finished=true;
3842  break;
3843  }
3844  if ((dvalue>1) || (dvalue<0))
3845  SG_ERROR("invalid value for const_q_val(%i): %e\n",i,(double) dvalue) ;
3846  }
3847  else
3848  model->set_const_q_val(i++, 1.0);
3849 
3850  close_bracket(file);
3851 #ifdef USE_HMMDEBUG
3852  if (verbose)
3853  SG_DEBUG("const_q(%i)=%e\n", model->get_const_q(i-1),model->get_const_q_val(i-1)) ;
3854 #endif
3855  }
3856  if (verbose)
3857  SG_DEBUG( "%i Entries",i-1) ;
3858 
3859  close_bracket(file);
3860 
3861  if (finished)
3862  {
3863  received_params|=GOTconst_q;
3864  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3865  }
3866  else
3867  state=END;
3868  }
3869  break;
3870  case COMMENT:
3871  if (value==EOF)
3872  state=END;
3873  else if (value=='\n')
3874  state=INITIAL;
3875  break;
3876 
3877  default:
3878  break;
3879  }
3880  }
3881  }
3882 
3883  /*result=((received_params&(GOTlearn_a | GOTconst_a))!=0) ;
3884  result=result && ((received_params&(GOTlearn_b | GOTconst_b))!=0) ;
3885  result=result && ((received_params&(GOTlearn_p | GOTconst_p))!=0) ;
3886  result=result && ((received_params&(GOTlearn_q | GOTconst_q))!=0) ; */
3887  result=1 ;
3888  if (result)
3889  {
3890  model->sort_learn_a() ;
3891  model->sort_learn_b() ;
3892  if (_initialize)
3893  {
3894  init_model_defined(); ;
3895  convert_to_log();
3896  } ;
3897  }
3898  if (verbose)
3899  SG_DEBUG( "\n") ;
3900  return result;
3901 }
3902 
3903 /*
3904  -format specs: model_file (model.hmm)
3905  % HMM - specification
3906  % N - number of states
3907  % M - number of observation_tokens
3908  % a is state_transition_matrix
3909  % size(a)= [N,N]
3910  %
3911  % b is observation_per_state_matrix
3912  % size(b)= [N,M]
3913  %
3914  % p is initial distribution
3915  % size(p)= [1, N]
3916 
3917  N=<int32_t>;
3918  M=<int32_t>;
3919 
3920  p=[<DOUBLE>,<DOUBLE>...<DOUBLE>];
3921 
3922  a=[ [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3923  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3924  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3925  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3926  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3927  ];
3928 
3929  b=[ [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3930  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3931  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3932  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3933  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3934  ];
3935  */
3936 
3937 bool CHMM::save_model(FILE* file)
3938 {
3939  bool result=false;
3940  int32_t i,j;
3941  const float32_t NAN_REPLACEMENT = (float32_t) CMath::ALMOST_NEG_INFTY ;
3942 
3943  if (file)
3944  {
3945  fprintf(file,"%s","% HMM - specification\n% N - number of states\n% M - number of observation_tokens\n% a is state_transition_matrix\n% size(a)= [N,N]\n%\n% b is observation_per_state_matrix\n% size(b)= [N,M]\n%\n% p is initial distribution\n% size(p)= [1, N]\n\n% q is distribution of end states\n% size(q)= [1, N]\n");
3946  fprintf(file,"N=%d;\n",N);
3947  fprintf(file,"M=%d;\n",M);
3948 
3949  fprintf(file,"p=[");
3950  for (i=0; i<N; i++)
3951  {
3952  if (i<N-1) {
3953  if (CMath::is_finite(get_p(i)))
3954  fprintf(file, "%e,", (double)get_p(i));
3955  else
3956  fprintf(file, "%f,", NAN_REPLACEMENT);
3957  }
3958  else {
3959  if (CMath::is_finite(get_p(i)))
3960  fprintf(file, "%e", (double)get_p(i));
3961  else
3962  fprintf(file, "%f", NAN_REPLACEMENT);
3963  }
3964  }
3965 
3966  fprintf(file,"];\n\nq=[");
3967  for (i=0; i<N; i++)
3968  {
3969  if (i<N-1) {
3970  if (CMath::is_finite(get_q(i)))
3971  fprintf(file, "%e,", (double)get_q(i));
3972  else
3973  fprintf(file, "%f,", NAN_REPLACEMENT);
3974  }
3975  else {
3976  if (CMath::is_finite(get_q(i)))
3977  fprintf(file, "%e", (double)get_q(i));
3978  else
3979  fprintf(file, "%f", NAN_REPLACEMENT);
3980  }
3981  }
3982  fprintf(file,"];\n\na=[");
3983 
3984  for (i=0; i<N; i++)
3985  {
3986  fprintf(file, "\t[");
3987 
3988  for (j=0; j<N; j++)
3989  {
3990  if (j<N-1) {
3991  if (CMath::is_finite(get_a(i,j)))
3992  fprintf(file, "%e,", (double)get_a(i,j));
3993  else
3994  fprintf(file, "%f,", NAN_REPLACEMENT);
3995  }
3996  else {
3997  if (CMath::is_finite(get_a(i,j)))
3998  fprintf(file, "%e];\n", (double)get_a(i,j));
3999  else
4000  fprintf(file, "%f];\n", NAN_REPLACEMENT);
4001  }
4002  }
4003  }
4004 
4005  fprintf(file," ];\n\nb=[");
4006 
4007  for (i=0; i<N; i++)
4008  {
4009  fprintf(file, "\t[");
4010 
4011  for (j=0; j<M; j++)
4012  {
4013  if (j<M-1) {
4014  if (CMath::is_finite(get_b(i,j)))
4015  fprintf(file, "%e,", (double)get_b(i,j));
4016  else
4017  fprintf(file, "%f,", NAN_REPLACEMENT);
4018  }
4019  else {
4020  if (CMath::is_finite(get_b(i,j)))
4021  fprintf(file, "%e];\n", (double)get_b(i,j));
4022  else
4023  fprintf(file, "%f];\n", NAN_REPLACEMENT);
4024  }
4025  }
4026  }
4027  result= (fprintf(file," ];\n") == 5);
4028  }
4029 
4030  return result;
4031 }
4032 
4033 T_STATES* CHMM::get_path(int32_t dim, float64_t& prob)
4034 {
4035  T_STATES* result = NULL;
4036 
4037  prob = best_path(dim);
4039 
4040  for (int32_t i=0; i<p_observations->get_vector_length(dim); i++)
4041  result[i]=PATH(dim)[i];
4042 
4043  return result;
4044 }
4045 
4046 bool CHMM::save_path(FILE* file)
4047 {
4048  bool result=false;
4049 
4050  if (file)
4051  {
4052  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4053  {
4054  if (dim%100==0)
4055  SG_PRINT( "%i..", dim) ;
4056  float64_t prob = best_path(dim);
4057  fprintf(file,"%i. path probability:%e\nstate sequence:\n", dim, prob);
4058  for (int32_t i=0; i<p_observations->get_vector_length(dim)-1; i++)
4059  fprintf(file,"%d ", PATH(dim)[i]);
4060  fprintf(file,"%d", PATH(dim)[p_observations->get_vector_length(dim)-1]);
4061  fprintf(file,"\n\n") ;
4062  }
4063  SG_DONE();
4064  result=true;
4065  }
4066 
4067  return result;
4068 }
4069 
4071 {
4072  bool result=false;
4073 
4074  if (file)
4075  {
4076  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4077  {
4078  float32_t prob= (float32_t) model_probability(dim);
4079  fwrite(&prob, sizeof(float32_t), 1, file);
4080  }
4081  result=true;
4082  }
4083 
4084  return result;
4085 }
4086 
4087 bool CHMM::save_likelihood(FILE* file)
4088 {
4089  bool result=false;
4090 
4091  if (file)
4092  {
4093  fprintf(file, "%% likelihood of model per observation\n%% P[O|model]=[ P[O|model]_1 P[O|model]_2 ... P[O|model]_dim ]\n%%\n");
4094 
4095  fprintf(file, "P=[");
4096  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4097  fprintf(file, "%e ", (double) model_probability(dim));
4098 
4099  fprintf(file,"];");
4100  result=true;
4101  }
4102 
4103  return result;
4104 }
4105 
4106 #define FLOATWRITE(file, value) { float32_t rrr=float32_t(value); fwrite(&rrr, sizeof(float32_t), 1, file); num_floats++;}
4107 
4108 bool CHMM::save_model_bin(FILE* file)
4109 {
4110  int32_t i,j,q, num_floats=0 ;
4111  if (!model)
4112  {
4113  if (file)
4114  {
4115  // write id
4117  FLOATWRITE(file, (float32_t) 1);
4118 
4119  //derivates log(dp),log(dq)
4120  for (i=0; i<N; i++)
4121  FLOATWRITE(file, get_p(i));
4122  SG_INFO( "wrote %i parameters for p\n",N) ;
4123 
4124  for (i=0; i<N; i++)
4125  FLOATWRITE(file, get_q(i)) ;
4126  SG_INFO( "wrote %i parameters for q\n",N) ;
4127 
4128  //derivates log(da),log(db)
4129  for (i=0; i<N; i++)
4130  for (j=0; j<N; j++)
4131  FLOATWRITE(file, get_a(i,j));
4132  SG_INFO( "wrote %i parameters for a\n",N*N) ;
4133 
4134  for (i=0; i<N; i++)
4135  for (j=0; j<M; j++)
4136  FLOATWRITE(file, get_b(i,j));
4137  SG_INFO( "wrote %i parameters for b\n",N*M) ;
4138 
4139  // write id
4140  FLOATWRITE(file, (float32_t)CMath::INFTY);
4141  FLOATWRITE(file, (float32_t) 3);
4142 
4143  // write number of parameters
4144  FLOATWRITE(file, (float32_t) N);
4145  FLOATWRITE(file, (float32_t) N);
4146  FLOATWRITE(file, (float32_t) N*N);
4147  FLOATWRITE(file, (float32_t) N*M);
4148  FLOATWRITE(file, (float32_t) N);
4149  FLOATWRITE(file, (float32_t) M);
4150  } ;
4151  }
4152  else
4153  {
4154  if (file)
4155  {
4156  int32_t num_p, num_q, num_a, num_b ;
4157  // write id
4159  FLOATWRITE(file, (float32_t) 2);
4160 
4161  for (i=0; model->get_learn_p(i)>=0; i++)
4162  FLOATWRITE(file, get_p(model->get_learn_p(i)));
4163  num_p=i ;
4164  SG_INFO( "wrote %i parameters for p\n",num_p) ;
4165 
4166  for (i=0; model->get_learn_q(i)>=0; i++)
4167  FLOATWRITE(file, get_q(model->get_learn_q(i)));
4168  num_q=i ;
4169  SG_INFO( "wrote %i parameters for q\n",num_q) ;
4170 
4171  //derivates log(da),log(db)
4172  for (q=0; model->get_learn_a(q,1)>=0; q++)
4173  {
4174  i=model->get_learn_a(q,0) ;
4175  j=model->get_learn_a(q,1) ;
4176  FLOATWRITE(file, (float32_t)i);
4177  FLOATWRITE(file, (float32_t)j);
4178  FLOATWRITE(file, get_a(i,j));
4179  } ;
4180  num_a=q ;
4181  SG_INFO( "wrote %i parameters for a\n",num_a) ;
4182 
4183  for (q=0; model->get_learn_b(q,0)>=0; q++)
4184  {
4185  i=model->get_learn_b(q,0) ;
4186  j=model->get_learn_b(q,1) ;
4187  FLOATWRITE(file, (float32_t)i);
4188  FLOATWRITE(file, (float32_t)j);
4189  FLOATWRITE(file, get_b(i,j));
4190  } ;
4191  num_b=q ;
4192  SG_INFO( "wrote %i parameters for b\n",num_b) ;
4193 
4194  // write id
4195  FLOATWRITE(file, (float32_t)CMath::INFTY);
4196  FLOATWRITE(file, (float32_t) 3);
4197 
4198  // write number of parameters
4199  FLOATWRITE(file, (float32_t) num_p);
4200  FLOATWRITE(file, (float32_t) num_q);
4201  FLOATWRITE(file, (float32_t) num_a);
4202  FLOATWRITE(file, (float32_t) num_b);
4203  FLOATWRITE(file, (float32_t) N);
4204  FLOATWRITE(file, (float32_t) M);
4205  } ;
4206  } ;
4207  return true ;
4208 }
4209 
4210 bool CHMM::save_path_derivatives(FILE* logfile)
4211 {
4212  int32_t dim,i,j;
4213 
4214  if (logfile)
4215  {
4216  fprintf(logfile,"%% lambda denotes the model\n%% O denotes the observation sequence\n%% Q denotes the path\n%% \n%% calculating derivatives of P[O,Q|lambda]=p_{Q1}b_{Q1}(O_1}*a_{Q1}{Q2}b_{Q2}(O2)*...*q_{T-1}{T}b_{QT}(O_T}q_{Q_T} against p,q,a,b\n%%\n");
4217  fprintf(logfile,"%% dPr[...]=[ [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_NN]\n");
4218  fprintf(logfile,"%% [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_NN]\n");
4219  fprintf(logfile,"%% ............................. \n");
4220  fprintf(logfile,"%% [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_MM]\n");
4221  fprintf(logfile,"%% ];\n%%\n\ndPr(log()) = [\n");
4222  }
4223  else
4224  return false ;
4225 
4226  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
4227  {
4228  best_path(dim);
4229 
4230  fprintf(logfile, "[ ");
4231 
4232  //derivates dlogp,dlogq
4233  for (i=0; i<N; i++)
4234  fprintf(logfile,"%e, ", (double) path_derivative_p(i,dim) );
4235 
4236  for (i=0; i<N; i++)
4237  fprintf(logfile,"%e, ", (double) path_derivative_q(i,dim) );
4238 
4239  //derivates dloga,dlogb
4240  for (i=0; i<N; i++)
4241  for (j=0; j<N; j++)
4242  fprintf(logfile, "%e,", (double) path_derivative_a(i,j,dim) );
4243 
4244  for (i=0; i<N; i++)
4245  for (j=0; j<M; j++)
4246  fprintf(logfile, "%e,", (double) path_derivative_b(i,j,dim) );
4247 
4248  fseek(logfile,ftell(logfile)-1,SEEK_SET);
4249  fprintf(logfile, " ];\n");
4250  }
4251 
4252  fprintf(logfile, "];");
4253 
4254  return true ;
4255 }
4256 
4258 {
4259  bool result=false;
4260  int32_t dim,i,j,q;
4261  float64_t prob=0 ;
4262  int32_t num_floats=0 ;
4263 
4264  float64_t sum_prob=0.0 ;
4265  if (!model)
4266  SG_WARNING( "No definitions loaded -- writing derivatives of all weights\n") ;
4267  else
4268  SG_INFO( "writing derivatives of changed weights only\n") ;
4269 
4270  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
4271  {
4272  if (dim%100==0)
4273  {
4274  SG_PRINT( ".") ;
4275 
4276  } ;
4277 
4278  prob=best_path(dim);
4279  sum_prob+=prob ;
4280 
4281  if (!model)
4282  {
4283  if (logfile)
4284  {
4285  // write prob
4286  FLOATWRITE(logfile, prob);
4287 
4288  for (i=0; i<N; i++)
4289  FLOATWRITE(logfile, path_derivative_p(i,dim));
4290 
4291  for (i=0; i<N; i++)
4292  FLOATWRITE(logfile, path_derivative_q(i,dim));
4293 
4294  for (i=0; i<N; i++)
4295  for (j=0; j<N; j++)
4296  FLOATWRITE(logfile, path_derivative_a(i,j,dim));
4297 
4298  for (i=0; i<N; i++)
4299  for (j=0; j<M; j++)
4300  FLOATWRITE(logfile, path_derivative_b(i,j,dim));
4301 
4302  }
4303  }
4304  else
4305  {
4306  if (logfile)
4307  {
4308  // write prob
4309  FLOATWRITE(logfile, prob);
4310 
4311  for (i=0; model->get_learn_p(i)>=0; i++)
4312  FLOATWRITE(logfile, path_derivative_p(model->get_learn_p(i),dim));
4313 
4314  for (i=0; model->get_learn_q(i)>=0; i++)
4315  FLOATWRITE(logfile, path_derivative_q(model->get_learn_q(i),dim));
4316 
4317  for (q=0; model->get_learn_a(q,0)>=0; q++)
4318  {
4319  i=model->get_learn_a(q,0) ;
4320  j=model->get_learn_a(q,1) ;
4321  FLOATWRITE(logfile, path_derivative_a(i,j,dim));
4322  } ;
4323 
4324  for (q=0; model->get_learn_b(q,0)>=0; q++)
4325  {
4326  i=model->get_learn_b(q,0) ;
4327  j=model->get_learn_b(q,1) ;
4328  FLOATWRITE(logfile, path_derivative_b(i,j,dim));
4329  } ;
4330  }
4331  } ;
4332  }
4333  save_model_bin(logfile) ;
4334 
4335  result=true;
4336  SG_PRINT( "\n") ;
4337  return result;
4338 }
4339 
4341 {
4342  bool result=false;
4343  int32_t dim,i,j,q ;
4344  int32_t num_floats=0 ;
4345 
4346  if (!model)
4347  SG_WARNING( "No definitions loaded -- writing derivatives of all weights\n") ;
4348  else
4349  SG_INFO( "writing derivatives of changed weights only\n") ;
4350 
4351 #ifdef USE_HMMPARALLEL
4352  int32_t num_threads = parallel->get_num_threads();
4353  pthread_t *threads=SG_MALLOC(pthread_t, num_threads);
4354  S_DIM_THREAD_PARAM *params=SG_MALLOC(S_DIM_THREAD_PARAM, num_threads);
4355 
4356  if (p_observations->get_num_vectors()<num_threads)
4357  num_threads=p_observations->get_num_vectors();
4358 #endif
4359 
4360  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
4361  {
4362  if (dim%20==0)
4363  {
4364  SG_PRINT( ".") ;
4365 
4366  } ;
4367 
4368 #ifdef USE_HMMPARALLEL
4369  if (dim%num_threads==0)
4370  {
4371  for (i=0; i<num_threads; i++)
4372  {
4373  if (dim+i<p_observations->get_num_vectors())
4374  {
4375  params[i].hmm=this ;
4376  params[i].dim=dim+i ;
4377  pthread_create(&threads[i], NULL, bw_dim_prefetch, (void*)&params[i]) ;
4378  }
4379  }
4380 
4381  for (i=0; i<num_threads; i++)
4382  {
4383  if (dim+i<p_observations->get_num_vectors())
4384  pthread_join(threads[i], NULL);
4385  }
4386  }
4387 #endif
4388 
4389  float64_t prob=model_probability(dim) ;
4390  if (!model)
4391  {
4392  if (file)
4393  {
4394  // write prob
4395  FLOATWRITE(file, prob);
4396 
4397  //derivates log(dp),log(dq)
4398  for (i=0; i<N; i++)
4399  FLOATWRITE(file, model_derivative_p(i,dim));
4400 
4401  for (i=0; i<N; i++)
4402  FLOATWRITE(file, model_derivative_q(i,dim));
4403 
4404  //derivates log(da),log(db)
4405  for (i=0; i<N; i++)
4406  for (j=0; j<N; j++)
4407  FLOATWRITE(file, model_derivative_a(i,j,dim));
4408 
4409  for (i=0; i<N; i++)
4410  for (j=0; j<M; j++)
4411  FLOATWRITE(file, model_derivative_b(i,j,dim));
4412 
4413  if (dim==0)
4414  SG_INFO("Number of parameters (including posterior prob.): %i\n", num_floats) ;
4415  } ;
4416  }
4417  else
4418  {
4419  if (file)
4420  {
4421  // write prob
4422  FLOATWRITE(file, prob);
4423 
4424  for (i=0; model->get_learn_p(i)>=0; i++)
4426 
4427  for (i=0; model->get_learn_q(i)>=0; i++)
4429 
4430  //derivates log(da),log(db)
4431  for (q=0; model->get_learn_a(q,1)>=0; q++)
4432  {
4433  i=model->get_learn_a(q,0) ;
4434  j=model->get_learn_a(q,1) ;
4435  FLOATWRITE(file, model_derivative_a(i,j,dim));
4436  } ;
4437 
4438  for (q=0; model->get_learn_b(q,0)>=0; q++)
4439  {
4440  i=model->get_learn_b(q,0) ;
4441  j=model->get_learn_b(q,1) ;
4442  FLOATWRITE(file, model_derivative_b(i,j,dim));
4443  } ;
4444  if (dim==0)
4445  SG_INFO("Number of parameters (including posterior prob.): %i\n", num_floats) ;
4446  } ;
4447  } ;
4448  }
4449  save_model_bin(file) ;
4450 
4451 #ifdef USE_HMMPARALLEL
4452  SG_FREE(threads);
4453  SG_FREE(params);
4454 #endif
4455 
4456  result=true;
4457  SG_PRINT( "\n") ;
4458  return result;
4459 }
4460 
4462 {
4463  bool result=false;
4464  int32_t dim,i,j;
4465 
4466  if (file)
4467  {
4468 
4469  fprintf(file,"%% lambda denotes the model\n%% O denotes the observation sequence\n%% Q denotes the path\n%%\n%% calculating derivatives of P[O|lambda]=sum_{all Q}p_{Q1}b_{Q1}(O_1}*a_{Q1}{Q2}b_{Q2}(O2)*...*q_{T-1}{T}b_{QT}(O_T}q_{Q_T} against p,q,a,b\n%%\n");
4470  fprintf(file,"%% dPr[...]=[ [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_NN]\n");
4471  fprintf(file,"%% [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_NN]\n");
4472  fprintf(file,"%% ............................. \n");
4473  fprintf(file,"%% [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_MM]\n");
4474  fprintf(file,"%% ];\n%%\n\nlog(dPr) = [\n");
4475 
4476 
4477  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
4478  {
4479  fprintf(file, "[ ");
4480 
4481  //derivates log(dp),log(dq)
4482  for (i=0; i<N; i++)
4483  fprintf(file,"%e, ", (double) model_derivative_p(i, dim) ); //log (dp)
4484 
4485  for (i=0; i<N; i++)
4486  fprintf(file,"%e, ", (double) model_derivative_q(i, dim) ); //log (dq)
4487 
4488  //derivates log(da),log(db)
4489  for (i=0; i<N; i++)
4490  for (j=0; j<N; j++)
4491  fprintf(file, "%e,", (double) model_derivative_a(i,j,dim) );
4492 
4493  for (i=0; i<N; i++)
4494  for (j=0; j<M; j++)
4495  fprintf(file, "%e,", (double) model_derivative_b(i,j,dim) );
4496 
4497  fseek(file,ftell(file)-1,SEEK_SET);
4498  fprintf(file, " ];\n");
4499  }
4500 
4501 
4502  fprintf(file, "];");
4503 
4504  result=true;
4505  }
4506  return result;
4507 }
4508 
4510 {
4511  // bool result=false;
4512  const float64_t delta=5e-4 ;
4513 
4514  int32_t i ;
4515  //derivates log(da)
4516  /* for (i=0; i<N; i++)
4517  {
4518  for (int32_t j=0; j<N; j++)
4519  {
4520  float64_t old_a=get_a(i,j) ;
4521 
4522  set_a(i,j, log(exp(old_a)-delta)) ;
4523  invalidate_model() ;
4524  float64_t prob_old=exp(model_probability(-1)*p_observations->get_num_vectors()) ;
4525 
4526  set_a(i,j, log(exp(old_a)+delta)) ;
4527  invalidate_model() ;
4528  float64_t prob_new=exp(model_probability(-1)*p_observations->get_num_vectors());
4529 
4530  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4531 
4532  set_a(i,j, old_a) ;
4533  invalidate_model() ;
4534 
4535  float64_t prod_prob=model_probability(-1)*p_observations->get_num_vectors() ;
4536 
4537  float64_t deriv_calc=0 ;
4538  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4539  deriv_calc+=exp(model_derivative_a(i, j, dim)+
4540  prod_prob-model_probability(dim)) ;
4541 
4542  SG_DEBUG("da(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc);
4543  } ;
4544  } ;*/
4545  //derivates log(db)
4546  i=0;//for (i=0; i<N; i++)
4547  {
4548  for (int32_t j=0; j<M; j++)
4549  {
4550  float64_t old_b=get_b(i,j) ;
4551 
4552  set_b(i,j, log(exp(old_b)-delta)) ;
4553  invalidate_model() ;
4555 
4556  set_b(i,j, log(exp(old_b)+delta)) ;
4557  invalidate_model() ;
4559 
4560  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4561 
4562  set_b(i,j, old_b) ;
4563  invalidate_model() ;
4564 
4565  float64_t deriv_calc=0 ;
4566  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4567  {
4568  deriv_calc+=exp(model_derivative_b(i, j, dim)-model_probability(dim)) ;
4569  if (j==1)
4570  SG_INFO("deriv_calc[%i]=%e\n",dim,deriv_calc) ;
4571  } ;
4572 
4573  SG_ERROR( "b(%i,%i)=%e db(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j,exp(old_b),i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc);
4574  } ;
4575  } ;
4576  return true ;
4577 }
4578 
4580 {
4581  bool result=false;
4582  const float64_t delta=3e-4 ;
4583 
4584  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4585  {
4586  int32_t i ;
4587  //derivates log(dp),log(dq)
4588  for (i=0; i<N; i++)
4589  {
4590  for (int32_t j=0; j<N; j++)
4591  {
4592  float64_t old_a=get_a(i,j) ;
4593 
4594  set_a(i,j, log(exp(old_a)-delta)) ;
4595  invalidate_model() ;
4596  float64_t prob_old=exp(model_probability(dim)) ;
4597 
4598  set_a(i,j, log(exp(old_a)+delta)) ;
4599  invalidate_model() ;
4600  float64_t prob_new=exp(model_probability(dim));
4601 
4602  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4603 
4604  set_a(i,j, old_a) ;
4605  invalidate_model() ;
4606  float64_t deriv_calc=exp(model_derivative_a(i, j, dim)) ;
4607 
4608  SG_DEBUG( "da(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc);
4609  invalidate_model() ;
4610  } ;
4611  } ;
4612  for (i=0; i<N; i++)
4613  {
4614  for (int32_t j=0; j<M; j++)
4615  {
4616  float64_t old_b=get_b(i,j) ;
4617 
4618  set_b(i,j, log(exp(old_b)-delta)) ;
4619  invalidate_model() ;
4620  float64_t prob_old=exp(model_probability(dim)) ;
4621 
4622  set_b(i,j, log(exp(old_b)+delta)) ;
4623  invalidate_model() ;
4624  float64_t prob_new=exp(model_probability(dim));
4625 
4626  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4627 
4628  set_b(i,j, old_b) ;
4629  invalidate_model() ;
4630  float64_t deriv_calc=exp(model_derivative_b(i, j, dim));
4631 
4632  SG_DEBUG( "db(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/(deriv_calc));
4633  } ;
4634  } ;
4635 
4636 #ifdef TEST
4637  for (i=0; i<N; i++)
4638  {
4639  float64_t old_p=get_p(i) ;
4640 
4641  set_p(i, log(exp(old_p)-delta)) ;
4642  invalidate_model() ;
4643  float64_t prob_old=exp(model_probability(dim)) ;
4644 
4645  set_p(i, log(exp(old_p)+delta)) ;
4646  invalidate_model() ;
4647  float64_t prob_new=exp(model_probability(dim));
4648  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4649 
4650  set_p(i, old_p) ;
4651  invalidate_model() ;
4652  float64_t deriv_calc=exp(model_derivative_p(i, dim));
4653 
4654  //if (fabs(deriv_calc_old-deriv)>1e-4)
4655  SG_DEBUG( "dp(%i) = %e:%e\t (%1.5f%%)\n", i, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc);
4656  } ;
4657  for (i=0; i<N; i++)
4658  {
4659  float64_t old_q=get_q(i) ;
4660 
4661  set_q(i, log(exp(old_q)-delta)) ;
4662  invalidate_model() ;
4663  float64_t prob_old=exp(model_probability(dim)) ;
4664 
4665  set_q(i, log(exp(old_q)+delta)) ;
4666  invalidate_model() ;
4667  float64_t prob_new=exp(model_probability(dim));
4668 
4669  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4670 
4671  set_q(i, old_q) ;
4672  invalidate_model() ;
4673  float64_t deriv_calc=exp(model_derivative_q(i, dim));
4674 
4675  //if (fabs(deriv_calc_old-deriv)>1e-4)
4676  SG_DEBUG( "dq(%i) = %e:%e\t (%1.5f%%)\n", i, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc);
4677  } ;
4678 #endif
4679  }
4680  return result;
4681 }
4682 
4683 #ifdef USE_HMMDEBUG
4684 bool CHMM::check_path_derivatives()
4685 {
4686  bool result=false;
4687  const float64_t delta=1e-4 ;
4688 
4689  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4690  {
4691  int32_t i ;
4692  //derivates log(dp),log(dq)
4693  for (i=0; i<N; i++)
4694  {
4695  for (int32_t j=0; j<N; j++)
4696  {
4697  float64_t old_a=get_a(i,j) ;
4698 
4699  set_a(i,j, log(exp(old_a)-delta)) ;
4700  invalidate_model() ;
4701  float64_t prob_old=best_path(dim) ;
4702 
4703  set_a(i,j, log(exp(old_a)+delta)) ;
4704  invalidate_model() ;
4705  float64_t prob_new=best_path(dim);
4706 
4707  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4708 
4709  set_a(i,j, old_a) ;
4710  invalidate_model() ;
4711  float64_t deriv_calc=path_derivative_a(i, j, dim) ;
4712 
4713  SG_DEBUG( "da(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc);
4714  } ;
4715  } ;
4716  for (i=0; i<N; i++)
4717  {
4718  for (int32_t j=0; j<M; j++)
4719  {
4720  float64_t old_b=get_b(i,j) ;
4721 
4722  set_b(i,j, log(exp(old_b)-delta)) ;
4723  invalidate_model() ;
4724  float64_t prob_old=best_path(dim) ;
4725 
4726  set_b(i,j, log(exp(old_b)+delta)) ;
4727  invalidate_model() ;
4728  float64_t prob_new=best_path(dim);
4729 
4730  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4731 
4732  set_b(i,j, old_b) ;
4733  invalidate_model() ;
4734  float64_t deriv_calc=path_derivative_b(i, j, dim);
4735 
4736  SG_DEBUG( "db(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/(deriv_calc));
4737  } ;
4738  } ;
4739 
4740  for (i=0; i<N; i++)
4741  {
4742  float64_t old_p=get_p(i) ;
4743 
4744  set_p(i, log(exp(old_p)-delta)) ;
4745  invalidate_model() ;
4746  float64_t prob_old=best_path(dim) ;
4747 
4748  set_p(i, log(exp(old_p)+delta)) ;
4749  invalidate_model() ;
4750  float64_t prob_new=best_path(dim);
4751  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4752 
4753  set_p(i, old_p) ;
4754  invalidate_model() ;
4755  float64_t deriv_calc=path_derivative_p(i, dim);
4756 
4757  //if (fabs(deriv_calc_old-deriv)>1e-4)
4758  SG_DEBUG( "dp(%i) = %e:%e\t (%1.5f%%)\n", i, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc);
4759  } ;
4760  for (i=0; i<N; i++)
4761  {
4762  float64_t old_q=get_q(i) ;
4763 
4764  set_q(i, log(exp(old_q)-delta)) ;
4765  invalidate_model() ;
4766  float64_t prob_old=best_path(dim) ;
4767 
4768  set_q(i, log(exp(old_q)+delta)) ;
4769  invalidate_model() ;
4770  float64_t prob_new=best_path(dim);
4771 
4772  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4773 
4774  set_q(i, old_q) ;
4775  invalidate_model() ;
4776  float64_t deriv_calc=path_derivative_q(i, dim);
4777 
4778  //if (fabs(deriv_calc_old-deriv)>1e-4)
4779  SG_DEBUG( "dq(%i) = %e:%e\t (%1.5f%%)\n", i, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc);
4780  } ;
4781  }
4782  return result;
4783 }
4784 #endif // USE_HMMDEBUG
4785 
4786 //normalize model (sum to one constraint)
4787 void CHMM::normalize(bool keep_dead_states)
4788 {
4789  int32_t i,j;
4790  const float64_t INF=-1e10;
4791  float64_t sum_p =INF;
4792 
4793  for (i=0; i<N; i++)
4794  {
4795  sum_p=CMath::logarithmic_sum(sum_p, get_p(i));
4796 
4797  float64_t sum_b =INF;
4798  float64_t sum_a =get_q(i);
4799 
4800  for (j=0; j<N; j++)
4801  sum_a=CMath::logarithmic_sum(sum_a, get_a(i,j));
4802 
4803  if (sum_a>CMath::ALMOST_NEG_INFTY/N || (!keep_dead_states) )
4804  {
4805  for (j=0; j<N; j++)
4806  set_a(i,j, get_a(i,j)-sum_a);
4807  set_q(i, get_q(i)-sum_a);
4808  }
4809 
4810  for (j=0; j<M; j++)
4811  sum_b=CMath::logarithmic_sum(sum_b, get_b(i,j));
4812  for (j=0; j<M; j++)
4813  set_b(i,j, get_b(i,j)-sum_b);
4814  }
4815 
4816  for (i=0; i<N; i++)
4817  set_p(i, get_p(i)-sum_p);
4818 
4819  invalidate_model();
4820 }
4821 
// ---------------------------------------------------------------------------
// Append app_model's states to this HMM. The only cross-connections created
// are transitions from this model's states into the appended model's start
// states, weighted q(i)+app_p(j). Both models must share the same alphabet
// size M. The HMM is NOT renormalized afterwards (see SG_WARNING below).
// NOTE(review): this doxygen extract is missing several numbered lines
// (4888, 4891, 4894-4897, 4900-4902, 4905) — presumably the frees of the old
// parameter arrays and the reassignment of n_p/n_q/n_b plus state-array
// bookkeeping; confirm against the original HMM.cpp before editing.
// NOTE(review): 'result' is never set to true in the visible lines, so
// callers appear to always receive false even on success — possibly set on
// an elided line; verify against the original source.
4822 bool CHMM::append_model(CHMM* app_model)
4823 {
4824  bool result=false;
4825  const int32_t num_states=app_model->get_N();
4826  int32_t i,j;
4827 
4828  SG_DEBUG( "cur N:%d M:%d\n", N, M);
4829  SG_DEBUG( "old N:%d M:%d\n", app_model->get_N(), app_model->get_M());
// models can only be concatenated if they emit the same symbol set
4830  if (app_model->get_M() == get_M())
4831  {
// allocate enlarged (N+num_states)-sized parameter arrays
4832  float64_t* n_p=SG_MALLOC(float64_t, N+num_states);
4833  float64_t* n_q=SG_MALLOC(float64_t, N+num_states);
4834  float64_t* n_a=SG_MALLOC(float64_t, (N+num_states)*(N+num_states));
4835  //SG_PRINT("size n_b: %d\n", (N+num_states)*M);
4836  float64_t* n_b=SG_MALLOC(float64_t, (N+num_states)*M);
4837 
4838  //clear n_x
// initialize every entry to log(0) = -infinity
4839  for (i=0; i<N+num_states; i++)
4840  {
4841  n_p[i]=-CMath::INFTY;
4842  n_q[i]=-CMath::INFTY;
4843 
4844  for (j=0; j<N+num_states; j++)
4845  n_a[(N+num_states)*i+j]=-CMath::INFTY;
4846 
4847  for (j=0; j<M; j++)
4848  n_b[M*i+j]=-CMath::INFTY;
4849  }
4850 
4851  //copy models first
4852  // warning pay attention to the ordering of
4853  // transition_matrix_a, observation_matrix_b !!!
4854 
4855  // cur_model
// copy this model's parameters into the first N slots; note that n_a is
// filled as n_a[(N+num_states)*j+i]=a(i,j), i.e. transposed layout
4856  for (i=0; i<N; i++)
4857  {
4858  n_p[i]=get_p(i);
4859 
4860  for (j=0; j<N; j++)
4861  n_a[(N+num_states)*j+i]=get_a(i,j);
4862 
4863  for (j=0; j<M; j++)
4864  {
4865  n_b[M*i+j]=get_b(i,j);
4866  }
4867  }
4868 
4869  // append_model
// copy appended model's parameters shifted by N states
4870  for (i=0; i<app_model->get_N(); i++)
4871  {
4872  n_q[i+N]=app_model->get_q(i);
4873 
4874  for (j=0; j<app_model->get_N(); j++)
4875  n_a[(N+num_states)*(j+N)+(i+N)]=app_model->get_a(i,j);
4876  for (j=0; j<app_model->get_M(); j++)
4877  n_b[M*(i+N)+j]=app_model->get_b(i,j);
4878  }
4879 
4880 
4881  // transition to the two and back
// wire this model's end probability q(i) times the appended model's start
// probability p(j-N) in as a transition i -> j (log-domain sum)
4882  for (i=0; i<N; i++)
4883  {
4884  for (j=N; j<N+num_states; j++)
4885  n_a[(N+num_states)*j + i]=CMath::logarithmic_sum(get_q(i)+app_model->get_p(j-N), n_a[(N+num_states)*j + i]);
4886  }
4887 
4889  N+=num_states;
4890 
4892 
4893  //delete + adjust pointers
// NOTE(review): frees of the old arrays and assignments of n_p/n_q/n_b were
// on lines elided by the extraction — only transition_matrix_a is visible.
4898 
4899  transition_matrix_a=n_a;
4903 
4904  SG_WARNING( "not normalizing anymore, call normalize_hmm to make sure the hmm is valid!!\n");
4906  invalidate_model();
4907  }
4908  else
4909  SG_ERROR( "number of observations is different for append model, doing nothing!\n");
4910 
4911  return result;
4912 }
4913 
// ---------------------------------------------------------------------------
// Append app_model's states, joined through TWO extra "glue" states with
// caller-supplied emission rows: state N emits cur_out[] (reached from every
// state of the current model with its q(i)), state N+1 emits app_out[] and
// fans out into the appended model's start states with its p(i). Hence
// num_states = app_model->get_N()+2. Same alphabet size M required.
// NOTE(review): this doxygen extract is missing lines 4997, 5000, 5003-5006,
// 5009-5011 and 5014 — presumably the frees of the old arrays, the
// n_p/n_q/n_b reassignments and state-array bookkeeping; confirm against the
// original HMM.cpp before editing.
// NOTE(review): 'result' is never set to true in the visible lines — callers
// appear to always get false even on success; possibly set on an elided line.
4914 bool CHMM::append_model(CHMM* app_model, float64_t* cur_out, float64_t* app_out)
4915 {
4916  bool result=false;
4917  const int32_t num_states=app_model->get_N()+2;
4918  int32_t i,j;
4919 
4920  if (app_model->get_M() == get_M())
4921  {
// allocate enlarged parameter arrays (two extra glue states included)
4922  float64_t* n_p=SG_MALLOC(float64_t, N+num_states);
4923  float64_t* n_q=SG_MALLOC(float64_t, N+num_states);
4924  float64_t* n_a=SG_MALLOC(float64_t, (N+num_states)*(N+num_states));
4925  //SG_PRINT("size n_b: %d\n", (N+num_states)*M);
4926  float64_t* n_b=SG_MALLOC(float64_t, (N+num_states)*M);
4927 
4928  //clear n_x
// initialize every entry to log(0) = -infinity
4929  for (i=0; i<N+num_states; i++)
4930  {
4931  n_p[i]=-CMath::INFTY;
4932  n_q[i]=-CMath::INFTY;
4933 
4934  for (j=0; j<N+num_states; j++)
4935  n_a[(N+num_states)*j+i]=-CMath::INFTY;
4936 
4937  for (j=0; j<M; j++)
4938  n_b[M*i+j]=-CMath::INFTY;
4939  }
4940 
4941  //copy models first
4942  // warning pay attention to the ordering of
4943  // transition_matrix_a, observation_matrix_b !!!
4944 
4945  // cur_model
// this model occupies slots [0,N); n_a is filled transposed:
// n_a[(N+num_states)*j+i]=a(i,j)
4946  for (i=0; i<N; i++)
4947  {
4948  n_p[i]=get_p(i);
4949 
4950  for (j=0; j<N; j++)
4951  n_a[(N+num_states)*j+i]=get_a(i,j);
4952 
4953  for (j=0; j<M; j++)
4954  {
4955  n_b[M*i+j]=get_b(i,j);
4956  }
4957  }
4958 
4959  // append_model
// appended model occupies slots [N+2, N+num_states); slots N and N+1 are the
// glue states
4960  for (i=0; i<app_model->get_N(); i++)
4961  {
4962  n_q[i+N+2]=app_model->get_q(i);
4963 
4964  for (j=0; j<app_model->get_N(); j++)
4965  n_a[(N+num_states)*(j+N+2)+(i+N+2)]=app_model->get_a(i,j);
4966  for (j=0; j<app_model->get_M(); j++)
4967  n_b[M*(i+N+2)+j]=app_model->get_b(i,j);
4968  }
4969 
4970  //initialize the two special states
4971 
4972  // output
// caller-provided (log) emission distributions for the two glue states
4973  for (i=0; i<M; i++)
4974  {
4975  n_b[M*N+i]=cur_out[i];
4976  n_b[M*(N+1)+i]=app_out[i];
4977  }
4978 
4979  // transition to the two and back
4980  for (i=0; i<N+num_states; i++)
4981  {
4982  // the first state is only connected to the second
// log-probability 0 == probability 1 for the N -> N+1 transition
4983  if (i==N+1)
4984  n_a[(N+num_states)*i + N]=0;
4985 
4986  // only states of the cur_model can reach the
4987  // first state
4988  if (i<N)
4989  n_a[(N+num_states)*N+i]=get_q(i);
4990 
4991  // the second state is only connected to states of
4992  // the append_model (with probab app->p(i))
4993  if (i>=N+2)
4994  n_a[(N+num_states)*i+(N+1)]=app_model->get_p(i-(N+2));
4995  }
4996 
4998  N+=num_states;
4999 
5001 
5002  //delete + adjust pointers
// NOTE(review): frees of the old arrays and assignments of n_p/n_q/n_b were
// on lines elided by the extraction — only transition_matrix_a is visible.
5007 
5008  transition_matrix_a=n_a;
5012 
5013  SG_WARNING( "not normalizing anymore, call normalize_hmm to make sure the hmm is valid!!\n");
5015  invalidate_model();
5016  }
5017 
5018  return result;
5019 }
5020 
5021 
// ---------------------------------------------------------------------------
// Grow the HMM by num_states extra states. Existing parameters are copied
// over; the new states' parameters are set via VAL_MACRO: log(default_value)
// if default_value != 0, otherwise a log of a random value drawn from
// [MIN_RAND, MAX_RAND]. The model is renormalized at the end.
// NOTE(review): this doxygen extract is missing lines 5062, 5065, 5068-5071
// and 5074-5076 — presumably state-array bookkeeping, the frees of the old
// arrays and the n_p/n_q/n_b reassignments; confirm against the original
// HMM.cpp before editing.
5022 void CHMM::add_states(int32_t num_states, float64_t default_value)
5023 {
5024  int32_t i,j;
5025  const float64_t MIN_RAND=1e-2; //this is the range of the random values for the new variables
5026  const float64_t MAX_RAND=2e-1;
5027 
// allocate enlarged parameter arrays
5028  float64_t* n_p=SG_MALLOC(float64_t, N+num_states);
5029  float64_t* n_q=SG_MALLOC(float64_t, N+num_states);
5030  float64_t* n_a=SG_MALLOC(float64_t, (N+num_states)*(N+num_states));
5031  //SG_PRINT("size n_b: %d\n", (N+num_states)*M);
5032  float64_t* n_b=SG_MALLOC(float64_t, (N+num_states)*M);
5033 
5034  // warning pay attention to the ordering of
5035  // transition_matrix_a, observation_matrix_b !!!
// copy existing parameters; n_a is filled transposed:
// n_a[(N+num_states)*j+i]=a(i,j)
5036  for (i=0; i<N; i++)
5037  {
5038  n_p[i]=get_p(i);
5039  n_q[i]=get_q(i);
5040 
5041  for (j=0; j<N; j++)
5042  n_a[(N+num_states)*j+i]=get_a(i,j);
5043 
5044  for (j=0; j<M; j++)
5045  n_b[M*i+j]=get_b(i,j);
5046  }
5047 
// fill the new states (rows and columns of a, plus p/q/b) with the
// default/random log-value
5048  for (i=N; i<N+num_states; i++)
5049  {
5050  n_p[i]=VAL_MACRO;
5051  n_q[i]=VAL_MACRO;
5052 
5053  for (j=0; j<N; j++)
5054  n_a[(N+num_states)*i+j]=VAL_MACRO;
5055 
5056  for (j=0; j<N+num_states; j++)
5057  n_a[(N+num_states)*j+i]=VAL_MACRO;
5058 
5059  for (j=0; j<M; j++)
5060  n_b[M*i+j]=VAL_MACRO;
5061  }
5063  N+=num_states;
5064 
5066 
5067  //delete + adjust pointers
// NOTE(review): frees of the old arrays and assignments of n_p/n_q/n_b were
// on lines elided by the extraction — only transition_matrix_a is visible.
5072 
5073  transition_matrix_a=n_a;
5077 
5078  invalidate_model();
5079  normalize();
5080 }
// ---------------------------------------------------------------------------
// NOTE(review): the signature line (5082) was lost in the extraction —
// presumably this is void CHMM::chop(float64_t value): prune all model
// parameters whose (linear-space) probability falls below 'value', then
// renormalize. Confirm against the original HMM.cpp.
// NOTE(review): the statements executed when each condition fires (lines
// 5089, 5092, 5097, 5103) were also elided — presumably set_p/set_q/set_a/
// set_b calls that clamp the parameter; as shown, the if-statements have no
// visible bodies, so do not edit this block without the original source.
5081 
5083 {
5084  for (int32_t i=0; i<N; i++)
5085  {
5086  int32_t j;
5087 
// chop start probability of state i if below threshold
5088  if (exp(get_p(i)) < value)
5090 
// chop end probability of state i if below threshold
5091  if (exp(get_q(i)) < value)
5093 
// chop small transition probabilities a(i,j)
5094  for (j=0; j<N; j++)
5095  {
5096  if (exp(get_a(i,j)) < value)
5098  }
5099 
// chop small emission probabilities b(i,j)
5100  for (j=0; j<M; j++)
5101  {
5102  if (exp(get_b(i,j)) < value)
5104  }
5105  }
// restore sum-to-one constraints after pruning
5106  normalize();
5107  invalidate_model();
5108 }
5109 
// ---------------------------------------------------------------------------
// Train a strictly left-to-right ("linear") HMM directly from observation
// counts: state i emits symbol histogram hist[i*M .. i*M+M-1], transitions go
// only i -> i+1. With right_align the observation strings are aligned to the
// model's end (state N-1), otherwise to its start (state 0). Emission
// probabilities get PSEUDO-count smoothing; the result is converted to
// log-space at the end. Returns false when no observations are set.
// NOTE(review): line 5119 of the original was elided by the extraction —
// presumably an assertion on the maximum observation length; confirm against
// the original HMM.cpp.
5110 bool CHMM::linear_train(bool right_align)
5111 {
5112  if (p_observations)
5113  {
// per-(state,symbol) emission counts and per-state start/end counts
5114  int32_t histsize=(get_M()*get_N());
5115  int32_t* hist=SG_MALLOC(int32_t, histsize);
5116  int32_t* startendhist=SG_MALLOC(int32_t, get_N());
5117  int32_t i,dim;
5118 
5120 
5121  for (i=0; i<histsize; i++)
5122  hist[i]=0;
5123 
5124  for (i=0; i<get_N(); i++)
5125  startendhist[i]=0;
5126 
5127  if (right_align)
5128  {
// right-aligned: a string of length len occupies states N-len .. N-1
5129  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
5130  {
5131  int32_t len=0;
5132  bool free_vec;
5133  uint16_t* obs=p_observations->get_feature_vector(dim, len, free_vec);
5134 
5135  ASSERT(len<=get_N());
// count which state each string STARTS in
5136  startendhist[(get_N()-len)]++;
5137 
5138  for (i=0;i<len;i++)
5139  hist[(get_N()-len+i)*get_M() + *obs++]++;
5140 
// NOTE(review): 'obs' was advanced past the vector start by the loop above
// before being handed back — verify free_feature_vector tolerates this.
5141  p_observations->free_feature_vector(obs, dim, free_vec);
5142  }
5143 
// all strings end in the last state
5144  set_q(get_N()-1, 1);
5145  for (i=0; i<get_N()-1; i++)
5146  set_q(i, 0);
5147 
// start distribution from observed (smoothed) start-state counts
5148  for (i=0; i<get_N(); i++)
5149  set_p(i, startendhist[i]+PSEUDO);
5150 
// strict chain: a(i,i+1)=1, everything else 0
5151  for (i=0;i<get_N();i++)
5152  {
5153  for (int32_t j=0; j<get_N(); j++)
5154  {
5155  if (i==j-1)
5156  set_a(i,j, 1);
5157  else
5158  set_a(i,j, 0);
5159  }
5160  }
5161  }
5162  else
5163  {
// left-aligned: a string of length len occupies states 0 .. len-1
5164  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
5165  {
5166  int32_t len=0;
5167  bool free_vec;
5168  uint16_t* obs=p_observations->get_feature_vector(dim, len, free_vec);
5169 
5170  ASSERT(len<=get_N());
5171  for (i=0;i<len;i++)
5172  hist[i*get_M() + *obs++]++;
5173 
// count which state each string ENDS in
5174  startendhist[len-1]++;
5175 
// NOTE(review): same advanced-pointer concern as in the right_align branch.
5176  p_observations->free_feature_vector(obs, dim, free_vec);
5177  }
5178 
// all strings start in the first state
5179  set_p(0, 1);
5180  for (i=1; i<get_N(); i++)
5181  set_p(i, 0);
5182 
// end distribution from observed (smoothed) end-state counts
5183  for (i=0; i<get_N(); i++)
5184  set_q(i, startendhist[i]+PSEUDO);
5185 
5186  int32_t total=p_observations->get_num_vectors();
5187 
// chain transition a(i,i+1) is weighted by the number of strings that
// are still "alive" past state i (total minus strings already ended)
5188  for (i=0;i<get_N();i++)
5189  {
5190  total-= startendhist[i] ;
5191 
5192  for (int32_t j=0; j<get_N(); j++)
5193  {
5194  if (i==j-1)
5195  set_a(i,j, total+PSEUDO);
5196  else
5197  set_a(i,j, 0);
5198  }
5199  }
// every string must have ended somewhere
5200  ASSERT(total==0);
5201  }
5202 
// smoothed emission probabilities, normalized within each symbol group
// (get_masked_symbols/get_original_num_symbols handle higher-order symbols)
5203  for (i=0;i<get_N();i++)
5204  {
5205  for (int32_t j=0; j<get_M(); j++)
5206  {
5207  float64_t sum=0;
5208  int32_t offs=i*get_M()+ p_observations->get_masked_symbols((uint16_t) j, (uint8_t) 254);
5209 
5210  for (int32_t k=0; k<p_observations->get_original_num_symbols(); k++)
5211  sum+=hist[offs+k];
5212 
5213  set_b(i,j, (PSEUDO+hist[i*get_M()+j])/(sum+PSEUDO*p_observations->get_original_num_symbols()));
5214  }
5215  }
5216 
5217  SG_FREE(hist);
5218  SG_FREE(startendhist);
// parameters were set in linear space above; switch the model to log space
5219  convert_to_log();
5220  invalidate_model();
5221  return true;
5222  }
5223  else
5224  return false;
5225 }
5226 
// ---------------------------------------------------------------------------
// NOTE(review): the signature line (5227) was lost in the extraction —
// presumably void CHMM::set_observation_nocache(CStringFeatures<uint16_t>*
// obs): attach an observation set WITHOUT (re)allocating the alpha/beta
// caches. Confirm against the original HMM.cpp.
// NOTE(review): lines 5244, 5249, 5255 and 5260 were elided — presumably the
// frees/NULLing of the remaining cached tables (e.g. state sequences);
// confirm before editing.
// NOTE(review): p_observations is overwritten without releasing a previously
// held reference, and the 'if (obs)' after ASSERT(obs) is redundant — verify
// intent against the original source.
5228 {
5229  ASSERT(obs);
5230  p_observations=obs;
5231  SG_REF(obs);
5232 
// the model can only emit up to M distinct symbols
5233  if (obs)
5234  if (obs->get_num_symbols() > M)
5235  SG_ERROR( "number of symbols in observation (%ld) larger than M (%d)\n", (long) obs->get_num_symbols(), M);
5236 
// drop per-thread (or global) dynamic-programming tables unless they are
// shared with another HMM (reused_caches)
5237  if (!reused_caches)
5238  {
5239 #ifdef USE_HMMPARALLEL_STRUCTURES
5240  for (int32_t i=0; i<parallel->get_num_threads(); i++)
5241  {
5242  SG_FREE(alpha_cache[i].table);
5243  SG_FREE(beta_cache[i].table);
5245  SG_FREE(path[i]);
5246 
5247  alpha_cache[i].table=NULL;
5248  beta_cache[i].table=NULL;
5250  path[i]=NULL;
5251  } ;
5252 #else
5253  SG_FREE(alpha_cache.table);
5254  SG_FREE(beta_cache.table);
5256  SG_FREE(path);
5257 
5258  alpha_cache.table=NULL;
5259  beta_cache.table=NULL;
5261  path=NULL;
5262 
5263 #endif //USE_HMMPARALLEL_STRUCTURES
5264  }
5265 
5266  invalidate_model();
5267 }
5268 
5270 {
5271  ASSERT(obs);
5272  SG_REF(obs);
5273  p_observations=obs;
5274 
5275  /* from Distribution, necessary for calls to base class methods, like
5276  * get_log_likelihood_sample():
5277  */
5278  SG_REF(obs);
5279  features=obs;
5280 
5281  SG_DEBUG("num symbols alphabet: %ld\n", obs->get_alphabet()->get_num_symbols());
5282  SG_DEBUG("num symbols: %ld\n", obs->get_num_symbols());
5283  SG_DEBUG("M: %d\n", M);
5284 
5285  if (obs)
5286  {
5287  if (obs->get_num_symbols() > M)
5288  {
5289  SG_ERROR( "number of symbols in observation (%ld) larger than M (%d)\n", (long) obs->get_num_symbols(), M);
5290  }
5291  }
5292 
5293  if (!reused_caches)
5294  {
5295 #ifdef USE_HMMPARALLEL_STRUCTURES
5296  for (int32_t i=0; i<parallel->get_num_threads(); i++)
5297  {
5298  SG_FREE(alpha_cache[i].table);
5299  SG_FREE(beta_cache[i].table);
5301  SG_FREE(path[i]);
5302 
5303  alpha_cache[i].table=NULL;
5304