// ThreeB 1.1: forward_algorithm.h
#ifndef FORWARD_ALGORITHM_H
#define FORWARD_ALGORITHM_H
#include <tr1/tuple>
#include <tr1/array>
#include <TooN/TooN.h>
#include <vector>
#include <utility>
#include <algorithm>
#include <cmath>

/** Computes the natural logarithm, but returns -1e100 instead of -infinity
for an input of 0. This prevents trapping of FPU exceptions.
@param x \e x
@return ln \e x
@ingroup gUtility
*/
inline double ln(double x)
{
    if(x == 0)
        return -1e100;
    else
        return std::log(x);
}
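/*
A zero probability thus contributes a very large negative log value rather
than -infinity, so expressions such as delta[t-1][i] + ln(A[i][j]) in the
recursions below stay finite:

\code
double x = ln(0);   //-1e100, not -inf: nothing for the FPU to trap
double y = x + 5.0; //still an ordinary, very small log probability
\endcode
*/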
/**
The forward algorithm is defined as:
\f{align}
    \alpha_1(i) &= \pi_i b_i(O_1) \\
    \alpha_t(j) &= b_j(O_t) \sum_i \alpha_{t-1}(i) a_{ij}
\f}
And the probability of observing the data is just:
\f{equation}
    P(O_1 \cdots O_T|\lambda) = P(O|\lambda) = \sum_i \alpha_T(i),
\f}
where the state, \f$\lambda = \{ A, \pi, B \}\f$. All multipliers are much less
than 1, so \f$\alpha\f$ rapidly ends up as zero. Instead, store the logarithm:
\f{align}
    \delta_t(i) &= \ln \alpha_t(i) \\
    \delta_1(i) &= \ln \pi_i + \ln b_i(O_1)
\f}
and the recursion is:
\f{align}
    \delta_t(j) &= \ln b_j(O_t) + \ln \sum_i \alpha_{t-1}(i) a_{ij} \\
                &= \ln b_j(O_t) + \ln \sum_i e^{\delta_{t-1}(i) + \ln a_{ij}}.
\f}
Introducing an arbitrary constant, \f$Z_t(j)\f$, gives:
\f{align}
    \delta_t(j) &= \ln b_j(O_t) + \ln \sum_i e^{Z_t(j)} e^{\delta_{t-1}(i) + \ln a_{ij} - Z_t(j)} \\
                &= \ln b_j(O_t) + Z_t(j) + \ln \sum_i e^{\delta_{t-1}(i) + \ln a_{ij} - Z_t(j)}.
\f}
In order to prevent a loss of scale on the addition, define:
\f{equation}
    Z_t(j) \overset{\text{def}}{=} \operatorname*{max}_i \delta_{t-1}(i) + \ln a_{ij},
\f}
so the largest exponent will be exactly 0. The final log probability is, similarly:
\f{equation}
    \ln P(O|\lambda) = Z + \ln \sum_i e^{\delta_T(i) - Z}.
\f}
\e Z can take any value, but to keep the numbers within a convenient range:
\f{equation}
    Z \overset{\text{def}}{=} \operatorname*{max}_i \delta_T(i).
\f}

For computing derivatives, two useful results are:
\f{align}
    \PD{}{x} \ln f(x) &= \frac{f'(x)}{f(x)}\\
    \PD{}{x} e^{f(x)} &= f'(x) e^{f(x)}.
\f}
There are \e M parameters of \e B, denoted \f$\phi_1 \ldots \phi_M\f$. The derivatives of \e P are:
\f{align}
    \DD P(O|\lambda) &= \SSum \DD e^{\delta_T(i)} \\
                     &= \SSum e^{\delta_T(i)} \DD \delta_T(i)
\f}
Taking derivatives of \f$ \ln P\f$ and rearranging to get numerically more convenient
results gives:
\f{align}
    \DD \ln P(O|\lambda) & = \frac{\DD P(O|\lambda)}{P(O|\lambda)} \\
                         & = \frac{\SSum e^{\delta_T(i)} \DD \delta_T(i)}{P(O|\lambda)}\\
                         & = \SSum e^{\delta_T(i) - \ln P(O|\lambda)} \DD \delta_T(i).
\f}
The derivatives of \f$\delta\f$ are:
\f{align}
    \gdef\dtj{\delta_t(j)}
    \PD{\dtj}{\en} &= \DD \ln \left[ b_j(O_t) \SSum e^{\delta_{t-1}(i) + \ln a_{ij}} \right] \\
                   &= \DD \left[ \ln b_j(O_t) \right] + \frac{\SSum \DD e^{\delta_{t-1}(i) + \ln a_{ij}}}{\SSum e^{\delta_{t-1}(i) + \ln a_{ij}}}\\
    \underset{\text{\tt diff\_delta[t][j]}}{\underbrace{\PD{\dtj}{\en}}} &= \underset{\text{\tt B.diff\_log(j, O[t])}}{\underbrace{\DD \left[ \ln b_j(O_t) \right]}} +
    \frac{\overset{\text{\tt sum\_top}}{\overbrace{\SSum e^{\delta_{t-1}(i) + \ln a_{ij} - Z_t(j)} \DD \delta_{t-1}(i)}}
    }{\underset{\text{\tt sum}}{\underbrace{\SSum e^{\delta_{t-1}(i) + \ln a_{ij} - Z_t(j)}}}},
\f}
with \f$Z_t(j)\f$ as defined in ::forward_algorithm.

For computing second derivatives, with \f$\Grad\f$ yielding column vectors, two useful results are:
\f{align}
    \Hess \ln f(\Vec{x}) & = \frac{\Hess f(\Vec{x})}{f(\Vec{x})} - \frac{\Grad f(\Vec{x}) \Grad f(\Vec{x})\Trn}{f(\Vec{x})^2} \\
    \Hess e^{f(\Vec{x})} & = e^{f(\Vec{x})}(\Grad f(\Vec{x}) \Grad f(\Vec{x})\Trn + \Hess f(\Vec{x})),
\f}
therefore:
\f{equation}
    \Hess \ln P(O|\lambda) = \frac{\Hess P(O|\lambda)}{P(O|\lambda)} - \Grad \ln P(O|\lambda) \Grad \ln P(O|\lambda)\Trn,
\f}
and:
\f{equation}
    \frac{\Hess P(O|\lambda)}{P(O|\lambda)} = \sum_i e^{\delta_T(i) - \ln P(O|\lambda)}\left[ \Grad\delta_T(i) \Grad\delta_T(i)\Trn + \Hess \delta_T(i)\right].
\f}
Define \f$s_t(j)\f$ as:
\f{equation}
    s_t(j) = \sum_i e^{\delta_{t-1}(i) + \ln a_{ij}}
\f}
so that:
\f{equation}
    \delta_t(j) = \ln b_j(O_t) + \ln s_t(j).
\f}
The derivative and Hessian recursions are therefore:
\f{align}
    \Grad \delta_t(j) &= \Grad\ln b_j(O_t) + \frac{\Grad s_t(j)}{s_t(j)} \\
    \Hess \delta_t(j) &= \Hess\ln b_j(O_t) + \frac{\Hess s_t(j)}{s_t(j)} - \frac{\Grad s_t(j)}{s_t(j)}\frac{\Grad s_t(j)}{s_t(j)}\Trn\\
                      &= \underset{\text{\tt B.hess\_log(j, O[t])}}{\underbrace{\Hess\ln b_j(O_t)}} +
    \frac{
    \overset{\text{\tt sum\_top2}}{
    \overbrace{
    \sum_i e^{\delta_{t-1}(i) + \ln a_{ij} - Z_t(j)}\left[\Hess\delta_{t-1}(i) + \Grad\delta_{t-1}(i)\Grad\delta_{t-1}(i)\Trn\right]}}
    }{\text{\tt sum}} - \frac{\text{\tt sum\_top}\ \text{\tt sum\_top}\Trn}{\text{\tt sum}^2}
\f}

@ingroup gHMM
@param A \e A: State transition probabilities.
@param pi \f$\pi\f$: initial state probabilities.
@param O \e O or \e I: the observed data (i.e. the images).
@param B \f$B\f$: A function object giving the (log) probability of an observation given a state, and derivatives with respect to the parameters.
@param compute_deriv Whether to compute the derivative, or return zero.
@param compute_hessian Whether to compute the Hessian, or return zero. This implies \c compute_deriv.
@returns the log probability of observing all the data, and the derivatives of the log probability with respect to the parameters, and the Hessian.
*/
template<int States, class Btype, class Otype>
std::tr1::tuple<double, TooN::Vector<Btype::NumParameters>, TooN::Matrix<Btype::NumParameters> >
forward_algorithm_hessian(TooN::Matrix<States> A, TooN::Vector<States> pi, const Btype& B, const std::vector<Otype>& O, bool compute_deriv=true, bool compute_hessian=true)
{
    using namespace TooN;
    using namespace std;
    using namespace std::tr1;

    if(compute_hessian)
        compute_deriv = true;

    static const int M = Btype::NumParameters;
    int states = pi.size();

    //delta[t][i] = delta_t(i)
    vector<array<double, States> > delta(O.size());

    //diff_delta[t][j][n] = d/d e_n delta_t(j)
    vector<array<Vector<M>, States> > diff_delta(O.size());

    //hess_delta[t][j][m][n] = d^2/(d e_m d e_n) delta_t(j)
    vector<array<Matrix<M>, States> > hess_delta(O.size());

    //Initialization: Eqn 19, P 262
    //Set initial partial log probabilities:
    for(int i=0; i < states; i++)
    {
        delta[0][i] = ln(pi[i]) + B.log(i, O[0]);

        if(compute_deriv)
            diff_delta[0][i] = B.diff_log(i, O[0]);

        if(compute_hessian)
            hess_delta[0][i] = B.hess_log(i, O[0]);
    }

    //Perform the recursion: Eqn 20, P262
    //Note: use T and T-1, rather than T+1 and T.
    for(unsigned int t=1; t < O.size(); t++)
    {
        for(int j=0; j < states; j++)
        {
            double Ztj = -HUGE_VAL; //This is Z_t(j)
            for(int i=0; i < states; i++)
                Ztj = max(Ztj, delta[t-1][i] + ln(A[i][j]));

            double sum=0;
            for(int i=0; i < states; i++)
                sum += exp(delta[t-1][i] + ln(A[i][j]) - Ztj);

            delta[t][j] = B.log(j, O[t]) + Ztj + ln(sum);

            if(compute_deriv)
            {
                Vector<M> sum_top = Zeros;
                for(int i=0; i < states; i++)
                    sum_top += diff_delta[t-1][i] * exp(delta[t-1][i] + ln(A[i][j]) - Ztj);

                diff_delta[t][j] = B.diff_log(j, O[t]) + sum_top / sum;

                if(compute_hessian)
                {
                    Matrix<M> sum_top2 = Zeros;
                    for(int i=0; i < states; i++)
                        sum_top2 += exp(delta[t-1][i] + ln(A[i][j]) - Ztj) * (hess_delta[t-1][i] + diff_delta[t-1][i].as_col() * diff_delta[t-1][i].as_row());

                    hess_delta[t][j] = B.hess_log(j, O[t]) + sum_top2 / sum - sum_top.as_col() * sum_top.as_row() / (sum*sum);
                }
            }
        }
    }

    //Compute the log prob using normalization
    double Z = -HUGE_VAL;
    for(int i=0; i < states; i++)
        Z = max(Z, delta.back()[i]);

    double sum=0;
    for(int i=0; i < states; i++)
        sum += exp(delta.back()[i] - Z);

    double log_prob = Z + ln(sum);

    //Compute the differential of the log using normalization.
    //The convenient normalizer is ln P(O|lambda), which makes the bottom 1.
    Vector<M> diff_log = Zeros;
    for(int i=0; compute_deriv && i < states; i++)
        diff_log += exp(delta.back()[i] - log_prob)*diff_delta.back()[i];

    //Compute the Hessian of the log using the same normalization.
    Matrix<M> hess_log = Zeros;
    for(int i=0; compute_hessian && i < states; i++)
        hess_log += exp(delta.back()[i] - log_prob) * (hess_delta.back()[i] + diff_delta.back()[i].as_col() * diff_delta.back()[i].as_row());

    hess_log -= diff_log.as_col() * diff_log.as_row();

    return make_tuple(log_prob, diff_log, hess_log);
}
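/*
Example of the observation model interface assumed by forward_algorithm_hessian
and the other routines in this file. This is a hypothetical sketch, not part of
ThreeB: a two-state coin model in which state 0 emits heads with fixed
probability 0.5 and state 1 emits heads with probability e, the single model
parameter. The names CoinEmitter, coin_example and e are illustrative only.

\code
struct CoinEmitter
{
    static const int NumParameters = 1; //Length of the parameter vector phi

    double e; //P(heads | state 1): the one free parameter

    CoinEmitter(double e_) : e(e_) {}

    //ln b_i(O): log probability of observation o (1 = heads, 0 = tails) in state i
    double log(int state, int o) const
    {
        double p = (state == 0) ? 0.5 : e;
        return ln(o ? p : 1 - p);
    }

    //d/de ln b_i(O): only state 1 depends on the parameter
    TooN::Vector<1> diff_log(int state, int o) const
    {
        TooN::Vector<1> d = TooN::Zeros;
        if(state == 1)
            d[0] = o ? 1/e : -1/(1-e);
        return d;
    }

    //d^2/de^2 ln b_i(O)
    TooN::Matrix<1> hess_log(int state, int o) const
    {
        TooN::Matrix<1> h = TooN::Zeros;
        if(state == 1)
            h[0][0] = o ? -1/(e*e) : -1/((1-e)*(1-e));
        return h;
    }
};

//Usage: 2 hidden states, 3 observed coin flips.
void coin_example()
{
    TooN::Matrix<2> A;                //transition probabilities
    A[0][0] = 0.95; A[0][1] = 0.05;
    A[1][0] = 0.10; A[1][1] = 0.90;

    TooN::Vector<2> pi;               //initial state distribution
    pi[0] = 0.5; pi[1] = 0.5;

    std::vector<int> O;               //observed flips
    O.push_back(1); O.push_back(1); O.push_back(0);

    double lp;
    TooN::Vector<1> d;
    TooN::Matrix<1> h;
    std::tr1::tie(lp, d, h) = forward_algorithm_hessian(A, pi, CoinEmitter(0.8), O);
}
\endcode
*/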
/**
Run the forward algorithm and return the log probability.
@ingroup gHMM
@param A \e A: State transition probabilities.
@param pi \f$\pi\f$: initial state probabilities.
@param O \e O or \e I: the observed data (i.e. the images).
@param B \f$B\f$: A function object giving the (log) probability of an observation given a state.
@returns the log probability of observing all the data.
*/
template<int States, class Btype, class Otype>
double forward_algorithm(TooN::Matrix<States> A, TooN::Vector<States> pi, const Btype& B, const std::vector<Otype>& O)
{
    using namespace TooN;
    using namespace std;
    using namespace std::tr1;

    int states = pi.size();

    //delta[t][i] = delta_t(i)
    vector<array<double, States> > delta(O.size());

    //Initialization: Eqn 19, P 262
    //Set initial partial log probabilities:
    for(int i=0; i < states; i++)
        delta[0][i] = ln(pi[i]) + B.log(i, O[0]);

    //Perform the recursion: Eqn 20, P262
    //Note: use T and T-1, rather than T+1 and T.
    for(unsigned int t=1; t < O.size(); t++)
    {
        for(int j=0; j < states; j++)
        {
            double Ztj = -HUGE_VAL; //This is Z_t(j)
            for(int i=0; i < states; i++)
                Ztj = max(Ztj, delta[t-1][i] + ln(A[i][j]));

            double sum=0;
            for(int i=0; i < states; i++)
                sum += exp(delta[t-1][i] + ln(A[i][j]) - Ztj);

            delta[t][j] = B.log(j, O[t]) + Ztj + ln(sum);
        }
    }

    //Compute the log prob using normalization
    double Z = -HUGE_VAL;
    for(int i=0; i < states; i++)
        Z = max(Z, delta.back()[i]);

    double sum=0;
    for(int i=0; i < states; i++)
        sum += exp(delta.back()[i] - Z);

    double log_prob = Z + ln(sum);

    return log_prob;
}
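/*
Continuing the hypothetical coin_example() sketch above: the plain forward
algorithm only ever calls B.log(), so any observation model providing just
that member is sufficient here.

\code
double lp = forward_algorithm(A, pi, CoinEmitter(0.8), O);
\endcode
*/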
/**
Run the forward algorithm and return the log probability and its derivatives.
@ingroup gHMM
@param A \e A: State transition probabilities.
@param pi \f$\pi\f$: initial state probabilities.
@param O \e O or \e I: the observed data (i.e. the images).
@param B \f$B\f$: A function object giving the (log) probability of an observation given a state, and derivatives with respect to the parameters.
@returns the log probability of observing all the data, and its derivatives with respect to the parameters.
*/
template<int States, class Btype, class Otype>
std::pair<double, TooN::Vector<Btype::NumParameters> >
forward_algorithm_deriv(TooN::Matrix<States> A, TooN::Vector<States> pi, const Btype& B, const std::vector<Otype>& O)
{
    using namespace std::tr1;
    double p;
    TooN::Vector<Btype::NumParameters> v;
    tie(p, v, ignore) = forward_algorithm_hessian(A, pi, B, O, true, false);
    return std::make_pair(p, v);
}
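/*
A quick way to sanity-check an observation model's diff_log() is to compare
the analytic derivative against a central finite difference of the log
probability. A sketch, reusing A, pi and O from the hypothetical
coin_example() above:

\code
std::pair<double, TooN::Vector<1> > r = forward_algorithm_deriv(A, pi, CoinEmitter(0.8), O);

const double eps = 1e-6;
double fd = (forward_algorithm(A, pi, CoinEmitter(0.8 + eps), O)
           - forward_algorithm(A, pi, CoinEmitter(0.8 - eps), O)) / (2*eps);

//r.second[0] and fd should agree to several significant figures.
\endcode
*/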
/**
Run the forward algorithm and return the log partials (delta).
@ingroup gHMM
@param A \e A: State transition probabilities.
@param pi \f$\pi\f$: initial state probabilities.
@param O \e O or \e I: the observed data (i.e. the images).
@param B \f$B\f$: A function object giving the (log) probability of an observation given a state.
@returns the \f$\delta\f$ (log partial probability) values for every time step and state.
*/
template<int States, class Btype, class Otype>
std::vector<std::tr1::array<double, States> >
forward_algorithm_delta(TooN::Matrix<States> A, TooN::Vector<States> pi, const Btype& B, const std::vector<Otype>& O)
{
    using namespace TooN;
    using namespace std;
    using namespace std::tr1;

    int states = pi.size();

    //delta[t][i] = delta_t(i)
    vector<array<double, States> > delta(O.size());

    //Initialization: Eqn 19, P 262
    //Set initial partial log probabilities:
    for(int i=0; i < states; i++)
        delta[0][i] = ln(pi[i]) + B.log(i, O[0]);

    //Forward pass...
    //Perform the recursion: Eqn 20, P262
    //Note: use T and T-1, rather than T+1 and T.
    for(unsigned int t=1; t < O.size(); t++)
    {
        for(int j=0; j < states; j++)
        {
            double Ztj = -HUGE_VAL; //This is Z_t(j)
            for(int i=0; i < states; i++)
                Ztj = max(Ztj, delta[t-1][i] + ln(A[i][j]));

            double sum=0;
            for(int i=0; i < states; i++)
                sum += exp(delta[t-1][i] + ln(A[i][j]) - Ztj);

            delta[t][j] = B.log(j, O[t]) + Ztj + ln(sum);
        }
    }

    return delta;
}

/**
Run the forward algorithm and return the log partials (delta) in a
caller-supplied buffer, precomputing the elementwise log of \e A.
@ingroup gHMM
@param A \e A: State transition probabilities.
@param pi \f$\pi\f$: initial state probabilities.
@param O \e O or \e I: the observed data (i.e. the images).
@param B \f$B\f$: A function object giving the (log) probability of an observation given a state.
@param delta the \f$\delta\f$ values
*/
template<int States, class Btype, class Otype>
void forward_algorithm_delta2(TooN::Matrix<States> A, TooN::Vector<States> pi, const Btype& B, const std::vector<Otype>& O, std::vector<std::tr1::array<double, States> >& delta)
{
    using namespace TooN;
    using namespace std;
    using namespace std::tr1;

    int states = pi.size();

    //delta[t][i] = delta_t(i)
    delta.resize(O.size());

    //Initialization: Eqn 19, P 262
    //Set initial partial log probabilities:
    for(int i=0; i < states; i++)
        delta[0][i] = ln(pi[i]) + B.log(i, O[0]);

    //Precompute the elementwise log of A, so ln() is not called in the
    //inner loop below.
    Matrix<States> lA;
    for(int r=0; r < States; r++)
        for(int c=0; c < States; c++)
            lA[r][c] = ln(A[r][c]);

    //Forward pass...
    //Perform the recursion: Eqn 20, P262
    //Note: use T and T-1, rather than T+1 and T.
    for(unsigned int t=1; t < O.size(); t++)
    {
        for(int j=0; j < states; j++)
        {
            double Ztj = -HUGE_VAL; //This is Z_t(j)
            for(int i=0; i < states; i++)
                Ztj = max(Ztj, delta[t-1][i] + lA[i][j]);

            double sum=0;
            for(int i=0; i < states; i++)
                sum += exp(delta[t-1][i] + lA[i][j] - Ztj);

            delta[t][j] = B.log(j, O[t]) + Ztj + ln(sum);
        }
    }
}
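/*
forward_algorithm_delta2 computes exactly the same partials as
forward_algorithm_delta, but writes into a caller-supplied buffer (so storage
can be reused across calls) and hoists the ln(A[i][j]) computation out of the
recursion, avoiding States*States calls to ln() per time step.

The log probability can be recovered from the partials with the same
max-normalization trick used throughout this file. A sketch, reusing the
2-state setup from the hypothetical coin_example() above:

\code
std::vector<std::tr1::array<double, 2> > delta =
    forward_algorithm_delta(A, pi, CoinEmitter(0.8), O);

double Z = std::max(delta.back()[0], delta.back()[1]);
double log_prob = Z + ln(std::exp(delta.back()[0] - Z) + std::exp(delta.back()[1] - Z));
\endcode
*/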
/**
Run the forward-backward algorithm and return the log partials (delta and epsilon).
@ingroup gHMM
@param A \e A: State transition probabilities.
@param pi \f$\pi\f$: initial state probabilities.
@param O \e O or \e I: the observed data (i.e. the images).
@param B \f$B\f$: A function object giving the (log) probability of an observation given a state.
@returns the forward and backward log partials, \f$\delta\f$ and \f$\epsilon\f$.
*/
template<int States, class Btype, class Otype>
std::pair<std::vector<std::tr1::array<double, States> >, std::vector<std::tr1::array<double, States> > >
forward_backward_algorithm(TooN::Matrix<States> A, TooN::Vector<States> pi, const Btype& B, const std::vector<Otype>& O)
{
    using namespace TooN;
    using namespace std;
    using namespace std::tr1;

    int states = pi.size();

    //Forward pass: delta[t][i] = delta_t(i)
    vector<array<double, States> > delta = forward_algorithm_delta(A, pi, B, O);

    //Backward pass
    //Epsilon is log beta
    vector<array<double, States> > epsilon(O.size());

    //Initialize beta to 1, i.e. epsilon to 0
    for(int i=0; i < states; i++)
        epsilon[O.size()-1][i] = 0;

    //Perform the backwards recursion
    for(int t=(int)O.size()-2; t >= 0; t--)
    {
        for(int i=0; i < states; i++)
        {
            //Find a normalizing constant
            double Z = -HUGE_VAL;

            for(int j=0; j < states; j++)
                Z = max(Z, ln(A[i][j]) + B.log(j, O[t+1]) + epsilon[t+1][j]);

            double sum=0;
            for(int j=0; j < states; j++)
                sum += exp(ln(A[i][j]) + B.log(j, O[t+1]) + epsilon[t+1][j] - Z);

            epsilon[t][i] = ln(sum) + Z;
        }
    }

    return make_pair(delta, epsilon);
}
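/*
The forward and backward partials together give the posterior probability of
occupying state i at time t:
gamma_t(i) = exp(delta_t(i) + epsilon_t(i) - ln P(O|lambda)).
A sketch of computing this for one time step, again reusing the 2-state
setup from the hypothetical coin_example() above:

\code
std::pair<std::vector<std::tr1::array<double, 2> >,
          std::vector<std::tr1::array<double, 2> > > de =
    forward_backward_algorithm(A, pi, CoinEmitter(0.8), O);

int t = 0;
double l0 = de.first[t][0] + de.second[t][0]; //ln(alpha_t(0) * beta_t(0))
double l1 = de.first[t][1] + de.second[t][1];

double Z = std::max(l0, l1);
double g0 = std::exp(l0 - Z), g1 = std::exp(l1 - Z);
double gamma0 = g0 / (g0 + g1); //posterior P(state 0 at time t | O)
\endcode
*/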
/*struct RngDrand48
{
    double operator()()
    {
        return drand48();
    }
};*/

///Select an element from the container v, assuming that v is a
///probability distribution over elements up to some scale.
///@param v Unscaled probability distribution
///@param scale Scale of v
///@param rng Random number generator to use
///@ingroup gHMM
template<class A, class Rng>
int select_random_element(const A& v, const double scale, Rng& rng)
{
    double total = 0, choice = rng()*scale;

    for(int i=0; i < (int)v.size(); i++)
    {
        total += v[i];
        if(choice <= total)
            return i;
    }
    return v.size()-1;
}

///Select an element from the array a, assuming that a stores unscaled
///log probabilities of the elements.
///@param a Unscaled probability distribution, stored as logarithms.
///@param rng Random number generator to use
///@ingroup gHMM
template<int N, class Rng>
int sample_unscaled_log(std::tr1::array<double, N> a, Rng& rng)
{
    //Shift by the largest log value before exponentiating, so the largest
    //unscaled probability is exactly 1.
    double hi = *std::max_element(a.begin(), a.end());
    double sum = 0;

    for(unsigned int i=0; i < a.size(); i++)
    {
        a[i] = std::exp(a[i] - hi);
        sum += a[i];
    }

    return select_random_element(a, sum, rng);
}

///An implementation of the backwards sampling part of the forwards filtering/backwards sampling algorithm.
///See `Monte Carlo smoothing for non-linear time series', Godsill and Doucet, JASA 2004.
///@param A HMM transition matrix.
///@param delta Forward partial probabilities stored as logarithms.
///@param rng Random number generator to use
///@returns state at each time step.
///@ingroup gHMM
template<int States, class StateType, class Rng>
std::vector<StateType> backward_sampling(TooN::Matrix<States> A, const std::vector<std::tr1::array<double, States> >& delta, Rng& rng)
{
    //Compute the elementwise log of A
    for(int r=0; r < A.num_rows(); r++)
        for(int c=0; c < A.num_cols(); c++)
            A[r][c] = ln(A[r][c]);

    std::vector<StateType> samples(delta.size());

    //Sample the final state from the forward partials alone.
    samples.back() = sample_unscaled_log<States, Rng>(delta.back(), rng);

    //A[i][j] now holds the log probability of moving from state i at time t
    //to state j at time t+1.
    for(int i=(int)delta.size()-2; i >= 0; i--)
    {
        std::tr1::array<double, States> reverse_probabilities = delta[i];

        for(int j=0; j < States; j++)
            reverse_probabilities[j] += A[j][samples[i+1]];

        samples[i] = sample_unscaled_log<States, Rng>(reverse_probabilities, rng);
    }

    return samples;
}

/*
template<int States, class StateType>
std::vector<StateType> backward_sampling(const TooN::Matrix<States>& A, const std::vector<std::tr1::array<double, States> >& delta)
{
    RngDrand48 d;
    return backward_sampling<States, StateType, RngDrand48>(A, delta, d);
}

///@overload
template<int States>
std::vector<int> backward_sampling(const TooN::Matrix<States>& A, const std::vector<std::tr1::array<double, States> >& delta)
{
    RngDrand48 d;
    return backward_sampling<States, int, RngDrand48>(A, delta, d);
}
*/

#endif