ThreeB 1.1
#include <gvars3/instances.h>
#include <cvd/image_io.h>
#include <cvd/convolution.h>
#include <TooN/wls.h>
#include <tr1/tuple>

#include "storm_imagery.h"
#include "debug.h"
#include "utility.h"

using namespace CVD;
using namespace TooN;
using namespace GVars3;
using namespace std;
using namespace std::tr1;

/**Load all images from disk and do the initial preprocessing.

@param names List of filenames to load.
@return preprocessed images.
@ingroup gStormImages
**/
vector<Image<float> > load_and_preprocess_images2(const vector<string>& names)
{
    vector<Image<float> > ims;

    //Load images
    for(unsigned int i=0; i < names.size(); i++)
    {
        Image<float> im = img_load(names[i]);
        ims.push_back(im);

        if(ims.back().size() != ims[0].size())
        {
            cerr << "Error with image " << names[i] << ": all images must be the same size!\n";
            exit(1);
        }
    }

    double mean, variance;
    tie(mean, variance) = mean_and_variance(ims);

    {
        for(unsigned int i=0; i < ims.size(); i++)
            transform(ims[i].begin(), ims[i].end(), ims[i].begin(), bind2nd(minus<double>(), mean));

        for(unsigned int i=0; i < ims.size(); i++)
            transform(ims[i].begin(), ims[i].end(), ims[i].begin(), bind1st(multiplies<double>(), 1/sqrt(variance)));
    }

    tie(mean, variance) = mean_and_variance(ims);

    cerr << "Rescaled:\n";
    cerr << "mean = " << mean << endl;
    cerr << "std = " << sqrt(variance) << endl;

    //Normalize...

    //Fit the background model
    ImageRef size = ims[0].size();
    Vector<10> p = Zeros;
    p[6] = -3;
    p[9] = -4;

    Image<Vector<6> > monomials(size);
    Image<double> polynomial(size);
    for(int yy=0; yy < size.y; yy++)
        for(int xx=0; xx < size.x; xx++)
        {
            double x = xx * 2. / size.x - 1;
            double x2 = x*x;
            double y = yy * 2. / size.y - 1;
            double y2 = y*y;
            monomials[yy][xx] = makeVector(1, x, y, x2, x*y, y2);
        }

    for(int i=0; i < 100; i++)
    {
        for(int yy=0; yy < size.y; yy++)
            for(int xx=0; xx < size.x; xx++)
                polynomial[yy][xx] = monomials[yy][xx] * p.slice<0,6>();

        WLS<10> wls;
        for(unsigned int i=0; i < ims.size(); i++)
            for(int yy=0; yy < size.y; yy++)
                for(int xx=0; xx < size.x; xx++)
                {
                    double t = i * 1. / ims.size();
                    double func = polynomial[yy][xx] * (exp(p[6]*t) + p[8]*exp(p[9]*t)) + p[7];

                    Vector<10> mJ;

                    mJ.slice<0,6>() = exp(p[6]*t) * monomials[yy][xx];
                    //mJ.slice<3,3>() = Zeros;
                    mJ[6] = polynomial[yy][xx] * exp(p[6]*t) * t;
                    //mJ[6] = func * t;
                    mJ[7] = 1;

                    mJ[8] = polynomial[yy][xx] * exp(p[9]*t);
                    mJ[9] = polynomial[yy][xx] * exp(p[9]*t) * t * p[8];

                    double err = ims[i][yy][xx] - func;

                    double w;
                    if(err > 0)
                        w = .01 / (abs(err) + .01);
                    else
                        w = 1;

                    wls.add_mJ(func - ims[i][yy][xx], -mJ, w);
                }

        wls.add_prior(10);
        wls.compute();

        p += wls.get_mu();

        cout << p << endl << endl;
    }

    for(unsigned int i=0; i < ims.size(); i++)
        for(int yy=0; yy < size.y; yy++)
            for(int xx=0; xx < size.x; xx++)
            {
                double x = xx * 2. / size.x - 1;
                double x2 = x*x;
                double y = yy * 2. / size.y - 1;
                double y2 = y*y;
                double t = i * 1. / ims.size();
                Vector<6> f = makeVector(1, x, y, x2, x*y, y2);

                double func = f * p.slice<0,6>() * (exp(p[6]*t) + p[8]*exp(p[9]*t)) + p[7];
                ims[i][yy][xx] -= func;
            }

    tie(mean, variance) = mean_and_variance(ims);

    //A sanity check.
    cerr << "The mean should be small compared to std:\n";
    cerr << "mean = " << mean << endl;
    cerr << "std = " << sqrt(variance) << endl;

    //Scale by the variance.
    {
        for(unsigned int i=0; i < ims.size(); i++)
            transform(ims[i].begin(), ims[i].end(), ims[i].begin(), bind1st(multiplies<double>(), 1/sqrt(variance)));
    }

    tie(mean, variance) = mean_and_variance(ims);

    cerr << "Rescaled:\n";
    cerr << "mean = " << mean << endl;
    cerr << "std = " << sqrt(variance) << endl;

    return ims;
}
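//Sketch for orientation (not part of the original source): the loop above fits
//a slowly varying background model of the form
//
//    b(x, y, t) = P(x, y) * (exp(p[6]*t) + p[8]*exp(p[9]*t)) + p[7],
//
//where P(x, y) = p[0] + p[1]*x + p[2]*y + p[3]*x^2 + p[4]*x*y + p[5]*y^2 and t
//is the frame index rescaled to [0, 1), using iteratively reweighted least
//squares (positive residuals are down-weighted so that bright spots do not drag
//the background estimate up). The helper below simply evaluates that model;
//the name evaluate_background_model is illustrative and does not appear in ThreeB.
static double evaluate_background_model(const Vector<10>& p, double x, double y, double t)
{
    Vector<6> monomial = makeVector(1, x, y, x*x, x*y, y*y);
    return monomial * p.slice<0,6>() * (exp(p[6]*t) + p[8]*exp(p[9]*t)) + p[7];
}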
/**Load all images from disk and do the initial preprocessing. Currently
this is a high pass filter to make the resulting images zero mean.

The filter is controlled with the \c preprocess.lpf and \c preprocess.skip GVars.

See also load_and_preprocess_image()

@param names List of filenames to load.
@return preprocessed images.
@ingroup gStormImages
**/
vector<Image<float> > load_and_preprocess_images(const vector<string>& names)
{
    vector<Image<float> > ims;

    //float wide = GV3::get<float>("preprocess.lpf", 0., -1);
    //bool p = GV3::get<bool>("preprocess.skip", 0, -1);

    for(unsigned int i=0; i < names.size(); i++)
    {
        Image<float> im = img_load(names[i]);

        ims.push_back(preprocess_image(im));

        if(ims.back().size() != ims[0].size())
        {
            cerr << "Error with image " << names[i] << ": all images must be the same size!\n";
            exit(1);
        }
    }
    return ims;
}
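//Illustrative usage sketch (not part of the original source). The helper name
//and the file names below are placeholders for demonstration only; any format
//supported by CVD::img_load will do.
static vector<Image<float> > example_load_frames()
{
    vector<string> names;
    names.push_back("frames/img_000001.tif");
    names.push_back("frames/img_000002.tif");
    return load_and_preprocess_images(names);
}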
/**Compute the mean and variance of the (on average) darkest pixels, in order
to find the correct scaling, by examining the background.
*/
pair<double, double> auto_fixed_scaling(const vector<Image<float> >& ims, double frac)
{
    assert_same_size(ims);

    //Compute the mean image (ish)
    Image<double> ave(ims[0].size());
    ave.fill(0);
    for(unsigned int i=0; i < ims.size(); i++)
        for(int y=0; y < ave.size().y; y++)
            for(int x=0; x < ave.size().x; x++)
                ave[y][x] += ims[i][y][x];

    //Find the smallest N% of the pixels...
    vector<pair<double, ImageRef> > pixels;
    for(int y=0; y < ave.size().y; y++)
        for(int x=0; x < ave.size().x; x++)
            pixels.push_back(make_pair(ave[y][x], ImageRef(x,y)));

    int npix = (int)floor(frac * pixels.size() + 0.5);
    npix = max(0, min(npix, (int)pixels.size()));

    nth_element(pixels.begin(), pixels.begin() + npix, pixels.end());

    pixels.resize(npix);

    //Now compute the mean and variance of those pixels.
    double sum=0, sum2=0;

    for(unsigned int i=0; i < ims.size(); i++)
    {
        for(unsigned int j=0; j < pixels.size(); j++)
        {
            sum += ims[i][pixels[j].second];
            sum2 += sq(ims[i][pixels[j].second]);
        }
    }

    double num = 1.0 * pixels.size() * ims.size();
    double mean = sum / num;
    double std = sqrt(((sum2/num) - mean*mean) * num / (num-1));

    cout << "Automatic determination of fixed scaling:" << endl
         << "mean = " << mean << endl
         << "std = " << std << endl
         << "sqrt(mean) = " << sqrt(mean*255)/255 << endl;

    return make_pair(mean, std);
}
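//Illustrative sketch (not part of the original source): how the scaling
//returned by auto_fixed_scaling() would be applied to a set of frames. The
//fraction 0.1 and the helper name are assumptions; load_and_normalize_images()
//below performs the equivalent step, driven by the preprocess.fixed_scaling GVars.
static void example_apply_auto_scaling(vector<Image<float> >& ims)
{
    double sub, div;
    tie(sub, div) = auto_fixed_scaling(ims, 0.1);

    for(unsigned int i=0; i < ims.size(); i++)
        for(Image<float>::iterator j=ims[i].begin(); j != ims[i].end(); j++)
            *j = (*j - sub)/div;
}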
/**Wrapper for load_and_preprocess_images() to allow more flexible behaviour.

@param files List of filenames to load.
@return preprocessed images.
@ingroup gStormImages
**/
vector<Image<float> > load_and_normalize_images(const vector<string>& files)
{
    //Load the raw data, and then load the spot parameters.
    vector<Image<float> > ims = load_and_preprocess_images(files);

    double mean, variance;
    tie(mean, variance) = mean_and_variance(ims);

    if(GV3::get<bool>("preprocess.fixed_scaling", 0, FATAL_IF_NOT_DEFINED))
    {
        bool skip = GV3::get<bool>("preprocess.skip");
        if(!skip)
        {
            cerr << "WARNING WARNING WARNING WARNING!!!!!!!!!!!!!!!\n";
            cerr << "preprocessing and fixed scaling selected!!!\n";
            exit(1);
        }

        double sub, div;
        if(GV3::get<bool>("preprocess.fixed_scaling.auto", 0, FATAL_IF_NOT_DEFINED))
        {
            double frac = GV3::get<double>("preprocess.fixed_scaling.auto.proportion", 0, FATAL_IF_NOT_DEFINED);
            tie(sub, div) = auto_fixed_scaling(ims, frac);
        }
        else
        {
            sub = GV3::get<double>("preprocess.fixed_scaling.subtract", 0, FATAL_IF_NOT_DEFINED);
            div = GV3::get<double>("preprocess.fixed_scaling.divide", 0, FATAL_IF_NOT_DEFINED);
        }

        for(unsigned int i=0; i < ims.size(); i++)
            for(Image<float>::iterator j=ims[i].begin(); j != ims[i].end(); j++)
                *j = (*j - sub)/div;
    }
    else
    {
        //A sanity check.
        cerr << "The mean should be small compared to std:\n";
        cerr << "mean = " << mean << endl;
        cerr << "std = " << sqrt(variance) << endl;

        //Scale by the variance.
        {
            for(unsigned int i=0; i < ims.size(); i++)
                transform(ims[i].begin(), ims[i].end(), ims[i].begin(), bind1st(multiplies<double>(), 1/sqrt(variance)));
        }
    }

    tie(mean, variance) = mean_and_variance(ims);

    //A sanity check.
    cerr << "Rescaled:\n";
    cerr << "mean = " << mean << endl;
    cerr << "std = " << sqrt(variance) << endl;

    return ims;
}
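//Illustrative GVars3 configuration fragment (not part of the original source)
//listing the settings consulted by load_and_normalize_images(). Only the
//variable names come from the code above; the values are placeholders.
//
//  preprocess.skip=1
//  preprocess.fixed_scaling=1
//  preprocess.fixed_scaling.auto=1
//  preprocess.fixed_scaling.auto.proportion=0.2
//
//With preprocess.fixed_scaling.auto=0, the scaling is given explicitly instead:
//
//  preprocess.fixed_scaling.subtract=100
//  preprocess.fixed_scaling.divide=10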