// libeblearn
// /home/rex/ebltrunk/core/libeblearn/include/ebl_machines.hpp
00001 /***************************************************************************
00002  *   Copyright (C) 2008 by Yann LeCun and Pierre Sermanet *
00003  *   yann@cs.nyu.edu, pierre.sermanet@gmail.com *
00004  *
00005  * Redistribution and use in source and binary forms, with or without
00006  * modification, are permitted provided that the following conditions are met:
00007  *     * Redistributions of source code must retain the above copyright
00008  *       notice, this list of conditions and the following disclaimer.
00009  *     * Redistributions in binary form must reproduce the above copyright
00010  *       notice, this list of conditions and the following disclaimer in the
00011  *       documentation and/or other materials provided with the distribution.
00012  *     * Redistribution under a license not approved by the Open Source
00013  *       Initiative (http://www.opensource.org) must display the
00014  *       following acknowledgement in all advertising material:
00015  *        This product includes software developed at the Courant
00016  *        Institute of Mathematical Sciences (http://cims.nyu.edu).
00017  *     * The names of the authors may not be used to endorse or promote products
00018  *       derived from this software without specific prior written permission.
00019  *
00020  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
00021  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
00022  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
00023  * DISCLAIMED. IN NO EVENT SHALL ThE AUTHORS BE LIABLE FOR ANY
00024  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
00025  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
00026  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
00027  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
00028  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00029  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00030  ***************************************************************************/
00031 
00032 namespace ebl {
00033 
00035   // net_cscscf
00036 
  // The empty constructor: builds no modules; init() must be called
  // afterwards to assemble the network.
  template <typename T, class Tstate>
  net_cscscf<T,Tstate>::net_cscscf()
    : layers<T,Tstate>(true) {
    // true: this container owns the modules added to it and is
    // responsible for deleting them.
  }
00043 
  // Full constructor: assembles the complete c-s-c-s-c-f network
  // immediately by forwarding every argument to init().
  // prm: the trainable parameter object all learned modules register with.
  // ini/inj: input image height/width; kiN/kjN: convolution kernel sizes;
  // tblN: connection tables; siN/sjN: subsampling kernel sizes;
  // outthick: number of outputs of the final full layer.
  template <typename T, class Tstate>
  net_cscscf<T,Tstate>::net_cscscf(parameter<T,Tstate> &prm, intg ini, intg inj,
                                   intg ki0, intg kj0, idx<intg> &tbl0,
                                   intg si0, intg sj0,
                                   intg ki1, intg kj1, idx<intg> &tbl1,
                                   intg si1, intg sj1,
                                   intg ki2, intg kj2, idx<intg> &tbl2,
                                   intg outthick, bool norm, bool mirror,
                                   bool tanh, bool shrink, bool diag)
    : layers<T,Tstate>(true) {
    // true: owns modules, responsible for deleting them
    init(prm, ini, inj, ki0, kj0, tbl0, si0, sj0, ki1, kj1, tbl1,
         si1, sj1, ki2, kj2, tbl2, outthick, norm, mirror, tanh, shrink, diag);
  }
00058   
  // Destructor: nothing to do here; the layers base deletes owned modules.
  template <typename T, class Tstate>
  net_cscscf<T,Tstate>::~net_cscscf() {}
00061 
00062   template <typename T, class Tstate>
00063   void net_cscscf<T,Tstate>::init(parameter<T,Tstate> &prm, intg ini, intg inj,
00064                                   intg ki0, intg kj0, idx<intg> &tbl0, 
00065                                   intg si0, intg sj0, intg ki1, intg kj1, 
00066                                   idx<intg> &tbl1, intg si1, intg sj1, 
00067                                   intg ki2, intg kj2, idx<intg> &tbl2, 
00068                                   intg outthick, bool norm, bool mirror,
00069                                   bool tanh, bool shrink, bool diag) {
00070     // here we compute the thickness of the feature maps based on the
00071     // convolution tables.
00072     idx<intg> tblmax = tbl0.select(1, 1);
00073     intg thick0 = 1 + idx_max(tblmax);
00074     tblmax = tbl1.select(1, 1);
00075     intg thick1 = 1 + idx_max(tblmax);
00076     tblmax = tbl2.select(1, 1);
00077     intg thick2 = 1 + idx_max(tblmax);
00078     // convolution sizes
00079     idxdim ker0(ki0, kj0);
00080     idxdim ker1(ki1, kj1);
00081     idxdim ker2(ki2, kj2);
00082     idxdim stride(1, 1);
00083     // subsampling sizes
00084     idxdim sker0(si0, sj0);
00085     idxdim sker1(si1, sj1);
00086     
00087     // layers was initialized with true so it owns the modules we give it,
00088     // we can add modules with "new".
00089     // we add convolutions (c), subsamplings (s), and full layers (f)
00090     // to form a c-s-c-s-c-f network. and we add state_idx in between
00091     // which serve as temporary buffer to hold the output of a module
00092     // and feed the input of the following module.
00093     
00094     // convolution
00095     this->add_module(new convolution_module<T,Tstate>
00096                (&prm, ker0, stride, tbl0, "c0"));
00097     // bias
00098     this->add_module(new addc_module<T,Tstate>(&prm, thick0, "c0bias"));
00099     // non linearity
00100     if (shrink)
00101       this->add_module(new smooth_shrink_module<T,Tstate>(&prm, thick0));
00102     else if (tanh)
00103       this->add_module(new tanh_module<T,Tstate>());
00104     else
00105       this->add_module(new stdsigmoid_module<T,Tstate>());
00106     // feature coefficents
00107     if (diag)
00108       this->add_module(new diag_module<T,Tstate>(&prm, thick0));
00109     // absolute rectification + contrast normalization
00110     if (norm) {
00111       this->add_module(new abs_module<T,Tstate>());
00112       this->add_module(new contrast_norm_module<T,Tstate>
00113                  (ker0, thick0, mirror, true, false, NULL, "w0"));
00114     }
00115     // subsampling
00116     this->add_module(new subsampling_layer<T,Tstate>(&prm, thick0,sker0,sker0, tanh));
00117     // convolution
00118     this->add_module(new convolution_module<T,Tstate>
00119                (&prm, ker1, stride, tbl1));
00120     // bias
00121     this->add_module(new addc_module<T,Tstate>(&prm, thick1));
00122     // non linearity
00123     if (shrink)
00124       this->add_module(new smooth_shrink_module<T,Tstate>(&prm, thick1));
00125     else if (tanh)
00126       this->add_module(new tanh_module<T,Tstate>());
00127     else
00128       this->add_module(new stdsigmoid_module<T,Tstate>());
00129     // feature coefficents
00130     if (diag)
00131       this->add_module(new diag_module<T,Tstate>(&prm, thick1));
00132     // absolute rectification + contrast normalization
00133     if (norm) {
00134       this->add_module(new abs_module<T,Tstate>());
00135       this->add_module(new contrast_norm_module<T,Tstate>
00136                  (ker1, thick1, mirror, true, false, NULL, "w1"));
00137     }
00138     // subsampling
00139     this->add_module(new subsampling_layer<T,Tstate>(&prm, thick1,sker1,sker1, tanh));
00140     // convolution + bias + sigmoid
00141     this->add_module(new convolution_layer<T,Tstate>
00142                (&prm, ker2, stride, tbl2, tanh));
00143     // full
00144     this->add_module(new full_layer<T,Tstate>(&prm, thick2, outthick, tanh));
00145   }
00146 
00148   // net_cscf
00149 
  // The empty constructor: builds no modules; init() must be called
  // afterwards to assemble the network.
  template <typename T, class Tstate>
  net_cscf<T,Tstate>::net_cscf()
    : layers<T,Tstate>(true) {
    // true: this container owns its modules and deletes them.
  }
00156 
  // Full constructor: assembles the c-s-c-f network immediately by
  // forwarding every argument to init().
  // lut_features/lut: optionally insert a range lookup-table module
  // before the final full layer.
  template <typename T, class Tstate>
  net_cscf<T,Tstate>::net_cscf(parameter<T,Tstate> &prm, intg ini, intg inj,
                               intg ki0, intg kj0, idx<intg> &tbl0,
                               intg si0, intg sj0,
                               intg ki1, intg kj1, idx<intg> &tbl1,
                               intg outthick, bool norm, bool mirror,
                               bool tanh, bool shrink, bool diag, bool lut_features,
                               idx<T> *lut)
    : layers<T,Tstate>(true) {
    // true: owns modules, responsible for deleting them
    init(prm, ini, inj, ki0, kj0, tbl0, si0, sj0, ki1, kj1, tbl1,
         outthick, norm, mirror, tanh, shrink, diag, lut_features, lut);
  }
00170   
  // Destructor: nothing to do; the layers base deletes owned modules.
  template <typename T, class Tstate>
  net_cscf<T,Tstate>::~net_cscf() {}
00173 
00174   template <typename T, class Tstate>
00175   void net_cscf<T,Tstate>::init(parameter<T,Tstate> &prm, intg ini, intg inj,
00176                                 intg ki0, intg kj0, idx<intg> &tbl0, 
00177                                 intg si0, intg sj0, intg ki1, intg kj1, 
00178                                 idx<intg> &tbl1, intg outthick, bool norm,
00179                                 bool mirror, bool tanh, bool shrink, bool diag,
00180                                 bool lut_features, idx<T> *lut) {
00181     // here we compute the thickness of the feature maps based on the
00182     // convolution tables.
00183     idx<intg> tblmax = tbl0.select(1, 1);
00184     intg thick0 = 1 + idx_max(tblmax);
00185     tblmax = tbl1.select(1, 1);
00186     intg thick1 = 1 + idx_max(tblmax);
00187     // convolution sizes
00188     idxdim ker0(ki0, kj0);
00189     idxdim ker1(ki1, kj1);
00190     idxdim stride(1, 1);
00191     // subsampling sizes
00192     idxdim sker0(si0, sj0);
00193 
00194     // layers was initialized with true so it owns the modules we give it,
00195     // we can add modules with "new".
00196     // we add convolutions (c), subsamplings (s), and full layers (f)
00197     // to form a c-s-c-s-c-f network. and we add state_idx in between
00198     // which serve as temporary buffer to hold the output of a module
00199     // and feed the input of the following module.
00200     
00201     // convolution
00202     this->add_module(new convolution_module<T,Tstate>(&prm, ker0, stride,
00203                                                            tbl0, "c0"));
00204     // bias
00205     this->add_module(new addc_module<T,Tstate>(&prm, thick0, "c0bias"));
00206     // non linearity
00207     if (shrink)
00208       this->add_module(new smooth_shrink_module<T,Tstate>(&prm, thick0));
00209     else if (tanh)
00210       this->add_module(new tanh_module<T,Tstate>());
00211     else
00212       this->add_module(new stdsigmoid_module<T,Tstate>());
00213     // feature coefficents
00214     if (diag)
00215       this->add_module(new diag_module<T,Tstate>(&prm, thick0));
00216     // absolute rectification + contrast normalization
00217     if (norm) {
00218       this->add_module(new abs_module<T,Tstate>());
00219       this->add_module(new contrast_norm_module<T,Tstate>(ker0, thick0, mirror, NULL,
00220                                                     "w0"));
00221     }
00222     // subsampling
00223     this->add_module(new subsampling_layer<T,Tstate>(&prm,thick0,sker0,sker0,tanh,"s0"));
00224     // convolution
00225     this->add_module(new convolution_module<T,Tstate>(&prm,ker1,stride,
00226                                                            tbl1,"c1"));
00227     // bias
00228     this->add_module(new addc_module<T,Tstate>(&prm, thick1, "c1bias"));
00229     // non linearity
00230     if (shrink)
00231       this->add_module(new smooth_shrink_module<T,Tstate>(&prm, thick1));
00232     else if (tanh)
00233       this->add_module(new tanh_module<T,Tstate>());
00234     else
00235       this->add_module(new stdsigmoid_module<T,Tstate>());
00236     // feature coefficents
00237     if (diag)
00238       this->add_module(new diag_module<T,Tstate>(&prm, thick1));
00239     // absolute rectification + contrast normalization
00240     if (norm) {
00241       this->add_module(new abs_module<T,Tstate>());
00242       this->add_module(new contrast_norm_module<T,Tstate>(ker1, thick1, mirror, NULL,
00243                                                     "w1"));
00244     }
00245     if (lut_features)
00246       this->add_module(new range_lut_module<T,Tstate>(lut));
00247     // full
00248     this->add_module(new full_layer<T,Tstate>(&prm, thick1, outthick, tanh, "f2"));
00249 
00250     // // convolution
00251     // if (norm) // absolute rectification + contrast normalization
00252     //   this->add_module(new convabsnorm_layer<T,Tstate>(&prm, ker0,stride,tbl0,mirror,tanh));
00253     // else // old fashioned way
00254     //   this->add_module(new convolution_layer<T,Tstate>(&prm, ker0, stride, tbl0, tanh));
00255     // // subsampling
00256     // this->add_module(new subsampling_layer<T,Tstate>(&prm,thick0,sker0, sker0, tanh));,
00257     // // convolution
00258     // if (norm) // absolute rectification + contrast normalization
00259     //   this->add_module(new convabsnorm_layer<T,Tstate>(&prm, ker1,stride,tbl1,mirror,tanh));
00260     // else // old fashioned way
00261     //   this->add_module(new convolution_layer<T,Tstate>(&prm, ker1, stride, tbl1, tanh));
00262     // // full
00263     // add_last_module(new full_layer<T,Tstate>(&prm, thick1, outthick, tanh));
00264   }
00265 
00267   // net_cscc
00268 
  // The empty constructor: builds no modules; init() must be called
  // afterwards to assemble the network.
  template <typename T, class Tstate>
  net_cscc<T,Tstate>::net_cscc()
    : layers<T,Tstate>(true) {
    // true: this container owns its modules and deletes them.
  }
00275 
  // Full constructor: assembles the c-s-c-c network immediately by
  // forwarding every argument to init(). Note the last stage is a
  // convolution (table tbl2), not a full layer.
  template <typename T, class Tstate>
  net_cscc<T,Tstate>::net_cscc(parameter<T,Tstate> &prm, intg ini, intg inj,
                               intg ki0, intg kj0, idx<intg> &tbl0,
                               intg si0, intg sj0,
                               intg ki1, intg kj1, idx<intg> &tbl1,
                               idx<intg> &tbl2,
                               intg outthick, bool norm, bool mirror,
                               bool tanh, bool shrink, bool diag)
    : layers<T,Tstate>(true) {
    // true: owns modules, responsible for deleting them
    init(prm, ini, inj, ki0, kj0, tbl0, si0, sj0, ki1, kj1, tbl1,
         tbl2, outthick, norm, mirror, tanh, shrink, diag);
  }
00289   
  // Destructor: nothing to do; the layers base deletes owned modules.
  template <typename T, class Tstate>
  net_cscc<T,Tstate>::~net_cscc() {}
00292 
00293   template <typename T, class Tstate>
00294   void net_cscc<T,Tstate>::init(parameter<T,Tstate> &prm, intg ini, intg inj,
00295                                 intg ki0, intg kj0, idx<intg> &tbl0, 
00296                                 intg si0, intg sj0, intg ki1, intg kj1, 
00297                                 idx<intg> &tbl1, idx<intg> &tbl2,
00298                                 intg outthick, bool norm,
00299                                 bool mirror, bool tanh, bool shrink, bool diag) {
00300     // here we compute the thickness of the feature maps based on the
00301     // convolution tables.
00302     idx<intg> tblmax = tbl0.select(1, 1);
00303     intg thick0 = 1 + idx_max(tblmax);
00304     tblmax = tbl1.select(1, 1);
00305     intg thick1 = 1 + idx_max(tblmax);
00306     // WARNING: those two numbers must be changed
00307     // when image-height/image-width change
00308     // TODO: add assertion test here?
00309     intg ki2 = (((ini - ki0 + 1) / si0) - ki1 + 1);
00310     intg kj2 = (((inj  - kj0 + 1) / sj0) - kj1 + 1);
00311 
00312     // convolution sizes
00313     idxdim ker0(ki0, kj0);
00314     idxdim ker1(ki1, kj1);
00315     idxdim ker2(ki2, kj2);
00316     idxdim stride(1, 1);
00317     // subsampling sizes
00318     idxdim sker0(si0, sj0);
00319 
00320     // convolution
00321     this->add_module(new convolution_module<T,Tstate>(&prm,ker0,stride,
00322                                                            tbl0,"c0"));
00323     // bias
00324     this->add_module(new addc_module<T,Tstate>(&prm, thick0, "c0bias"));
00325     // non linearity
00326     if (shrink)
00327       this->add_module(new smooth_shrink_module<T,Tstate>(&prm, thick0));
00328     else if (tanh)
00329       this->add_module(new tanh_module<T,Tstate>());
00330     else
00331       this->add_module(new stdsigmoid_module<T,Tstate>());
00332     // feature coefficents
00333     if (diag)
00334       this->add_module(new diag_module<T,Tstate>(&prm, thick0));
00335     // absolute rectification + contrast normalization
00336     if (norm) {
00337       this->add_module(new abs_module<T,Tstate>());
00338       this->add_module(new contrast_norm_module<T,Tstate>(ker0, thick0, mirror,
00339                                                     NULL, "w0"));
00340     }
00341     // subsampling
00342     this->add_module(new subsampling_layer<T,Tstate>(&prm, thick0, sker0, sker0, tanh, "s0"));
00343     // convolution
00344     this->add_module(new convolution_module<T,Tstate>(&prm,ker1,stride,
00345                                                     tbl1,"c1"));
00346     // bias
00347     this->add_module(new addc_module<T,Tstate>(&prm, thick1, "c1bias"));
00348     // non linearity
00349     if (shrink)
00350       this->add_module(new smooth_shrink_module<T,Tstate>(&prm, thick1));
00351     else if (tanh)
00352       this->add_module(new tanh_module<T,Tstate>());
00353     else
00354       this->add_module(new stdsigmoid_module<T,Tstate>());
00355     // feature coefficents
00356     if (diag)
00357       this->add_module(new diag_module<T,Tstate>(&prm, thick1));
00358     // absolute rectification + contrast normalization
00359     if (norm) {
00360       this->add_module(new abs_module<T,Tstate>());
00361       this->add_module(new contrast_norm_module<T,Tstate>(ker1, thick1, mirror,
00362                                                     NULL, "w1"));
00363     }
00364     // convolution + bias + sigmoid
00365     this->add_module(new convolution_layer<T,Tstate>(&prm, ker2, stride, tbl2, tanh,"c2"));
00366   }
00367 
00369   // net_cscsc
00370 
  // The empty constructor: builds no modules; init() must be called
  // afterwards to assemble the network.
  template <typename T, class Tstate>
  net_cscsc<T,Tstate>::net_cscsc()
    : layers<T,Tstate>(true) { // owns modules, responsible for deleting it
  }
00376 
  // Full constructor: assembles the c-s-c-s-c network immediately by
  // forwarding every argument to init(). norm_pos selects whether
  // contrast normalization happens before or after each subsampling.
  template <typename T, class Tstate>
  net_cscsc<T,Tstate>::net_cscsc(parameter<T,Tstate> &prm, intg ini, intg inj,
                                 intg ki0, intg kj0, idx<intg> &tbl0,
                                 intg si0, intg sj0,
                                 intg ki1, intg kj1, idx<intg> &tbl1,
                                 intg si1, intg sj1,
                                 intg ki2, intg kj2, idx<intg> &tbl2,
                                 bool norm, bool mirror, bool tanh,
                                 bool shrink, bool diag, bool norm_pos)
    : layers<T,Tstate>(true) { // owns modules, responsible for deleting it
    init(prm, ini, inj, ki0, kj0, tbl0, si0, sj0, ki1, kj1, tbl1,
         si1, sj1, ki2, kj2, tbl2, norm, mirror, tanh, shrink, diag, norm_pos);
  }
00390   
  // Destructor: nothing to do; the layers base deletes owned modules.
  template <typename T, class Tstate>
  net_cscsc<T,Tstate>::~net_cscsc() {}
00393 
  // Assemble the c-s-c-s-c stack. Unlike net_cscscf there is no final
  // full layer: the last convolution produces the output directly.
  // norm_pos places contrast normalization before (false) or after (true)
  // each subsampling stage.
  template <typename T, class Tstate>
  void net_cscsc<T,Tstate>::init(parameter<T,Tstate> &prm, intg ini, intg inj,
                                 intg ki0, intg kj0, idx<intg> &tbl0,
                                 intg si0, intg sj0, intg ki1, intg kj1,
                                 idx<intg> &tbl1, intg si1, intg sj1,
                                 intg ki2, intg kj2, idx<intg> &tbl2,
                                 bool norm, bool mirror, bool tanh,
                                 bool shrink, bool diag, bool norm_pos) {
    // here we compute the thickness of the feature maps based on the
    // convolution tables (1 + largest destination index in column 1).
    idx<intg> tblmax = tbl0.select(1, 1);
    intg thick0 = 1 + idx_max(tblmax);
    tblmax = tbl1.select(1, 1);
    intg thick1 = 1 + idx_max(tblmax);

    // convolution sizes (applied at unit stride)
    idxdim ker0(ki0, kj0);
    idxdim ker1(ki1, kj1);
    idxdim ker2(ki2, kj2);
    idxdim stride(1, 1);
    // subsampling sizes
    idxdim sker0(si0, sj0);
    idxdim sker1(si1, sj1);

    // layers was initialized with true so it owns the modules we give it,
    // we can add modules with "new".

    if (norm)
      cout << "Using contrast normalization " << (norm_pos?"after":"before")
           << " subsampling. " << endl;
    // convolution
    this->add_module(new convolution_module<T,Tstate>
               (&prm, ker0, stride, tbl0, "c0"));
    // bias
    this->add_module(new addc_module<T,Tstate>(&prm, thick0, "c0bias"));
    // non linearity: shrinkage, tanh, or standard sigmoid
    if (shrink)
      this->add_module(new smooth_shrink_module<T,Tstate>(&prm, thick0));
    else if (tanh)
      this->add_module(new tanh_module<T,Tstate>());
    else
      this->add_module(new stdsigmoid_module<T,Tstate>());
    // feature coefficents (learned per-feature gains)
    if (diag)
      this->add_module(new diag_module<T,Tstate>(&prm, thick0));
    // absolute rectification
    if (norm)
      this->add_module(new abs_module<T,Tstate>());
    // contrast normalization (positioned before sub)
    if (norm && !norm_pos)
      this->add_module(new contrast_norm_module<T,Tstate>
                 (ker0, thick0, mirror, true, false, NULL, "w0"));
    // subsampling
    this->add_module(new subsampling_layer<T,Tstate>(&prm, thick0, sker0, sker0, tanh, "s0"));
    // contrast normalization (positioned after sub)
    // NOTE(review): the normalization window is ker1 (the next conv's
    // kernel) while the thickness is still thick0 -- presumably sized for
    // the downsampled maps; confirm this is intentional.
    if (norm && norm_pos)
      this->add_module(new contrast_norm_module<T,Tstate>
                 (ker1, thick0, mirror, true, false, NULL, "w0"));
    // convolution
    this->add_module(new convolution_module<T,Tstate>
               (&prm, ker1, stride,tbl1, "c1"));
    // bias
    this->add_module(new addc_module<T,Tstate>(&prm, thick1, "c1bias"));
    // non linearity
    if (shrink)
      this->add_module(new smooth_shrink_module<T,Tstate>(&prm, thick1));
    else if (tanh)
      this->add_module(new tanh_module<T,Tstate>());
    else
      this->add_module(new stdsigmoid_module<T,Tstate>());
    // feature coefficents
    if (diag)
      this->add_module(new diag_module<T,Tstate>(&prm, thick1));
    // absolute rectification
    if (norm)
      this->add_module(new abs_module<T,Tstate>());
    // contrast normalization (position before sub)
    if (norm && !norm_pos)
      this->add_module(new contrast_norm_module<T,Tstate>
                 (ker1, thick1, mirror, true, false, NULL, "w1"));
    // subsampling
    this->add_module(new subsampling_layer<T,Tstate>(&prm, thick1, sker1, sker1, tanh, "s1"));
    // contrast normalization (position after sub)
    // NOTE(review): window is ker2 with thickness thick1 -- same pattern
    // as above; confirm.
    if (norm && norm_pos)
      this->add_module(new contrast_norm_module<T,Tstate>
                 (ker2, thick1, mirror, true, false, NULL, "w1"));
    // final convolution layer (convolution + bias + sigmoid)
    this->add_module(new convolution_layer<T,Tstate>
               (&prm, ker2, stride, tbl2, tanh,"c2"));
  }
00488 
00490   // lenet_cscsc
00491 
  // lenet_cscsc: convenience wrapper around net_cscsc that installs the
  // standard LeNet connection tables (or caller-supplied overrides) and
  // derives the last kernel size from the input image dimensions.
  template <typename T, class Tstate>
  lenet_cscsc<T,Tstate>::
  lenet_cscsc(parameter<T,Tstate> &prm, intg image_height, intg image_width,
              intg ki0, intg kj0, intg si0, intg sj0, intg ki1,
              intg kj1, intg si1, intg sj1,
              intg output_size, bool norm, bool color, bool mirror, bool tanh,
              bool shrink, bool diag, bool norm_pos, idx<intg> *table0_, idx<intg> *table1_,
              idx<intg> *table2_)
    : net_cscsc<T,Tstate>() {
    idx<intg> table0, table1, table2;
    if (!color) { // use smaller tables
      // grayscale: 1 input plane fully connected to 6 maps, then the
      // sparse 6 -> 16 table.
      // NOTE(review): assumes connection_table_6_16 holds exactly 60
      // pairs (defined elsewhere in the project).
      table0 = full_table(1, 6);
      table1 = idx<intg>(60, 2);
      memcpy(table1.idx_ptr(), connection_table_6_16,
             table1.nelements() * sizeof (intg));
    } else { // for color (assuming 3-layer input), use bigger tables
      table0 = idx<intg>(14, 2);
      intg tbl0[14][2] = {{0, 0},  {0, 1},  {0, 2}, {0, 3},  {1, 4},  {2, 4},
                          {1, 5},  {2, 5},  {0, 6}, {1, 6},  {2, 6},  {0, 7},
                          {1, 7}, {2, 7}};
      memcpy(table0.idx_ptr(), tbl0, table0.nelements() * sizeof (intg));
      table1 = idx<intg>(96, 2);
      memcpy(table1.idx_ptr(), connection_table_8_24,
             table1.nelements() * sizeof (intg));
    }
    // override default tables if defined by the caller
    if (table0_)
      table0 = *table0_;
    if (table1_)
      table1 = *table1_;
    if (table2_)
      table2 = *table2_;
    else {
      // default table2: fully connect table1's outputs to output_size
      // (determine max of previous table first)
      idx<intg> outtable = table1.select(1, 1);
      table2 = full_table(idx_max(outtable) + 1, output_size);
      cout << "Using a full table for table 2: " << idx_max(outtable) + 1
           <<  " -> " << output_size << endl;
    }

    // the last kernel must cover exactly the extent remaining after two
    // conv+subsample stages.
    // WARNING: those two numbers must be changed
    // when image-height/image-width change
    // TODO: add assertion test here?
    intg ki2 = (((image_height - ki0 + 1) / si0) - ki1 + 1) / si1;
    intg kj2 = (((image_width  - kj0 + 1) / sj0) - kj1 + 1) / sj1;

    this->init(prm, image_height, image_width, ki0, kj0, table0, si0, sj0,
               ki1, kj1, table1, si1, sj1, ki2, kj2, table2, norm, mirror,
               tanh, shrink, diag, norm_pos);
  }
00542   
00544   // lenet
00545 
00546   template <typename T, class Tstate>
00547   lenet<T,Tstate>::lenet(parameter<T,Tstate> &prm, intg image_height,
00548                          intg image_width, intg ki0, intg kj0, intg si0,
00549                          intg sj0, intg ki1, intg kj1, intg si1, intg sj1,
00550                          intg hid, intg output_size, bool norm, bool color,
00551                          bool mirror, bool tanh, bool shrink, bool diag,
00552                          idx<intg> *table0_, idx<intg> *table1_,
00553                          idx<intg> *table2_)
00554     : net_cscscf<T,Tstate>() {
00555     idx<intg> table0, table1, table2;
00556     if (!color) { // use smaller tables
00557       table0 = full_table(1, 6);
00558       table1 = idx<intg>(60, 2);
00559       memcpy(table1.idx_ptr(), connection_table_6_16,
00560              table1.nelements() * sizeof (intg));
00561     } else { // for color (assuming 3-layer input), use bigger tables
00562       table0 = idx<intg>(14, 2);
00563       intg tbl0[14][2] = {{0, 0},  {0, 1},  {0, 2}, {0, 3},  {1, 4},  {2, 4},
00564                           {1, 5},  {2, 5},  {0, 6}, {1, 6},  {2, 6},  {0, 7},
00565                           {1, 7}, {2, 7}};
00566       memcpy(table0.idx_ptr(), tbl0, table0.nelements() * sizeof (intg));
00567       table1 = idx<intg>(96, 2);
00568       memcpy(table1.idx_ptr(), connection_table_8_24,
00569              table1.nelements() * sizeof (intg));
00570     }
00571     // overide default tables if defined
00572     if (table0_)
00573       table0 = *table0_;
00574     if (table1_)
00575       table1 = *table1_;
00576     // allocate a full table for table2 by default
00577     idx<intg> tb1 = table1.select(1, 1);
00578     intg max1 = idx_max(tb1) + 1;
00579     table2 = full_table(max1, hid);
00580     // override table2 if defined
00581     if (table2_)
00582       table2 = *table2_;
00583     
00584     // WARNING: those two numbers must be changed
00585     // when image-height/image-width change
00586     // TODO: add assertion test here?
00587     intg ki2 = (((image_height - ki0 + 1) / si0) - ki1 + 1) / si1;
00588     intg kj2 = (((image_width  - kj0 + 1) / sj0) - kj1 + 1) / sj1;
00589     
00590     this->init(prm, image_height, image_width, ki0, kj0, table0, si0, sj0,
00591                ki1, kj1, table1, si1, sj1, ki2, kj2, table2, output_size,
00592                norm, mirror, tanh, shrink, diag);
00593   }
00594   
00596   // lenet
00597 
00598   template <typename T, class Tstate>
00599   lenet_cscf<T,Tstate>::lenet_cscf(parameter<T,Tstate> &prm,
00600                                    intg image_height, intg image_width,
00601                                    intg ki0, intg kj0, intg si0, intg sj0,
00602                                    intg ki1, intg kj1, intg output_size,
00603                                    bool norm, bool color, bool mirror,
00604                                    bool tanh, bool shrink, bool diag, idx<intg> *table0_,
00605                                    idx<intg> *table1_)
00606     : net_cscf<T,Tstate>() {
00607     idx<intg> table0, table1;
00608     if (!color) { // use smaller tables
00609       table0 = full_table(1, 6);
00610       table1 = idx<intg>(60, 2);
00611       memcpy(table1.idx_ptr(), connection_table_6_16,
00612              table1.nelements() * sizeof (intg));
00613     } else { // for color (assuming 3-layer input), use bigger tables
00614       table0 = idx<intg>(14, 2);
00615       intg tbl0[14][2] = {{0, 0},  {0, 1},  {0, 2}, {0, 3},  {1, 4},  {2, 4},
00616                           {1, 5},  {2, 5},  {0, 6}, {1, 6},  {2, 6},  {0, 7},
00617                           {1, 7}, {2, 7}};
00618       memcpy(table0.idx_ptr(), tbl0, table0.nelements() * sizeof (intg));
00619       table1 = idx<intg>(96, 2);
00620       memcpy(table1.idx_ptr(), connection_table_8_24,
00621              table1.nelements() * sizeof (intg));
00622     }
00623     if (table0_)
00624       table0 = *table0_;
00625     if (table1_)
00626       table1 = *table1_;
00627     
00628     this->init(prm, image_height, image_width, ki0, kj0, table0, si0, sj0,
00629                ki1, kj1, table1, output_size, norm, mirror, tanh, shrink, diag);
00630   }
00631   
00633   // lenet5
00634 
  // lenet5: classic LeNet-5-style c-s-c-s-c-f network on a single-plane
  // input; forwards to net_cscscf::init with the standard tables.
  template <typename T, class Tstate>
  lenet5<T,Tstate>::lenet5(parameter<T,Tstate> &prm, intg image_height,
                           intg image_width,
                           intg ki0, intg kj0, intg si0, intg sj0,
                           intg ki1, intg kj1, intg si1, intg sj1,
                           intg hid, intg output_size, bool norm, bool mirror,
                           bool tanh, bool shrink, bool diag) {
    // layer 0: single input plane fully connected to 6 feature maps
    idx<intg> table0 = full_table(1, 6);
    // TODO: add idx constructor taking pointer to data with dimensions
    // and copies it.
    // or if possible operator= taking the array in brackets?
    // layer 1: sparse 6 -> 16 table.
    // NOTE(review): assumes connection_table_6_16 holds exactly 60 pairs
    // (defined elsewhere in the project).
    idx<intg> table1(60, 2);
    memcpy(table1.idx_ptr(), connection_table_6_16,
           table1.nelements() * sizeof (intg));
    // layer 2: 16 maps fully connected to "hid" hidden units
    idx<intg> table2 = full_table(16, hid);

    // the last kernel covers exactly the extent remaining after two
    // conv+subsample stages.
    // WARNING: those two numbers must be changed
    // when image-height/image-width change
    // TODO: add assertion test here?
    intg ki2 = (((image_height - ki0 + 1) / si0) - ki1 + 1) / si1;
    intg kj2 = (((image_width  - kj0 + 1) / sj0) - kj1 + 1) / sj1;

    this->init(prm, image_height, image_width, ki0, kj0, table0, si0, sj0,
               ki1, kj1, table1, si1, sj1, ki2, kj2, table2, output_size,
               norm, mirror, tanh, shrink, diag);
  }
00661 
00663   // lenet7
00664 
00665   template <typename T, class Tstate>
00666   lenet7<T,Tstate>::lenet7(parameter<T,Tstate> &prm, intg image_height,
00667                            intg image_width, intg output_size, bool norm,
00668                            bool mirror, bool tanh, bool shrink, bool diag) {
00669     intg ki0 = 5, kj0 = 5;
00670     intg si0 = 4, sj0 = 4;
00671     intg ki1 = 6, kj1 = 6;
00672     intg si1 = 3, sj1 = 3;
00673     intg hid = 100;
00674 
00675     idx<intg> table0 = full_table(1, 8);
00676     idx<intg> table1(96, 2);
00677     memcpy(table1.idx_ptr(), connection_table_8_24,
00678            table1.nelements() * sizeof (intg));
00679     idx<intg> table2 = full_table(24, hid);
00680 
00681     // WARNING: those two numbers must be changed
00682     // when image-height/image-width change
00683     // TODO: add assertion test here?
00684     intg ki2 = (((image_height - ki0 + 1) / si0) - ki1 + 1) / si1;
00685     intg kj2 = (((image_width  - kj0 + 1) / sj0) - kj1 + 1) / sj1;
00686 
00687     this->init(prm, image_height, image_width, ki0, kj0, table0, si0, sj0,
00688                ki1, kj1, table1, si1, sj1, ki2, kj2, table2, output_size,
00689                norm, mirror, tanh, shrink, diag);
00690   }
00691 
00693   // lenet7_binocular
00694 
  // lenet7_binocular: lenet7 variant for a two-plane (stereo pair) input;
  // table0 wires the two input planes into 8 feature maps.
  template <typename T, class Tstate>
  lenet7_binocular<T,Tstate>::
  lenet7_binocular(parameter<T,Tstate> &prm, intg image_height,
                   intg image_width, intg output_size, bool norm, bool mirror,
                   bool tanh, bool shrink, bool diag) {
    // hard-wired lenet7 geometry: 5x5 and 6x6 convolutions, 4x4 and 3x3
    // subsampling, 100 hidden units
    intg ki0 = 5, kj0 = 5;
    intg si0 = 4, sj0 = 4;
    intg ki1 = 6, kj1 = 6;
    intg si1 = 3, sj1 = 3;
    intg hid = 100;

    // sparse table mixing the two input planes into 8 maps: maps 0-1 see
    // plane 0 only, maps 2-3 plane 1 only, maps 4-7 both planes
    idx<intg> table0(12, 2);
    intg tbl0[12][2] =
      {{0, 0},  {0, 1},  {1, 2},
       {1, 3},  {0, 4},  {1, 4},
       {0, 5},  {1, 5},  {0, 6},
       {1, 6},  {0, 7},  {1, 7}};
    memcpy(table0.idx_ptr(), tbl0, table0.nelements() * sizeof (intg));
    // sparse 8 -> 24 table.
    // NOTE(review): assumes connection_table_8_24 holds exactly 96 pairs
    // (defined elsewhere in the project).
    idx<intg> table1(96, 2);
    memcpy(table1.idx_ptr(), connection_table_8_24,
           table1.nelements() * sizeof (intg));
    // 24 maps fully connected to the hidden units
    idx<intg> table2 = full_table(24, hid);

    // the last kernel covers exactly the extent remaining after two
    // conv+subsample stages.
    // WARNING: those two numbers must be changed
    // when image-height/image-width change
    // TODO: add assertion test here?
    intg ki2 = (((image_height - ki0 + 1) / si0) - ki1 + 1) / si1;
    intg kj2 = (((image_width  - kj0 + 1) / sj0) - kj1 + 1) / sj1;

    this->init(prm, image_height, image_width, ki0, kj0, table0, si0, sj0,
               ki1, kj1, table1, si1, sj1, ki2, kj2, table2, output_size,
               norm, mirror, tanh, shrink, diag);
  }
00728 
00730   // supervised euclidean machine
00731 
  // Constructor: wires machine m, the euclidean cost fcost (built from
  // targets t), and the output state fout (of dimensions dims) into the
  // fc_ebm2 base.
  // NOTE(review): fout and fcost are passed to the base constructor
  // before they are themselves constructed (members initialize after the
  // base); this is only safe if fc_ebm2 merely stores the references
  // without using them -- confirm.
  template <typename Tdata, class Tlabel, class Tstate>
  supervised_euclidean_machine<Tdata,Tlabel,Tstate>::
  supervised_euclidean_machine(module_1_1<Tdata,Tstate> &m,
                               idx<Tdata> &t,idxdim &dims)
    : fc_ebm2<Tdata,Tstate,bbstate_idx<Tlabel>,Tstate>
      (m, fout, (ebm_2<Tstate,bbstate_idx<Tlabel>,Tstate>&)fcost),
      fcost(t), fout(dims) {
  }
00740 
  // Destructor: nothing to release; members clean up after themselves.
  template <typename Tdata, class Tlabel, class Tstate>
  supervised_euclidean_machine<Tdata,Tlabel,Tstate>::
  ~supervised_euclidean_machine() {
  }
00745 
00747 
00748 } // end namespace ebl