pktools  2.6.4
Processing Kernel for geospatial data
pkoptsvm.cc
1 /**********************************************************************
2 pkoptsvm.cc: program to optimize parameters for support vector machine classifier pksvm
3 Copyright (C) 2008-2014 Pieter Kempeneers
4 
5 This file is part of pktools
6 
7 pktools is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version.
11 
12 pktools is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with pktools. If not, see <http://www.gnu.org/licenses/>.
19 ***********************************************************************/
20 #include <iostream>
21 #include <sstream>
22 #include <fstream>
23 #include <vector>
24 #include <math.h>
25 #include <nlopt.hpp>
26 #include "base/Optionpk.h"
27 #include "base/Optionpk.h"
28 #include "algorithms/ConfusionMatrix.h"
29 #include "algorithms/FeatureSelector.h"
30 #include "algorithms/OptFactory.h"
31 #include "algorithms/CostFactorySVM.h"
32 #include "algorithms/svm.h"
33 #include "imageclasses/ImgReaderOgr.h"
34 
35 #ifdef HAVE_CONFIG_H
36 #include <config.h>
37 #endif
38 
39 /******************************************************************************/
using namespace std;

// libsvm-style allocation helper (kept for parity with pksvm; unused here)
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
 //declare objective function (evaluated by the GRID search / NLopt optimizer)
double objFunction(const std::vector<double> &x, std::vector<double> &grad, void *my_func_data);

//global parameters used in objective function
// class name -> numeric class value (filled in main from classname/classvalue options)
map<string,short> classValueMap;
// class names in the order classes were read from the training file
vector<std::string> nameVector;
// number of training samples per class
vector<unsigned int> nctraining;
// number of test samples per class (0 per class when cross-validation is used)
vector<unsigned int> nctest;
// SVM/optimization options shared between main() and objFunction()
Optionpk<std::string> svm_type_opt("svmt", "svmtype", "type of SVM (C_SVC, nu_SVC,one_class, epsilon_SVR, nu_SVR)","C_SVC");
Optionpk<std::string> kernel_type_opt("kt", "kerneltype", "type of kernel function (linear,polynomial,radial,sigmoid) ","radial");
Optionpk<unsigned short> kernel_degree_opt("kd", "kd", "degree in kernel function",3);
Optionpk<float> coef0_opt("c0", "coef0", "coef0 in kernel function",0);
Optionpk<float> nu_opt("nu", "nu", "the parameter nu of nu-SVC, one-class SVM, and nu-SVR",0.5);
Optionpk<float> epsilon_loss_opt("eloss", "eloss", "the epsilon in loss function of epsilon-SVR",0.1);
Optionpk<int> cache_opt("cache", "cache", "cache memory size in MB",100);
Optionpk<float> epsilon_tol_opt("etol", "etol", "the tolerance of termination criterion",0.001);
Optionpk<bool> shrinking_opt("shrink", "shrink", "whether to use the shrinking heuristics",false);
Optionpk<bool> prob_est_opt("pe", "probest", "whether to train a SVC or SVR model for probability estimates",true,2);
Optionpk<bool> costfunction_opt("cf", "cf", "use Overall Accuracy instead of kappa",false);
// Optionpk<bool> weight_opt("wi", "wi", "set the parameter C of class i to weight*C, for C-SVC",true);
Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation mode",2);
Optionpk<string> classname_opt("c", "class", "list of class names.");
Optionpk<short> classvalue_opt("r", "reclass", "list of class values (use same order as in class opt).");
Optionpk<short> verbose_opt("v", "verbose", "use 1 to output intermediate results for plotting",0,2);
132 double objFunction(const std::vector<double> &x, std::vector<double> &grad, void *my_func_data){
133 
134  assert(grad.empty());
135  vector<Vector2d<float> > *tf=reinterpret_cast<vector<Vector2d<float> >*> (my_func_data);
136  float ccost=x[0];
137  float gamma=x[1];
138  double error=1.0/epsilon_tol_opt[0];
139  double kappa=1.0;
140  double oa=1.0;
141 
142  CostFactorySVM costfactory(svm_type_opt[0], kernel_type_opt[0], kernel_degree_opt[0], gamma, coef0_opt[0], ccost, nu_opt[0], epsilon_loss_opt[0], cache_opt[0], epsilon_tol_opt[0], shrinking_opt[0], prob_est_opt[0], cv_opt[0], verbose_opt[0]);
143 
144  assert(tf->size());
145  // if(nctest>0)
146  // costfactory.setCv(0);
147 
148  costfactory.setCv(cv_opt[0]);
149 
150  if(classname_opt.size()){
151  assert(classname_opt.size()==classvalue_opt.size());
152  for(int iclass=0;iclass<classname_opt.size();++iclass)
153  costfactory.setClassValueMap(classname_opt[iclass],classvalue_opt[iclass]);
154  }
155  //set names in confusion matrix using nameVector
156  costfactory.setNameVector(nameVector);
157  // vector<string> nameVector=costfactory.getNameVector();
158  for(int iname=0;iname<nameVector.size();++iname){
159  if(costfactory.getClassValueMap().empty()){
160  costfactory.pushBackClassName(nameVector[iname]);
161  // cm.pushBackClassName(nameVector[iname]);
162  }
163  else if(costfactory.getClassIndex(type2string<short>((costfactory.getClassValueMap())[nameVector[iname]]))<0)
164  costfactory.pushBackClassName(type2string<short>((costfactory.getClassValueMap())[nameVector[iname]]));
165  }
166 
167  costfactory.setNcTraining(nctraining);
168  costfactory.setNcTest(nctest);
169 
170  kappa=costfactory.getCost(*tf);
171  return(kappa);
172 }
173 
174 int main(int argc, char *argv[])
175 {
176  map<short,int> reclassMap;
177  vector<int> vreclass;
178  Optionpk<string> training_opt("t", "training", "training vector file. A single vector file contains all training features (must be set as: b0, b1, b2,...) for all classes (class numbers identified by label option).");
179  Optionpk<float> ccost_opt("cc", "ccost", "min and max boundaries the parameter C of C-SVC, epsilon-SVR, and nu-SVR (optional: initial value)",1);
180  Optionpk<float> gamma_opt("g", "gamma", "min max boundaries for gamma in kernel function (optional: initial value)",0);
181  Optionpk<double> stepcc_opt("stepcc","stepcc","multiplicative step for ccost in GRID search",2);
182  Optionpk<double> stepg_opt("stepg","stepg","multiplicative step for gamma in GRID search",2);
183  Optionpk<string> input_opt("i", "input", "input test vector file");
184  Optionpk<string> tlayer_opt("tln", "tln", "training layer name(s)");
185  Optionpk<string> label_opt("label", "label", "identifier for class label in training vector file.","label");
186  // Optionpk<unsigned short> reclass_opt("\0", "rc", "reclass code (e.g. --rc=12 --rc=23 to reclass first two classes to 12 and 23 resp.).", 0);
187  Optionpk<unsigned int> balance_opt("bal", "balance", "balance the input data to this number of samples for each class", 0);
188  Optionpk<bool> random_opt("random","random", "in case of balance, randomize input data", true);
189  Optionpk<int> minSize_opt("min", "min", "if number of training pixels is less then min, do not take this class into account", 0);
190  Optionpk<unsigned short> band_opt("b", "band", "band index (starting from 0, either use band option or use start to end)");
191  Optionpk<unsigned short> bstart_opt("sband", "startband", "Start band sequence number");
192  Optionpk<unsigned short> bend_opt("eband", "endband", "End band sequence number");
193  Optionpk<double> offset_opt("offset", "offset", "offset value for each spectral band input features: refl[band]=(DN[band]-offset[band])/scale[band]", 0.0);
194  Optionpk<double> scale_opt("scale", "scale", "scale value for each spectral band input features: refl=(DN[band]-offset[band])/scale[band] (use 0 if scale min and max in each band to -1.0 and 1.0)", 0.0);
195  Optionpk<unsigned int> maxit_opt("maxit","maxit","maximum number of iterations",500);
196 //Optionpk<string> algorithm_opt("a", "algorithm", "GRID, or any optimization algorithm from http://ab-initio.mit.edu/wiki/index.php/NLopt_Algorithms","GRID");
197  Optionpk<double> tolerance_opt("tol","tolerance","relative tolerance for stopping criterion",0.0001);
198 
199  input_opt.setHide(1);
200  tlayer_opt.setHide(1);
201  label_opt.setHide(1);
202  balance_opt.setHide(1);
203  random_opt.setHide(1);
204  minSize_opt.setHide(1);
205  band_opt.setHide(1);
206  bstart_opt.setHide(1);
207  bend_opt.setHide(1);
208  offset_opt.setHide(1);
209  scale_opt.setHide(1);
210  svm_type_opt.setHide(1);
211  kernel_type_opt.setHide(1);
212  kernel_degree_opt.setHide(1);
213  coef0_opt.setHide(1);
214  nu_opt.setHide(1);
215  epsilon_loss_opt.setHide(1);
216  cache_opt.setHide(1);
217  epsilon_tol_opt.setHide(1);
218  shrinking_opt.setHide(1);
219  prob_est_opt.setHide(1);
220  cv_opt.setHide(1);
221  costfunction_opt.setHide(1);
222  maxit_opt.setHide(1);
223  tolerance_opt.setHide(1);
224 // algorithm_opt.setHide(1);
225  classname_opt.setHide(1);
226  classvalue_opt.setHide(1);
227 
228  bool doProcess;//stop process when program was invoked with help option (-h --help)
229  try{
230  doProcess=training_opt.retrieveOption(argc,argv);
231  ccost_opt.retrieveOption(argc,argv);
232  gamma_opt.retrieveOption(argc,argv);
233  stepcc_opt.retrieveOption(argc,argv);
234  stepg_opt.retrieveOption(argc,argv);
235  input_opt.retrieveOption(argc,argv);
236  tlayer_opt.retrieveOption(argc,argv);
237  label_opt.retrieveOption(argc,argv);
238  balance_opt.retrieveOption(argc,argv);
239  random_opt.retrieveOption(argc,argv);
240  minSize_opt.retrieveOption(argc,argv);
241  band_opt.retrieveOption(argc,argv);
242  bstart_opt.retrieveOption(argc,argv);
243  bend_opt.retrieveOption(argc,argv);
244  offset_opt.retrieveOption(argc,argv);
245  scale_opt.retrieveOption(argc,argv);
246  svm_type_opt.retrieveOption(argc,argv);
247  kernel_type_opt.retrieveOption(argc,argv);
248  kernel_degree_opt.retrieveOption(argc,argv);
249  coef0_opt.retrieveOption(argc,argv);
250  nu_opt.retrieveOption(argc,argv);
251  epsilon_loss_opt.retrieveOption(argc,argv);
252  cache_opt.retrieveOption(argc,argv);
253  epsilon_tol_opt.retrieveOption(argc,argv);
254  shrinking_opt.retrieveOption(argc,argv);
255  prob_est_opt.retrieveOption(argc,argv);
256  cv_opt.retrieveOption(argc,argv);
257  costfunction_opt.retrieveOption(argc,argv);
258  maxit_opt.retrieveOption(argc,argv);
259  tolerance_opt.retrieveOption(argc,argv);
260 // algorithm_opt.retrieveOption(argc,argv);
261  classname_opt.retrieveOption(argc,argv);
262  classvalue_opt.retrieveOption(argc,argv);
263  verbose_opt.retrieveOption(argc,argv);
264  }
265  catch(string predefinedString){
266  std::cout << predefinedString << std::endl;
267  exit(0);
268  }
269  if(!doProcess){
270  cout << endl;
271  cout << "Usage: pkoptsvm -t training" << endl;
272  cout << endl;
273  std::cout << "short option -h shows basic options only, use long option --help to show all options" << std::endl;
274  exit(0);//help was invoked, stop processing
275  }
276 
277  assert(training_opt.size());
278  if(input_opt.size())
279  cv_opt[0]=0;
280 
281  if(verbose_opt[0]>=1){
282  if(input_opt.size())
283  std::cout << "input filename: " << input_opt[0] << std::endl;
284  std::cout << "training vector file: " << std::endl;
285  for(int ifile=0;ifile<training_opt.size();++ifile)
286  std::cout << training_opt[ifile] << std::endl;
287  std::cout << "verbose: " << verbose_opt[0] << std::endl;
288  }
289 
290  unsigned int totalSamples=0;
291  unsigned int totalTestSamples=0;
292 
293  unsigned short nclass=0;
294  int nband=0;
295  int startBand=2;//first two bands represent X and Y pos
296 
297  vector<double> offset;
298  vector<double> scale;
299  vector< Vector2d<float> > trainingPixels;//[class][sample][band]
300  vector< Vector2d<float> > testPixels;//[class][sample][band]
301 
302  // if(priors_opt.size()>1){//priors from argument list
303  // priors.resize(priors_opt.size());
304  // double normPrior=0;
305  // for(int iclass=0;iclass<priors_opt.size();++iclass){
306  // priors[iclass]=priors_opt[iclass];
307  // normPrior+=priors[iclass];
308  // }
309  // //normalize
310  // for(int iclass=0;iclass<priors_opt.size();++iclass)
311  // priors[iclass]/=normPrior;
312  // }
313 
314  //convert start and end band options to vector of band indexes
315  try{
316  if(bstart_opt.size()){
317  if(bend_opt.size()!=bstart_opt.size()){
318  string errorstring="Error: options for start and end band indexes must be provided as pairs, missing end band";
319  throw(errorstring);
320  }
321  band_opt.clear();
322  for(int ipair=0;ipair<bstart_opt.size();++ipair){
323  if(bend_opt[ipair]<=bstart_opt[ipair]){
324  string errorstring="Error: index for end band must be smaller then start band";
325  throw(errorstring);
326  }
327  for(int iband=bstart_opt[ipair];iband<=bend_opt[ipair];++iband)
328  band_opt.push_back(iband);
329  }
330  }
331  }
332  catch(string error){
333  cerr << error << std::endl;
334  exit(1);
335  }
336  //sort bands
337  if(band_opt.size())
338  std::sort(band_opt.begin(),band_opt.end());
339 
340  // map<string,short> classValueMap;//global variable for now (due to getCost)
341  if(classname_opt.size()){
342  assert(classname_opt.size()==classvalue_opt.size());
343  for(int iclass=0;iclass<classname_opt.size();++iclass)
344  classValueMap[classname_opt[iclass]]=classvalue_opt[iclass];
345  }
346 
347  //----------------------------------- Training -------------------------------
348  struct svm_problem prob;
349  vector<string> fields;
350  //organize training data
351  trainingPixels.clear();
352  testPixels.clear();
353  map<string,Vector2d<float> > trainingMap;
354  map<string,Vector2d<float> > testMap;
355  if(verbose_opt[0]>=1)
356  std::cout << "reading training file " << training_opt[0] << std::endl;
357  try{
358  ImgReaderOgr trainingReader(training_opt[0]);
359  if(band_opt.size()){
360  totalSamples=trainingReader.readDataImageOgr(trainingMap,fields,band_opt,label_opt[0],tlayer_opt,verbose_opt[0]);
361  if(input_opt.size()){
362  ImgReaderOgr inputReader(input_opt[0]);
363  totalTestSamples=inputReader.readDataImageOgr(testMap,fields,band_opt,label_opt[0],tlayer_opt,verbose_opt[0]);
364  inputReader.close();
365  }
366  }
367  else{
368  totalSamples=trainingReader.readDataImageOgr(trainingMap,fields,0,0,label_opt[0],tlayer_opt,verbose_opt[0]);
369  if(input_opt.size()){
370  ImgReaderOgr inputReader(input_opt[0]);
371  totalTestSamples=inputReader.readDataImageOgr(testMap,fields,0,0,label_opt[0],tlayer_opt,verbose_opt[0]);
372  inputReader.close();
373  }
374  trainingReader.close();
375  }
376  if(trainingMap.size()<2){
377  // map<string,Vector2d<float> >::iterator mapit=trainingMap.begin();
378  // while(mapit!=trainingMap.end())
379  // cerr << mapit->first << " -> " << classValueMap[mapit->first] << std::endl;
380  string errorstring="Error: could not read at least two classes from training input file";
381  throw(errorstring);
382  }
383  if(input_opt.size()&&testMap.size()<2){
384  string errorstring="Error: could not read at least two classes from test input file";
385  throw(errorstring);
386  }
387  }
388  catch(string error){
389  cerr << error << std::endl;
390  exit(1);
391  }
392  catch(...){
393  cerr << "error catched" << std::endl;
394  exit(1);
395  }
396  //todo delete class 0 ?
397  // if(verbose_opt[0]>=1)
398  // std::cout << "erasing class 0 from training set (" << trainingMap[0].size() << " from " << totalSamples << ") samples" << std::endl;
399  // totalSamples-=trainingMap[0].size();
400  // trainingMap.erase(0);
401 
402  if(verbose_opt[0]>1)
403  std::cout << "training pixels: " << std::endl;
404  map<string,Vector2d<float> >::iterator mapit;
405  mapit=trainingMap.begin();
406  while(mapit!=trainingMap.end()){
407  if(classValueMap.size()){
408  //check if name in training is covered by classname_opt (values can not be 0)
409  if(classValueMap[mapit->first]>0){
410  if(verbose_opt[0])
411  std::cout << mapit->first << " -> " << classValueMap[mapit->first] << std::endl;
412  }
413  else{
414  std::cerr << "Error: names in classname option are not complete, please check names in training vector and make sure classvalue is > 0" << std::endl;
415  exit(1);
416  }
417  }
418  //delete small classes
419  if((mapit->second).size()<minSize_opt[0]){
420  trainingMap.erase(mapit);
421  continue;
422  }
423  nameVector.push_back(mapit->first);
424  trainingPixels.push_back(mapit->second);
425  if(verbose_opt[0]>1)
426  std::cout << mapit->first << ": " << (mapit->second).size() << " samples" << std::endl;
427  // trainingPixels.push_back(mapit->second); ??
428  // ++iclass;
429  ++mapit;
430  }
431  nclass=trainingPixels.size();
432  if(classname_opt.size())
433  assert(nclass==classname_opt.size());
434  nband=trainingPixels[0][0].size()-2;//X and Y//trainingPixels[0][0].size();
435 
436  mapit=testMap.begin();
437  while(mapit!=testMap.end()){
438  if(classValueMap.size()){
439  //check if name in test is covered by classname_opt (values can not be 0)
440  if(classValueMap[mapit->first]>0){
441  ;//ok, no need to print to std::cout
442  }
443  else{
444  std::cerr << "Error: names in classname option are not complete, please check names in test vector and make sure classvalue is > 0" << std::endl;
445  exit(1);
446  }
447  }
448  //no need to delete small classes for test sample
449  testPixels.push_back(mapit->second);
450  if(verbose_opt[0]>1)
451  std::cout << mapit->first << ": " << (mapit->second).size() << " samples" << std::endl;
452  ++mapit;
453  }
454  if(input_opt.size()){
455  assert(nclass==testPixels.size());
456  assert(nband=testPixels[0][0].size()-2);//X and Y//testPixels[0][0].size();
457  assert(!cv_opt[0]);
458  }
459 
460  //do not remove outliers here: could easily be obtained through ogr2ogr -where 'B2<110' output.shp input.shp
461  //balance training data
462  if(balance_opt[0]>0){
463  if(random_opt[0])
464  srand(time(NULL));
465  totalSamples=0;
466  for(int iclass=0;iclass<nclass;++iclass){
467  if(trainingPixels[iclass].size()>balance_opt[0]){
468  while(trainingPixels[iclass].size()>balance_opt[0]){
469  int index=rand()%trainingPixels[iclass].size();
470  trainingPixels[iclass].erase(trainingPixels[iclass].begin()+index);
471  }
472  }
473  else{
474  int oldsize=trainingPixels[iclass].size();
475  for(int isample=trainingPixels[iclass].size();isample<balance_opt[0];++isample){
476  int index = rand()%oldsize;
477  trainingPixels[iclass].push_back(trainingPixels[iclass][index]);
478  }
479  }
480  totalSamples+=trainingPixels[iclass].size();
481  }
482  assert(totalSamples==nclass*balance_opt[0]);
483  }
484 
485  //no need to balance test sample
486  //set scale and offset
487  offset.resize(nband);
488  scale.resize(nband);
489  if(offset_opt.size()>1)
490  assert(offset_opt.size()==nband);
491  if(scale_opt.size()>1)
492  assert(scale_opt.size()==nband);
493  for(int iband=0;iband<nband;++iband){
494  if(verbose_opt[0]>1)
495  std::cout << "scaling for band" << iband << std::endl;
496  offset[iband]=(offset_opt.size()==1)?offset_opt[0]:offset_opt[iband];
497  scale[iband]=(scale_opt.size()==1)?scale_opt[0]:scale_opt[iband];
498  //search for min and maximum
499  if(scale[iband]<=0){
500  float theMin=trainingPixels[0][0][iband+startBand];
501  float theMax=trainingPixels[0][0][iband+startBand];
502  for(int iclass=0;iclass<nclass;++iclass){
503  for(int isample=0;isample<trainingPixels[iclass].size();++isample){
504  if(theMin>trainingPixels[iclass][isample][iband+startBand])
505  theMin=trainingPixels[iclass][isample][iband+startBand];
506  if(theMax<trainingPixels[iclass][isample][iband+startBand])
507  theMax=trainingPixels[iclass][isample][iband+startBand];
508  }
509  }
510  offset[iband]=theMin+(theMax-theMin)/2.0;
511  scale[iband]=(theMax-theMin)/2.0;
512  if(verbose_opt[0]>1){
513  std::cout << "Extreme image values for band " << iband << ": [" << theMin << "," << theMax << "]" << std::endl;
514  std::cout << "Using offset, scale: " << offset[iband] << ", " << scale[iband] << std::endl;
515  std::cout << "scaled values for band " << iband << ": [" << (theMin-offset[iband])/scale[iband] << "," << (theMax-offset[iband])/scale[iband] << "]" << std::endl;
516  }
517  }
518  }
519 
520  // if(priors_opt.size()==1){//default: equal priors for each class
521  // priors.resize(nclass);
522  // for(int iclass=0;iclass<nclass;++iclass)
523  // priors[iclass]=1.0/nclass;
524  // }
525  // assert(priors_opt.size()==1||priors_opt.size()==nclass);
526 
527  if(verbose_opt[0]>=1){
528  std::cout << "number of bands: " << nband << std::endl;
529  std::cout << "number of classes: " << nclass << std::endl;
530  // std::cout << "priors:";
531  // for(int iclass=0;iclass<nclass;++iclass)
532  // std::cout << " " << priors[iclass];
533  // std::cout << std::endl;
534  }
535 
536  //Calculate features of training (and test) set
537  nctraining.resize(nclass);
538  nctest.resize(nclass);
539  vector< Vector2d<float> > trainingFeatures(nclass);
540  for(int iclass=0;iclass<nclass;++iclass){
541  if(verbose_opt[0]>=1)
542  std::cout << "calculating features for class " << iclass << std::endl;
543  nctraining[iclass]=trainingPixels[iclass].size();
544  if(verbose_opt[0]>=1)
545  std::cout << "nctraining[" << iclass << "]: " << nctraining[iclass] << std::endl;
546  if(testPixels.size()>iclass){
547  nctest[iclass]=testPixels[iclass].size();
548  if(verbose_opt[0]>=1){
549  std::cout << "nctest[" << iclass << "]: " << nctest[iclass] << std::endl;
550  }
551  }
552  else
553  nctest[iclass]=0;
554  // trainingFeatures[iclass].resize(nctraining[iclass]);
555  trainingFeatures[iclass].resize(nctraining[iclass]+nctest[iclass]);
556  for(int isample=0;isample<nctraining[iclass];++isample){
557  //scale pixel values according to scale and offset!!!
558  for(int iband=0;iband<nband;++iband){
559  assert(trainingPixels[iclass].size()>isample);
560  assert(trainingPixels[iclass][isample].size()>iband+startBand);
561  assert(offset.size()>iband);
562  assert(scale.size()>iband);
563  float value=trainingPixels[iclass][isample][iband+startBand];
564  trainingFeatures[iclass][isample].push_back((value-offset[iband])/scale[iband]);
565  }
566  }
567  // assert(trainingFeatures[iclass].size()==nctraining[iclass]);
568  for(int isample=0;isample<nctest[iclass];++isample){
569  //scale pixel values according to scale and offset!!!
570  for(int iband=0;iband<nband;++iband){
571  assert(testPixels[iclass].size()>isample);
572  assert(testPixels[iclass][isample].size()>iband+startBand);
573  assert(offset.size()>iband);
574  assert(scale.size()>iband);
575  float value=testPixels[iclass][isample][iband+startBand];
576  // testFeatures[iclass][isample].push_back((value-offset[iband])/scale[iband]);
577  trainingFeatures[iclass][nctraining[iclass]+isample].push_back((value-offset[iband])/scale[iband]);
578  }
579  }
580  assert(trainingFeatures[iclass].size()==nctraining[iclass]+nctest[iclass]);
581  }
582 
583  assert(ccost_opt.size()>1);//must have boundaries at least (initial value is optional)
584  if(ccost_opt.size()<3)//create initial value
585  ccost_opt.push_back(sqrt(ccost_opt[0]*ccost_opt[1]));
586  assert(gamma_opt.size()>1);//must have boundaries at least (initial value is optional)
587  if(gamma_opt.size()<3)//create initial value
588  gamma_opt.push_back(sqrt(gamma_opt[0]*gamma_opt[1]));//will be translated to 1.0/nFeatures
589  assert(ccost_opt.size()==3);//min, init, max
590  assert(gamma_opt.size()==3);//min, init, max
591  assert(gamma_opt[0]<gamma_opt[1]);
592  assert(gamma_opt[0]<gamma_opt[2]);
593  assert(gamma_opt[2]<gamma_opt[1]);
594  assert(ccost_opt[0]<ccost_opt[1]);
595  assert(ccost_opt[0]<ccost_opt[2]);
596  assert(ccost_opt[2]<ccost_opt[1]);
597 
598  std::vector<double> x(2);
599 // if(algorithm_opt[0]=="GRID"){
600  if (1){
601  // double minError=1000;
602  // double minCost=0;
603  // double minGamma=0;
604  double maxKappa=0;
605  double maxCost=0;
606  double maxGamma=0;
607  const char* pszMessage;
608  void* pProgressArg=NULL;
609  GDALProgressFunc pfnProgress=GDALTermProgress;
610  double progress=0;
611  if(!verbose_opt[0])
612  pfnProgress(progress,pszMessage,pProgressArg);
613  double ncost=log(ccost_opt[1])/log(stepcc_opt[0])-log(ccost_opt[0])/log(stepcc_opt[0]);
614  double ngamma=log(gamma_opt[1])/log(stepg_opt[0])-log(gamma_opt[0])/log(stepg_opt[0]);
615  for(double ccost=ccost_opt[0];ccost<=ccost_opt[1];ccost*=stepcc_opt[0]){
616  for(double gamma=gamma_opt[0];gamma<=gamma_opt[1];gamma*=stepg_opt[0]){
617  x[0]=ccost;
618  x[1]=gamma;
619  std::vector<double> theGrad;
620  double kappa=0;
621  kappa=objFunction(x,theGrad,&trainingFeatures);
622  if(kappa>maxKappa){
623  maxKappa=kappa;
624  maxCost=ccost;
625  maxGamma=gamma;
626  }
627  if(verbose_opt[0])
628  std::cout << ccost << " " << gamma << " " << kappa<< std::endl;
629  progress+=1.0/ncost/ngamma;
630  if(!verbose_opt[0])
631  pfnProgress(progress,pszMessage,pProgressArg);
632  }
633  }
634  progress=1.0;
635  if(!verbose_opt[0])
636  pfnProgress(progress,pszMessage,pProgressArg);
637  x[0]=maxCost;
638  x[1]=maxGamma;
639  }
640  //else{
641  // nlopt::opt optimizer=OptFactory::getOptimizer(algorithm_opt[0],2);
642  // if(verbose_opt[0]>1)
643  // std::cout << "optimization algorithm: " << optimizer.get_algorithm_name() << "..." << std::endl;
644  // std::vector<double> lb(2);
645  // std::vector<double> init(2);
646  // std::vector<double> ub(2);
647 
648  // lb[0]=ccost_opt[0];
649  // lb[1]=(gamma_opt[0]>0)? gamma_opt[0] : 1.0/trainingFeatures[0][0].size();
650  // init[0]=ccost_opt[2];
651  // init[1]=(gamma_opt[2]>0)? gamma_opt[1] : 1.0/trainingFeatures[0][0].size();
652  // ub[0]=ccost_opt[1];
653  // ub[1]=(gamma_opt[1]>0)? gamma_opt[1] : 1.0/trainingFeatures[0][0].size();
654  // // optimizer.set_min_objective(objFunction, &trainingFeatures);
655  // optimizer.set_max_objective(objFunction, &trainingFeatures);
656  // optimizer.set_lower_bounds(lb);
657  // optimizer.set_upper_bounds(ub);
658  // if(verbose_opt[0]>1)
659  // std::cout << "set stopping criteria" << std::endl;
660  // //set stopping criteria
661  // if(maxit_opt[0])
662  // optimizer.set_maxeval(maxit_opt[0]);
663  // else
664  // optimizer.set_xtol_rel(tolerance_opt[0]);
665  // double minf=0;
666  // x=init;
667  // try{
668  // optimizer.optimize(x, minf);
669  // }
670  // catch(string error){
671  // cerr << error << std::endl;
672  // exit(1);
673  // }
674  // catch (exception& e){
675  // cout << e.what() << endl;
676  // }
677  // catch(...){
678  // cerr << "error catched" << std::endl;
679  // exit(1);
680  // }
681 
682  // double ccost=x[0];
683  // double gamma=x[1];
684  // if(verbose_opt[0])
685  // std::cout << "optimized with " << optimizer.get_algorithm_name() << "..." << std::endl;
686  //}
687  std::cout << " --ccost " << x[0];
688  std::cout << " --gamma " << x[1];
689  std::cout << std::endl;
690 }