LSSTApplications  11.0-13-gbb96280,12.1+18,12.1+7,12.1-1-g14f38d3+72,12.1-1-g16c0db7+5,12.1-1-g5961e7a+84,12.1-1-ge22e12b+23,12.1-11-g06625e2+4,12.1-11-g0d7f63b+4,12.1-19-gd507bfc,12.1-2-g7dda0ab+38,12.1-2-gc0bc6ab+81,12.1-21-g6ffe579+2,12.1-21-gbdb6c2a+4,12.1-24-g941c398+5,12.1-3-g57f6835+7,12.1-3-gf0736f3,12.1-37-g3ddd237,12.1-4-gf46015e+5,12.1-5-g06c326c+20,12.1-5-g648ee80+3,12.1-5-gc2189d7+4,12.1-6-ga608fc0+1,12.1-7-g3349e2a+5,12.1-7-gfd75620+9,12.1-9-g577b946+5,12.1-9-gc4df26a+10
LSSTDataManagementBasePackage
GaussianProcess.cc
Go to the documentation of this file.
1 // - * - LSST - C++ - * -
2 
3 /*
4  * LSST Data Management System
5  * Copyright 2008, 2009, 2010 LSST Corporation.
6  *
7  * This product includes software developed by the
8  * LSST Project (http://www.lsst.org/).
9  *
10  * This program is free software: you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation, either version 3 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the LSST License Statement and
21  * the GNU General Public License along with this program. If not,
22  * see < http://www.lsstcorp.org/LegalNotices/ > .
23  */
24 
25 #include <iostream>
26 #include <cmath>
27 
29 
30 using namespace std;
31 
32 
33 namespace lsst {
34 namespace afw {
35 namespace math {
36 
37 GaussianProcessTimer::GaussianProcessTimer()
38 {
39  _interpolationCount = 0;
40  _iterationTime = 0.0;
41  _eigenTime = 0.0;
42  _searchTime = 0.0;
43  _varianceTime = 0.0;
44  _totalTime = 0.0;
45 
46 }
47 
48 void GaussianProcessTimer::reset()
49 {
50  _interpolationCount = 0;
51  _iterationTime = 0.0;
52  _eigenTime = 0.0;
53  _searchTime = 0.0;
54  _varianceTime = 0.0;
55  _totalTime = 0.0;
56 
57 }
58 
59 void GaussianProcessTimer::start()
60 {
61  _before = double(lsst::daf::base::DateTime::now().get()*24*60*60);
62  _beginning = _before;
63 }
64 
65 void GaussianProcessTimer::addToEigen()
66 {
67  double after;
68  after = double(lsst::daf::base::DateTime::now().get()*24*60*60);
69  _eigenTime += after - _before;
70  _before = after;
71 }
72 
73 void GaussianProcessTimer::addToVariance()
74 {
75  double after;
76  after = double(lsst::daf::base::DateTime::now().get()*24*60*60);
77  _varianceTime += after - _before;
78  _before = after;
79 }
80 
81 void GaussianProcessTimer::addToSearch()
82 {
83  double after;
84  after = double(lsst::daf::base::DateTime::now().get()*24*60*60);
85  _searchTime += after - _before;
86  _before = after;
87 }
88 
89 void GaussianProcessTimer::addToIteration()
90 {
91  double after;
92  after = double(lsst::daf::base::DateTime::now().get()*24*60*60);
93  _iterationTime += after - _before;
94  _before = after;
95 }
96 
97 void GaussianProcessTimer::addToTotal(int i)
98 {
99  double after;
100  after = double(lsst::daf::base::DateTime::now().get()*24*60*60);
101  _totalTime += after - _beginning;
102  _interpolationCount += i;
103 }
104 
105 void GaussianProcessTimer::display(){
106  std::cout << "\nSearch time " << _searchTime << "\n";
107  std::cout << "Eigen time " << _eigenTime << "\n";
108  std::cout << "Variance time " << _varianceTime << "\n";
109  std::cout << "Iteration time " << _iterationTime << "\n";
110  std::cout << "Total time " << _totalTime << "\n";
111  std::cout << "Number of interpolations " << _interpolationCount << "\n";
112 }
113 
/**
 * Build the k-d tree from a set of data points.
 *
 * @param dt [npts][dim] array of data points; copied into internal storage
 *
 * @throw RuntimeError if the freshly-built tree fails _testTree()
 */
template < typename T >
void KdTree < T > ::Initialize(ndarray::Array < T,2,2 > const &dt)
{
    int i;

    // point count and dimensionality come straight from the input array
    _npts = dt.template getSize < 0 > ();
    _dimensions = dt.template getSize < 1 > ();

    //a buffer to use when first building the tree
    _inn = allocate(ndarray::makeVector(_npts));

    // _room is the allocated capacity; it grows by _roomStep in addPoint()
    _roomStep = 5000;
    _room = _npts;

    _data = allocate(ndarray::makeVector(_room,_dimensions));

    _data.deep() = dt;

    // each tree row holds 4 fields: DIMENSION, LT, GEQ, PARENT
    _tree = allocate(ndarray::makeVector(_room,4));

    // start with the identity permutation of point indices
    for (i = 0 ; i < _npts ; i++ ) {
        _inn[i] = i;
    }

    // recursively partition the points; -1/-1 marks "no parent yet"
    _organize(_inn,_npts, -1, -1);


    // sanity-check the parent/child links before declaring success
    i = _testTree();
    if (i == 0) {
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Failed to properly initialize KdTree\n");
    }

}
148 
/**
 * Find the n_nn points in the tree nearest (Euclidean distance) to the
 * query point v.
 *
 * @param neighdex [n_nn] output: indices of the nearest neighbors,
 *                 ordered nearest to farthest
 * @param dd       [n_nn] output: the corresponding distances
 * @param v        the query point
 * @param n_nn     number of neighbors requested
 *
 * @throw RuntimeError if n_nn is non-positive, exceeds the number of
 *        points in the tree, or does not match the sizes of neighdex/dd
 *
 * NOTE(review): this const method writes the _neighbor* scratch members;
 * presumably they are declared mutable in the header -- confirm.
 */
template < typename T >
void KdTree < T > ::findNeighbors(ndarray::Array < int,1,1 > neighdex,
                                  ndarray::Array < double,1,1 > dd,
                                  ndarray::Array < const T,1,1 > const &v,
                                  int n_nn) const
{

    if(n_nn > _npts){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked for more neighbors than kd tree contains\n");
    }

    if(n_nn <= 0){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked for zero or a negative number of neighbors\n");
    }

    if(neighdex.getNumElements() != n_nn){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Size of neighdex does not equal n_nn in KdTree.findNeighbors\n");
    }

    if(dd.getNumElements() != n_nn){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Size of dd does not equal n_nn in KdTree.findNeighbors\n");
    }

    int i,start;

    // scratch space for the running candidate list; -1 marks "unfilled"
    _neighborCandidates = allocate(ndarray::makeVector(n_nn));
    _neighborDistances = allocate(ndarray::makeVector(n_nn));
    _neighborsFound = 0;
    _neighborsWanted = n_nn;

    for (i = 0; i < n_nn; i++ ) _neighborDistances[i] = -1.0;

    // descend the tree to the node nearest v in tree order; seed the list
    start = _findNode(v);

    _neighborDistances[0] = _distance(v,_data[start]);
    _neighborCandidates[0] = start;
    _neighborsFound = 1;


    // explore outward along the start node's LT, GEQ, and PARENT links
    // (tree fields 1..3); _lookForNeighbors prunes unreachable branches
    for (i = 1; i < 4; i++ ) {
        if(_tree[start][i] >= 0){
            _lookForNeighbors(v,_tree[start][i], start);
        }
    }

    // copy the accumulated candidates into the caller's output arrays
    for (i = 0 ; i < n_nn ; i++ ) {
        neighdex[i] = _neighborCandidates[i];
        dd[i] = _neighborDistances[i];
    }
}
203 
204 template < typename T >
205 T KdTree < T > ::getData(int ipt, int idim) const
206 {
207  if(ipt >= _npts || ipt < 0){
208  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
209  "Asked for point that does not exist in KdTree\n");
210  }
211 
212  if(idim >= _dimensions || idim < 0){
213  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
214  "KdTree does not have points of that many dimensions\n");
215  }
216 
217  return _data[ipt][idim];
218 }
219 
220 template < typename T >
221 ndarray::Array < T,1,1 > KdTree < T > ::getData(int ipt) const
222 {
223  if(ipt >= _npts || ipt<0){
224  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
225  "Asked for point that does not exist in KdTree\n");
226  }
227 
228  return _data[ipt];
229 }
230 
/**
 * Add a single point to the tree, growing internal storage if needed.
 *
 * @param v the point to add; must have the tree's dimensionality
 *
 * @throw RuntimeError if v has the wrong dimensionality, if the target
 *        leaf slot is already occupied, or if the post-insert walk up
 *        the tree does not terminate at _masterParent
 */
template < typename T >
void KdTree < T > ::addPoint(ndarray::Array < const T,1,1 > const &v)
{

    if(v.getNumElements() != _dimensions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You are trying to add a point of the incorrect dimensionality to KdTree\n");
    }

    int i,j,node,dim,dir;

    // the new point will hang off the node _findNode descends to, and
    // splits on the next dimension after that node's (cyclically)
    node = _findNode(v);
    dim = _tree[node][DIMENSION] + 1;
    if(dim == _dimensions)dim = 0;

    // out of room: grow _data and _tree by _roomStep, copying old contents
    if(_npts == _room){

        ndarray::Array < T,2,2 > dbuff = allocate(ndarray::makeVector(_npts, _dimensions));
        ndarray::Array < int,2,2 > tbuff = allocate(ndarray::makeVector(_npts, 4));

        dbuff.deep() = _data;
        tbuff.deep() = _tree;

        _room += _roomStep;

        _tree = allocate(ndarray::makeVector(_room, 4));
        _data = allocate(ndarray::makeVector(_room, _dimensions));


        for (i = 0; i < _npts; i++ ) {
            for (j = 0; j < _dimensions; j++ ) _data[i][j] = dbuff[i][j];
            for (j = 0; j < 4; j++ ) _tree[i][j] = tbuff[i][j];
        }
    }

    // wire the new node (at slot _npts) to its parent
    _tree[_npts][DIMENSION] = dim;
    _tree[_npts][PARENT] = node;
    i = _tree[node][DIMENSION];

    // attach on the LT side if v is below the parent's split value,
    // otherwise on the GEQ side; either slot must currently be empty
    if(_data[node][i] > v[i]){
        if(_tree[node][LT] >= 0){
            throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                              "Trying to add to KdTree in a node that is already occupied\n");
        }
        _tree[node][LT] = _npts;
        dir=LT;
    }
    else{
        if(_tree[node][GEQ] >= 0){
            throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                              "Trying to add to KdTree in a node that is already occupied\n");
        }
        _tree[node][GEQ] = _npts;
        dir=GEQ;
    }
    // the new node is a leaf: no children yet
    _tree[_npts][LT] = -1;
    _tree[_npts][GEQ] = -1;
    for (i = 0; i < _dimensions; i++ ) {
        _data[_npts][i] = v[i];
    }

    _npts++;


    // verify the new leaf is consistent with every split on the path
    // back to the root; _walkUpTree returns _masterParent on success
    i = _walkUpTree(_tree[_npts-1][PARENT], dir, _npts-1);
    if (i != _masterParent){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Adding to KdTree failed\n");
    }

}
302 
/**
 * Return the number of points currently stored in the tree.
 */
template < typename T >
int KdTree < T > ::getNPoints() const
{
    return _npts;
}
308 
309 template < typename T >
310 void KdTree < T > ::getTreeNode(ndarray::Array < int,1,1 > const &v,
311  int dex) const
312 {
313  if(dex < 0 || dex >= _npts){
314  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
315  "Asked for tree information on point that does not exist\n");
316  }
317 
318  if(v.getNumElements() != 4){
319  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
320  "Need to pass a 4-element ndarray into KdTree.getTreeNode()\n");
321  }
322 
323  v[0] = _tree[dex][DIMENSION];
324  v[1] = _tree[dex][LT];
325  v[2] = _tree[dex][GEQ];
326  v[3] = _tree[dex][PARENT];
327 }
328 
/**
 * Validate the internal consistency of the tree.
 *
 * Checks that exactly one node has no parent (the master parent), that
 * no node is parent to more than two children, and that every node's
 * data respects all of the split values on its path to the root.
 *
 * @return 1 if the tree is consistent, 0 otherwise (diagnostics are
 *         printed to stdout on failure)
 */
template < typename T >
int KdTree < T > ::_testTree() const{

    if(_npts == 1 && _tree[0][PARENT] < 0 && _masterParent==0){
        // there is only one point in the tree
        return 1;
    }

    int i,j,output;
    std::vector < int > isparent;

    output=1;

    // isparent[i] counts how many children claim node i as their parent
    for (i = 0; i < _npts; i++ ) isparent.push_back(0);

    // exactly one node may be parentless (the master parent)
    j = 0;
    for (i = 0; i < _npts; i++ ) {
        if(_tree[i][PARENT] < 0)j++;
    }
    if(j != 1){
        std::cout << "_tree FAILURE " << j << " _masterParents\n";
        return 0;
    }

    for (i = 0; i < _npts; i++ ) {
        if(_tree[i][PARENT] >= 0){
            isparent[_tree[i][PARENT]]++;
        }
    }

    // a k-d tree node can have at most two children
    for (i = 0; i < _npts; i++ ) {
        if(isparent[i] > 2){
            std::cout << "_tree FAILURE " << i << " is parent to " << isparent[i] << "\n";
            return 0;
        }
    }


    // every node must satisfy each split constraint on its path to the
    // root; _walkUpTree returns _masterParent when the path is valid
    for (i = 0; i < _npts; i++ ) {
        if(_tree[i][PARENT] >= 0) {
            if(_tree[_tree[i][PARENT]][LT] == i)j = LT;
            else j = GEQ;
            output = _walkUpTree(_tree[i][PARENT], j, i);

            if(output != _masterParent){
                return 0;
            }
        }
    }

    if(output != _masterParent) return 0;
    else return 1;
}
382 
/**
 * Recursively build the tree over a subset of points.
 *
 * Chooses the split dimension with the greatest variance over the subset,
 * sorts the subset on that dimension, installs the median element as the
 * daughter node, and recurses into the two halves.
 *
 * @param use    indices (into _data) of the points to organize; rearranged
 *               in place by the sort
 * @param ct     number of entries of use[] to consider
 * @param parent index of the parent node, or -1 at the top level
 * @param dir    which child slot (LT/GEQ) of parent to fill, or -1 at the
 *               top level
 */
template < typename T >
void KdTree < T > ::_organize(ndarray::Array < int,1,1 > const &use,
                              int ct,
                              int parent,
                              int dir
                              )
{

    int i,j,k,l,idim,daughter;
    T mean,var,varbest;


    std::vector < std::vector < T > > toSort;
    std::vector < T > toSortElement;

    if(ct > 1){
        //below is code to choose the dimension on which the available points
        //have the greatest variance. This will be the dimension on which
        //the daughter node splits the data
        idim=0;
        varbest=-1.0;
        for (i = 0; i < _dimensions; i++ ) {
            mean = 0.0;
            var = 0.0;
            for (j = 0; j < ct; j++ ) {
                mean += _data[use[j]][i];
                var += _data[use[j]][i]*_data[use[j]][i];
            }
            mean = mean/double(ct);
            var = var/double(ct) - mean*mean;
            // on a variance tie, prefer a dimension beyond the parent's
            // split dimension so consecutive levels cycle through dims
            if(i == 0 || var > varbest ||
               (var == varbest && parent >= 0 && i > _tree[parent][DIMENSION])) {
                idim = i;
                varbest = var;
            }
        }//for(i = 0;i < _dimensions;i++ )

        //we need to sort the available data points according to their idim - th element
        //but we need to keep track of their original indices, so we can refer back to
        //their positions in _data. Therefore, the code constructs a 2 - dimensional
        //std::vector. The first dimension contains the element to be sorted.
        //The second dimension contains the original index information.
        //After sorting, the (rearranged) index information will be stored back in the
        //ndarray use[]

        toSortElement.push_back(_data[use[0]][idim]);
        toSortElement.push_back(T(use[0]));
        toSort.push_back(toSortElement);
        for(i = 1; i < ct; i++ ) {
            toSortElement[0] = _data[use[i]][idim];
            toSortElement[1] = T(use[i]);
            toSort.push_back(toSortElement);
        }

        std::stable_sort(toSort.begin(), toSort.end());

        // pick the split index: start at the median, then slide off any
        // run of equal keys so every LT descendant is strictly less
        k = ct/2;
        l = ct/2;
        while(k > 0 && toSort[k][0] == toSort[k - 1][0]) k--;

        while(l < ct - 1 && toSort[l][0] == toSort[ct/2][0]) l++;

        if((ct/2 - k) < (l - ct/2) || l == ct - 1) j = k;
        else j = l;

        // write the sorted order back into use[]; element j becomes the node
        for(i = 0; i < ct; i++ ) {
            use[i] = int(toSort[i][1]);
        }
        daughter = use[j];

        if(parent >= 0)_tree[parent][dir] = daughter;
        _tree[daughter][DIMENSION] = idim;
        _tree[daughter][PARENT] = parent;

        // recurse into the upper half (strictly after j) as the GEQ branch
        if(j < ct - 1){
            _organize(use[ndarray::view(j + 1,use.getSize < 0 > ())], ct - j - 1, daughter, GEQ);
        }
        else _tree[daughter][GEQ] = -1;

        // recurse into the lower half (before j) as the LT branch
        if(j > 0){
            _organize(use, j, daughter, LT);
        }
        else _tree[daughter][LT] = -1;

    }//if(ct > 1)
    else{
        // single point left: it becomes a leaf splitting on the dimension
        // after its parent's
        daughter = use[0];

        if(parent >= 0) _tree[parent][dir] = daughter;

        // NOTE(review): if ct == 1 on the very first call (a one-point
        // tree), parent is -1 here and _tree[parent] indexes out of
        // bounds -- confirm callers never Initialize with a single point
        idim = _tree[parent][DIMENSION] + 1;

        if(idim >= _dimensions)idim = 0;

        _tree[daughter][DIMENSION] = idim;
        _tree[daughter][LT] = - 1;
        _tree[daughter][GEQ] = - 1;
        _tree[daughter][PARENT] = parent;

    }

    // the top-level call records its daughter as the root of the tree
    if(parent == - 1){
        _masterParent = daughter;
    }

}
489 
490 template < typename T >
491 int KdTree < T > ::_findNode(ndarray::Array < const T,1,1 > const &v) const
492 {
493  int consider,next,dim;
494 
495  dim = _tree[_masterParent][DIMENSION];
496 
497  if(v[dim] < _data[_masterParent][dim]) consider = _tree[_masterParent][LT];
498  else consider = _tree[_masterParent][GEQ];
499 
500  next = consider;
501 
502  while(next >= 0){
503 
504  consider = next;
505 
506  dim = _tree[consider][DIMENSION];
507  if(v[dim] < _data[consider][dim]) next = _tree[consider][LT];
508  else next = _tree[consider][GEQ];
509 
510  }
511 
512  return consider;
513 }
514 
/**
 * Recursive worker for findNeighbors().
 *
 * Considers one node as a neighbor candidate, inserting it into the
 * sorted _neighborDistances/_neighborCandidates lists if it qualifies,
 * then continues the traversal away from the node it arrived from,
 * pruning branches whose split plane is farther than the current worst
 * accepted neighbor.
 *
 * @param v        the query point
 * @param consider the node being examined
 * @param from     the node the traversal arrived from (parent or child)
 */
template < typename T >
void KdTree < T > ::_lookForNeighbors(ndarray::Array < const T,1,1 > const &v,
                                      int consider,
                                      int from) const
{

    int i,j,going;
    double dd;

    dd = _distance(v, _data[consider]);

    // accept this node if the candidate list is not yet full, or if it
    // beats the current worst candidate
    if(_neighborsFound < _neighborsWanted || dd < _neighborDistances[_neighborsWanted -1]) {

        // find the insertion slot j in the distance-sorted list
        for(j = 0; j < _neighborsFound && _neighborDistances[j] < dd; j++ );

        // shift worse candidates down to make room
        for(i = _neighborsWanted - 1; i > j; i-- ){
            _neighborDistances[i] = _neighborDistances[i - 1];
            _neighborCandidates[i] = _neighborCandidates[i - 1];
        }

        _neighborDistances[j] = dd;
        _neighborCandidates[j] = consider;

        if(_neighborsFound < _neighborsWanted) _neighborsFound++;
    }

    if(_tree[consider][PARENT] == from){
        //you came here from the parent

        // descending: visit each child unless the split plane already
        // rules out any closer point on that side
        i = _tree[consider][DIMENSION];
        dd = v[i] - _data[consider][i];
        if((dd <= _neighborDistances[_neighborsFound - 1] || _neighborsFound < _neighborsWanted)
           && _tree[consider][LT] >= 0){

            _lookForNeighbors(v, _tree[consider][LT], consider);
        }

        dd = _data[consider][i] - v[i];
        if((dd <= _neighborDistances[_neighborsFound - 1] || _neighborsFound < _neighborsWanted)
           && _tree[consider][GEQ] >= 0){

            _lookForNeighbors(v, _tree[consider][GEQ], consider);
        }
    }
    else{
        //you came here from one of the branches

        //descend the other branch
        if(_tree[consider][LT] == from){
            going = GEQ;
        }
        else{
            going = LT;
        }

        j = _tree[consider][going];

        if(j >= 0){
            i = _tree[consider][DIMENSION];

            // NOTE(review): the literal 1 here presumably equals GEQ (so
            // the signed plane distance matches the branch direction) --
            // confirm against the enum in the header
            if(going == 1) dd = v[i] - _data[consider][i];
            else dd = _data[consider][i] - v[i];

            if(dd <= _neighborDistances[_neighborsFound - 1] || _neighborsFound < _neighborsWanted) {
                _lookForNeighbors(v, j, consider);
            }
        }

        //ascend to the parent
        if(_tree[consider][PARENT] >= 0) {
            _lookForNeighbors(v, _tree[consider][PARENT], consider);
        }
    }
}
589 
/**
 * Verify that a node's data is consistent with every split value on the
 * path from one of its ancestors up to the root.
 *
 * @param target the ancestor node being examined now
 * @param dir    which side of target the walk came from (LT or GEQ)
 * @param root   the node whose data is being validated
 *
 * @return _masterParent if every constraint on the path holds,
 *         0 if any constraint is violated
 */
template < typename T >
int KdTree < T > ::_walkUpTree(int target,
                               int dir,
                               int root) const
{
    //target is the node that you are examining now
    //dir is where you came from
    //root is the ultimate point from which you started

    int i,output;

    output = 1;

    // a node reached via the LT branch must be strictly below target's
    // split value; via GEQ it must be greater than or equal
    if(dir == LT){
        if(_data[root][_tree[target][DIMENSION]] >= _data[target][_tree[target][DIMENSION]]){
            return 0;
        }
    }
    else{
        if(_data[root][_tree[target][DIMENSION]] < _data[target][_tree[target][DIMENSION]]) {
            return 0;
        }
    }

    // recurse toward the root, multiplying results so that any failure
    // (a returned 0) propagates down to the original caller
    if(_tree[target][PARENT] >= 0){
        if(_tree[_tree[target][PARENT]][LT] == target)i = LT;
        else i = GEQ;
        output = output*_walkUpTree(_tree[target][PARENT],i,root);

    }
    else{
        output = output*target;
        //so that it will return _masterParent
        //make sure everything is connected to _masterParent
    }
    return output;

}
628 
/**
 * Remove one point from the tree, re-linking or rebuilding the affected
 * subtrees and compacting the storage arrays.
 *
 * @param target index of the point to remove
 *
 * @throw RuntimeError if target is out of range, if only one point
 *        remains in the tree, or if the tree fails _testTree() afterward
 */
template < typename T >
void KdTree < T > ::removePoint(int target)
{

    if(target < 0 || target >= _npts){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You are trying to remove a point that doesn't exist from KdTree\n");
    }

    if(_npts==1){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "There is only one point left in this KdTree. You cannot call removePoint\n");
    }

    int nl,nr,i,j,k,side;
    int root;

    // count how many nodes hang below target on each side
    nl = 0;
    nr = 0;
    if(_tree[target][LT] >= 0){
        nl++ ;
        _count(_tree[target][LT], &nl);
    }

    if(_tree[target][GEQ] >= 0){
        nr++;
        _count(_tree[target][GEQ], &nr);
    }

    if(nl == 0 && nr == 0){

        // target is a leaf: just unhook it from its parent
        k = _tree[target][PARENT];

        if(_tree[k][LT] == target) _tree[k][LT] = - 1;
        else if(_tree[k][GEQ] == target) _tree[k][GEQ] = - 1;

    }//if target is terminal
    else if((nl == 0 && nr > 0) || (nr == 0 && nl > 0)){

        // target has one populated side: splice that subtree into
        // target's place (or promote it to master parent)
        if(nl == 0) side = GEQ;
        else side = LT;

        k = _tree[target][PARENT];
        if(k >= 0){

            if(_tree[k][LT] == target){
                _tree[k][LT] = _tree[target][side];
                _tree[_tree[k][LT]][PARENT] = k;

            }
            else{
                _tree[k][GEQ] = _tree[target][side];
                _tree[_tree[k][GEQ]][PARENT] = k;
            }
        }
        else{
            _masterParent = _tree[target][side];
            _tree[_tree[target][side]][PARENT] = - 1;
        }
    }//if only one side is populated
    else{

        // both sides populated: promote the larger subtree into target's
        // place, then re-insert every node of the smaller subtree
        if(nl > nr)side = LT;
        else side = GEQ;

        k = _tree[target][PARENT];
        if(k < 0){
            _masterParent = _tree[target][side];
            _tree[_masterParent][PARENT] = - 1;
        }
        else{
            if(_tree[k][LT] == target){
                _tree[k][LT] = _tree[target][side];
                _tree[_tree[k][LT]][PARENT] = k;
            }
            else{
                _tree[k][GEQ] = _tree[target][side];
                _tree[_tree[k][GEQ]][PARENT] = k;
            }
        }

        // NOTE(review): 3 - side selects the opposite branch; this
        // assumes LT + GEQ == 3 -- confirm the enum values in the header
        root = _tree[target][3 - side];

        _descend(root);
    }//if both sides are populated

    // compact the arrays: shift every row after target down one slot and
    // decrement every stored index that pointed past target
    if(target < _npts - 1){
        for(i = target + 1;i < _npts;i++ ){
            for(j = 0; j < 4; j++ ) _tree[i - 1][j] = _tree[i][j];

            for(j = 0; j < _dimensions; j++ ) _data[i - 1][j] = _data[i][j];
        }

        // fields 1..3 (LT, GEQ, PARENT) hold node indices; field 0 is a
        // dimension index and must not be touched
        for(i = 0; i < _npts; i++ ){
            for(j = 1; j < 4; j++ ){
                if(_tree[i][j] > target) _tree[i][j]--;
            }
        }

        if(_masterParent > target)_masterParent-- ;
    }
    _npts--;

    // validate the rebuilt tree before returning
    i = _testTree();
    if (i == 0) {
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Subtracting from KdTree failed\n");
    }


}
740 
741 template < typename T >
742 void KdTree < T > ::_count(int where, int *ct) const
743 {
744  //a way to count the number of vital elements on a given branch
745 
746  if(_tree[where][LT] >= 0) {
747  ct[0]++;
748  _count(_tree[where][LT], ct);
749  }
750  if(_tree[where][GEQ] >= 0) {
751  ct[0]++;
752  _count(_tree[where][GEQ], ct);
753  }
754 }
755 
/**
 * Re-insert an orphaned node into the tree.
 *
 * Descends from the master parent following the usual LT/GEQ rules until
 * an empty child slot is found, then attaches target there as a leaf.
 *
 * @param target index of the node to re-insert
 */
template < typename T >
void KdTree < T > ::_reassign(int target)
{

    int where,dir,k;

    // descend from the root to the first empty slot matching target's data
    where = _masterParent;
    if(_data[target][_tree[where][DIMENSION]] < _data[where][_tree[where][DIMENSION]]) dir = LT;
    else dir = GEQ;

    k = _tree[where][dir];
    while(k >= 0) {
        where = k;
        if(_data[target][_tree[where][DIMENSION]] < _data[where][_tree[where][DIMENSION]]) dir = LT;
        else dir = GEQ;
        k = _tree[where][dir];
    }

    // attach target as a childless leaf splitting on the next dimension
    // after its new parent's (cyclically)
    _tree[where][dir] = target;
    _tree[target][PARENT] = where;
    _tree[target][LT] = - 1;
    _tree[target][GEQ] = - 1;
    _tree[target][DIMENSION] = _tree[where][DIMENSION] + 1;

    if(_tree[target][DIMENSION] == _dimensions) _tree[target][DIMENSION] = 0;
}
782 
/**
 * Re-insert an entire subtree into the tree.
 *
 * Post-order walk: children are reassigned before their root so that no
 * node is re-inserted while still linked below another orphan.
 *
 * @param root index of the subtree's root node
 */
template < typename T >
void KdTree < T > ::_descend(int root)
{

    if(_tree[root][LT] >= 0) _descend(_tree[root][LT]);
    if(_tree[root][GEQ] >= 0) _descend(_tree[root][GEQ]);

    _reassign(root);
}
792 
793 template < typename T >
794 double KdTree < T > ::_distance(ndarray::Array < const T,1,1 > const &p1,
795  ndarray::Array < const T,1,1 > const &p2) const
796 {
797 
798  int i,dd;
799  double ans;
800  ans = 0.0;
801  dd = p1.template getSize< 0 >();
802 
803  for(i = 0;i < dd;i++ ) ans += (p1[i] - p2[i])*(p1[i] - p2[i]);
804 
805  return ::sqrt(ans);
806 
807 }
808 
809 
/**
 * Build a GaussianProcess interpolating a single function, with no
 * normalization of the input space.
 *
 * @param dataIn  [npts][dim] array of data points
 * @param ff      [npts] function values at those points
 * @param covarIn the covariogram to use
 *
 * @throw RuntimeError if ff does not have one value per data point
 */
template < typename T >
GaussianProcess < T > ::GaussianProcess(ndarray::Array < T,2,2 > const &dataIn,
                                        ndarray::Array < T,1,1 > const &ff,
                                        std::shared_ptr < Covariogram < T > > const &covarIn)

{
    int i;

    _covariogram = covarIn;

    _npts = dataIn.template getSize < 0 > ();
    _dimensions = dataIn.template getSize < 1 > ();

    // _room is the allocated capacity, grown by _roomStep as points are added
    _room = _npts;
    _roomStep = 5000;

    // single-function mode: _function has one column
    _nFunctions = 1;
    _function = allocate(ndarray::makeVector(_npts,1));

    if(ff.getNumElements() != _npts){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You did not pass in the same number of data points as function values\n");
    }

    for(i = 0; i < _npts; i++ ) _function[i][0] = ff[i];

    // default kriging and regularization parameters
    _krigingParameter = T(1.0);
    _lambda = T(1.0e-5);

    // no min/max supplied, so no normalization of the parameter space
    _useMaxMin = 0;


    _kdTree.Initialize(dataIn);

    // adopt the tree's point count in case it differs after construction
    _npts = _kdTree.getNPoints();

}
847 
848 template < typename T >
849 GaussianProcess < T > ::GaussianProcess(ndarray::Array < T,2,2 > const &dataIn,
850  ndarray::Array < T,1,1 > const &mn,
851  ndarray::Array < T,1,1 > const &mx,
852  ndarray::Array < T,1,1 > const &ff,
853  std::shared_ptr < Covariogram < T > > const &covarIn
854  )
855 {
856 
857  int i,j;
858  ndarray::Array < T,2,2 > normalizedData;
859 
860  _covariogram = covarIn;
861 
862  _npts = dataIn.template getSize < 0 > ();
863  _dimensions = dataIn.template getSize < 1 > ();
864  _room = _npts;
865  _roomStep = 5000;
866 
867  if(ff.getNumElements() != _npts){
868  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
869  "You did not pass in the same number of data points as function values\n");
870  }
871 
872  if(mn.getNumElements() != _dimensions || mx.getNumElements() != _dimensions){
873  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
874  "Your min/max values have different dimensionality than your data points\n");
875  }
876 
877  _krigingParameter = T(1.0);
878 
879  _lambda = T(1.0e-5);
880  _krigingParameter = T(1.0);
881 
882  _max = allocate(ndarray::makeVector(_dimensions));
883  _min = allocate(ndarray::makeVector(_dimensions));
884  _max.deep() = mx;
885  _min.deep() = mn;
886  _useMaxMin = 1;
887  normalizedData = allocate(ndarray::makeVector(_npts, _dimensions));
888  for(i = 0; i < _npts; i++ ) {
889  for(j = 0; j < _dimensions; j++ ) {
890  normalizedData[i][j] = (dataIn[i][j] - _min[j])/(_max[j] - _min[j]);
891  //note the normalization by _max - _min in each dimension
892  }
893  }
894 
895  _kdTree.Initialize(normalizedData);
896 
897  _npts = _kdTree.getNPoints();
898  _nFunctions = 1;
899  _function = allocate(ndarray::makeVector(_npts, 1));
900  for(i = 0; i < _npts; i++ )_function[i][0] = ff[i];
901 }
902 
/**
 * Build a GaussianProcess interpolating several functions at once, with
 * no normalization of the input space.
 *
 * @param dataIn  [npts][dim] array of data points
 * @param ff      [npts][nFunctions] function values at those points
 * @param covarIn the covariogram to use
 *
 * @throw RuntimeError if ff does not have one row per data point
 */
template < typename T >
GaussianProcess < T > ::GaussianProcess(ndarray::Array < T,2,2 > const &dataIn,
                                        ndarray::Array < T,2,2 > const &ff,
                                        std::shared_ptr <Covariogram < T > > const &covarIn)
{

    _covariogram = covarIn;

    _npts = dataIn.template getSize < 0 > ();
    _dimensions = dataIn.template getSize < 1 > ();

    // _room is the allocated capacity, grown by _roomStep as points are added
    _room = _npts;
    _roomStep = 5000;

    if(ff.template getSize < 0 > () != _npts){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You did not pass in the same number of data points as function values\n");
    }

    // one column of _function per interpolated function
    _nFunctions = ff.template getSize < 1 > ();
    _function = allocate(ndarray::makeVector(_npts, _nFunctions));
    _function.deep() = ff;

    // default kriging and regularization parameters
    _krigingParameter = T(1.0);

    _lambda = T(1.0e-5);

    // no min/max supplied, so no normalization of the parameter space
    _useMaxMin = 0;

    _kdTree.Initialize(dataIn);

    // adopt the tree's point count in case it differs after construction
    _npts = _kdTree.getNPoints();
}
936 
937 template < typename T >
938 GaussianProcess < T > ::GaussianProcess(ndarray::Array < T,2,2 > const &dataIn,
939  ndarray::Array < T,1,1 > const &mn,
940  ndarray::Array < T,1,1 > const &mx,
941  ndarray::Array < T,2,2 > const &ff,
942  std::shared_ptr <Covariogram < T > > const &covarIn
943  )
944 {
945 
946  int i,j;
947  ndarray::Array < T,2,2 > normalizedData;
948 
949  _covariogram = covarIn;
950 
951  _npts = dataIn.template getSize < 0 > ();
952  _dimensions = dataIn.template getSize < 1 > ();
953 
954  _room = _npts;
955  _roomStep = 5000;
956 
957  if(ff.template getSize < 0 > () != _npts){
958  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
959  "You did not pass in the same number of data points as function values\n");
960  }
961 
962  if(mn.getNumElements() != _dimensions || mx.getNumElements() != _dimensions){
963  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
964  "Your min/max values have different dimensionality than your data points\n");
965  }
966 
967  _krigingParameter = T(1.0);
968 
969  _lambda = T(1.0e-5);
970  _krigingParameter = T(1.0);
971 
972  _max = allocate(ndarray::makeVector(_dimensions));
973  _min = allocate(ndarray::makeVector(_dimensions));
974  _max.deep() = mx;
975  _min.deep() = mn;
976  _useMaxMin = 1;
977  normalizedData = allocate(ndarray::makeVector(_npts,_dimensions));
978  for(i = 0; i < _npts; i++ ) {
979  for(j = 0; j < _dimensions; j++ ) {
980  normalizedData[i][j] = (dataIn[i][j] - _min[j])/(_max[j] - _min[j]);
981  //note the normalization by _max - _min in each dimension
982  }
983  }
984 
985  _kdTree.Initialize(normalizedData);
986  _npts = _kdTree.getNPoints();
987  _nFunctions = ff.template getSize < 1 > ();
988  _function = allocate(ndarray::makeVector(_npts, _nFunctions));
989  _function.deep() = ff;
990 }
991 
992 
/**
 * Return the number of data points stored in the GaussianProcess.
 */
template < typename T >
int GaussianProcess < T > ::getNPoints() const{
    return _npts;
}
997 
998 
/**
 * Return the dimensionality of the data points.
 */
template < typename T >
int GaussianProcess < T > ::getDim() const{
    return _dimensions;
}
1003 
1004 template < typename T >
1005 void GaussianProcess < T > ::getData(ndarray::Array <T, 2, 2> pts, ndarray::Array<T, 1, 1> fn,
1006  ndarray::Array <int, 1, 1 > indices) const{
1007 
1008  if(_nFunctions != 1){
1009  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1010  "Your function value array does not have enough room for all of the functions "
1011  "in your GaussianProcess.\n");
1012  }
1013 
1014  if(pts.template getSize < 1 > () != _dimensions){
1015  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1016  "Your pts array is constructed for points of the wrong dimensionality.\n");
1017  }
1018 
1019  if(pts.template getSize < 0 > () != indices.getNumElements()){
1020  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1021  "You did not put enough room in your pts array to fit all the points "
1022  "you asked for in your indices array.\n");
1023  }
1024 
1025  if(fn.template getSize < 0 > () != indices.getNumElements()){
1026  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1027  "You did not provide enough room in your function value array "
1028  "for all of the points you requested in your indices array.\n");
1029  }
1030 
1031  int i,j;
1032  for(i = 0; i < indices.template getSize < 0 > (); i++){
1033  pts[i] = _kdTree.getData(indices[i]); // do this first in case one of the indices is invalid.
1034  // _kdTree.getData() will raise an exception in that case
1035  fn[i] = _function[indices[i]][0];
1036  if(_useMaxMin == 1){
1037  for(j = 0; j < _dimensions; j ++){
1038  pts[i][j] *= (_max[j] - _min[j]);
1039  pts[i][j] += _min[j];
1040  }
1041  }
1042  }
1043 
1044 }
1045 
1046 
/**
 * Copy a selection of data points and all of their function values out
 * of the GaussianProcess.
 *
 * Points stored in normalized coordinates (min/max constructor) are
 * denormalized back to the caller's units before being returned.
 *
 * @param pts     [nPts][dim] output array for the requested data points
 * @param fn      [nPts][nFunctions] output array for the function values
 * @param indices [nPts] indices of the points to fetch
 *
 * @throw RuntimeError if pts/fn are sized inconsistently with indices or
 *        with this GaussianProcess, or if any index is invalid
 */
template < typename T >
void GaussianProcess < T > ::getData(ndarray::Array <T, 2, 2> pts, ndarray::Array<T, 2, 2> fn,
                                     ndarray::Array <int, 1, 1 > indices) const{

    if(fn.template getSize < 1 > () != _nFunctions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your function value array does not have enough room for all of the functions "
                          "in your GaussianProcess.\n");
    }

    if(pts.template getSize < 1 > () != _dimensions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your pts array is constructed for points of the wrong dimensionality.\n");
    }

    if(pts.template getSize < 0 > () != indices.getNumElements()){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You did not put enough room in your pts array to fit all the points "
                          "you asked for in your indices array.\n");
    }

    if(fn.template getSize < 0 > () != indices.getNumElements()){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You did not provide enough room in your function value array "
                          "for all of the points you requested in your indices array.\n");
    }

    int i,j;
    for(i = 0; i < indices.template getSize < 0 > (); i++){
        pts[i] = _kdTree.getData(indices[i]); // do this first in case one of the indices is invalid.
                                              // _kdTree.getData() will raise an exception in that case
        for(j = 0; j < _nFunctions; j ++){
            fn[i][j] = _function[indices[i]][j];
        }
        if(_useMaxMin == 1){
            // undo the [0,1] normalization applied at construction
            for(j = 0; j < _dimensions; j ++){
                pts[i][j] *= (_max[j] - _min[j]);
                pts[i][j] += _min[j];
            }
        }
    }

}
1090 
1091 
template < typename T >
T GaussianProcess < T > ::interpolate(ndarray::Array < T,1,1 > variance,
                                      ndarray::Array < T,1,1 > const &vin,
                                      int numberOfNeighbors) const
{
    // Kriging (Gaussian-process) interpolation of the single stored function
    // at the query point vin, using the numberOfNeighbors nearest data points.
    //
    // @param variance  output; variance(0) receives the predictive variance,
    //                  scaled by _krigingParameter
    // @param vin       point at which to interpolate (raw, unnormalized coordinates)
    // @param numberOfNeighbors  number of nearest neighbors used in the fit
    // @return the interpolated function value
    // @throws lsst::pex::exceptions::RuntimeError on invalid arguments, or if
    //         more than one function is stored (use the mu/variance overload)

    if(_nFunctions > 1){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You need to call the version of GaussianProcess.interpolate() "
                          "that accepts mu and variance arrays (which it populates with results). "
                          "You are interpolating more than one function.");
    }

    if(numberOfNeighbors <= 0){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked for zero or negative number of neighbors\n");
    }

    if(numberOfNeighbors > _kdTree.getNPoints()){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked for more neighbors than you have data points\n");
    }

    if(variance.getNumElements() != _nFunctions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your variance array is the incorrect size for the number "
                          "of functions you are trying to interpolate\n");
    }

    if(vin.getNumElements() != _dimensions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You are interpolating at a point with different dimensionality than you data\n");
    }

    int i,j;
    T fbar,mu;

    ndarray::Array < T,1,1 > covarianceTestPoint;
    ndarray::Array < int,1,1 > neighbors;
    ndarray::Array < double,1,1 > neighborDistances,vv;

    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > covariance,bb,xx;
    Eigen::LDLT < Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > > ldlt;


    _timer.start();

    bb.resize(numberOfNeighbors, 1);
    xx.resize(numberOfNeighbors, 1);

    covariance.resize(numberOfNeighbors,numberOfNeighbors);

    covarianceTestPoint = allocate(ndarray::makeVector(numberOfNeighbors));

    neighbors = allocate(ndarray::makeVector(numberOfNeighbors));

    neighborDistances = allocate(ndarray::makeVector(numberOfNeighbors));

    vv = allocate(ndarray::makeVector(_dimensions));
    if(_useMaxMin == 1){
        //if you constructed this Gaussian process with minimum and maximum
        //values for the dimensions of your parameter space,
        //the point you are interpolating must be scaled to match the data so
        //that the selected nearest neighbors are appropriate

        for(i = 0; i < _dimensions; i++ ) vv[i] = (vin[i] - _min[i])/(_max[i] - _min[i]);
    }
    else{
        vv = vin;
    }


    _kdTree.findNeighbors(neighbors, neighborDistances, vv, numberOfNeighbors);

    _timer.addToSearch();

    // fbar is the mean of the neighbors' function values; the GP is fit to
    // the residuals about this mean.
    fbar = 0.0;
    for(i = 0; i < numberOfNeighbors; i++ )fbar += _function[neighbors[i]][0];
    fbar = fbar/double(numberOfNeighbors);

    // Build the symmetric neighbor-neighbor covariance matrix (nugget term
    // _lambda on the diagonal) and the query-to-neighbor covariance vector.
    for(i = 0; i < numberOfNeighbors; i++ ){
        covarianceTestPoint[i] = (*_covariogram)(vv, _kdTree.getData(neighbors[i]));

        covariance(i,i) = (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[i]))
                          + _lambda;

        for(j = i + 1; j < numberOfNeighbors; j++ ){
            covariance(i,j) = (*_covariogram)(_kdTree.getData(neighbors[i]),
                                              _kdTree.getData(neighbors[j]));
            covariance(j,i) = covariance(i, j);
        }
    }

    _timer.addToIteration();

    //use Eigen's ldlt solver in place of matrix inversion (for speed purposes)
    ldlt.compute(covariance);

    for(i = 0; i < numberOfNeighbors; i++) bb(i,0) = _function[neighbors[i]][0] - fbar;

    xx = ldlt.solve(bb);
    _timer.addToEigen();

    // Predictive mean: fbar + k^T C^{-1} (f - fbar)
    mu = fbar;

    for(i = 0; i < numberOfNeighbors; i++ ){
        mu += covarianceTestPoint[i]*xx(i, 0);
    }

    _timer.addToIteration();

    // Predictive variance: k(vv,vv) + lambda - k^T C^{-1} k, scaled by the
    // kriging parameter.
    variance(0) = (*_covariogram)(vv, vv) + _lambda;

    for(i = 0; i < numberOfNeighbors; i++ ) bb(i) = covarianceTestPoint[i];

    xx = ldlt.solve(bb);

    for(i = 0; i < numberOfNeighbors; i++ ){
        variance(0) -= covarianceTestPoint[i]*xx(i, 0);
    }

    variance(0) = variance(0)*_krigingParameter;

    _timer.addToVariance();
    _timer.addToTotal(1);

    return mu;
}
1220 
template < typename T >
void GaussianProcess < T > ::interpolate(ndarray::Array < T,1,1 > mu,
                                         ndarray::Array < T,1,1 > variance,
                                         ndarray::Array < T,1,1 > const &vin,
                                         int numberOfNeighbors) const
{
    // Kriging interpolation of all _nFunctions stored functions at the query
    // point vin.  The neighbor covariance matrix is factored once and the
    // LDLT solve is reused for each function.
    //
    // @param mu        output; mu[k] receives the interpolated value of function k
    // @param variance  output; every entry receives the (shared) predictive variance
    // @param vin       point at which to interpolate (raw, unnormalized coordinates)
    // @param numberOfNeighbors  number of nearest neighbors used in the fit
    // @throws lsst::pex::exceptions::RuntimeError on invalid arguments


    if(numberOfNeighbors <= 0){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked for zero or negative number of neighbors\n");
    }

    if(numberOfNeighbors > _kdTree.getNPoints()){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked for more neighbors than you have data points\n");
    }

    if(vin.getNumElements() != _dimensions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You are interpolating at a point with different dimensionality than you data\n");
    }

    if(mu.getNumElements() != _nFunctions || variance.getNumElements() != _nFunctions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your mu and/or var arrays are improperly sized for the number of functions "
                          "you are interpolating\n");
    }

    int i,j,ii;
    T fbar;

    ndarray::Array < T,1,1 > covarianceTestPoint;
    ndarray::Array < int,1,1 > neighbors;
    ndarray::Array < double,1,1 > neighborDistances,vv;

    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > covariance,bb,xx;
    Eigen::LDLT < Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > > ldlt;

    _timer.start();


    bb.resize(numberOfNeighbors,1);
    xx.resize(numberOfNeighbors,1);
    covariance.resize(numberOfNeighbors,numberOfNeighbors);
    covarianceTestPoint = allocate(ndarray::makeVector(numberOfNeighbors));
    neighbors = allocate(ndarray::makeVector(numberOfNeighbors));
    neighborDistances = allocate(ndarray::makeVector(numberOfNeighbors));

    vv = allocate(ndarray::makeVector(_dimensions));


    if(_useMaxMin == 1) {
        //if you constructed this Gaussian process with minimum and maximum
        //values for the dimensions of your parameter space,
        //the point you are interpolating must be scaled to match the data so
        //that the selected nearest neighbors are appropriate

        for(i = 0; i < _dimensions; i++ )vv[i] = (vin[i] - _min[i])/(_max[i] - _min[i]);
    }
    else {
        vv = vin;
    }


    _kdTree.findNeighbors(neighbors, neighborDistances, vv, numberOfNeighbors);

    _timer.addToSearch();

    // Build the symmetric neighbor-neighbor covariance matrix (nugget term
    // _lambda on the diagonal) and the query-to-neighbor covariance vector.
    for(i = 0; i < numberOfNeighbors; i++ ) {
        covarianceTestPoint[i] = (*_covariogram)(vv, _kdTree.getData(neighbors[i]));

        covariance(i,i) = (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[i]))
                          + _lambda;

        for(j = i + 1; j < numberOfNeighbors; j++ ){
            covariance(i,j) = (*_covariogram)(_kdTree.getData(neighbors[i]),
                                              _kdTree.getData(neighbors[j]));
            covariance(j,i) = covariance(i, j);
        }
    }

    _timer.addToIteration();

    //use Eigen's ldlt solver in place of matrix inversion (for speed purposes)
    ldlt.compute(covariance);

    // Solve for each function independently, reusing the single factorization.
    for(ii = 0; ii < _nFunctions; ii++ ) {

        // Mean of this function's values over the neighbors; the GP is fit to
        // residuals about it.
        fbar = 0.0;
        for(i = 0; i < numberOfNeighbors; i++ )fbar += _function[neighbors[i]][ii];
        fbar = fbar/double(numberOfNeighbors);

        for(i = 0; i < numberOfNeighbors; i++ )bb(i,0) = _function[neighbors[i]][ii] - fbar;
        xx = ldlt.solve(bb);

        mu[ii] = fbar;

        for(i = 0; i < numberOfNeighbors; i++ ) {
            mu[ii] += covarianceTestPoint[i]*xx(i, 0);
        }

    }//ii = 0 through _nFunctions

    _timer.addToEigen();

    // The predictive variance depends only on the point geometry and the
    // covariogram, not on the function values, so it is computed once ...
    variance[0] = (*_covariogram)(vv, vv) + _lambda;

    for(i = 0; i < numberOfNeighbors; i++ )bb(i) = covarianceTestPoint[i];

    xx = ldlt.solve(bb);

    for(i = 0; i < numberOfNeighbors; i++ ) {
        variance[0] -= covarianceTestPoint[i]*xx(i, 0);
    }
    variance[0] = variance[0]*_krigingParameter;


    // ... and copied to every function's slot.
    for(i = 1; i < _nFunctions; i++ )variance[i] = variance[0];

    _timer.addToVariance();
    _timer.addToTotal(1);
}
1344 
1345 
template < typename T >
T GaussianProcess < T > ::selfInterpolate(ndarray::Array < T,1,1 > variance,
                                          int dex,
                                          int numberOfNeighbors) const
{
    // Interpolate the single stored function at stored data point dex,
    // excluding that point itself from the fit (leave-one-out prediction).
    //
    // @param variance  output; variance(0) receives the predictive variance,
    //                  scaled by _krigingParameter
    // @param dex       index of the stored data point to interpolate at
    // @param numberOfNeighbors  number of nearest neighbors (not counting the
    //                  point itself) used in the fit
    // @return the interpolated function value
    // @throws lsst::pex::exceptions::RuntimeError on invalid arguments, or if
    //         more than one function is stored (use the mu/variance overload)

    if(_nFunctions > 1){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You need to call the version of GaussianProcess.selfInterpolate() "
                          "that accepts mu and variance arrays (which it populates with results). "
                          "You are interpolating more than one function.");
    }

    if(variance.getNumElements() != _nFunctions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your variance array is the incorrect size for the number "
                          "of functions you are trying to interpolate\n");
    }

    if(numberOfNeighbors <= 0){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked for zero or negative number of neighbors\n");
    }

    // +1 because the search below also returns the point itself.
    if(numberOfNeighbors + 1 > _kdTree.getNPoints()){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked for more neighbors than you have data points\n");
    }

    if(dex < 0 || dex >=_kdTree.getNPoints()){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked to self interpolate on a point that does not exist\n");
    }

    int i,j;
    T fbar,mu;

    ndarray::Array < T,1,1 > covarianceTestPoint;
    ndarray::Array < int,1,1 > selfNeighbors;
    ndarray::Array < double,1,1 > selfDistances;
    ndarray::Array < int,1,1 > neighbors;
    ndarray::Array < double,1,1 > neighborDistances;

    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > covariance,bb,xx;
    Eigen::LDLT < Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > > ldlt;

    _timer.start();

    bb.resize(numberOfNeighbors, 1);
    xx.resize(numberOfNeighbors, 1);
    covariance.resize(numberOfNeighbors,numberOfNeighbors);
    covarianceTestPoint = allocate(ndarray::makeVector(numberOfNeighbors));
    neighbors = allocate(ndarray::makeVector(numberOfNeighbors));
    neighborDistances = allocate(ndarray::makeVector(numberOfNeighbors));

    // One extra slot: the search will return the point itself first.
    selfNeighbors = allocate(ndarray::makeVector(numberOfNeighbors + 1));
    selfDistances = allocate(ndarray::makeVector(numberOfNeighbors + 1));

    //we don't use _useMaxMin because the data has already been normalized


    _kdTree.findNeighbors(selfNeighbors, selfDistances, _kdTree.getData(dex),
                          numberOfNeighbors + 1);

    _timer.addToSearch();

    if(selfNeighbors[0]!= dex) {
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Nearest neighbor search in selfInterpolate did not find self\n");
    }

    //SelfNeighbors[0] will be the point itself (it is its own nearest neighbor)
    //We discard that for the interpolation calculation
    //
    //If you do not wish to do this, simply call the usual ::interpolate() method instead of
    //::selfInterpolate()
    for(i = 0; i < numberOfNeighbors; i++ ){
        neighbors[i] = selfNeighbors[i + 1];
        neighborDistances[i] = selfDistances[i + 1];
    }

    // Mean of the neighbors' function values; the GP is fit to residuals.
    fbar = 0.0;
    for(i = 0; i < numberOfNeighbors; i++ ) fbar += _function[neighbors[i]][0];
    fbar = fbar/double(numberOfNeighbors);

    // Neighbor-neighbor covariance (nugget _lambda on the diagonal) and
    // covariance between point dex and each neighbor.
    for(i = 0; i < numberOfNeighbors; i++ ){
        covarianceTestPoint[i] = (*_covariogram)(_kdTree.getData(dex),
                                                 _kdTree.getData(neighbors[i]));

        covariance(i, i) = (*_covariogram)(_kdTree.getData(neighbors[i]),
                                           _kdTree.getData(neighbors[i])) + _lambda;

        for(j = i + 1; j < numberOfNeighbors; j++ ) {
            covariance(i, j) = (*_covariogram)(_kdTree.getData(neighbors[i]),
                                               _kdTree.getData(neighbors[j]));
            covariance(j, i) = covariance(i ,j);
        }
    }
    _timer.addToIteration();

    //use Eigen's ldlt solver in place of matrix inversion (for speed purposes)
    ldlt.compute(covariance);


    for(i = 0; i < numberOfNeighbors;i++ ) bb(i, 0) = _function[neighbors[i]][0] - fbar;
    xx = ldlt.solve(bb);
    _timer.addToEigen();

    // Predictive mean: fbar + k^T C^{-1} (f - fbar)
    mu = fbar;

    for(i = 0; i < numberOfNeighbors; i++ ) {
        mu += covarianceTestPoint[i]*xx(i, 0);
    }


    // Predictive variance: k(x,x) + lambda - k^T C^{-1} k, kriging-scaled.
    variance(0) = (*_covariogram)(_kdTree.getData(dex), _kdTree.getData(dex)) + _lambda;

    for(i = 0; i < numberOfNeighbors; i++ )bb(i) = covarianceTestPoint[i];

    xx = ldlt.solve(bb);

    for(i = 0; i < numberOfNeighbors; i++ ){
        variance(0) -= covarianceTestPoint[i]*xx(i,0);
    }

    variance(0) = variance(0)*_krigingParameter;
    _timer.addToVariance();
    _timer.addToTotal(1);

    return mu;
}
1477 
template < typename T >
void GaussianProcess < T > ::selfInterpolate(ndarray::Array < T,1,1 > mu,
                                             ndarray::Array < T,1,1 > variance,
                                             int dex,
                                             int numberOfNeighbors) const{
    // Interpolate all _nFunctions stored functions at stored data point dex,
    // excluding that point itself from the fit (leave-one-out prediction).
    // The covariance matrix is factored once and reused for every function.
    //
    // @param mu        output; mu[k] receives the interpolated value of function k
    // @param variance  output; every entry receives the (shared) predictive variance
    // @param dex       index of the stored data point to interpolate at
    // @param numberOfNeighbors  number of nearest neighbors (not counting the
    //                  point itself) used in the fit
    // @throws lsst::pex::exceptions::RuntimeError on invalid arguments

    if(mu.getNumElements() != _nFunctions || variance.getNumElements() != _nFunctions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your mu and/or var arrays are improperly sized for the number of functions "
                          "you are interpolating\n");
    }

    if(numberOfNeighbors <= 0){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked for zero or negative number of neighbors\n");
    }

    // +1 because the search below also returns the point itself.
    if(numberOfNeighbors + 1 > _kdTree.getNPoints()){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked for more neighbors than you have data points\n");
    }

    if(dex < 0 || dex >=_kdTree.getNPoints()){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Asked to self interpolate on a point that does not exist\n");
    }

    int i,j,ii;
    T fbar;

    ndarray::Array < T,1,1 > covarianceTestPoint;
    ndarray::Array < int,1,1 > selfNeighbors;
    ndarray::Array < double,1,1 > selfDistances;
    ndarray::Array < int,1,1 > neighbors;
    ndarray::Array < double,1,1 > neighborDistances;

    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > covariance,bb,xx;
    Eigen::LDLT < Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > > ldlt;

    _timer.start();

    bb.resize(numberOfNeighbors, 1);
    xx.resize(numberOfNeighbors, 1);
    covariance.resize(numberOfNeighbors,numberOfNeighbors);
    covarianceTestPoint = allocate(ndarray::makeVector(numberOfNeighbors));
    neighbors = allocate(ndarray::makeVector(numberOfNeighbors));
    neighborDistances = allocate(ndarray::makeVector(numberOfNeighbors));

    // One extra slot: the search will return the point itself first.
    selfNeighbors = allocate(ndarray::makeVector(numberOfNeighbors + 1));
    selfDistances = allocate(ndarray::makeVector(numberOfNeighbors + 1));

    //we don't use _useMaxMin because the data has already been normalized


    _kdTree.findNeighbors(selfNeighbors, selfDistances, _kdTree.getData(dex),
                          numberOfNeighbors + 1);

    _timer.addToSearch();

    if(selfNeighbors[0]!= dex) {

        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Nearest neighbor search in selfInterpolate did not find self\n");
    }

    //SelfNeighbors[0] will be the point itself (it is its own nearest neighbor)
    //We discard that for the interpolation calculation
    //
    //If you do not wish to do this, simply call the usual ::interpolate() method instead of
    //::selfInterpolate()
    for(i = 0; i < numberOfNeighbors; i++ ) {
        neighbors[i] = selfNeighbors[i + 1];
        neighborDistances[i] = selfDistances[i + 1];
    }


    // Neighbor-neighbor covariance (nugget _lambda on the diagonal) and
    // covariance between point dex and each neighbor.
    for(i = 0; i < numberOfNeighbors; i++ ) {
        covarianceTestPoint[i] = (*_covariogram)(_kdTree.getData(dex),_kdTree.getData(neighbors[i]));

        covariance(i, i) = (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[i]))
                           + _lambda;

        for(j = i + 1; j < numberOfNeighbors; j++ ) {
            covariance(i, j) = (*_covariogram)(_kdTree.getData(neighbors[i]),
                                               _kdTree.getData(neighbors[j]));
            covariance(j, i) = covariance(i, j);
        }
    }
    _timer.addToIteration();


    //use Eigen's ldlt solver in place of matrix inversion (for speed purposes)
    ldlt.compute(covariance);

    // Solve for each function independently, reusing the single factorization.
    for(ii = 0; ii < _nFunctions; ii++ ) {

        fbar = 0.0;
        for(i = 0; i < numberOfNeighbors; i++ )fbar += _function[neighbors[i]][ii];
        fbar = fbar/double(numberOfNeighbors);

        for(i = 0; i < numberOfNeighbors; i++ )bb(i,0) = _function[neighbors[i]][ii] - fbar;
        xx = ldlt.solve(bb);

        mu[ii] = fbar;

        for(i = 0; i < numberOfNeighbors; i++ ){
            mu[ii] += covarianceTestPoint[i]*xx(i,0);
        }
    }//ii = 0 through _nFunctions

    _timer.addToEigen();

    // Predictive variance depends only on geometry, so compute once ...
    variance[0] = (*_covariogram)(_kdTree.getData(dex), _kdTree.getData(dex)) + _lambda;

    for(i = 0; i < numberOfNeighbors; i++ )bb(i) = covarianceTestPoint[i];

    xx = ldlt.solve(bb);

    for(i = 0; i < numberOfNeighbors; i++ ) {
        variance[0] -= covarianceTestPoint[i]*xx(i,0);
    }

    variance[0] = variance[0]*_krigingParameter;

    // ... and copy it to every function's slot.
    for(i = 1; i < _nFunctions; i++ )variance[i] = variance[0];

    _timer.addToVariance();
    _timer.addToTotal(1);
}
1608 
1609 
template < typename T >
void GaussianProcess < T > ::batchInterpolate(ndarray::Array < T,1,1 > mu,
                                              ndarray:: Array < T,1,1 > variance,
                                              ndarray::Array < T,2,2 > const &queries) const
{
    // Interpolate the single stored function at many query points at once,
    // using ALL _npts stored data points (no nearest-neighbor selection).
    // The full data covariance matrix is factored once and reused for every
    // query.
    //
    // @param mu        output; mu(ii) receives the interpolated value at query ii
    // @param variance  output; variance[ii] receives the kriging-scaled variance
    // @param queries   (nQueries, _dimensions) array of query points in raw,
    //                  unnormalized coordinates
    // @throws lsst::pex::exceptions::RuntimeError on invalid arguments

    int i,j,ii,nQueries;

    nQueries = queries.template getSize < 0 > ();

    // This overload only handles a single stored function (mu/variance are
    // 1-D); despite the message text, that is the condition being guarded.
    if(_nFunctions != 1){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your mu and variance arrays do not have room for all of the functions "
                          "as you are trying to interpolate\n");
    }

    if(mu.getNumElements() != nQueries || variance.getNumElements() != nQueries){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your mu and variance arrays do not have room for all of the points "
                          "at which you are trying to interpolate your function.\n");
    }

    if(queries.template getSize < 1 > () != _dimensions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "The points you passed to batchInterpolate are of the wrong "
                          "dimensionality for your Gaussian Process\n");
    }

    T fbar;
    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > batchCovariance,batchbb,batchxx;
    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > queryCovariance;
    Eigen::LDLT < Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > > ldlt;

    ndarray::Array < T,1,1 > v1;

    _timer.start();


    v1 = allocate(ndarray::makeVector(_dimensions));
    batchbb.resize(_npts, 1);
    batchxx.resize(_npts, 1);
    batchCovariance.resize(_npts, _npts);
    queryCovariance.resize(_npts, 1);


    // Full symmetric data-data covariance matrix, nugget _lambda on the
    // diagonal.
    for(i = 0; i < _npts; i++ ) {

        batchCovariance(i, i) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(i)) + _lambda;
        for(j = i + 1; j < _npts; j++ ) {
            batchCovariance(i, j) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(j));
            batchCovariance(j, i) = batchCovariance(i, j);
        }
    }
    _timer.addToIteration();

    ldlt.compute(batchCovariance);

    // Mean of all stored function values; the GP is fit to residuals.
    fbar = 0.0;
    for(i = 0; i < _npts; i++ ) {
        fbar += _function[i][0];
    }
    fbar = fbar/T(_npts);

    for(i = 0; i < _npts; i++ ){
        batchbb(i, 0) = _function[i][0] - fbar;
    }
    batchxx = ldlt.solve(batchbb);
    _timer.addToEigen();


    // Predictive means: for each query, fbar + sum_i w_i k(query, x_i).
    for(ii = 0; ii < nQueries; ii++ ) {
        for(i = 0; i < _dimensions; i++ )v1[i] = queries[ii][i];
        if(_useMaxMin == 1) {
            // Normalize the query into the same space as the stored data.
            for(i = 0; i < _dimensions; i++ )v1[i] = (v1[i] - _min[i])/(_max[i] - _min[i]);
        }
        mu(ii) = fbar;
        for(i = 0; i < _npts; i++ ){
            mu(ii) += batchxx(i)*(*_covariogram)(v1, _kdTree.getData(i));
        }
    }
    _timer.addToIteration();

    // Predictive variances: each query needs its own solve against its
    // query-to-data covariance vector.
    for(ii = 0; ii < nQueries; ii++ ) {
        for(i = 0; i < _dimensions; i++ )v1[i] = queries[ii][i];
        if(_useMaxMin == 1) {
            for(i = 0; i < _dimensions; i++ )v1[i] = (v1[i] - _min[i])/(_max[i] - _min[i]);
        }

        for(i = 0; i < _npts; i++ ) {
            batchbb(i, 0) = (*_covariogram)(v1, _kdTree.getData(i));
            queryCovariance(i, 0) = batchbb(i, 0);
        }
        batchxx = ldlt.solve(batchbb);

        variance[ii] = (*_covariogram)(v1, v1) + _lambda;

        for(i = 0; i < _npts; i++ ){
            variance[ii] -= queryCovariance(i, 0)*batchxx(i);
        }

        variance[ii] = variance[ii]*_krigingParameter;

    }

    _timer.addToVariance();
    _timer.addToTotal(nQueries);
}
1717 
template < typename T >
void GaussianProcess < T > ::batchInterpolate(ndarray::Array < T,2,2 > mu,
                                              ndarray:: Array < T,2,2 > variance,
                                              ndarray::Array < T,2,2 > const &queries) const
{
    // Interpolate all _nFunctions stored functions at many query points at
    // once, using ALL _npts stored data points.  The full data covariance
    // matrix is factored once and reused for every function and every query.
    //
    // @param mu        output; mu[ii][k] receives function k's value at query ii
    // @param variance  output; variance[ii][k] receives the kriging-scaled
    //                  variance (identical for every k at a given query)
    // @param queries   (nQueries, _dimensions) array of query points in raw,
    //                  unnormalized coordinates
    // @throws lsst::pex::exceptions::RuntimeError on invalid arguments

    int i,j,ii,nQueries,ifn;

    nQueries = queries.template getSize < 0 > ();

    if(mu.template getSize < 0 > () != nQueries || variance.template getSize < 0 > () != nQueries){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your output arrays do not have room for all of the points at which "
                          "you are interpolating your functions.\n");
    }

    if(mu.template getSize < 1 > () != _nFunctions || variance.template getSize < 1 > () != _nFunctions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your output arrays do not have room for all of the functions you are "
                          "interpolating\n");
    }

    if(queries.template getSize < 1 > () != _dimensions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "The points at which you are interpolating your functions have the "
                          "wrong dimensionality.\n");
    }

    T fbar;
    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > batchCovariance,batchbb,batchxx;
    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > queryCovariance;
    Eigen::LDLT < Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > > ldlt;

    ndarray::Array < T,1,1 > v1;

    _timer.start();

    v1 = allocate(ndarray::makeVector(_dimensions));
    batchbb.resize(_npts, 1);
    batchxx.resize(_npts, 1);
    batchCovariance.resize(_npts, _npts);
    queryCovariance.resize(_npts, 1);

    // Full symmetric data-data covariance matrix, nugget _lambda on the
    // diagonal.
    for(i = 0; i < _npts; i++ ){
        batchCovariance(i, i) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(i)) + _lambda;
        for(j = i + 1; j < _npts; j++ ) {
            batchCovariance(i, j) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(j));
            batchCovariance(j, i) = batchCovariance(i, j);
        }
    }

    _timer.addToIteration();

    ldlt.compute(batchCovariance);

    _timer.addToEigen();

    // Predictive means: one solve per function (the factorization is shared).
    for(ifn = 0; ifn < _nFunctions; ifn++ ) {

        fbar = 0.0;
        for(i = 0; i < _npts; i++ ){
            fbar += _function[i][ifn];
        }
        fbar = fbar/T(_npts);
        _timer.addToIteration();

        for(i = 0; i < _npts; i++ ){
            batchbb(i,0) = _function[i][ifn] - fbar;
        }
        batchxx = ldlt.solve(batchbb);
        _timer.addToEigen();


        for(ii = 0; ii < nQueries; ii++ ){
            for(i = 0; i < _dimensions; i++ ) v1[i] = queries[ii][i];
            if(_useMaxMin == 1) {
                // Normalize the query into the same space as the stored data.
                for(i = 0; i < _dimensions; i++ ) v1[i] = (v1[i] - _min[i])/(_max[i] - _min[i]);
            }
            mu[ii][ifn] = fbar;
            for(i = 0; i < _npts; i++ ){
                mu[ii][ifn] += batchxx(i)*(*_covariogram)(v1, _kdTree.getData(i));
            }
        }

    }//ifn = 0 to _nFunctions

    _timer.addToIteration();
    // Predictive variances: geometry-only, so computed once per query and
    // copied to every function's slot.
    for(ii = 0; ii < nQueries; ii++ ){
        for(i = 0; i < _dimensions; i++ ) v1[i] = queries[ii][i];
        if(_useMaxMin == 1){
            for(i = 0; i < _dimensions; i++ ) v1[i] = (v1[i] - _min[i])/(_max[i] - _min[i]);
        }

        for(i = 0;i < _npts;i++ ) {
            batchbb(i,0) = (*_covariogram)(v1,_kdTree.getData(i));
            queryCovariance(i,0) = batchbb(i,0);
        }
        batchxx = ldlt.solve(batchbb);

        variance[ii][0] = (*_covariogram)(v1, v1) + _lambda;

        for(i = 0; i < _npts; i++ ) {
            variance[ii][0] -= queryCovariance(i, 0)*batchxx(i);
        }

        variance[ii][0] = variance[ii][0]*_krigingParameter;
        for(i = 1; i < _nFunctions; i++ ) variance[ii][i] = variance[ii][0];

    }
    _timer.addToVariance();
    _timer.addToTotal(nQueries);
}
1830 
template < typename T >
void GaussianProcess < T > ::batchInterpolate(ndarray::Array < T,1,1 > mu,
                                              ndarray::Array < T,2,2 > const &queries) const
{
    // Interpolate the single stored function at many query points, using ALL
    // _npts stored data points.  This overload computes only the predictive
    // mean; the per-query variance solves (the expensive part) are skipped.
    //
    // @param mu       output; mu(ii) receives the interpolated value at query ii
    // @param queries  (nQueries, _dimensions) array of query points in raw,
    //                 unnormalized coordinates
    // @throws lsst::pex::exceptions::RuntimeError on invalid arguments

    int i,j,ii,nQueries;

    nQueries = queries.template getSize < 0 > ();

    // This overload only handles a single stored function (mu is 1-D).
    if(_nFunctions != 1){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your output array does not have enough room for all of the functions "
                          "you are trying to interpolate.\n");
    }

    if(queries.template getSize <1> () != _dimensions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "The points at which you are trying to interpolate your function are "
                          "of the wrong dimensionality.\n");
    }

    if(mu.getNumElements() != nQueries){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your output array does not have enough room for all of the points "
                          "at which you are trying to interpolate your function.\n");
    }

    T fbar;
    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > batchCovariance,batchbb,batchxx;
    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > queryCovariance;
    Eigen::LDLT < Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > > ldlt;

    ndarray::Array < T,1,1 > v1;

    _timer.start();

    v1 = allocate(ndarray::makeVector(_dimensions));

    batchbb.resize(_npts, 1);
    batchxx.resize(_npts, 1);
    batchCovariance.resize(_npts, _npts);
    queryCovariance.resize(_npts, 1);


    // Full symmetric data-data covariance matrix, nugget _lambda on the
    // diagonal.
    for(i = 0; i < _npts; i++ ) {
        batchCovariance(i, i) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(i)) + _lambda;
        for(j = i + 1; j < _npts; j++ ) {
            batchCovariance(i, j) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(j));
            batchCovariance(j, i) = batchCovariance(i, j);
        }
    }
    _timer.addToIteration();

    ldlt.compute(batchCovariance);

    // Mean of all stored function values; the GP is fit to residuals.
    fbar = 0.0;
    for(i = 0; i < _npts; i++ ) {
        fbar += _function[i][0];
    }
    fbar = fbar/T(_npts);

    for(i = 0; i < _npts; i++ ) {
        batchbb(i, 0) = _function[i][0] - fbar;
    }
    batchxx = ldlt.solve(batchbb);
    _timer.addToEigen();

    // Predictive means: for each query, fbar + sum_i w_i k(query, x_i).
    for(ii = 0; ii < nQueries; ii++ ) {

        for(i = 0; i < _dimensions; i++ ) v1[i] = queries[ii][i];
        if(_useMaxMin == 1) {
            // Normalize the query into the same space as the stored data.
            for(i = 0; i < _dimensions; i++ )v1[i] = (v1[i] - _min[i])/(_max[i] - _min[i]);
        }

        mu(ii) = fbar;
        for(i = 0; i < _npts; i++ ) {
            mu(ii) += batchxx(i)*(*_covariogram)(v1, _kdTree.getData(i));
        }
    }
    _timer.addToIteration();
    _timer.addToTotal(nQueries);

}
1914 
template < typename T >
void GaussianProcess < T > ::batchInterpolate(ndarray::Array < T,2,2 > mu,
                                              ndarray::Array < T,2,2 > const &queries) const
{
    // Interpolate all _nFunctions stored functions at many query points,
    // using ALL _npts stored data points.  This overload computes only the
    // predictive means; the per-query variance solves are skipped.
    //
    // @param mu       output; mu[ii][k] receives function k's value at query ii
    // @param queries  (nQueries, _dimensions) array of query points in raw,
    //                 unnormalized coordinates
    // @throws lsst::pex::exceptions::RuntimeError on invalid arguments

    int i,j,ii,nQueries,ifn;

    nQueries = queries.template getSize < 0 > ();

    if(mu.template getSize < 0 > () != nQueries){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your output array does not have enough room for all of the points "
                          "at which you want to interpolate your functions.\n");
    }

    if(mu.template getSize < 1 > () != _nFunctions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "Your output array does not have enough room for all of the functions "
                          "you are trying to interpolate.\n");
    }

    if(queries.template getSize < 1 > () != _dimensions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "The points at which you are interpolating your functions do not "
                          "have the correct dimensionality.\n");
    }

    T fbar;
    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > batchCovariance,batchbb,batchxx;
    Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > queryCovariance;
    Eigen::LDLT < Eigen::Matrix < T,Eigen::Dynamic,Eigen::Dynamic > > ldlt;

    ndarray::Array < T,1,1 > v1;

    _timer.start();

    v1 = allocate(ndarray::makeVector(_dimensions));

    batchbb.resize(_npts, 1);
    batchxx.resize(_npts, 1);
    batchCovariance.resize(_npts, _npts);
    queryCovariance.resize(_npts, 1);


    // Full symmetric data-data covariance matrix, nugget _lambda on the
    // diagonal; factored once and reused for every function.
    for(i = 0; i < _npts; i++ ){
        batchCovariance(i, i) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(i)) + _lambda;
        for(j = i + 1; j < _npts; j++ ){
            batchCovariance(i, j) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(j));
            batchCovariance(j, i) = batchCovariance(i, j);
        }
    }

    _timer.addToIteration();

    ldlt.compute(batchCovariance);

    _timer.addToEigen();

    // One solve per function; each fit is to residuals about that function's
    // mean over all stored points.
    for(ifn = 0; ifn < _nFunctions; ifn++ ){

        fbar = 0.0;
        for(i = 0; i < _npts; i++ ){
            fbar += _function[i][ifn];
        }
        fbar = fbar/T(_npts);

        _timer.addToIteration();

        for(i = 0; i < _npts; i++ ){
            batchbb(i, 0) = _function[i][ifn] - fbar;
        }
        batchxx = ldlt.solve(batchbb);
        _timer.addToEigen();


        for(ii = 0; ii < nQueries; ii++ ){
            for(i = 0; i < _dimensions; i++ )v1[i] = queries[ii][i];
            if(_useMaxMin == 1){
                // Normalize the query into the same space as the stored data.
                for(i = 0;i < _dimensions;i++ )v1[i] = (v1[i] - _min[i])/(_max[i] - _min[i]);
            }

            mu[ii][ifn] = fbar;
            for(i = 0; i < _npts; i++ ){
                mu[ii][ifn] += batchxx(i)*(*_covariogram)(v1, _kdTree.getData(i));
            }
        }


    }//ifn = 0 through _nFunctions

    _timer.addToTotal(nQueries);

}
2008 
2009 
template < typename T >
void GaussianProcess < T > ::addPoint(ndarray::Array < T,1,1 > const &vin, T f)
{
    // Add a single data point with one associated function value.
    //
    // @param vin  the point's coordinates (raw, unnormalized)
    // @param f    the function value at that point
    // @throws lsst::pex::exceptions::RuntimeError if more than one function
    //         is stored (use the vector-of-values overload) or vin has the
    //         wrong dimensionality

    int i,j;

    if(_nFunctions!= 1){

        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You are calling the wrong addPoint; you need a vector of functions\n");

    }

    if(vin.getNumElements() != _dimensions){
        throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
                          "You are trying to add a point of the wrong dimensionality to "
                          "your GaussianProcess.\n");
    }

    // Work on a normalized copy so the caller's array is untouched.
    ndarray::Array < T,1,1 > v;
    v = allocate(ndarray::makeVector(_dimensions));

    for(i = 0; i < _dimensions; i++ ){
        v[i] = vin[i];
        if(_useMaxMin == 1){
            v[i] = (v[i] - _min[i])/(_max[i] - _min[i]);
        }
    }

    // Grow the function-value storage by _roomStep rows if it is full,
    // copying the existing values across.
    if(_npts == _room){
        ndarray::Array < T,2,2 > buff;
        buff = allocate(ndarray::makeVector(_npts, _nFunctions));
        buff.deep() = _function;

        _room += _roomStep;
        _function = allocate(ndarray::makeVector(_room, _nFunctions));
        for(i = 0; i < _npts; i++ ) {

            for(j = 0; j < _nFunctions; j++ ) {
                _function[i][j] = buff[i][j];
            }

        }
    }

    // Store the value in the next free row, then let the KD-tree own the
    // point count (the tree's addPoint updates it).
    _function[_npts][0] = f;

    _kdTree.addPoint(v);
    _npts = _kdTree.getNPoints();

}
2061 
2062 template < typename T >
2063 void GaussianProcess < T > ::addPoint(ndarray::Array < T,1,1 > const &vin,
2064  ndarray::Array < T,1,1 > const &f)
2065 {
2066 
2067  if(vin.getNumElements() != _dimensions){
2068  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
2069  "You are trying to add a point of the wrong dimensionality to "
2070  "your GaussianProcess.\n");
2071  }
2072 
2073  if(f.template getSize < 0 > () != _nFunctions){
2074  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
2075  "You are not adding the correct number of function values to "
2076  "your GaussianProcess.\n");
2077  }
2078 
2079  int i,j;
2080 
2081  ndarray::Array < T,1,1 > v;
2082  v = allocate(ndarray::makeVector(_dimensions));
2083 
2084  for(i = 0; i < _dimensions; i++ ) {
2085  v[i] = vin[i];
2086  if(_useMaxMin == 1) {
2087  v[i] = (v[i] - _min[i])/(_max[i] - _min[i]);
2088  }
2089  }
2090 
2091  if(_npts == _room) {
2092  ndarray::Array < T,2,2 > buff;
2093  buff = allocate(ndarray::makeVector(_npts, _nFunctions));
2094  buff.deep() = _function;
2095 
2096  _room += _roomStep;
2097  _function = allocate(ndarray::makeVector(_room, _nFunctions));
2098  for(i = 0; i < _npts; i++ ) {
2099  for(j = 0; j < _nFunctions; j++ ) {
2100  _function[i][j] = buff[i][j];
2101  }
2102  }
2103 
2104  }
2105  for(i = 0; i < _nFunctions; i++ )_function[_npts][i] = f[i];
2106 
2107  _kdTree.addPoint(v);
2108  _npts = _kdTree.getNPoints();
2109 
2110 
2111 }
2112 
2113 template < typename T >
2114 void GaussianProcess < T > ::removePoint(int dex)
2115 {
2116 
2117  int i,j;
2118 
2119  _kdTree.removePoint(dex);
2120 
2121  for(i = dex; i < _npts; i++ ) {
2122  for(j = 0; j < _nFunctions; j++ ) {
2123  _function[i][j] = _function[i + 1][j];
2124  }
2125  }
2126  _npts = _kdTree.getNPoints();
2127 }
2128 
2129 template < typename T >
2130 void GaussianProcess < T > ::setKrigingParameter(T kk)
2131 {
2132  _krigingParameter = kk;
2133 }
2134 
2135 template < typename T >
2136 void GaussianProcess < T > ::setCovariogram(std::shared_ptr < Covariogram < T > > const &covar){
2137  _covariogram = covar;
2138 }
2139 
2140 template < typename T >
2141 void GaussianProcess < T > ::setLambda(T lambda){
2142  _lambda = lambda;
2143 }
2144 
2145 
2146 template < typename T >
2148 {
2149  return _timer;
2150 }
2151 
2152 template < typename T >
2153 Covariogram < T > ::~Covariogram(){};
2154 
2155 template < typename T >
2156 T Covariogram < T > ::operator()(ndarray::Array < const T,1,1 > const &p1,
2157  ndarray::Array < const T,1,1 > const &p2) const
2158 {
2159  std::cout << "by the way, you are calling the wrong operator\n";
2160  exit(1);
2161  return T(1.0);
2162 }
2163 
2164 template < typename T >
2165 SquaredExpCovariogram < T > ::~SquaredExpCovariogram(){}
2166 
2167 template < typename T >
2169 {
2170  _ellSquared = 1.0;
2171 }
2172 
2173 template < typename T >
2174 void SquaredExpCovariogram < T > ::setEllSquared(double ellSquared)
2175 {
2176  _ellSquared = ellSquared;
2177 }
2178 
2179 template < typename T >
2180 T SquaredExpCovariogram < T > ::operator()(ndarray::Array < const T,1,1 > const &p1,
2181  ndarray::Array < const T,1,1 > const &p2) const
2182 {
2183  int i;
2184  T d;
2185  d = 0.0;
2186  for(i = 0; i < p1.template getSize < 0 > (); i++ ){
2187  d += (p1[i] - p2[i])*(p1[i] - p2[i]);
2188  }
2189 
2190  d = d/_ellSquared;
2191  return T(exp( - 0.5*d));
2192 }
2193 
2194 template < typename T >
2195 NeuralNetCovariogram < T > ::~NeuralNetCovariogram(){}
2196 
2197 template < typename T >
2199 
2200  _sigma0 = 1.0;
2201  _sigma1 = 1.0;
2202 }
2203 
2204 template < typename T >
2205 T NeuralNetCovariogram < T > ::operator()(ndarray::Array < const T,1,1 > const &p1,
2206  ndarray::Array < const T,1,1 > const &p2
2207  ) const
2208 {
2209  int i,dim;
2210  double num,denom1,denom2,arg;
2211 
2212  dim = p1.template getSize < 0 > ();
2213 
2214  num = 2.0*_sigma0;
2215  denom1 = 1.0 + 2.0*_sigma0;
2216  denom2 = 1.0 + 2.0*_sigma0;
2217 
2218  for(i = 0; i < dim; i++ ) {
2219  num += 2.0*p1[i]*p2[i]*_sigma1;
2220  denom1 += 2.0*p1[i]*p1[i]*_sigma1;
2221  denom2 += 2.0*p2[i]*p2[i]*_sigma1;
2222  }
2223 
2224  arg = num/::sqrt(denom1*denom2);
2225  return T(2.0*(::asin(arg))/3.141592654);
2226 
2227 }
2228 
2229 template < typename T >
2230 void NeuralNetCovariogram < T > ::setSigma0(double sigma0)
2231 {
2232  _sigma0 = sigma0;
2233 }
2234 
2235 template < typename T >
2236 void NeuralNetCovariogram < T > ::setSigma1(double sigma1)
2237 {
2238  _sigma1 = sigma1;
2239 }
2240 
2241 }}}
2242 
// Shorthand for the enclosing namespace, used only by the instantiation
// macro below.
#define gpn lsst::afw::math

// Explicit template instantiation: emit the complete implementation of every
// templated class in this file for the concrete element type T, so callers
// can link against this translation unit without seeing the definitions.
#define INSTANTIATEGP(T) \
  template class gpn::KdTree < T > ; \
  template class gpn::GaussianProcess < T > ; \
  template class gpn::Covariogram < T > ; \
  template class gpn::SquaredExpCovariogram < T > ;\
  template class gpn::NeuralNetCovariogram < T > ;

// Only the double-precision variants are instantiated here.
INSTANTIATEGP(double);
#define INSTANTIATEGP(T)
Stores values of a function sampled on an image and allows you to interpolate the function to unsampl...
This is a structure for keeping track of how long the interpolation methods spend on different parts ...
The parent class of covariogram functions for use in Gaussian Process interpolation.
The data for GaussianProcess is stored in a KD tree to facilitate nearest-neighbor searches...
a Covariogram that falls off as the negative exponent of the square of the distance between the point...
a Covariogram that recreates a neural network with one hidden layer and infinite units in that layer ...
#define LSST_EXCEPT(type,...)
Create an exception with a given type and message and optionally other arguments (dependent on the ty...
Definition: Exception.h:46
afw::table::Key< double > sigma1
void removePoint(int dex)
This will remove a point from the data set.
static DateTime now(void)
Return current time as a DateTime.
Definition: DateTime.cc:533