LSST Data Management Base Package
GaussianProcess.cc
1 // -*- LSST-C++ -*-
2 
3 /*
4  * LSST Data Management System
5  * Copyright 2008, 2009, 2010 LSST Corporation.
6  *
7  * This product includes software developed by the
8  * LSST Project (http://www.lsst.org/).
9  *
10  * This program is free software: you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation, either version 3 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the LSST License Statement and
21  * the GNU General Public License along with this program. If not,
22  * see <http://www.lsstcorp.org/LegalNotices/>.
23  */
24 
25 #include <iostream>
26 #include <cmath>
27 
28 #include "lsst/afw/math/GaussianProcess.h"
29 
30 using namespace std;
31 
32 namespace lsst {
33 namespace afw {
34 namespace math {
35 
36 GaussianProcessTimer::GaussianProcessTimer() {
37  _interpolationCount = 0;
38  _iterationTime = 0.0;
39  _eigenTime = 0.0;
40  _searchTime = 0.0;
41  _varianceTime = 0.0;
42  _totalTime = 0.0;
43 }
44 
45 void GaussianProcessTimer::reset() {
46  _interpolationCount = 0;
47  _iterationTime = 0.0;
48  _eigenTime = 0.0;
49  _searchTime = 0.0;
50  _varianceTime = 0.0;
51  _totalTime = 0.0;
52 }
53 
54 void GaussianProcessTimer::start() {
55  _before = double(lsst::daf::base::DateTime::now().get() * 24 * 60 * 60);
56  _beginning = _before;
57 }
58 
59 void GaussianProcessTimer::addToEigen() {
60  double after;
61  after = double(lsst::daf::base::DateTime::now().get() * 24 * 60 * 60);
62  _eigenTime += after - _before;
63  _before = after;
64 }
65 
66 void GaussianProcessTimer::addToVariance() {
67  double after;
68  after = double(lsst::daf::base::DateTime::now().get() * 24 * 60 * 60);
69  _varianceTime += after - _before;
70  _before = after;
71 }
72 
73 void GaussianProcessTimer::addToSearch() {
74  double after;
75  after = double(lsst::daf::base::DateTime::now().get() * 24 * 60 * 60);
76  _searchTime += after - _before;
77  _before = after;
78 }
79 
80 void GaussianProcessTimer::addToIteration() {
81  double after;
82  after = double(lsst::daf::base::DateTime::now().get() * 24 * 60 * 60);
83  _iterationTime += after - _before;
84  _before = after;
85 }
86 
87 void GaussianProcessTimer::addToTotal(int i) {
88  double after;
89  after = double(lsst::daf::base::DateTime::now().get() * 24 * 60 * 60);
90  _totalTime += after - _beginning;
91  _interpolationCount += i;
92 }
93 
94 void GaussianProcessTimer::display() {
95  std::cout << "\nSearch time " << _searchTime << "\n";
96  std::cout << "Eigen time " << _eigenTime << "\n";
97  std::cout << "Variance time " << _varianceTime << "\n";
98  std::cout << "Iteration time " << _iterationTime << "\n";
99  std::cout << "Total time " << _totalTime << "\n";
100  std::cout << "Number of interpolations " << _interpolationCount << "\n";
101 }
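
// Usage sketch: GaussianProcessTimer accumulates wall-clock time in named
// buckets between start() and the addTo*() calls, exactly as the
// interpolation routines below use it. A minimal illustration (the
// doSearch()/doSolve() stand-ins are hypothetical placeholders for real work):
//
//     lsst::afw::math::GaussianProcessTimer timer;
//     timer.reset();
//     timer.start();       // record the starting timestamp
//     doSearch();
//     timer.addToSearch(); // charge the interval since start() to "search"
//     doSolve();
//     timer.addToEigen();  // charge the next interval to "Eigen"
//     timer.addToTotal(1); // one interpolation completed
//     timer.display();     // print the per-bucket totals to stdout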
102 
103 template <typename T>
104 void KdTree<T>::Initialize(ndarray::Array<T, 2, 2> const &dt) {
105  int i;
106 
107  _npts = dt.template getSize<0>();
108  _dimensions = dt.template getSize<1>();
109 
110  // a buffer to use when first building the tree
111  _inn = allocate(ndarray::makeVector(_npts));
112 
113  _roomStep = 5000;
114  _room = _npts;
115 
116  _data = allocate(ndarray::makeVector(_room, _dimensions));
117 
118  _data.deep() = dt;
119 
120  _tree = allocate(ndarray::makeVector(_room, 4));
121 
122  for (i = 0; i < _npts; i++) {
123  _inn[i] = i;
124  }
125 
126  _organize(_inn, _npts, -1, -1);
127 
128  i = _testTree();
129  if (i == 0) {
130  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError, "Failed to properly initialize KdTree\n");
131  }
132 }
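
// Usage sketch: a KdTree is default-constructed and then filled via
// Initialize(), which is how GaussianProcess manages its _kdTree member
// below. Shapes are (points, dimensions); the values here are placeholders:
//
//     ndarray::Array<double, 2, 2> data = ndarray::allocate(ndarray::makeVector(100, 3));
//     data.deep() = 0.0;  // fill with real coordinates in practice
//     lsst::afw::math::KdTree<double> tree;
//     tree.Initialize(data);  // throws RuntimeError if _testTree() fails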
133 
134 template <typename T>
135 void KdTree<T>::findNeighbors(ndarray::Array<int, 1, 1> neighdex, ndarray::Array<double, 1, 1> dd,
136  ndarray::Array<const T, 1, 1> const &v, int n_nn) const {
137  if (n_nn > _npts) {
138  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
139  "Asked for more neighbors than kd tree contains\n");
140  }
141 
142  if (n_nn <= 0) {
143  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
144  "Asked for zero or a negative number of neighbors\n");
145  }
146 
147  if (neighdex.getNumElements() != static_cast<ndarray::Size>(n_nn)) {
148  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
149  "Size of neighdex does not equal n_nn in KdTree.findNeighbors\n");
150  }
151 
152  if (dd.getNumElements() != static_cast<ndarray::Size>(n_nn)) {
153  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
154  "Size of dd does not equal n_nn in KdTree.findNeighbors\n");
155  }
156 
157  int i, start;
158 
159  _neighborCandidates = allocate(ndarray::makeVector(n_nn));
160  _neighborDistances = allocate(ndarray::makeVector(n_nn));
161  _neighborsFound = 0;
162  _neighborsWanted = n_nn;
163 
164  for (i = 0; i < n_nn; i++) _neighborDistances[i] = -1.0;
165 
166  start = _findNode(v);
167 
168  _neighborDistances[0] = _distance(v, _data[start]);
169  _neighborCandidates[0] = start;
170  _neighborsFound = 1;
171 
172  for (i = 1; i < 4; i++) {
173  if (_tree[start][i] >= 0) {
174  _lookForNeighbors(v, _tree[start][i], start);
175  }
176  }
177 
178  for (i = 0; i < n_nn; i++) {
179  neighdex[i] = _neighborCandidates[i];
180  dd[i] = _neighborDistances[i];
181  }
182 }
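
// Usage sketch: querying the 5 nearest neighbors of a 3-dimensional point
// (continuing the tree built in the earlier sketch). The output arrays must
// be pre-allocated with exactly n_nn elements, as the size checks above
// enforce:
//
//     ndarray::Array<int, 1, 1> neighdex = ndarray::allocate(ndarray::makeVector(5));
//     ndarray::Array<double, 1, 1> dd = ndarray::allocate(ndarray::makeVector(5));
//     ndarray::Array<double, 1, 1> query = ndarray::allocate(ndarray::makeVector(3));
//     query.deep() = 0.5;
//     tree.findNeighbors(neighdex, dd, query, 5);
//     // neighdex[0] is the index of the closest point; dd[0] its distance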
183 
184 template <typename T>
185 T KdTree<T>::getData(int ipt, int idim) const {
186  if (ipt >= _npts || ipt < 0) {
187  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
188  "Asked for point that does not exist in KdTree\n");
189  }
190 
191  if (idim >= _dimensions || idim < 0) {
192  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
193  "KdTree does not have points of that many dimensions\n");
194  }
195 
196  return _data[ipt][idim];
197 }
198 
199 template <typename T>
200 ndarray::Array<T, 1, 1> KdTree<T>::getData(int ipt) const {
201  if (ipt >= _npts || ipt < 0) {
202  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
203  "Asked for point that does not exist in KdTree\n");
204  }
205 
206  return _data[ipt];
207 }
208 
209 template <typename T>
210 void KdTree<T>::addPoint(ndarray::Array<const T, 1, 1> const &v) {
211  if (v.getNumElements() != static_cast<ndarray::Size>(_dimensions)) {
212  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
213  "You are trying to add a point of the incorrect dimensionality to KdTree\n");
214  }
215 
216  int i, j, node, dim, dir;
217 
218  node = _findNode(v);
219  dim = _tree[node][DIMENSION] + 1;
220  if (dim == _dimensions) dim = 0;
221 
222  if (_npts == _room) {
223  ndarray::Array<T, 2, 2> dbuff = allocate(ndarray::makeVector(_npts, _dimensions));
224  ndarray::Array<int, 2, 2> tbuff = allocate(ndarray::makeVector(_npts, 4));
225 
226  dbuff.deep() = _data;
227  tbuff.deep() = _tree;
228 
229  _room += _roomStep;
230 
231  _tree = allocate(ndarray::makeVector(_room, 4));
232  _data = allocate(ndarray::makeVector(_room, _dimensions));
233 
234  for (i = 0; i < _npts; i++) {
235  for (j = 0; j < _dimensions; j++) _data[i][j] = dbuff[i][j];
236  for (j = 0; j < 4; j++) _tree[i][j] = tbuff[i][j];
237  }
238  }
239 
240  _tree[_npts][DIMENSION] = dim;
241  _tree[_npts][PARENT] = node;
242  i = _tree[node][DIMENSION];
243 
244  if (_data[node][i] > v[i]) {
245  if (_tree[node][LT] >= 0) {
246  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
247  "Trying to add to KdTree in a node that is already occupied\n");
248  }
249  _tree[node][LT] = _npts;
250  dir = LT;
251  } else {
252  if (_tree[node][GEQ] >= 0) {
253  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
254  "Trying to add to KdTree in a node that is already occupied\n");
255  }
256  _tree[node][GEQ] = _npts;
257  dir = GEQ;
258  }
259  _tree[_npts][LT] = -1;
260  _tree[_npts][GEQ] = -1;
261  for (i = 0; i < _dimensions; i++) {
262  _data[_npts][i] = v[i];
263  }
264 
265  _npts++;
266 
267  i = _walkUpTree(_tree[_npts - 1][PARENT], dir, _npts - 1);
268  if (i != _masterParent) {
269  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError, "Adding to KdTree failed\n");
270  }
271 }
272 
273 template <typename T>
274 int KdTree<T>::getNPoints() const {
275  return _npts;
276 }
277 
278 template <typename T>
279 void KdTree<T>::getTreeNode(ndarray::Array<int, 1, 1> const &v, int dex) const {
280  if (dex < 0 || dex >= _npts) {
281  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
282  "Asked for tree information on point that does not exist\n");
283  }
284 
285  if (v.getNumElements() != 4) {
286  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
287  "Need to pass a 4-element ndarray into KdTree.getTreeNode()\n");
288  }
289 
290  v[0] = _tree[dex][DIMENSION];
291  v[1] = _tree[dex][LT];
292  v[2] = _tree[dex][GEQ];
293  v[3] = _tree[dex][PARENT];
294 }
295 
296 template <typename T>
297 int KdTree<T>::_testTree() const {
298  if (_npts == 1 && _tree[0][PARENT] < 0 && _masterParent == 0) {
299  // there is only one point in the tree
300  return 1;
301  }
302 
303  int i, j, output;
304  std::vector<int> isparent;
305 
306  output = 1;
307 
308  for (i = 0; i < _npts; i++) isparent.push_back(0);
309 
310  j = 0;
311  for (i = 0; i < _npts; i++) {
312  if (_tree[i][PARENT] < 0) j++;
313  }
314  if (j != 1) {
315  std::cout << "_tree FAILURE " << j << " _masterParents\n";
316  return 0;
317  }
318 
319  for (i = 0; i < _npts; i++) {
320  if (_tree[i][PARENT] >= 0) {
321  isparent[_tree[i][PARENT]]++;
322  }
323  }
324 
325  for (i = 0; i < _npts; i++) {
326  if (isparent[i] > 2) {
327  std::cout << "_tree FAILURE " << i << " is parent to " << isparent[i] << "\n";
328  return 0;
329  }
330  }
331 
332  for (i = 0; i < _npts; i++) {
333  if (_tree[i][PARENT] >= 0) {
334  if (_tree[_tree[i][PARENT]][LT] == i)
335  j = LT;
336  else
337  j = GEQ;
338  output = _walkUpTree(_tree[i][PARENT], j, i);
339 
340  if (output != _masterParent) {
341  return 0;
342  }
343  }
344  }
345 
346  if (output != _masterParent)
347  return 0;
348  else
349  return 1;
350 }
351 
352 template <typename T>
353 void KdTree<T>::_organize(ndarray::Array<int, 1, 1> const &use, int ct, int parent, int dir) {
354  int i, j, k, l, idim, daughter;
355  T mean, var, varbest;
356 
357  std::vector<std::vector<T> > toSort;
358  std::vector<T> toSortElement;
359 
360  if (ct > 1) {
361  // below is code to choose the dimension on which the available points
362  // have the greatest variance. This will be the dimension on which
363  // the daughter node splits the data
364  idim = 0;
365  varbest = -1.0;
366  for (i = 0; i < _dimensions; i++) {
367  mean = 0.0;
368  var = 0.0;
369  for (j = 0; j < ct; j++) {
370  mean += _data[use[j]][i];
371  var += _data[use[j]][i] * _data[use[j]][i];
372  }
373  mean = mean / double(ct);
374  var = var / double(ct) - mean * mean;
375  if (i == 0 || var > varbest || (var == varbest && parent >= 0 && i > _tree[parent][DIMENSION])) {
376  idim = i;
377  varbest = var;
378  }
379  } // for(i = 0;i < _dimensions;i++ )
380 
381  // we need to sort the available data points according to their idim-th element
382  // but we need to keep track of their original indices, so we can refer back to
383  // their positions in _data. Therefore, the code constructs a 2-dimensional
384  // std::vector. The first dimension contains the element to be sorted.
385  // The second dimension contains the original index information.
386  // After sorting, the (rearranged) index information will be stored back in the
387  // ndarray use[]
388 
389  toSortElement.push_back(_data[use[0]][idim]);
390  toSortElement.push_back(T(use[0]));
391  toSort.push_back(toSortElement);
392  for (i = 1; i < ct; i++) {
393  toSortElement[0] = _data[use[i]][idim];
394  toSortElement[1] = T(use[i]);
395  toSort.push_back(toSortElement);
396  }
397 
398  std::stable_sort(toSort.begin(), toSort.end());
399 
400  k = ct / 2;
401  l = ct / 2;
402  while (k > 0 && toSort[k][0] == toSort[k - 1][0]) k--;
403 
404  while (l < ct - 1 && toSort[l][0] == toSort[ct / 2][0]) l++;
405 
406  if ((ct / 2 - k) < (l - ct / 2) || l == ct - 1)
407  j = k;
408  else
409  j = l;
410 
411  for (i = 0; i < ct; i++) {
412  use[i] = int(toSort[i][1]);
413  }
414  daughter = use[j];
415 
416  if (parent >= 0) _tree[parent][dir] = daughter;
417  _tree[daughter][DIMENSION] = idim;
418  _tree[daughter][PARENT] = parent;
419 
420  if (j < ct - 1) {
421  _organize(use[ndarray::view(j + 1, use.getSize<0>())], ct - j - 1, daughter, GEQ);
422  } else
423  _tree[daughter][GEQ] = -1;
424 
425  if (j > 0) {
426  _organize(use, j, daughter, LT);
427  } else
428  _tree[daughter][LT] = -1;
429 
430  } // if(ct > 1)
431  else {
432  daughter = use[0];
433 
434  if (parent >= 0) _tree[parent][dir] = daughter;
435 
436  idim = _tree[parent][DIMENSION] + 1;
437 
438  if (idim >= _dimensions) idim = 0;
439 
440  _tree[daughter][DIMENSION] = idim;
441  _tree[daughter][LT] = -1;
442  _tree[daughter][GEQ] = -1;
443  _tree[daughter][PARENT] = parent;
444  }
445 
446  if (parent == -1) {
447  _masterParent = daughter;
448  }
449 }
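
// Illustration of the split rule above: at each level the tree splits on the
// dimension with the greatest sample variance among the remaining points.
// A standalone sketch of that selection, using plain std::vector stand-in
// data rather than the ndarray buffers of the real code:
//
//     int splitDimension(std::vector<std::vector<double> > const &pts) {
//         int best = 0;
//         double varbest = -1.0;
//         for (std::size_t d = 0; d < pts[0].size(); d++) {
//             double mean = 0.0, var = 0.0;
//             for (std::size_t i = 0; i < pts.size(); i++) {
//                 mean += pts[i][d];
//                 var += pts[i][d] * pts[i][d];
//             }
//             mean /= pts.size();
//             var = var / pts.size() - mean * mean;  // E[x^2] - E[x]^2
//             if (var > varbest) { varbest = var; best = int(d); }
//         }
//         return best;  // e.g. {{0,0},{5,1},{10,0.5}} -> 0 (x spans more)
//     }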
450 
451 template <typename T>
452 int KdTree<T>::_findNode(ndarray::Array<const T, 1, 1> const &v) const {
453  int consider, next, dim;
454 
455  dim = _tree[_masterParent][DIMENSION];
456 
457  if (v[dim] < _data[_masterParent][dim])
458  consider = _tree[_masterParent][LT];
459  else
460  consider = _tree[_masterParent][GEQ];
461 
462  next = consider;
463 
464  while (next >= 0) {
465  consider = next;
466 
467  dim = _tree[consider][DIMENSION];
468  if (v[dim] < _data[consider][dim])
469  next = _tree[consider][LT];
470  else
471  next = _tree[consider][GEQ];
472  }
473 
474  return consider;
475 }
476 
477 template <typename T>
478 void KdTree<T>::_lookForNeighbors(ndarray::Array<const T, 1, 1> const &v, int consider, int from) const {
479  int i, j, going;
480  double dd;
481 
482  dd = _distance(v, _data[consider]);
483 
484  if (_neighborsFound < _neighborsWanted || dd < _neighborDistances[_neighborsWanted - 1]) {
485  for (j = 0; j < _neighborsFound && _neighborDistances[j] < dd; j++)
486  ;
487 
488  for (i = _neighborsWanted - 1; i > j; i--) {
489  _neighborDistances[i] = _neighborDistances[i - 1];
490  _neighborCandidates[i] = _neighborCandidates[i - 1];
491  }
492 
493  _neighborDistances[j] = dd;
494  _neighborCandidates[j] = consider;
495 
496  if (_neighborsFound < _neighborsWanted) _neighborsFound++;
497  }
498 
499  if (_tree[consider][PARENT] == from) {
500  // you came here from the parent
501 
502  i = _tree[consider][DIMENSION];
503  dd = v[i] - _data[consider][i];
504  if ((dd <= _neighborDistances[_neighborsFound - 1] || _neighborsFound < _neighborsWanted) &&
505  _tree[consider][LT] >= 0) {
506  _lookForNeighbors(v, _tree[consider][LT], consider);
507  }
508 
509  dd = _data[consider][i] - v[i];
510  if ((dd <= _neighborDistances[_neighborsFound - 1] || _neighborsFound < _neighborsWanted) &&
511  _tree[consider][GEQ] >= 0) {
512  _lookForNeighbors(v, _tree[consider][GEQ], consider);
513  }
514  } else {
515  // you came here from one of the branches
516 
517  // descend the other branch
518  if (_tree[consider][LT] == from) {
519  going = GEQ;
520  } else {
521  going = LT;
522  }
523 
524  j = _tree[consider][going];
525 
526  if (j >= 0) {
527  i = _tree[consider][DIMENSION];
528 
529  if (going == 1)
530  dd = v[i] - _data[consider][i];
531  else
532  dd = _data[consider][i] - v[i];
533 
534  if (dd <= _neighborDistances[_neighborsFound - 1] || _neighborsFound < _neighborsWanted) {
535  _lookForNeighbors(v, j, consider);
536  }
537  }
538 
539  // ascend to the parent
540  if (_tree[consider][PARENT] >= 0) {
541  _lookForNeighbors(v, _tree[consider][PARENT], consider);
542  }
543  }
544 }
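
// Note on the pruning tests above: dd is the signed offset from the query to
// the node's splitting plane, oriented so that it is negative on the near
// side (always descended) and equal to the perpendicular distance on the far
// side. Since for every point x behind the plane
//
//     |v[dim] - x[dim]| <= _distance(v, x),
//
// a far subtree can be skipped once dd exceeds the current worst accepted
// neighbor distance without ever discarding a true nearest neighbor.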
545 
546 template <typename T>
547 int KdTree<T>::_walkUpTree(int target, int dir, int root) const {
548  // target is the node that you are examining now
549  // dir is where you came from
550  // root is the ultimate point from which you started
551 
552  int i, output;
553 
554  output = 1;
555 
556  if (dir == LT) {
557  if (_data[root][_tree[target][DIMENSION]] >= _data[target][_tree[target][DIMENSION]]) {
558  return 0;
559  }
560  } else {
561  if (_data[root][_tree[target][DIMENSION]] < _data[target][_tree[target][DIMENSION]]) {
562  return 0;
563  }
564  }
565 
566  if (_tree[target][PARENT] >= 0) {
567  if (_tree[_tree[target][PARENT]][LT] == target)
568  i = LT;
569  else
570  i = GEQ;
571  output = output * _walkUpTree(_tree[target][PARENT], i, root);
572 
573  } else {
574  output = output * target;
575  // so that it will return _masterParent
576  // make sure everything is connected to _masterParent
577  }
578  return output;
579 }
580 
581 template <typename T>
582 void KdTree<T>::removePoint(int target) {
583  if (target < 0 || target >= _npts) {
584  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
585  "You are trying to remove a point that doesn't exist from KdTree\n");
586  }
587 
588  if (_npts == 1) {
589  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
590  "There is only one point left in this KdTree. You cannot call removePoint\n");
591  }
592 
593  int nl, nr, i, j, k, side;
594  int root;
595 
596  nl = 0;
597  nr = 0;
598  if (_tree[target][LT] >= 0) {
599  nl++;
600  _count(_tree[target][LT], &nl);
601  }
602 
603  if (_tree[target][GEQ] >= 0) {
604  nr++;
605  _count(_tree[target][GEQ], &nr);
606  }
607 
608  if (nl == 0 && nr == 0) {
609  k = _tree[target][PARENT];
610 
611  if (_tree[k][LT] == target)
612  _tree[k][LT] = -1;
613  else if (_tree[k][GEQ] == target)
614  _tree[k][GEQ] = -1;
615 
616  } // if target is terminal
617  else if ((nl == 0 && nr > 0) || (nr == 0 && nl > 0)) {
618  if (nl == 0)
619  side = GEQ;
620  else
621  side = LT;
622 
623  k = _tree[target][PARENT];
624  if (k >= 0) {
625  if (_tree[k][LT] == target) {
626  _tree[k][LT] = _tree[target][side];
627  _tree[_tree[k][LT]][PARENT] = k;
628 
629  } else {
630  _tree[k][GEQ] = _tree[target][side];
631  _tree[_tree[k][GEQ]][PARENT] = k;
632  }
633  } else {
634  _masterParent = _tree[target][side];
635  _tree[_tree[target][side]][PARENT] = -1;
636  }
637  } // if only one side is populated
638  else {
639  if (nl > nr)
640  side = LT;
641  else
642  side = GEQ;
643 
644  k = _tree[target][PARENT];
645  if (k < 0) {
646  _masterParent = _tree[target][side];
647  _tree[_masterParent][PARENT] = -1;
648  } else {
649  if (_tree[k][LT] == target) {
650  _tree[k][LT] = _tree[target][side];
651  _tree[_tree[k][LT]][PARENT] = k;
652  } else {
653  _tree[k][GEQ] = _tree[target][side];
654  _tree[_tree[k][GEQ]][PARENT] = k;
655  }
656  }
657 
658  root = _tree[target][3 - side];
659 
660  _descend(root);
661  } // if both sides are populated
662 
663  if (target < _npts - 1) {
664  for (i = target + 1; i < _npts; i++) {
665  for (j = 0; j < 4; j++) _tree[i - 1][j] = _tree[i][j];
666 
667  for (j = 0; j < _dimensions; j++) _data[i - 1][j] = _data[i][j];
668  }
669 
670  for (i = 0; i < _npts; i++) {
671  for (j = 1; j < 4; j++) {
672  if (_tree[i][j] > target) _tree[i][j]--;
673  }
674  }
675 
676  if (_masterParent > target) _masterParent--;
677  }
678  _npts--;
679 
680  i = _testTree();
681  if (i == 0) {
682  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError, "Subtracting from KdTree failed\n");
683  }
684 }
685 
686 template <typename T>
687 void KdTree<T>::_count(int where, int *ct) const {
688  // a way to count the number of vital elements on a given branch
689 
690  if (_tree[where][LT] >= 0) {
691  ct[0]++;
692  _count(_tree[where][LT], ct);
693  }
694  if (_tree[where][GEQ] >= 0) {
695  ct[0]++;
696  _count(_tree[where][GEQ], ct);
697  }
698 }
699 
700 template <typename T>
701 void KdTree<T>::_reassign(int target) {
702  int where, dir, k;
703 
704  where = _masterParent;
705  if (_data[target][_tree[where][DIMENSION]] < _data[where][_tree[where][DIMENSION]])
706  dir = LT;
707  else
708  dir = GEQ;
709 
710  k = _tree[where][dir];
711  while (k >= 0) {
712  where = k;
713  if (_data[target][_tree[where][DIMENSION]] < _data[where][_tree[where][DIMENSION]])
714  dir = LT;
715  else
716  dir = GEQ;
717  k = _tree[where][dir];
718  }
719 
720  _tree[where][dir] = target;
721  _tree[target][PARENT] = where;
722  _tree[target][LT] = -1;
723  _tree[target][GEQ] = -1;
724  _tree[target][DIMENSION] = _tree[where][DIMENSION] + 1;
725 
726  if (_tree[target][DIMENSION] == _dimensions) _tree[target][DIMENSION] = 0;
727 }
728 
729 template <typename T>
730 void KdTree<T>::_descend(int root) {
731  if (_tree[root][LT] >= 0) _descend(_tree[root][LT]);
732  if (_tree[root][GEQ] >= 0) _descend(_tree[root][GEQ]);
733 
734  _reassign(root);
735 }
736 
737 template <typename T>
738 double KdTree<T>::_distance(ndarray::Array<const T, 1, 1> const &p1,
739  ndarray::Array<const T, 1, 1> const &p2) const {
740  int i, dd;
741  double ans;
742  ans = 0.0;
743  dd = p1.template getSize<0>();
744 
745  for (i = 0; i < dd; i++) ans += (p1[i] - p2[i]) * (p1[i] - p2[i]);
746 
747  return ::sqrt(ans);
748 }
749 
750 template <typename T>
751 GaussianProcess<T>::GaussianProcess(ndarray::Array<T, 2, 2> const &dataIn, ndarray::Array<T, 1, 1> const &ff,
752  std::shared_ptr<Covariogram<T> > const &covarIn)
753 
754 {
755  int i;
756 
757  _covariogram = covarIn;
758 
759  _npts = dataIn.template getSize<0>();
760  _dimensions = dataIn.template getSize<1>();
761 
762  _room = _npts;
763  _roomStep = 5000;
764 
765  _nFunctions = 1;
766  _function = allocate(ndarray::makeVector(_npts, 1));
767 
768  if (ff.getNumElements() != static_cast<ndarray::Size>(_npts)) {
769  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
770  "You did not pass in the same number of data points as function values\n");
771  }
772 
773  for (i = 0; i < _npts; i++) _function[i][0] = ff[i];
774 
775  _krigingParameter = T(1.0);
776  _lambda = T(1.0e-5);
777 
778  _useMaxMin = 0;
779 
780  _kdTree.Initialize(dataIn);
781 
782  _npts = _kdTree.getNPoints();
783 }
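
// Usage sketch: constructing a single-function GaussianProcess. The
// covariogram is passed as a shared_ptr<Covariogram<T> >; this sketch assumes
// the SquaredExpCovariogram declared alongside it (default hyperparameters),
// and the data values are placeholders:
//
//     ndarray::Array<double, 2, 2> data = ndarray::allocate(ndarray::makeVector(50, 2));
//     ndarray::Array<double, 1, 1> ff = ndarray::allocate(ndarray::makeVector(50));
//     data.deep() = 0.0;  // fill with real samples in practice
//     ff.deep() = 0.0;    // one function value per data point
//     std::shared_ptr<lsst::afw::math::Covariogram<double> > covar =
//             std::make_shared<lsst::afw::math::SquaredExpCovariogram<double> >();
//     lsst::afw::math::GaussianProcess<double> gp(data, ff, covar);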
784 
785 template <typename T>
786 GaussianProcess<T>::GaussianProcess(ndarray::Array<T, 2, 2> const &dataIn, ndarray::Array<T, 1, 1> const &mn,
787  ndarray::Array<T, 1, 1> const &mx, ndarray::Array<T, 1, 1> const &ff,
788  std::shared_ptr<Covariogram<T> > const &covarIn) {
789  int i, j;
790  ndarray::Array<T, 2, 2> normalizedData;
791 
792  _covariogram = covarIn;
793 
794  _npts = dataIn.template getSize<0>();
795  _dimensions = dataIn.template getSize<1>();
796  _room = _npts;
797  _roomStep = 5000;
798 
799  if (ff.getNumElements() != static_cast<ndarray::Size>(_npts)) {
800  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
801  "You did not pass in the same number of data points as function values\n");
802  }
803 
804  if (mn.getNumElements() != static_cast<ndarray::Size>(_dimensions) ||
805  mx.getNumElements() != static_cast<ndarray::Size>(_dimensions)) {
806  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
807  "Your min/max values have different dimensionality than your data points\n");
808  }
809 
810  _krigingParameter = T(1.0);
811 
812  _lambda = T(1.0e-5);
813  _krigingParameter = T(1.0);
814 
815  _max = allocate(ndarray::makeVector(_dimensions));
816  _min = allocate(ndarray::makeVector(_dimensions));
817  _max.deep() = mx;
818  _min.deep() = mn;
819  _useMaxMin = 1;
820  normalizedData = allocate(ndarray::makeVector(_npts, _dimensions));
821  for (i = 0; i < _npts; i++) {
822  for (j = 0; j < _dimensions; j++) {
823  normalizedData[i][j] = (dataIn[i][j] - _min[j]) / (_max[j] - _min[j]);
824  // note the normalization by _max - _min in each dimension
825  }
826  }
827 
828  _kdTree.Initialize(normalizedData);
829 
830  _npts = _kdTree.getNPoints();
831  _nFunctions = 1;
832  _function = allocate(ndarray::makeVector(_npts, 1));
833  for (i = 0; i < _npts; i++) _function[i][0] = ff[i];
834 }
835 
836 template <typename T>
837 GaussianProcess<T>::GaussianProcess(ndarray::Array<T, 2, 2> const &dataIn, ndarray::Array<T, 2, 2> const &ff,
838  std::shared_ptr<Covariogram<T> > const &covarIn) {
839  _covariogram = covarIn;
840 
841  _npts = dataIn.template getSize<0>();
842  _dimensions = dataIn.template getSize<1>();
843 
844  _room = _npts;
845  _roomStep = 5000;
846 
847  if (ff.template getSize<0>() != static_cast<ndarray::Size>(_npts)) {
848  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
849  "You did not pass in the same number of data points as function values\n");
850  }
851 
852  _nFunctions = ff.template getSize<1>();
853  _function = allocate(ndarray::makeVector(_npts, _nFunctions));
854  _function.deep() = ff;
855 
856  _krigingParameter = T(1.0);
857 
858  _lambda = T(1.0e-5);
859 
860  _useMaxMin = 0;
861 
862  _kdTree.Initialize(dataIn);
863 
864  _npts = _kdTree.getNPoints();
865 }
866 
867 template <typename T>
868 GaussianProcess<T>::GaussianProcess(ndarray::Array<T, 2, 2> const &dataIn, ndarray::Array<T, 1, 1> const &mn,
869  ndarray::Array<T, 1, 1> const &mx, ndarray::Array<T, 2, 2> const &ff,
870  std::shared_ptr<Covariogram<T> > const &covarIn) {
871  int i, j;
872  ndarray::Array<T, 2, 2> normalizedData;
873 
874  _covariogram = covarIn;
875 
876  _npts = dataIn.template getSize<0>();
877  _dimensions = dataIn.template getSize<1>();
878 
879  _room = _npts;
880  _roomStep = 5000;
881 
882  if (ff.template getSize<0>() != static_cast<ndarray::Size>(_npts)) {
883  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
884  "You did not pass in the same number of data points as function values\n");
885  }
886 
887  if (mn.getNumElements() != static_cast<ndarray::Size>(_dimensions) ||
888  mx.getNumElements() != static_cast<ndarray::Size>(_dimensions)) {
889  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
890  "Your min/max values have different dimensionality than your data points\n");
891  }
892 
893  _krigingParameter = T(1.0);
894 
895  _lambda = T(1.0e-5);
896  _krigingParameter = T(1.0);
897 
898  _max = allocate(ndarray::makeVector(_dimensions));
899  _min = allocate(ndarray::makeVector(_dimensions));
900  _max.deep() = mx;
901  _min.deep() = mn;
902  _useMaxMin = 1;
903  normalizedData = allocate(ndarray::makeVector(_npts, _dimensions));
904  for (i = 0; i < _npts; i++) {
905  for (j = 0; j < _dimensions; j++) {
906  normalizedData[i][j] = (dataIn[i][j] - _min[j]) / (_max[j] - _min[j]);
907  // note the normalization by _max - _min in each dimension
908  }
909  }
910 
911  _kdTree.Initialize(normalizedData);
912  _npts = _kdTree.getNPoints();
913  _nFunctions = ff.template getSize<1>();
914  _function = allocate(ndarray::makeVector(_npts, _nFunctions));
915  _function.deep() = ff;
916 }
917 
918 template <typename T>
919 int GaussianProcess<T>::getNPoints() const {
920  return _npts;
921 }
922 
923 template <typename T>
924 int GaussianProcess<T>::getDimensions() const {
925  return _dimensions;
926 }
927 
928 template <typename T>
929 void GaussianProcess<T>::getData(ndarray::Array<T, 2, 2> pts, ndarray::Array<T, 1, 1> fn,
930  ndarray::Array<int, 1, 1> indices) const {
931  if (_nFunctions != 1) {
932  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
933  "Your function value array does not have enough room for all of the functions "
934  "in your GaussianProcess.\n");
935  }
936 
937  if (pts.template getSize<1>() != static_cast<ndarray::Size>(_dimensions)) {
938  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
939  "Your pts array is constructed for points of the wrong dimensionality.\n");
940  }
941 
942  if (pts.template getSize<0>() != indices.getNumElements()) {
943  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
944  "You did not put enough room in your pts array to fit all the points "
945  "you asked for in your indices array.\n");
946  }
947 
948  if (fn.template getSize<0>() != indices.getNumElements()) {
949  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
950  "You did not provide enough room in your function value array "
951  "for all of the points you requested in your indices array.\n");
952  }
953 
954  for (ndarray::Size i = 0; i < indices.template getSize<0>(); i++) {
955  pts[i] = _kdTree.getData(indices[i]); // do this first in case one of the indices is invalid.
956  // _kdTree.getData() will raise an exception in that case
957  fn[i] = _function[indices[i]][0];
958  if (_useMaxMin == 1) {
959  for (int j = 0; j < _dimensions; j++) {
960  pts[i][j] *= (_max[j] - _min[j]);
961  pts[i][j] += _min[j];
962  }
963  }
964  }
965 }
966 
967 template <typename T>
968 void GaussianProcess<T>::getData(ndarray::Array<T, 2, 2> pts, ndarray::Array<T, 2, 2> fn,
969  ndarray::Array<int, 1, 1> indices) const {
970  if (fn.template getSize<1>() != static_cast<ndarray::Size>(_nFunctions)) {
971  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
972  "Your function value array does not have enough room for all of the functions "
973  "in your GaussianProcess.\n");
974  }
975 
976  if (pts.template getSize<1>() != static_cast<ndarray::Size>(_dimensions)) {
977  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
978  "Your pts array is constructed for points of the wrong dimensionality.\n");
979  }
980 
981  if (pts.template getSize<0>() != indices.getNumElements()) {
982  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
983  "You did not put enough room in your pts array to fit all the points "
984  "you asked for in your indices array.\n");
985  }
986 
987  if (fn.template getSize<0>() != indices.getNumElements()) {
988  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
989  "You did not provide enough room in your function value array "
990  "for all of the points you requested in your indices array.\n");
991  }
992 
993  for (ndarray::Size i = 0; i < indices.template getSize<0>(); i++) {
994  pts[i] = _kdTree.getData(indices[i]); // do this first in case one of the indices is invalid.
995  // _kdTree.getData() will raise an exception in that case
996  for (int j = 0; j < _nFunctions; j++) {
997  fn[i][j] = _function[indices[i]][j];
998  }
999  if (_useMaxMin == 1) {
1000  for (int j = 0; j < _dimensions; j++) {
1001  pts[i][j] *= (_max[j] - _min[j]);
1002  pts[i][j] += _min[j];
1003  }
1004  }
1005  }
1006 }
1007 
1008 template <typename T>
1009 T GaussianProcess<T>::interpolate(ndarray::Array<T, 1, 1> variance, ndarray::Array<T, 1, 1> const &vin,
1010  int numberOfNeighbors) const {
1011  if (_nFunctions > 1) {
1012  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1013  "You need to call the version of GaussianProcess.interpolate() "
1014  "that accepts mu and variance arrays (which it populates with results). "
1015  "You are interpolating more than one function.");
1016  }
1017 
1018  if (numberOfNeighbors <= 0) {
1019  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1020  "Asked for zero or negative number of neighbors\n");
1021  }
1022 
1023  if (numberOfNeighbors > _kdTree.getNPoints()) {
1024  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1025  "Asked for more neighbors than you have data points\n");
1026  }
1027 
1028  if (variance.getNumElements() != static_cast<ndarray::Size>(_nFunctions)) {
1029  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1030  "Your variance array is the incorrect size for the number "
1031  "of functions you are trying to interpolate\n");
1032  }
1033 
1034  if (vin.getNumElements() != static_cast<ndarray::Size>(_dimensions)) {
1035  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1036  "You are interpolating at a point with different dimensionality than your data\n");
1037  }
1038 
1039  int i, j;
1040  T fbar, mu;
1041 
1042  ndarray::Array<T, 1, 1> covarianceTestPoint;
1043  ndarray::Array<int, 1, 1> neighbors;
1044  ndarray::Array<double, 1, 1> neighborDistances, vv;
1045 
1046  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> covariance, bb, xx;
1047  Eigen::LDLT<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > ldlt;
1048 
1049  _timer.start();
1050 
1051  bb.resize(numberOfNeighbors, 1);
1052  xx.resize(numberOfNeighbors, 1);
1053 
1054  covariance.resize(numberOfNeighbors, numberOfNeighbors);
1055 
1056  covarianceTestPoint = allocate(ndarray::makeVector(numberOfNeighbors));
1057 
1058  neighbors = allocate(ndarray::makeVector(numberOfNeighbors));
1059 
1060  neighborDistances = allocate(ndarray::makeVector(numberOfNeighbors));
1061 
1062  vv = allocate(ndarray::makeVector(_dimensions));
1063  if (_useMaxMin == 1) {
1064  // if you constructed this Gaussian process with minimum and maximum
1065  // values for the dimensions of your parameter space,
1066  // the point you are interpolating must be scaled to match the data so
1067  // that the selected nearest neighbors are appropriate
1068 
1069  for (i = 0; i < _dimensions; i++) vv[i] = (vin[i] - _min[i]) / (_max[i] - _min[i]);
1070  } else {
1071  vv = vin;
1072  }
1073 
1074  _kdTree.findNeighbors(neighbors, neighborDistances, vv, numberOfNeighbors);
1075 
1076  _timer.addToSearch();
1077 
1078  fbar = 0.0;
1079  for (i = 0; i < numberOfNeighbors; i++) fbar += _function[neighbors[i]][0];
1080  fbar = fbar / double(numberOfNeighbors);
1081 
1082  for (i = 0; i < numberOfNeighbors; i++) {
1083  covarianceTestPoint[i] = (*_covariogram)(vv, _kdTree.getData(neighbors[i]));
1084 
1085  covariance(i, i) =
1086  (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[i])) + _lambda;
1087 
1088  for (j = i + 1; j < numberOfNeighbors; j++) {
1089  covariance(i, j) = (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[j]));
1090  covariance(j, i) = covariance(i, j);
1091  }
1092  }
1093 
1094  _timer.addToIteration();
1095 
1096  // use Eigen's ldlt solver in place of matrix inversion (for speed purposes)
1097  ldlt.compute(covariance);
1098 
1099  for (i = 0; i < numberOfNeighbors; i++) bb(i, 0) = _function[neighbors[i]][0] - fbar;
1100 
1101  xx = ldlt.solve(bb);
1102  _timer.addToEigen();
1103 
1104  mu = fbar;
1105 
1106  for (i = 0; i < numberOfNeighbors; i++) {
1107  mu += covarianceTestPoint[i] * xx(i, 0);
1108  }
1109 
1110  _timer.addToIteration();
1111 
1112  variance(0) = (*_covariogram)(vv, vv) + _lambda;
1113 
1114  for (i = 0; i < numberOfNeighbors; i++) bb(i) = covarianceTestPoint[i];
1115 
1116  xx = ldlt.solve(bb);
1117 
1118  for (i = 0; i < numberOfNeighbors; i++) {
1119  variance(0) -= covarianceTestPoint[i] * xx(i, 0);
1120  }
1121 
1122  variance(0) = variance(0) * _krigingParameter;
1123 
1124  _timer.addToVariance();
1125  _timer.addToTotal(1);
1126 
1127  return mu;
1128 }
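
// In matrix form, the loops above implement the standard kriging equations.
// With C the neighbor covariance matrix (covariogram values plus lambda on
// the diagonal), k the covariance vector between the query point v and the
// neighbors, f the neighbor function values, and fbar their mean:
//
//     mu(v)  = fbar + k^T C^{-1} (f - fbar)
//     var(v) = krigingParameter * ( K(v, v) + lambda - k^T C^{-1} k )
//
// C^{-1} is never formed explicitly; both solves reuse the single LDLT
// factorization of C computed above.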
1129 
1130 template <typename T>
1131 void GaussianProcess<T>::interpolate(ndarray::Array<T, 1, 1> mu, ndarray::Array<T, 1, 1> variance,
1132  ndarray::Array<T, 1, 1> const &vin, int numberOfNeighbors) const {
1133  if (numberOfNeighbors <= 0) {
1134  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1135  "Asked for zero or negative number of neighbors\n");
1136  }
1137 
1138  if (numberOfNeighbors > _kdTree.getNPoints()) {
1139  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1140  "Asked for more neighbors than you have data points\n");
1141  }
1142 
1143  if (vin.getNumElements() != static_cast<ndarray::Size>(_dimensions)) {
1144  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1145  "You are interpolating at a point with different dimensionality than your data\n");
1146  }
1147 
1148  if (mu.getNumElements() != static_cast<ndarray::Size>(_nFunctions) ||
1149  variance.getNumElements() != static_cast<ndarray::Size>(_nFunctions)) {
1150  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1151  "Your mu and/or var arrays are improperly sized for the number of functions "
1152  "you are interpolating\n");
1153  }
1154 
1155  int i, j, ii;
1156  T fbar;
1157 
1158  ndarray::Array<T, 1, 1> covarianceTestPoint;
1159  ndarray::Array<int, 1, 1> neighbors;
1160  ndarray::Array<double, 1, 1> neighborDistances, vv;
1161 
1162  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> covariance, bb, xx;
1163  Eigen::LDLT<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > ldlt;
1164 
1165  _timer.start();
1166 
1167  bb.resize(numberOfNeighbors, 1);
1168  xx.resize(numberOfNeighbors, 1);
1169  covariance.resize(numberOfNeighbors, numberOfNeighbors);
1170  covarianceTestPoint = allocate(ndarray::makeVector(numberOfNeighbors));
1171  neighbors = allocate(ndarray::makeVector(numberOfNeighbors));
1172  neighborDistances = allocate(ndarray::makeVector(numberOfNeighbors));
1173 
1174  vv = allocate(ndarray::makeVector(_dimensions));
1175 
1176  if (_useMaxMin == 1) {
1177  // if you constructed this Gaussian process with minimum and maximum
1178  // values for the dimensions of your parameter space,
1179  // the point you are interpolating must be scaled to match the data so
1180  // that the selected nearest neighbors are appropriate
1181 
1182  for (i = 0; i < _dimensions; i++) vv[i] = (vin[i] - _min[i]) / (_max[i] - _min[i]);
1183  } else {
1184  vv = vin;
1185  }
1186 
1187  _kdTree.findNeighbors(neighbors, neighborDistances, vv, numberOfNeighbors);
1188 
1189  _timer.addToSearch();
1190 
1191  for (i = 0; i < numberOfNeighbors; i++) {
1192  covarianceTestPoint[i] = (*_covariogram)(vv, _kdTree.getData(neighbors[i]));
1193 
1194  covariance(i, i) =
1195  (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[i])) + _lambda;
1196 
1197  for (j = i + 1; j < numberOfNeighbors; j++) {
1198  covariance(i, j) = (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[j]));
1199  covariance(j, i) = covariance(i, j);
1200  }
1201  }
1202 
1203  _timer.addToIteration();
1204 
1205  // use Eigen's ldlt solver in place of matrix inversion (for speed purposes)
1206  ldlt.compute(covariance);
1207 
1208  for (ii = 0; ii < _nFunctions; ii++) {
1209  fbar = 0.0;
1210  for (i = 0; i < numberOfNeighbors; i++) fbar += _function[neighbors[i]][ii];
1211  fbar = fbar / double(numberOfNeighbors);
1212 
1213  for (i = 0; i < numberOfNeighbors; i++) bb(i, 0) = _function[neighbors[i]][ii] - fbar;
1214  xx = ldlt.solve(bb);
1215 
1216  mu[ii] = fbar;
1217 
1218  for (i = 0; i < numberOfNeighbors; i++) {
1219  mu[ii] += covarianceTestPoint[i] * xx(i, 0);
1220  }
1221 
1222  } // ii = 0 through _nFunctions
1223 
1224  _timer.addToEigen();
1225 
1226  variance[0] = (*_covariogram)(vv, vv) + _lambda;
1227 
1228  for (i = 0; i < numberOfNeighbors; i++) bb(i) = covarianceTestPoint[i];
1229 
1230  xx = ldlt.solve(bb);
1231 
1232  for (i = 0; i < numberOfNeighbors; i++) {
1233  variance[0] -= covarianceTestPoint[i] * xx(i, 0);
1234  }
1235  variance[0] = variance[0] * _krigingParameter;
1236 
1237  for (i = 1; i < _nFunctions; i++) variance[i] = variance[0];
1238 
1239  _timer.addToVariance();
1240  _timer.addToTotal(1);
1241 }
1242 
1243 template <typename T>
1244 T GaussianProcess<T>::selfInterpolate(ndarray::Array<T, 1, 1> variance, int dex,
1245  int numberOfNeighbors) const {
1246  if (_nFunctions > 1) {
1247  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1248  "You need to call the version of GaussianProcess.selfInterpolate() "
1249  "that accepts mu and variance arrays (which it populates with results). "
1250  "You are interpolating more than one function.");
1251  }
1252 
1253  if (variance.getNumElements() != static_cast<ndarray::Size>(_nFunctions)) {
1254  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1255  "Your variance array is the incorrect size for the number "
1256  "of functions you are trying to interpolate\n");
1257  }
1258 
1259  if (numberOfNeighbors <= 0) {
1260  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1261  "Asked for zero or negative number of neighbors\n");
1262  }
1263 
1264  if (numberOfNeighbors + 1 > _kdTree.getNPoints()) {
1265  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1266  "Asked for more neighbors than you have data points\n");
1267  }
1268 
1269  if (dex < 0 || dex >= _kdTree.getNPoints()) {
1270  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1271  "Asked to self interpolate on a point that does not exist\n");
1272  }
1273 
1274  int i, j;
1275  T fbar, mu;
1276 
1277  ndarray::Array<T, 1, 1> covarianceTestPoint;
1278  ndarray::Array<int, 1, 1> selfNeighbors;
1279  ndarray::Array<double, 1, 1> selfDistances;
1280  ndarray::Array<int, 1, 1> neighbors;
1281  ndarray::Array<double, 1, 1> neighborDistances;
1282 
1283  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> covariance, bb, xx;
1284  Eigen::LDLT<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > ldlt;
1285 
1286  _timer.start();
1287 
1288  bb.resize(numberOfNeighbors, 1);
1289  xx.resize(numberOfNeighbors, 1);
1290  covariance.resize(numberOfNeighbors, numberOfNeighbors);
1291  covarianceTestPoint = allocate(ndarray::makeVector(numberOfNeighbors));
1292  neighbors = allocate(ndarray::makeVector(numberOfNeighbors));
1293  neighborDistances = allocate(ndarray::makeVector(numberOfNeighbors));
1294 
1295  selfNeighbors = allocate(ndarray::makeVector(numberOfNeighbors + 1));
1296  selfDistances = allocate(ndarray::makeVector(numberOfNeighbors + 1));
1297 
1298  // we don't use _useMaxMin because the data has already been normalized
1299 
1300  _kdTree.findNeighbors(selfNeighbors, selfDistances, _kdTree.getData(dex), numberOfNeighbors + 1);
1301 
1302  _timer.addToSearch();
1303 
1304  if (selfNeighbors[0] != dex) {
1305  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1306  "Nearest neighbor search in selfInterpolate did not find self\n");
1307  }
1308 
1309  // selfNeighbors[0] will be the point itself (it is its own nearest neighbor)
1310  // We discard that for the interpolation calculation
1311  //
1312  // If you do not wish to do this, simply call the usual ::interpolate() method instead of
1313  //::selfInterpolate()
1314  for (i = 0; i < numberOfNeighbors; i++) {
1315  neighbors[i] = selfNeighbors[i + 1];
1316  neighborDistances[i] = selfDistances[i + 1];
1317  }
1318 
1319  fbar = 0.0;
1320  for (i = 0; i < numberOfNeighbors; i++) fbar += _function[neighbors[i]][0];
1321  fbar = fbar / double(numberOfNeighbors);
1322 
1323  for (i = 0; i < numberOfNeighbors; i++) {
1324  covarianceTestPoint[i] = (*_covariogram)(_kdTree.getData(dex), _kdTree.getData(neighbors[i]));
1325 
1326  covariance(i, i) =
1327  (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[i])) + _lambda;
1328 
1329  for (j = i + 1; j < numberOfNeighbors; j++) {
1330  covariance(i, j) = (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[j]));
1331  covariance(j, i) = covariance(i, j);
1332  }
1333  }
1334  _timer.addToIteration();
1335 
1336  // use Eigen's ldlt solver in place of matrix inversion (for speed purposes)
1337  ldlt.compute(covariance);
1338 
1339  for (i = 0; i < numberOfNeighbors; i++) bb(i, 0) = _function[neighbors[i]][0] - fbar;
1340  xx = ldlt.solve(bb);
1341  _timer.addToEigen();
1342 
1343  mu = fbar;
1344 
1345  for (i = 0; i < numberOfNeighbors; i++) {
1346  mu += covarianceTestPoint[i] * xx(i, 0);
1347  }
1348 
1349  variance(0) = (*_covariogram)(_kdTree.getData(dex), _kdTree.getData(dex)) + _lambda;
1350 
1351  for (i = 0; i < numberOfNeighbors; i++) bb(i) = covarianceTestPoint[i];
1352 
1353  xx = ldlt.solve(bb);
1354 
1355  for (i = 0; i < numberOfNeighbors; i++) {
1356  variance(0) -= covarianceTestPoint[i] * xx(i, 0);
1357  }
1358 
1359  variance(0) = variance(0) * _krigingParameter;
1360  _timer.addToVariance();
1361  _timer.addToTotal(1);
1362 
1363  return mu;
1364 }
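
// Usage sketch: selfInterpolate() supports leave-one-out cross-validation,
// predicting each stored point from its neighbors while excluding the point
// itself. A typical hyperparameter check (gp as constructed in the earlier
// sketch):
//
//     ndarray::Array<double, 1, 1> var = ndarray::allocate(ndarray::makeVector(1));
//     for (int i = 0; i < gp.getNPoints(); i++) {
//         double predicted = gp.selfInterpolate(var, i, 10);
//         // compare 'predicted' against the function value stored for point i
//     }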
1365 
1366 template <typename T>
1367 void GaussianProcess<T>::selfInterpolate(ndarray::Array<T, 1, 1> mu, ndarray::Array<T, 1, 1> variance,
1368  int dex, int numberOfNeighbors) const {
1369  if (mu.getNumElements() != static_cast<ndarray::Size>(_nFunctions) ||
1370  variance.getNumElements() != static_cast<ndarray::Size>(_nFunctions)) {
1371  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1372  "Your mu and/or var arrays are improperly sized for the number of functions "
1373  "you are interpolating\n");
1374  }
1375 
1376  if (numberOfNeighbors <= 0) {
1377  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1378  "Asked for zero or negative number of neighbors\n");
1379  }
1380 
1381  if (numberOfNeighbors + 1 > _kdTree.getNPoints()) {
1382  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1383  "Asked for more neighbors than you have data points\n");
1384  }
1385 
1386  if (dex < 0 || dex >= _kdTree.getNPoints()) {
1387  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1388  "Asked to self interpolate on a point that does not exist\n");
1389  }
1390 
1391  int i, j, ii;
1392  T fbar;
1393 
1394  ndarray::Array<T, 1, 1> covarianceTestPoint;
1395  ndarray::Array<int, 1, 1> selfNeighbors;
1396  ndarray::Array<double, 1, 1> selfDistances;
1397  ndarray::Array<int, 1, 1> neighbors;
1398  ndarray::Array<double, 1, 1> neighborDistances;
1399 
1400  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> covariance, bb, xx;
1401  Eigen::LDLT<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > ldlt;
1402 
1403  _timer.start();
1404 
1405  bb.resize(numberOfNeighbors, 1);
1406  xx.resize(numberOfNeighbors, 1);
1407  covariance.resize(numberOfNeighbors, numberOfNeighbors);
1408  covarianceTestPoint = allocate(ndarray::makeVector(numberOfNeighbors));
1409  neighbors = allocate(ndarray::makeVector(numberOfNeighbors));
1410  neighborDistances = allocate(ndarray::makeVector(numberOfNeighbors));
1411 
1412  selfNeighbors = allocate(ndarray::makeVector(numberOfNeighbors + 1));
1413  selfDistances = allocate(ndarray::makeVector(numberOfNeighbors + 1));
1414 
1415  // we don't use _useMaxMin because the data has already been normalized
1416 
1417  _kdTree.findNeighbors(selfNeighbors, selfDistances, _kdTree.getData(dex), numberOfNeighbors + 1);
1418 
1419  _timer.addToSearch();
1420 
1421  if (selfNeighbors[0] != dex) {
1422  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1423  "Nearest neighbor search in selfInterpolate did not find self\n");
1424  }
1425 
1426  // selfNeighbors[0] will be the point itself (it is its own nearest neighbor)
1427  // We discard that for the interpolation calculation
1428  //
1429  // If you do not wish to do this, simply call the usual ::interpolate() method instead of
1430  //::selfInterpolate()
1431  for (i = 0; i < numberOfNeighbors; i++) {
1432  neighbors[i] = selfNeighbors[i + 1];
1433  neighborDistances[i] = selfDistances[i + 1];
1434  }
1435 
1436  for (i = 0; i < numberOfNeighbors; i++) {
1437  covarianceTestPoint[i] = (*_covariogram)(_kdTree.getData(dex), _kdTree.getData(neighbors[i]));
1438 
1439  covariance(i, i) =
1440  (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[i])) + _lambda;
1441 
1442  for (j = i + 1; j < numberOfNeighbors; j++) {
1443  covariance(i, j) = (*_covariogram)(_kdTree.getData(neighbors[i]), _kdTree.getData(neighbors[j]));
1444  covariance(j, i) = covariance(i, j);
1445  }
1446  }
1447  _timer.addToIteration();
1448 
1449  // use Eigen's ldlt solver in place of matrix inversion (for speed purposes)
1450  ldlt.compute(covariance);
1451 
1452  for (ii = 0; ii < _nFunctions; ii++) {
1453  fbar = 0.0;
1454  for (i = 0; i < numberOfNeighbors; i++) fbar += _function[neighbors[i]][ii];
1455  fbar = fbar / double(numberOfNeighbors);
1456 
1457  for (i = 0; i < numberOfNeighbors; i++) bb(i, 0) = _function[neighbors[i]][ii] - fbar;
1458  xx = ldlt.solve(bb);
1459 
1460  mu[ii] = fbar;
1461 
1462  for (i = 0; i < numberOfNeighbors; i++) {
1463  mu[ii] += covarianceTestPoint[i] * xx(i, 0);
1464  }
1465  } // ii = 0 through _nFunctions
1466 
1467  _timer.addToEigen();
1468 
1469  variance[0] = (*_covariogram)(_kdTree.getData(dex), _kdTree.getData(dex)) + _lambda;
1470 
1471  for (i = 0; i < numberOfNeighbors; i++) bb(i) = covarianceTestPoint[i];
1472 
1473  xx = ldlt.solve(bb);
1474 
1475  for (i = 0; i < numberOfNeighbors; i++) {
1476  variance[0] -= covarianceTestPoint[i] * xx(i, 0);
1477  }
1478 
1479  variance[0] = variance[0] * _krigingParameter;
1480 
1481  for (i = 1; i < _nFunctions; i++) variance[i] = variance[0];
1482 
1483  _timer.addToVariance();
1484  _timer.addToTotal(1);
1485 }
1486 
1487 template <typename T>
1488 void GaussianProcess<T>::batchInterpolate(ndarray::Array<T, 1, 1> mu, ndarray::Array<T, 1, 1> variance,
1489  ndarray::Array<T, 2, 2> const &queries) const {
1490  int i, j;
1491 
1492  ndarray::Size nQueries = queries.template getSize<0>();
1493 
1494  if (_nFunctions != 1) {
1495  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1496  "Your mu and variance arrays do not have room for all of the functions "
1497  "you are trying to interpolate\n");
1498  }
1499 
1500  if (mu.getNumElements() != nQueries || variance.getNumElements() != nQueries) {
1501  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1502  "Your mu and variance arrays do not have room for all of the points "
1503  "at which you are trying to interpolate your function.\n");
1504  }
1505 
1506  if (queries.template getSize<1>() != static_cast<ndarray::Size>(_dimensions)) {
1507  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1508  "The points you passed to batchInterpolate are of the wrong "
1509  "dimensionality for your Gaussian Process\n");
1510  }
1511 
1512  T fbar;
1513  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> batchCovariance, batchbb, batchxx;
1514  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> queryCovariance;
1515  Eigen::LDLT<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > ldlt;
1516 
1517  ndarray::Array<T, 1, 1> v1;
1518 
1519  _timer.start();
1520 
1521  v1 = allocate(ndarray::makeVector(_dimensions));
1522  batchbb.resize(_npts, 1);
1523  batchxx.resize(_npts, 1);
1524  batchCovariance.resize(_npts, _npts);
1525  queryCovariance.resize(_npts, 1);
1526 
1527  for (i = 0; i < _npts; i++) {
1528  batchCovariance(i, i) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(i)) + _lambda;
1529  for (j = i + 1; j < _npts; j++) {
1530  batchCovariance(i, j) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(j));
1531  batchCovariance(j, i) = batchCovariance(i, j);
1532  }
1533  }
1534  _timer.addToIteration();
1535 
1536  ldlt.compute(batchCovariance);
1537 
1538  fbar = 0.0;
1539  for (i = 0; i < _npts; i++) {
1540  fbar += _function[i][0];
1541  }
1542  fbar = fbar / T(_npts);
1543 
1544  for (i = 0; i < _npts; i++) {
1545  batchbb(i, 0) = _function[i][0] - fbar;
1546  }
1547  batchxx = ldlt.solve(batchbb);
1548  _timer.addToEigen();
1549 
1550  for (ndarray::Size ii = 0; ii < nQueries; ii++) {
1551  for (i = 0; i < _dimensions; i++) v1[i] = queries[ii][i];
1552  if (_useMaxMin == 1) {
1553  for (i = 0; i < _dimensions; i++) v1[i] = (v1[i] - _min[i]) / (_max[i] - _min[i]);
1554  }
1555  mu(ii) = fbar;
1556  for (i = 0; i < _npts; i++) {
1557  mu(ii) += batchxx(i) * (*_covariogram)(v1, _kdTree.getData(i));
1558  }
1559  }
1560  _timer.addToIteration();
1561 
1562  for (ndarray::Size ii = 0; ii < nQueries; ii++) {
1563  for (i = 0; i < _dimensions; i++) v1[i] = queries[ii][i];
1564  if (_useMaxMin == 1) {
1565  for (i = 0; i < _dimensions; i++) v1[i] = (v1[i] - _min[i]) / (_max[i] - _min[i]);
1566  }
1567 
1568  for (i = 0; i < _npts; i++) {
1569  batchbb(i, 0) = (*_covariogram)(v1, _kdTree.getData(i));
1570  queryCovariance(i, 0) = batchbb(i, 0);
1571  }
1572  batchxx = ldlt.solve(batchbb);
1573 
1574  variance[ii] = (*_covariogram)(v1, v1) + _lambda;
1575 
1576  for (i = 0; i < _npts; i++) {
1577  variance[ii] -= queryCovariance(i, 0) * batchxx(i);
1578  }
1579 
1580  variance[ii] = variance[ii] * _krigingParameter;
1581  }
1582 
1583  _timer.addToVariance();
1584  _timer.addToTotal(nQueries);
1585 }
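
// Usage sketch: unlike interpolate(), batchInterpolate() factorizes the full
// npts x npts covariance matrix once (a single LDLT decomposition) and reuses
// it for every query, so it pays off when many points are interpolated at
// once. Continuing the gp sketch above, with placeholder query values:
//
//     ndarray::Array<double, 2, 2> queries = ndarray::allocate(ndarray::makeVector(1000, 2));
//     ndarray::Array<double, 1, 1> mu = ndarray::allocate(ndarray::makeVector(1000));
//     ndarray::Array<double, 1, 1> var = ndarray::allocate(ndarray::makeVector(1000));
//     queries.deep() = 0.5;  // fill with real query points in practice
//     gp.batchInterpolate(mu, var, queries);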
1586 
1587 template <typename T>
1588 void GaussianProcess<T>::batchInterpolate(ndarray::Array<T, 2, 2> mu, ndarray::Array<T, 2, 2> variance,
1589  ndarray::Array<T, 2, 2> const &queries) const {
1590  int i, j, ifn;
1591 
1592  ndarray::Size nQueries = queries.template getSize<0>();
1593 
1594  if (mu.template getSize<0>() != nQueries || variance.template getSize<0>() != nQueries) {
1595  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1596  "Your output arrays do not have room for all of the points at which "
1597  "you are interpolating your functions.\n");
1598  }
1599 
1600  if (mu.template getSize<1>() != static_cast<ndarray::Size>(_nFunctions) ||
1601  variance.template getSize<1>() != static_cast<ndarray::Size>(_nFunctions)) {
1602  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1603  "Your output arrays do not have room for all of the functions you are "
1604  "interpolating\n");
1605  }
1606 
1607  if (queries.template getSize<1>() != static_cast<ndarray::Size>(_dimensions)) {
1608  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1609  "The points at which you are interpolating your functions have the "
1610  "wrong dimensionality.\n");
1611  }
1612 
1613  T fbar;
1614  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> batchCovariance, batchbb, batchxx;
1615  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> queryCovariance;
1616  Eigen::LDLT<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > ldlt;
1617 
1618  ndarray::Array<T, 1, 1> v1;
1619 
1620  _timer.start();
1621 
1622  v1 = allocate(ndarray::makeVector(_dimensions));
1623  batchbb.resize(_npts, 1);
1624  batchxx.resize(_npts, 1);
1625  batchCovariance.resize(_npts, _npts);
1626  queryCovariance.resize(_npts, 1);
1627 
1628  for (i = 0; i < _npts; i++) {
1629  batchCovariance(i, i) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(i)) + _lambda;
1630  for (j = i + 1; j < _npts; j++) {
1631  batchCovariance(i, j) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(j));
1632  batchCovariance(j, i) = batchCovariance(i, j);
1633  }
1634  }
1635 
1636  _timer.addToIteration();
1637 
1638  ldlt.compute(batchCovariance);
1639 
1640  _timer.addToEigen();
1641 
1642  for (ifn = 0; ifn < _nFunctions; ifn++) {
1643  fbar = 0.0;
1644  for (i = 0; i < _npts; i++) {
1645  fbar += _function[i][ifn];
1646  }
1647  fbar = fbar / T(_npts);
1648  _timer.addToIteration();
1649 
1650  for (i = 0; i < _npts; i++) {
1651  batchbb(i, 0) = _function[i][ifn] - fbar;
1652  }
1653  batchxx = ldlt.solve(batchbb);
1654  _timer.addToEigen();
1655 
1656  for (ndarray::Size ii = 0; ii < nQueries; ii++) {
1657  for (i = 0; i < _dimensions; i++) v1[i] = queries[ii][i];
1658  if (_useMaxMin == 1) {
1659  for (i = 0; i < _dimensions; i++) v1[i] = (v1[i] - _min[i]) / (_max[i] - _min[i]);
1660  }
1661  mu[ii][ifn] = fbar;
1662  for (i = 0; i < _npts; i++) {
1663  mu[ii][ifn] += batchxx(i) * (*_covariogram)(v1, _kdTree.getData(i));
1664  }
1665  }
1666 
1667  } // ifn = 0 to _nFunctions
1668 
1669  _timer.addToIteration();
1670  for (ndarray::Size ii = 0; ii < nQueries; ii++) {
1671  for (i = 0; i < _dimensions; i++) v1[i] = queries[ii][i];
1672  if (_useMaxMin == 1) {
1673  for (i = 0; i < _dimensions; i++) v1[i] = (v1[i] - _min[i]) / (_max[i] - _min[i]);
1674  }
1675 
1676  for (i = 0; i < _npts; i++) {
1677  batchbb(i, 0) = (*_covariogram)(v1, _kdTree.getData(i));
1678  queryCovariance(i, 0) = batchbb(i, 0);
1679  }
1680  batchxx = ldlt.solve(batchbb);
1681 
1682  variance[ii][0] = (*_covariogram)(v1, v1) + _lambda;
1683 
1684  for (i = 0; i < _npts; i++) {
1685  variance[ii][0] -= queryCovariance(i, 0) * batchxx(i);
1686  }
1687 
1688  variance[ii][0] = variance[ii][0] * _krigingParameter;
1689  for (i = 1; i < _nFunctions; i++) variance[ii][i] = variance[ii][0];
1690  }
1691  _timer.addToVariance();
1692  _timer.addToTotal(nQueries);
1693 }
1694 
1695 template <typename T>
1696 void GaussianProcess<T>::batchInterpolate(ndarray::Array<T, 1, 1> mu,
1697  ndarray::Array<T, 2, 2> const &queries) const {
1698  int i, j;
1699 
1700  ndarray::Size nQueries = queries.template getSize<0>();
1701 
1702  if (_nFunctions != 1) {
1703  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1704  "Your output array does not have enough room for all of the functions "
1705  "you are trying to interpolate.\n");
1706  }
1707 
1708  if (queries.template getSize<1>() != static_cast<ndarray::Size>(_dimensions)) {
1709  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1710  "The points at which you are trying to interpolate your function are "
1711  "of the wrong dimensionality.\n");
1712  }
1713 
1714  if (mu.getNumElements() != nQueries) {
1715  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1716  "Your output array does not have enough room for all of the points "
1717  "at which you are trying to interpolate your function.\n");
1718  }
1719 
1720  T fbar;
1721  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> batchCovariance, batchbb, batchxx;
1722  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> queryCovariance;
1723  Eigen::LDLT<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > ldlt;
1724 
1725  ndarray::Array<T, 1, 1> v1;
1726 
1727  _timer.start();
1728 
1729  v1 = allocate(ndarray::makeVector(_dimensions));
1730 
1731  batchbb.resize(_npts, 1);
1732  batchxx.resize(_npts, 1);
1733  batchCovariance.resize(_npts, _npts);
1734  queryCovariance.resize(_npts, 1);
1735 
1736  for (i = 0; i < _npts; i++) {
1737  batchCovariance(i, i) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(i)) + _lambda;
1738  for (j = i + 1; j < _npts; j++) {
1739  batchCovariance(i, j) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(j));
1740  batchCovariance(j, i) = batchCovariance(i, j);
1741  }
1742  }
1743  _timer.addToIteration();
1744 
1745  ldlt.compute(batchCovariance);
1746 
1747  fbar = 0.0;
1748  for (i = 0; i < _npts; i++) {
1749  fbar += _function[i][0];
1750  }
1751  fbar = fbar / T(_npts);
1752 
1753  for (i = 0; i < _npts; i++) {
1754  batchbb(i, 0) = _function[i][0] - fbar;
1755  }
1756  batchxx = ldlt.solve(batchbb);
1757  _timer.addToEigen();
1758 
1759  for (ndarray::Size ii = 0; ii < nQueries; ii++) {
1760  for (i = 0; i < _dimensions; i++) v1[i] = queries[ii][i];
1761  if (_useMaxMin == 1) {
1762  for (i = 0; i < _dimensions; i++) v1[i] = (v1[i] - _min[i]) / (_max[i] - _min[i]);
1763  }
1764 
1765  mu(ii) = fbar;
1766  for (i = 0; i < _npts; i++) {
1767  mu(ii) += batchxx(i) * (*_covariogram)(v1, _kdTree.getData(i));
1768  }
1769  }
1770  _timer.addToIteration();
1771  _timer.addToTotal(nQueries);
1772 }
1773 
1774 template <typename T>
1775 void GaussianProcess<T>::batchInterpolate(ndarray::Array<T, 2, 2> mu,
1776  ndarray::Array<T, 2, 2> const &queries) const {
1777  int i, j, ifn;
1778 
1779  ndarray::Size nQueries = queries.template getSize<0>();
1780 
1781  if (mu.template getSize<0>() != nQueries) {
1782  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1783  "Your output array does not have enough room for all of the points "
1784  "at which you want to interpolate your functions.\n");
1785  }
1786 
1787  if (mu.template getSize<1>() != static_cast<ndarray::Size>(_nFunctions)) {
1788  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1789  "Your output array does not have enough room for all of the functions "
1790  "you are trying to interpolate.\n");
1791  }
1792 
1793  if (queries.template getSize<1>() != static_cast<ndarray::Size>(_dimensions)) {
1794  throw LSST_EXCEPT(lsst::pex::exceptions::InvalidParameterError,
1795  "The points at which you are interpolating your functions do not "
1796  "have the correct dimensionality.\n");
1797  }
1798 
1799  T fbar;
1800  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> batchCovariance, batchbb, batchxx;
1801  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> queryCovariance;
1802  Eigen::LDLT<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > ldlt;
1803 
1804  ndarray::Array<T, 1, 1> v1;
1805 
1806  _timer.start();
1807 
1808  v1 = allocate(ndarray::makeVector(_dimensions));
1809 
1810  batchbb.resize(_npts, 1);
1811  batchxx.resize(_npts, 1);
1812  batchCovariance.resize(_npts, _npts);
1813  queryCovariance.resize(_npts, 1);
1814 
1815  for (i = 0; i < _npts; i++) {
1816  batchCovariance(i, i) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(i)) + _lambda;
1817  for (j = i + 1; j < _npts; j++) {
1818  batchCovariance(i, j) = (*_covariogram)(_kdTree.getData(i), _kdTree.getData(j));
1819  batchCovariance(j, i) = batchCovariance(i, j);
1820  }
1821  }
1822 
1823  _timer.addToIteration();
1824 
1825  ldlt.compute(batchCovariance);
1826 
1827  _timer.addToEigen();
1828 
1829  for (ifn = 0; ifn < _nFunctions; ifn++) {
1830  fbar = 0.0;
1831  for (i = 0; i < _npts; i++) {
1832  fbar += _function[i][ifn];
1833  }
1834  fbar = fbar / T(_npts);
1835 
1836  _timer.addToIteration();
1837 
1838  for (i = 0; i < _npts; i++) {
1839  batchbb(i, 0) = _function[i][ifn] - fbar;
1840  }
1841  batchxx = ldlt.solve(batchbb);
1842  _timer.addToEigen();
1843 
1844  for (ndarray::Size ii = 0; ii < nQueries; ii++) {
1845  for (i = 0; i < _dimensions; i++) v1[i] = queries[ii][i];
1846  if (_useMaxMin == 1) {
1847  for (i = 0; i < _dimensions; i++) v1[i] = (v1[i] - _min[i]) / (_max[i] - _min[i]);
1848  }
1849 
1850  mu[ii][ifn] = fbar;
1851  for (i = 0; i < _npts; i++) {
1852  mu[ii][ifn] += batchxx(i) * (*_covariogram)(v1, _kdTree.getData(i));
1853  }
1854  }
1855 
1856  } // ifn = 0 through _nFunctions
1857 
1858  _timer.addToTotal(nQueries);
1859 }
1860 
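// The vector-of-functions overload repeats the same algorithm but amortizes the
// expensive step: the LDLT factorization of the covariance matrix is computed
// once and reused for one solve per function index ifn, so the cost is a single
// O(_npts^3) factorization plus _nFunctions O(_npts^2) solves rather than
// _nFunctions full factorizations.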
1861 template <typename T>
1862 void GaussianProcess<T>::addPoint(ndarray::Array<T, 1, 1> const &vin, T f) {
1863  int i, j;
1864 
1865  if (_nFunctions != 1) {
1866  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1867  "You are calling the wrong addPoint; you need a vector of functions\n");
1868  }
1869 
1870  if (vin.getNumElements() != static_cast<ndarray::Size>(_dimensions)) {
1871  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1872  "You are trying to add a point of the wrong dimensionality to "
1873  "your GaussianProcess.\n");
1874  }
1875 
1876  ndarray::Array<T, 1, 1> v;
1877  v = allocate(ndarray::makeVector(_dimensions));
1878 
1879  for (i = 0; i < _dimensions; i++) {
1880  v[i] = vin[i];
1881  if (_useMaxMin == 1) {
1882  v[i] = (v[i] - _min[i]) / (_max[i] - _min[i]);
1883  }
1884  }
1885 
1886  if (_npts == _room) {
1887  ndarray::Array<T, 2, 2> buff;
1888  buff = allocate(ndarray::makeVector(_npts, _nFunctions));
1889  buff.deep() = _function;
1890 
1891  _room += _roomStep;
1892  _function = allocate(ndarray::makeVector(_room, _nFunctions));
1893  for (i = 0; i < _npts; i++) {
1894  for (j = 0; j < _nFunctions; j++) {
1895  _function[i][j] = buff[i][j];
1896  }
1897  }
1898  }
1899 
1900  _function[_npts][0] = f;
1901 
1902  _kdTree.addPoint(v);
1903  _npts = _kdTree.getNPoints();
1904 }
1905 
1906 template <typename T>
1907 void GaussianProcess<T>::addPoint(ndarray::Array<T, 1, 1> const &vin, ndarray::Array<T, 1, 1> const &f) {
1908  if (vin.getNumElements() != static_cast<ndarray::Size>(_dimensions)) {
1909  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1910  "You are trying to add a point of the wrong dimensionality to "
1911  "your GaussianProcess.\n");
1912  }
1913 
1914  if (f.template getSize<0>() != static_cast<ndarray::Size>(_nFunctions)) {
1915  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1916  "You are not adding the correct number of function values to "
1917  "your GaussianProcess.\n");
1918  }
1919 
1920  int i, j;
1921 
1922  ndarray::Array<T, 1, 1> v;
1923  v = allocate(ndarray::makeVector(_dimensions));
1924 
1925  for (i = 0; i < _dimensions; i++) {
1926  v[i] = vin[i];
1927  if (_useMaxMin == 1) {
1928  v[i] = (v[i] - _min[i]) / (_max[i] - _min[i]);
1929  }
1930  }
1931 
1932  if (_npts == _room) {
1933  ndarray::Array<T, 2, 2> buff;
1934  buff = allocate(ndarray::makeVector(_npts, _nFunctions));
1935  buff.deep() = _function;
1936 
1937  _room += _roomStep;
1938  _function = allocate(ndarray::makeVector(_room, _nFunctions));
1939  for (i = 0; i < _npts; i++) {
1940  for (j = 0; j < _nFunctions; j++) {
1941  _function[i][j] = buff[i][j];
1942  }
1943  }
1944  }
1945  for (i = 0; i < _nFunctions; i++) _function[_npts][i] = f[i];
1946 
1947  _kdTree.addPoint(v);
1948  _npts = _kdTree.getNPoints();
1949 }
1950 
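// Both addPoint overloads grow the function-value table by _roomStep rows when
// _npts reaches _room, copying the existing values into the new allocation, and
// rescale the incoming point by the stored _min/_max bounds when _useMaxMin is
// set, matching the normalization used everywhere else in this class. A hedged
// usage sketch (gp and the sizes are illustrative assumptions):
//
//   ndarray::Array<double, 1, 1> pt = ndarray::allocate(ndarray::makeVector(2));
//   pt[0] = 0.3;
//   pt[1] = 0.7;
//   gp.addPoint(pt, 1.25);  // append the sample f(pt) = 1.25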
1951 template <typename T>
1952 void GaussianProcess<T>::removePoint(int dex) {
1953  int i, j;
1954 
1955  _kdTree.removePoint(dex);
1956 
1957  for (i = dex; i < _npts - 1; i++) {  // stop at _npts - 1 so that i + 1 stays in bounds
1958  for (j = 0; j < _nFunctions; j++) {
1959  _function[i][j] = _function[i + 1][j];
1960  }
1961  }
1962  _npts = _kdTree.getNPoints();
1963 }
1964 
1965 template <typename T>
1966 void GaussianProcess<T>::setKrigingParameter(T kk) {
1967  _krigingParameter = kk;
1968 }
1969 
1970 template <typename T>
1971 void GaussianProcess<T>::setCovariogram(std::shared_ptr<Covariogram<T> > const &covar) {
1972  _covariogram = covar;
1973 }
1974 
1975 template <typename T>
1976 void GaussianProcess<T>::setLambda(T lambda) {
1977  _lambda = lambda;
1978 }
1979 
1980 template <typename T>
1981 GaussianProcessTimer &GaussianProcess<T>::getTimer() const {
1982  return _timer;
1983 }
1984 
1985 template <typename T>
1986 Covariogram<T>::~Covariogram() = default;
1987 
1988 template <typename T>
1989 T Covariogram<T>::operator()(ndarray::Array<const T, 1, 1> const &p1,
1990  ndarray::Array<const T, 1, 1> const &p2) const {
1991  throw LSST_EXCEPT(lsst::pex::exceptions::RuntimeError,
1992  "Covariogram base class operator() called; use a concrete "
1993  "subclass such as SquaredExpCovariogram.\n");
1994 }
1995 
1996 template <typename T>
1997 SquaredExpCovariogram<T>::~SquaredExpCovariogram() = default;
1998 
1999 template <typename T>
2000 SquaredExpCovariogram<T>::SquaredExpCovariogram() {
2001  _ellSquared = 1.0;
2002 }
2003 
2004 template <typename T>
2005 void SquaredExpCovariogram<T>::setEllSquared(double ellSquared) {
2006  _ellSquared = ellSquared;
2007 }
2008 
2009 template <typename T>
2010 T SquaredExpCovariogram<T>::operator()(ndarray::Array<const T, 1, 1> const &p1,
2011  ndarray::Array<const T, 1, 1> const &p2) const {
2012  T d = 0.0;
2013  for (ndarray::Size i = 0; i < p1.template getSize<0>(); i++) {
2014  d += (p1[i] - p2[i]) * (p1[i] - p2[i]);
2015  }
2016 
2017  d = d / _ellSquared;
2018  return T(exp(-0.5 * d));
2019 }
2020 
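// The squared-exponential covariogram evaluates
// k(p1, p2) = exp(-|p1 - p2|^2 / (2 * _ellSquared)), so _ellSquared acts as the
// squared correlation length: points separated by much less than ell are highly
// correlated, and points separated by much more are effectively independent.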
2021 template <typename T>
2022 NeuralNetCovariogram<T>::~NeuralNetCovariogram() = default;
2023 
2024 template <typename T>
2025 NeuralNetCovariogram<T>::NeuralNetCovariogram() {
2026  _sigma0 = 1.0;
2027  _sigma1 = 1.0;
2028 }
2029 
2030 template <typename T>
2031 T NeuralNetCovariogram<T>::operator()(ndarray::Array<const T, 1, 1> const &p1,
2032  ndarray::Array<const T, 1, 1> const &p2) const {
2033  int i, dim;
2034  double num, denom1, denom2, arg;
2035 
2036  dim = p1.template getSize<0>();
2037 
2038  num = 2.0 * _sigma0;
2039  denom1 = 1.0 + 2.0 * _sigma0;
2040  denom2 = 1.0 + 2.0 * _sigma0;
2041 
2042  for (i = 0; i < dim; i++) {
2043  num += 2.0 * p1[i] * p2[i] * _sigma1;
2044  denom1 += 2.0 * p1[i] * p1[i] * _sigma1;
2045  denom2 += 2.0 * p2[i] * p2[i] * _sigma1;
2046  }
2047 
2048  arg = num / ::sqrt(denom1 * denom2);
2049  return T(2.0 * (::asin(arg)) / M_PI);
2050 }
2051 
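// This is the arcsine covariance of a one-hidden-layer neural network in the
// limit of infinitely many hidden units (see the neural-network covariance
// function in Rasmussen & Williams, "Gaussian Processes for Machine Learning",
// ch. 4), with _sigma0 playing the role of the bias variance and _sigma1 the
// variance of the input-to-hidden weights.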
2052 template <typename T>
2053 void NeuralNetCovariogram<T>::setSigma0(double sigma0) {
2054  _sigma0 = sigma0;
2055 }
2056 
2057 template <typename T>
2058 void NeuralNetCovariogram<T>::setSigma1(double sigma1) {
2059  _sigma1 = sigma1;
2060 }
2061 
2062 #define INSTANTIATEGP(T) \
2063  template class KdTree<T>; \
2064  template class GaussianProcess<T>; \
2065  template class Covariogram<T>; \
2066  template class SquaredExpCovariogram<T>; \
2067  template class NeuralNetCovariogram<T>;
2068 
2069 INSTANTIATEGP(double);
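// Only the double-precision specialization is instantiated; a build needing
// single precision would presumably add INSTANTIATEGP(float); here as well.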
2070 } // namespace math
2071 } // namespace afw
2072 } // namespace lsst