LSSTApplications  10.0-2-g4f67435,11.0.rc2+1,11.0.rc2+12,11.0.rc2+3,11.0.rc2+4,11.0.rc2+5,11.0.rc2+6,11.0.rc2+7,11.0.rc2+8
LSSTDataManagementBasePackage
BasicConvolve.cc
Go to the documentation of this file.
1 // -*- LSST-C++ -*-
2 
3 /*
4  * LSST Data Management System
5  * Copyright 2008, 2009, 2010 LSST Corporation.
6  *
7  * This product includes software developed by the
8  * LSST Project (http://www.lsst.org/).
9  *
10  * This program is free software: you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation, either version 3 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the LSST License Statement and
21  * the GNU General Public License along with this program. If not,
22  * see <http://www.lsstcorp.org/LegalNotices/>.
23  */
24 
34 #include <algorithm>
35 #include <cmath>
36 #include <sstream>
37 #include <vector>
38 
39 #include "boost/cstdint.hpp"
40 
41 #include "lsst/pex/exceptions.h"
42 #include "lsst/pex/logging/Trace.h"
45 #include "lsst/afw/math/Kernel.h"
46 #include "lsst/afw/geom.h"
51 
52 namespace pexExcept = lsst::pex::exceptions;
53 namespace pexLog = lsst::pex::logging;
54 namespace afwGeom = lsst::afw::geom;
55 namespace afwImage = lsst::afw::image;
56 namespace afwMath = lsst::afw::math;
57 namespace mathDetail = lsst::afw::math::detail;
58 
59 namespace {
60 
61  /*
62  * @brief Compute the dot product of a kernel row or column and the overlapping portion of an %image
63  *
64  * @return computed dot product
65  *
66  * The pixel computed belongs at position imageIter + kernel center.
67  *
68  * @todo get rid of KernelPixelT parameter if possible by not computing local variable kVal,
69  * or by using iterator traits:
70  * typedef typename std::iterator_traits<KernelIterT>::value_type KernelPixel;
71  * Unfortunately, in either case compilation fails with this sort of message:
72 \verbatim
73 include/lsst/afw/image/Pixel.h: In instantiation of ‘lsst::afw::image::pixel::exprTraits<boost::gil::pixel<double, boost::gil::layout<boost::mpl::vector1<boost::gil::gray_color_t>, boost::mpl::range_c<int, 0, 1> > > >’:
74 include/lsst/afw/image/Pixel.h:385: instantiated from ‘lsst::afw::image::pixel::BinaryExpr<lsst::afw::image::pixel::Pixel<int, short unsigned int, float>, boost::gil::pixel<double, boost::gil::layout<boost::mpl::vector1<boost::gil::gray_color_t>, boost::mpl::range_c<int, 0, 1> > >, std::multiplies<int>, lsst::afw::image::pixel::bitwise_or<short unsigned int>, lsst::afw::image::pixel::variance_multiplies<float> >’
75 src/math/ConvolveImage.cc:59: instantiated from ‘OutPixelT<unnamed>::kernelDotProduct(ImageIterT, KernelIterT, int) [with OutPixelT = lsst::afw::image::pixel::SinglePixel<int, short unsigned int, float>, ImageIterT = lsst::afw::image::MaskedImage<int, short unsigned int, float>::const_MaskedImageIterator<boost::gil::gray32s_pixel_t*, boost::gil::gray16_pixel_t*, boost::gil::gray32f_noscale_pixel_t*>, KernelIterT = const boost::gil::gray64f_noscalec_pixel_t*]’
76 src/math/ConvolveImage.cc:265: instantiated from ‘void lsst::afw::math::basicConvolve(OutImageT&, const InImageT&, const lsst::afw::math::Kernel&, bool) [with OutImageT = lsst::afw::image::MaskedImage<int, short unsigned int, float>, InImageT = lsst::afw::image::MaskedImage<int, short unsigned int, float>]’
77 src/math/ConvolveImage.cc:451: instantiated from ‘void lsst::afw::math::convolve(OutImageT&, const InImageT&, const KernelT&, bool, int) [with OutImageT = lsst::afw::image::MaskedImage<int, short unsigned int, float>, InImageT = lsst::afw::image::MaskedImage<int, short unsigned int, float>, KernelT = lsst::afw::math::AnalyticKernel]’
78 src/math/ConvolveImage.cc:587: instantiated from here
79 include/lsst/afw/image/Pixel.h:210: error: no type named ‘ImagePixelT’ in ‘struct boost::gil::pixel<double, boost::gil::layout<boost::mpl::vector1<boost::gil::gray_color_t>, boost::mpl::range_c<int, 0, 1> > >’
80 include/lsst/afw/image/Pixel.h:211: error: no type named ‘MaskPixelT’ in ‘struct boost::gil::pixel<double, boost::gil::layout<boost::mpl::vector1<boost::gil::gray_color_t>, boost::mpl::range_c<int, 0, 1> > >’
81 include/lsst/afw/image/Pixel.h:212: error: no type named ‘VariancePixelT’ in ‘struct boost::gil::pixel<double, boost::gil::layout<boost::mpl::vector1<boost::gil::gray_color_t>, boost::mpl::range_c<int, 0, 1> > >’
82 \endverbatim
83  */
84  template <typename OutPixelT, typename ImageIterT, typename KernelIterT, typename KernelPixelT>
85  inline OutPixelT kernelDotProduct(
86  ImageIterT imageIter,
87  KernelIterT kernelIter,
88  int kWidth)
89  {
90  OutPixelT outPixel(0);
91  for (int x = 0; x < kWidth; ++x, ++imageIter, ++kernelIter) {
92  KernelPixelT kVal = *kernelIter;
93  if (kVal != 0) {
94  outPixel += static_cast<OutPixelT>((*imageIter) * kVal);
95  }
96  }
97  return outPixel;
98  }
99 
110  void CheckForceGpuOnNoGpu(afwMath::ConvolutionControl const& convolutionControl)
111  {
112  #ifndef GPU_BUILD
113  if (lsst::afw::gpu::isGpuEnabled()==true
114  && convolutionControl.getDevicePreference()==lsst::afw::gpu::USE_GPU) {
115  throw LSST_EXCEPT(pexExcept::RuntimeError,
116  "Gpu acceleration must be enabled at compiling for lsst::afw::gpu::USE_GPU");
117  }
118  #endif
119  }
129  void CheckForceGpuOnUnsupportedKernel(afwMath::ConvolutionControl const& convolutionControl)
130  {
131  if (lsst::afw::gpu::isGpuEnabled()==true
132  && convolutionControl.getDevicePreference()==lsst::afw::gpu::USE_GPU) {
133  throw LSST_EXCEPT(pexExcept::InvalidParameterError, "Gpu can not process this type of kernel");
134  }
135  }
136 
137 } // anonymous namespace
138 
158 template <typename OutImageT, typename InImageT>
// NOTE(review): the generated listing dropped original line 159 (the function
// declarator, presumably `void mathDetail::basicConvolve(`) and original line 168
// (the first `if (IS_INSTANCE(kernel, afwMath::DeltaFunctionKernel)) {` test that
// the TTrace message below implies) -- confirm against the repository source.
// Purpose: dispatch to the most specific basicConvolve overload for the dynamic
// kernel type, falling back to interpolation or brute force for the general case.
 160  OutImageT &convolvedImage,
 161  InImageT const& inImage,
 162  afwMath::Kernel const& kernel,
 163  afwMath::ConvolutionControl const& convolutionControl)
 164 {
 165  // Because convolve isn't a method of Kernel we can't always use Kernel's vtbl to dynamically
 166  // dispatch the correct version of basicConvolve. The case that fails is convolving with a kernel
 167  // obtained from a pointer or reference to a Kernel (base class), e.g. as used in LinearCombinationKernel.
 169  pexLog::TTrace<4>("lsst.afw.math.convolve",
 170  "generic basicConvolve: dispatch to DeltaFunctionKernel basicConvolve");
 171  mathDetail::basicConvolve(convolvedImage, inImage,
 172  *dynamic_cast<afwMath::DeltaFunctionKernel const*>(&kernel),
 173  convolutionControl);
 174  return;
 175  } else if (IS_INSTANCE(kernel, afwMath::SeparableKernel)) {
 176  pexLog::TTrace<4>("lsst.afw.math.convolve",
 177  "generic basicConvolve: dispatch to SeparableKernel basicConvolve");
 178  mathDetail::basicConvolve(convolvedImage, inImage,
 179  *dynamic_cast<afwMath::SeparableKernel const*>(&kernel),
 180  convolutionControl);
 181  return;
 182  } else if (IS_INSTANCE(kernel, afwMath::LinearCombinationKernel) && kernel.isSpatiallyVarying()) {
 183  pexLog::TTrace<4>("lsst.afw.math.convolve",
 184  "generic basicConvolve: dispatch to spatially varying LinearCombinationKernel basicConvolve");
 185  mathDetail::basicConvolve(convolvedImage, inImage,
 186  *dynamic_cast<afwMath::LinearCombinationKernel const*>(&kernel),
 187  convolutionControl);
 188  return;
 189  }
 190  // OK, use general (and slower) form
// Spatially varying kernels may be approximated by linear interpolation when the
// control object allows it (maxInterpolationDistance > 1); otherwise brute force.
 191  if (kernel.isSpatiallyVarying() && (convolutionControl.getMaxInterpolationDistance() > 1)) {
 192  // use linear interpolation
 193  pexLog::TTrace<3>("lsst.afw.math.convolve", "generic basicConvolve: using linear interpolation");
 194  mathDetail::convolveWithInterpolation(convolvedImage, inImage, kernel, convolutionControl);
 195 
 196  } else {
 197  // use brute force
 198  pexLog::TTrace<3>("lsst.afw.math.convolve", "generic basicConvolve: using brute force");
 199  mathDetail::convolveWithBruteForce(convolvedImage, inImage, kernel,convolutionControl);
 200  }
 201 }
202 
210 template <typename OutImageT, typename InImageT>
// NOTE(review): the generated listing dropped original line 211 (the declarator
// line for this DeltaFunctionKernel overload) -- confirm against the repository.
// Purpose: convolution with a delta-function kernel is just a shifted copy of the
// input, so copy each row offset by the kernel's non-zero pixel position.
 212  OutImageT& convolvedImage,
 213  InImageT const& inImage,
 214  afwMath::DeltaFunctionKernel const &kernel,
 215  afwMath::ConvolutionControl const &convolutionControl)
 216 {
 217  assert (!kernel.isSpatiallyVarying());
 218  assertDimensionsOK(convolvedImage, inImage, kernel);
 219 
 220  CheckForceGpuOnUnsupportedKernel(convolutionControl);
 221 
 222  int const mImageWidth = inImage.getWidth(); // size of input region
 223  int const mImageHeight = inImage.getHeight();
 224  int const cnvWidth = mImageWidth + 1 - kernel.getWidth();
 225  int const cnvHeight = mImageHeight + 1 - kernel.getHeight();
 226  int const cnvStartX = kernel.getCtrX();
 227  int const cnvStartY = kernel.getCtrY();
// inStartX/inStartY: position of the kernel's single non-zero pixel; reading the
// input from here realizes the shift.
 228  int const inStartX = kernel.getPixel().getX();
 229  int const inStartY = kernel.getPixel().getY();
 230 
 231  pexLog::TTrace<3>("lsst.afw.math.convolve", "DeltaFunctionKernel basicConvolve");
 232 
// Row-by-row copy of the overlapping region; edge pixels are not written.
 233  for (int i = 0; i < cnvHeight; ++i) {
 234  typename InImageT::x_iterator inPtr = inImage.x_at(inStartX, i + inStartY);
 235  for (typename OutImageT::x_iterator cnvPtr = convolvedImage.x_at(cnvStartX, i + cnvStartY),
 236  cnvEnd = cnvPtr + cnvWidth; cnvPtr != cnvEnd; ++cnvPtr, ++inPtr){
 237  *cnvPtr = *inPtr;
 238  }
 239  }
 240 }
241 
260 template <typename OutImageT, typename InImageT>
// NOTE(review): the generated listing dropped original lines 261 (the declarator
// for this LinearCombinationKernel overload), 275, 278 and 286.  Lines 278/286
// almost certainly declared `ConvolveGpuStatus::ReturnCode rc = ...` -- the `rc`
// tested below has no visible declaration here.  Confirm against the repository.
 262  OutImageT& convolvedImage,
 263  InImageT const& inImage,
 264  afwMath::LinearCombinationKernel const& kernel,
 265  afwMath::ConvolutionControl const & convolutionControl)
 266 {
 267  if (!kernel.isSpatiallyVarying()) {
 268  // use the standard algorithm for the spatially invariant case
 269  pexLog::TTrace<3>("lsst.afw.math.convolve",
 270  "basicConvolve for LinearCombinationKernel: spatially invariant; using brute force");
 271  return mathDetail::convolveWithBruteForce(convolvedImage, inImage, kernel,
 272  convolutionControl.getDoNormalize());
 273  } else {
 274  CheckForceGpuOnNoGpu(convolutionControl);
// AUTO_WITH_CPU_FALLBACK: try the GPU path but swallow GPU/memory errors and
// fall through to the CPU implementation below.
 276  if (convolutionControl.getDevicePreference() == lsst::afw::gpu::AUTO_WITH_CPU_FALLBACK) {
 277  try {
 279  mathDetail::convolveLinearCombinationGPU(convolvedImage,inImage,kernel,
 280  convolutionControl);
 281  if (rc == mathDetail::ConvolveGpuStatus::OK) return;
 282  } catch(lsst::afw::gpu::GpuMemoryError) { }
 283  catch(pexExcept::MemoryError) { }
 284  catch(lsst::afw::gpu::GpuRuntimeError) { }
 285  } else if (convolutionControl.getDevicePreference() != lsst::afw::gpu::USE_CPU) {
 287  mathDetail::convolveLinearCombinationGPU(convolvedImage,inImage,kernel,
 288  convolutionControl);
 289  if (rc == mathDetail::ConvolveGpuStatus::OK) return;
// USE_GPU is a hard requirement: failing the GPU path is an error, not a fallback.
 290  if (convolutionControl.getDevicePreference() == lsst::afw::gpu::USE_GPU) {
 291  throw LSST_EXCEPT(pexExcept::RuntimeError, "Gpu will not process this kernel");
 292  }
 293  }
 294  }
 295 
 296  // refactor the kernel if this is reasonable and possible;
 297  // then use the standard algorithm for the spatially varying case
 298  PTR(afwMath::Kernel) refKernelPtr; // possibly refactored version of kernel
 299  if (static_cast<int>(kernel.getNKernelParameters()) > kernel.getNSpatialParameters()) {
 300  // refactoring will speed convolution, so try it
 301  refKernelPtr = kernel.refactor();
 302  if (!refKernelPtr) {
 303  refKernelPtr = kernel.clone();
 304  }
 305  } else {
 306  // too few basis kernels for refactoring to be worthwhile
 307  refKernelPtr = kernel.clone();
 308  }
 309  if (convolutionControl.getMaxInterpolationDistance() > 1) {
 310  pexLog::TTrace<3>("lsst.afw.math.convolve",
 311  "basicConvolve for LinearCombinationKernel: using interpolation");
 312  return mathDetail::convolveWithInterpolation(convolvedImage, inImage, *refKernelPtr, convolutionControl);
 313  } else {
 314  pexLog::TTrace<3>("lsst.afw.math.convolve",
 315  "basicConvolve for LinearCombinationKernel: maxInterpolationError < 0; using brute force");
 316  return mathDetail::convolveWithBruteForce(convolvedImage, inImage, *refKernelPtr,
 317  convolutionControl.getDoNormalize());
 318  }
 319  }
 320 }
321 
329 template <typename OutImageT, typename InImageT>
// NOTE(review): the generated listing dropped original line 330 (the declarator
// line for this SeparableKernel overload) -- confirm against the repository.
// Purpose (from the visible body): convolve with a separable kernel by applying
// the kernel's x vector then y vector; only the "good" region from
// kernel.shrinkBBox() is written.
 331  OutImageT& convolvedImage,
 332  InImageT const& inImage,
 333  afwMath::SeparableKernel const &kernel,
 334  afwMath::ConvolutionControl const & convolutionControl)
 335 {
 336  typedef typename afwMath::Kernel::Pixel KernelPixel;
 337  typedef typename std::vector<KernelPixel> KernelVector;
 338  typedef KernelVector::const_iterator KernelIterator;
 339  typedef typename InImageT::const_x_iterator InXIterator;
 340  typedef typename InImageT::const_xy_locator InXYLocator;
 341  typedef typename OutImageT::x_iterator OutXIterator;
 342  typedef typename OutImageT::y_iterator OutYIterator;
 343  typedef typename OutImageT::SinglePixel OutPixel;
 344 
 345  assertDimensionsOK(convolvedImage, inImage, kernel);
 346 
 347  CheckForceGpuOnUnsupportedKernel(convolutionControl);
 348 
// NOTE(review): `image::LOCAL` -- the rest of this file uses the alias
// `afwImage`; the `afw` prefix was probably lost in extraction.  Verify.
 349  afwGeom::Box2I const fullBBox = inImage.getBBox(image::LOCAL);
 350  afwGeom::Box2I const goodBBox = kernel.shrinkBBox(fullBBox);
 351 
 352  KernelVector kernelXVec(kernel.getWidth());
 353  KernelVector kernelYVec(kernel.getHeight());
 354 
 355  if (kernel.isSpatiallyVarying()) {
 356  pexLog::TTrace<3>("lsst.afw.math.convolve",
 357  "SeparableKernel basicConvolve: kernel is spatially varying");
 358 
 359  for (int cnvY = goodBBox.getMinY(); cnvY <= goodBBox.getMaxY(); ++cnvY) {
 360  double const rowPos = inImage.indexToPosition(cnvY, afwImage::Y);
 361 
 362  InXYLocator inImLoc = inImage.xy_at(0, cnvY - goodBBox.getMinY());
 363  OutXIterator cnvXIter = convolvedImage.row_begin(cnvY) + goodBBox.getMinX();
 364  for (int cnvX = goodBBox.getMinX(); cnvX <= goodBBox.getMaxX();
 365  ++cnvX, ++inImLoc.x(), ++cnvXIter) {
 366  double const colPos = inImage.indexToPosition(cnvX, afwImage::X);
 367 
// kSum (the kernel sum returned by computeVectors) is used below as the
// normalization divisor.
 368  KernelPixel kSum = kernel.computeVectors(kernelXVec, kernelYVec,
 369  convolutionControl.getDoNormalize(), colPos, rowPos);
 370 
 371  // why does this trigger warnings? It did not in the past.
 372  *cnvXIter = afwMath::convolveAtAPoint<OutImageT, InImageT>(inImLoc, kernelXVec, kernelYVec);
 373  if (convolutionControl.getDoNormalize()) {
 374  *cnvXIter = *cnvXIter/kSum;
 375  }
 376  }
 377  }
 378  } else {
 379  // kernel is spatially invariant
 380  // The basic sequence:
 381  // - For each output row:
 382  // - Compute x-convolved data: a kernel height's strip of input image convolved with kernel x vector
 383  // - Compute one row of output by dotting each column of x-convolved data with the kernel y vector
 384  // The x-convolved data is stored in a kernel-height by good-width buffer.
 385  // This is circular buffer along y (to avoid shifting pixels before setting each new row);
 386  // so for each new row the kernel y vector is rotated to match the order of the x-convolved data.
 387 
 388  pexLog::TTrace<3>("lsst.afw.math.convolve",
 389  "SeparableKernel basicConvolve: kernel is spatially invariant");
 390 
 391  kernel.computeVectors(kernelXVec, kernelYVec, convolutionControl.getDoNormalize());
 392  KernelIterator const kernelXVecBegin = kernelXVec.begin();
 393  KernelIterator const kernelYVecBegin = kernelYVec.begin();
 394 
 395  // buffer for x-convolved data
 396  OutImageT buffer(afwGeom::Extent2I(goodBBox.getWidth(), kernel.getHeight()));
 397 
 398  // pre-fill x-convolved data buffer with all but one row of data
 399  int yInd = 0; // during initial fill bufY = inImageY
 400  int const yPrefillEnd = buffer.getHeight() - 1;
 401  for (; yInd < yPrefillEnd; ++yInd) {
 402  OutXIterator bufXIter = buffer.x_at(0, yInd);
 403  OutXIterator const bufXEnd = buffer.x_at(goodBBox.getWidth(), yInd);
 404  InXIterator inXIter = inImage.x_at(0, yInd);
 405  for ( ; bufXIter != bufXEnd; ++bufXIter, ++inXIter) {
 406  *bufXIter = kernelDotProduct<OutPixel, InXIterator, KernelIterator, KernelPixel>(
 407  inXIter, kernelXVecBegin, kernel.getWidth());
 408  }
 409  }
 410 
 411  // compute output pixels using the sequence described above
 412  int inY = yPrefillEnd;
 413  int bufY = yPrefillEnd;
 414  int cnvY = goodBBox.getMinY();
 415  while (true) {
 416  // fill next buffer row and compute output row
 417  InXIterator inXIter = inImage.x_at(0, inY);
 418  OutXIterator bufXIter = buffer.x_at(0, bufY);
 419  OutXIterator cnvXIter = convolvedImage.x_at(goodBBox.getMinX(), cnvY);
 420  for (int bufX = 0; bufX < goodBBox.getWidth(); ++bufX, ++cnvXIter, ++bufXIter, ++inXIter) {
 421  // note: bufXIter points to the row of the buffer that is being updated,
 422  // whereas bufYIter points to row 0 of the buffer
 423  *bufXIter = kernelDotProduct<OutPixel, InXIterator, KernelIterator, KernelPixel>(
 424  inXIter, kernelXVecBegin, kernel.getWidth());
 425 
 426  OutYIterator bufYIter = buffer.y_at(bufX, 0);
 427  *cnvXIter = kernelDotProduct<OutPixel, OutYIterator, KernelIterator, KernelPixel>(
 428  bufYIter, kernelYVecBegin, kernel.getHeight());
 429  }
 430 
 431  // test for done now, instead of the start of the loop,
 432  // to avoid an unnecessary extra rotation of the kernel Y vector
 433  if (cnvY >= goodBBox.getMaxY()) break;
 434 
 435  // update y indices, including bufY, and rotate the kernel y vector to match
 436  ++inY;
 437  bufY = (bufY + 1) % kernel.getHeight();
 438  ++cnvY;
 439  std::rotate(kernelYVec.begin(), kernelYVec.end()-1, kernelYVec.end());
 440  }
 441  }
 442 }
443 
467 template <typename OutImageT, typename InImageT>
// NOTE(review): the generated listing dropped original lines 468 (the declarator,
// presumably `void mathDetail::convolveWithBruteForce(`), 529, 532 and 540.
// Lines 532/540 almost certainly declared `ConvolveGpuStatus::ReturnCode rc = ...`
// (the `rc` tested below has no visible declaration), and 529 likely opened the
// scope that line 548 closes.  Confirm against the repository source.
 469  OutImageT &convolvedImage,
 470  InImageT const& inImage,
 471  afwMath::Kernel const& kernel,
 472  afwMath::ConvolutionControl const & convolutionControl)
 473 {
 474  bool doNormalize=convolutionControl.getDoNormalize();
 475 
 476  typedef typename afwMath::Kernel::Pixel KernelPixel;
 477  typedef afwImage::Image<KernelPixel> KernelImage;
 478 
 479  typedef typename KernelImage::const_x_iterator KernelXIterator;
 480  typedef typename KernelImage::const_xy_locator KernelXYLocator;
 481  typedef typename InImageT::const_x_iterator InXIterator;
 482  typedef typename InImageT::const_xy_locator InXYLocator;
 483  typedef typename OutImageT::x_iterator OutXIterator;
 484  typedef typename OutImageT::SinglePixel OutPixel;
 485 
 486  assertDimensionsOK(convolvedImage, inImage, kernel);
 487 
 488  int const inImageWidth = inImage.getWidth();
 489  int const inImageHeight = inImage.getHeight();
 490  int const kWidth = kernel.getWidth();
 491  int const kHeight = kernel.getHeight();
 492  int const cnvWidth = inImageWidth + 1 - kernel.getWidth();
 493  int const cnvHeight = inImageHeight + 1 - kernel.getHeight();
 494  int const cnvStartX = kernel.getCtrX();
 495  int const cnvStartY = kernel.getCtrY();
 496  int const cnvEndX = cnvStartX + cnvWidth; // end index + 1
 497  int const cnvEndY = cnvStartY + cnvHeight; // end index + 1
 498 
 499  KernelImage kernelImage(kernel.getDimensions());
 500  KernelXYLocator const kernelLoc = kernelImage.xy_at(0,0);
 501 
 502  if (kernel.isSpatiallyVarying()) {
 503  pexLog::TTrace<5>("lsst.afw.math.convolve",
 504  "convolveWithBruteForce: kernel is spatially varying");
 505 
 506  CheckForceGpuOnUnsupportedKernel(convolutionControl);
 507 
// Spatially varying: recompute the kernel image at every output pixel position.
 508  for (int cnvY = cnvStartY; cnvY != cnvEndY; ++cnvY) {
 509  double const rowPos = inImage.indexToPosition(cnvY, afwImage::Y);
 510 
 511  InXYLocator inImLoc = inImage.xy_at(0, cnvY - cnvStartY);
 512  OutXIterator cnvXIter = convolvedImage.x_at(cnvStartX, cnvY);
 513  for (int cnvX = cnvStartX; cnvX != cnvEndX; ++cnvX, ++inImLoc.x(), ++cnvXIter) {
 514  double const colPos = inImage.indexToPosition(cnvX, afwImage::X);
 515 
// computeImage is called with doNormalize=false; normalization is applied
// manually below by dividing by the returned kernel sum.
 516  KernelPixel kSum = kernel.computeImage(kernelImage, false, colPos, rowPos);
 517  *cnvXIter = afwMath::convolveAtAPoint<OutImageT, InImageT>(
 518  inImLoc, kernelLoc, kWidth, kHeight);
 519  if (doNormalize) {
 520  *cnvXIter = *cnvXIter/kSum;
 521  }
 522  }
 523  }
 524  } else {
 525  pexLog::TTrace<5>("lsst.afw.math.convolve",
 526  "convolveWithBruteForce: kernel is spatially invariant");
 527 
 528  CheckForceGpuOnNoGpu(convolutionControl);
 530  if (convolutionControl.getDevicePreference() == lsst::afw::gpu::AUTO_WITH_CPU_FALLBACK) {
 531  try {
 533  mathDetail::convolveSpatiallyInvariantGPU(convolvedImage,inImage,kernel,
 534  convolutionControl);
 535  if (rc == mathDetail::ConvolveGpuStatus::OK) return;
 536  } catch(lsst::afw::gpu::GpuMemoryError) { }
 537  catch(pexExcept::MemoryError) { }
 538  catch(lsst::afw::gpu::GpuRuntimeError) { }
 539  } else if (convolutionControl.getDevicePreference() != lsst::afw::gpu::USE_CPU) {
 541  mathDetail::convolveSpatiallyInvariantGPU(convolvedImage,inImage,kernel,
 542  convolutionControl);
 543  if (rc == mathDetail::ConvolveGpuStatus::OK) return;
 544  if (convolutionControl.getDevicePreference() == lsst::afw::gpu::USE_GPU) {
 545  throw LSST_EXCEPT(pexExcept::RuntimeError, "Gpu will not process this kernel");
 546  }
 547  }
 548  }
 549 
// Spatially invariant CPU path: compute the kernel image once, then accumulate
// row dot products (one kernel row at a time) into each output row.
 550  (void)kernel.computeImage(kernelImage, doNormalize);
 551 
 552  for (int inStartY = 0, cnvY = cnvStartY; inStartY < cnvHeight; ++inStartY, ++cnvY) {
 553  KernelXIterator kernelXIter = kernelImage.x_at(0, 0);
 554  InXIterator inXIter = inImage.x_at(0, inStartY);
 555  OutXIterator cnvXIter = convolvedImage.x_at(cnvStartX, cnvY);
 556  for (int x = 0; x < cnvWidth; ++x, ++cnvXIter, ++inXIter) {
 557  *cnvXIter = kernelDotProduct<OutPixel, InXIterator, KernelXIterator, KernelPixel>(
 558  inXIter, kernelXIter, kWidth);
 559  }
 560  for (int kernelY = 1, inY = inStartY + 1; kernelY < kHeight; ++inY, ++kernelY) {
 561  KernelXIterator kernelXIter = kernelImage.x_at(0, kernelY);
 562  InXIterator inXIter = inImage.x_at(0, inY);
 563  OutXIterator cnvXIter = convolvedImage.x_at(cnvStartX, cnvY);
 564  for (int x = 0; x < cnvWidth; ++x, ++cnvXIter, ++inXIter) {
 565  *cnvXIter += kernelDotProduct<OutPixel, InXIterator, KernelXIterator, KernelPixel>(
 566  inXIter, kernelXIter, kWidth);
 567  }
 568  }
 569  }
 570  }
 571 }
572 
573 /*
574  * Explicit instantiation
575  */
577 #define IMAGE(PIXTYPE) afwImage::Image<PIXTYPE>
578 #define MASKEDIMAGE(PIXTYPE) afwImage::MaskedImage<PIXTYPE, afwImage::MaskPixel, afwImage::VariancePixel>
// NL expands to an empty comment; it lets the multi-statement macro below read
// as separate "lines" within one macro body.
579 #define NL /* */
580 // Instantiate Image or MaskedImage versions
581 #define INSTANTIATE_IM_OR_MI(IMGMACRO, OUTPIXTYPE, INPIXTYPE) \
582  template void mathDetail::basicConvolve( \
583  IMGMACRO(OUTPIXTYPE)&, IMGMACRO(INPIXTYPE) const&, afwMath::Kernel const&, \
584  afwMath::ConvolutionControl const&); NL \
585  template void mathDetail::basicConvolve( \
586  IMGMACRO(OUTPIXTYPE)&, IMGMACRO(INPIXTYPE) const&, afwMath::DeltaFunctionKernel const&, \
587  afwMath::ConvolutionControl const&); NL \
588  template void mathDetail::basicConvolve( \
589  IMGMACRO(OUTPIXTYPE)&, IMGMACRO(INPIXTYPE) const&, afwMath::LinearCombinationKernel const&, \
590  afwMath::ConvolutionControl const&); NL \
591  template void mathDetail::basicConvolve( \
592  IMGMACRO(OUTPIXTYPE)&, IMGMACRO(INPIXTYPE) const&, afwMath::SeparableKernel const&, \
593  afwMath::ConvolutionControl const&); NL \
594  template void mathDetail::convolveWithBruteForce( \
595  IMGMACRO(OUTPIXTYPE)&, IMGMACRO(INPIXTYPE) const&, afwMath::Kernel const&, \
596  afwMath::ConvolutionControl const&);
597 // Instantiate both Image and MaskedImage versions
598 #define INSTANTIATE(OUTPIXTYPE, INPIXTYPE) \
599  INSTANTIATE_IM_OR_MI(IMAGE, OUTPIXTYPE, INPIXTYPE) \
600  INSTANTIATE_IM_OR_MI(MASKEDIMAGE, OUTPIXTYPE, INPIXTYPE)
601 
// Explicit instantiations for every supported (output, input) pixel-type pair,
// each generating both the Image and MaskedImage overload sets.
602 INSTANTIATE(double, double)
603 INSTANTIATE(double, float)
604 INSTANTIATE(double, int)
605 INSTANTIATE(double, boost::uint16_t)
606 INSTANTIATE(float, float)
607 INSTANTIATE(float, int)
608 INSTANTIATE(float, boost::uint16_t)
609 INSTANTIATE(int, int)
610 INSTANTIATE(boost::uint16_t, boost::uint16_t)
bool isGpuEnabled()
returns true if GPU acceleration is enabled
Convolution support.
An include file to include the header files for lsst::afw::geom.
Declare the Kernel class and subclasses.
ConvolveGpuStatus::ReturnCode convolveLinearCombinationGPU(lsst::afw::image::MaskedImage< OutPixelT, lsst::afw::image::MaskPixel, lsst::afw::image::VariancePixel > &convolvedImage, lsst::afw::image::MaskedImage< InPixelT, lsst::afw::image::MaskPixel, lsst::afw::image::VariancePixel > const &inImage, lsst::afw::math::LinearCombinationKernel const &kernel, lsst::afw::math::ConvolutionControl const &convolutionControl)
int getWidth() const
Return the Kernel's width.
Definition: Kernel.h:240
#define PTR(...)
Definition: base.h:41
int getHeight() const
Return the Kernel's height.
Definition: Kernel.h:247
virtual boost::shared_ptr< Kernel > clone() const
Return a pointer to a deep copy of this kernel.
Parameters to control convolution.
Definition: ConvolveImage.h:58
definition of the Trace messaging facilities
A kernel described by a pair of functions: func(x, y) = colFunc(x) * rowFunc(y)
Definition: Kernel.h:986
unsigned int getNKernelParameters() const
Return the number of kernel parameters (0 if none)
Definition: Kernel.h:289
int getMinY() const
Definition: Box.h:125
int getNSpatialParameters() const
Return the number of spatial parameters (0 if not spatially varying)
Definition: Kernel.h:296
void convolveWithBruteForce(OutImageT &convolvedImage, InImageT const &inImage, lsst::afw::math::Kernel const &kernel, lsst::afw::math::ConvolutionControl const &convolutionControl)
Convolve an Image or MaskedImage with a Kernel by computing the kernel image at every point...
CPU and GPU convolution shared code.
ConvolveGpuStatus::ReturnCode convolveSpatiallyInvariantGPU(lsst::afw::image::MaskedImage< OutPixelT, lsst::afw::image::MaskPixel, lsst::afw::image::VariancePixel > &convolvedImage, lsst::afw::image::MaskedImage< InPixelT, lsst::afw::image::MaskPixel, lsst::afw::image::VariancePixel > const &inImage, lsst::afw::math::Kernel const &kernel, lsst::afw::math::ConvolutionControl const &convolutionControl)
int getMaxX() const
Definition: Box.h:128
bool isSpatiallyVarying() const
Return true iff the kernel is spatially varying (has a spatial function)
Definition: Kernel.h:402
double computeVectors(std::vector< Pixel > &colList, std::vector< Pixel > &rowList, bool doNormalize, double x=0.0, double y=0.0) const
Compute the column and row arrays in place, where kernel(col, row) = colList(col) * rowList(row) ...
An integer coordinate rectangle.
Definition: Box.h:53
table::Key< table::Array< Kernel::Pixel > > image
Definition: FixedKernel.cc:117
void basicConvolve(OutImageT &convolvedImage, InImageT const &inImage, lsst::afw::math::Kernel const &kernel, lsst::afw::math::ConvolutionControl const &convolutionControl)
Low-level convolution function that does not set edge pixels.
A kernel that is a linear combination of fixed basis kernels.
Definition: Kernel.h:814
geom::Extent2I const getDimensions() const
Return the Kernel's dimensions (width, height)
Definition: Kernel.h:226
bool isGpuBuild()
Inline function which returns true only when GPU_BUILD macro is defined.
Definition: IsGpuBuild.h:45
void convolveWithInterpolation(OutImageT &outImage, InImageT const &inImage, lsst::afw::math::Kernel const &kernel, ConvolutionControl const &convolutionControl)
Convolve an Image or MaskedImage with a spatially varying Kernel using linear interpolation.
int getMaxY() const
Definition: Box.h:129
int getMinX() const
Definition: Box.h:124
boost::shared_ptr< Kernel > refactor() const
Refactor the kernel as a linear combination of N bases where N is the number of parameters for the spatial model.
int x
Convolve and convolveAtAPoint functions for Image and Kernel.
#define LSST_EXCEPT(type,...)
Definition: Exception.h:46
#define INSTANTIATE(T)
void assertDimensionsOK(OutImageT const &convolvedImage, InImageT const &inImage, lsst::afw::math::Kernel const &kernel)
lsst::afw::geom::Point2I getPixel() const
Definition: Kernel.h:768
Implementation of the Class MaskedImage.
int getCtrY() const
Return y index of kernel's center.
Definition: Kernel.h:272
lsst::afw::geom::Box2I shrinkBBox(lsst::afw::geom::Box2I const &bbox) const
Definition: Kernel.cc:207
Convolution support.
Kernels are used for convolution with MaskedImages and (eventually) Images.
Definition: Kernel.h:134
A class to represent a 2-dimensional array of pixels.
Definition: PSF.h:43
#define IS_INSTANCE(A, B)
Definition: Convolve.h:48
int getWidth() const
Definition: Box.h:154
A kernel that has only one non-zero pixel (of value 1)
Definition: Kernel.h:744
lsst::afw::gpu::DevicePreference getDevicePreference() const
Definition: ConvolveImage.h:79
double computeImage(lsst::afw::image::Image< Pixel > &image, bool doNormalize, double x=0.0, double y=0.0) const
Compute an image (pixellized representation of the kernel) in place.
Definition: Kernel.cc:94
Include files required for standard LSST Exception handling.
A function to determine whether compiling for GPU is enabled.
int getCtrX() const
Return x index of kernel's center.
Definition: Kernel.h:263