plotPtcGen2.py
1 # This file is part of cp_pipe.
2 #
3 # Developed for the LSST Data Management System.
4 # This product includes software developed by the LSST Project
5 # (https://www.lsst.org).
6 # See the COPYRIGHT file at the top-level directory of this distribution
7 # for details of code ownership.
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program. If not, see <https://www.gnu.org/licenses/>.
21 #
22 
23 __all__ = ['PlotPhotonTransferCurveTaskGen2']
24 
25 import numpy as np
26 import matplotlib.pyplot as plt
27 import matplotlib as mpl
28 from matplotlib import gridspec
29 import os
30 from matplotlib.backends.backend_pdf import PdfPages
31 
32 import lsst.ip.isr as isr
33 import lsst.pex.config as pexConfig
34 import lsst.pipe.base as pipeBase
35 
36 from lsst.cp.pipe.utils import (funcAstier, funcPolynomial, NonexistentDatasetTaskDataIdContainer,
37  calculateWeightedReducedChi2)
38 from matplotlib.ticker import MaxNLocator
39 
40 from lsst.cp.pipe.ptc.astierCovPtcUtils import getFitDataFromCovariances
41 
42 from lsst.ip.isr import PhotonTransferCurveDataset
43 
44 
45 class PlotPhotonTransferCurveTaskConfigGen2(pexConfig.Config):
46  """Config class for photon transfer curve measurement task"""
47  datasetFileName = pexConfig.Field(
48  dtype=str,
49  doc="datasetPtc file name (fits)",
50  default="",
51  )
52  linearizerFileName = pexConfig.Field(
53  dtype=str,
54  doc="linearizer file name (fits)",
55  default="",
56  )
57  ccdKey = pexConfig.Field(
58  dtype=str,
59  doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
60  default='detector',
61  )
62  signalElectronsRelativeA = pexConfig.Field(
63  dtype=float,
64  doc="Signal value for relative systematic bias between different methods of estimating a_ij "
65  "(Fig. 15 of Astier+19).",
66  default=75000,
67  )
68  plotNormalizedCovariancesNumberOfBins = pexConfig.Field(
69  dtype=int,
70  doc="Number of bins in `plotNormalizedCovariancesNumber` function "
71  "(Fig. 8, 10., of Astier+19).",
72  default=10,
73  )
74 
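# A hypothetical usage sketch (not part of the original file): with a Gen2
# command-line task, the fields above would typically be overridden in a config
# file passed via --configfile (field names are real, values are placeholders):
#
#     config.datasetFileName = "ptcDataset-det000.fits"
#     config.linearizerFileName = ""  # leave empty to skip the linearizer plots
#     config.ccdKey = "detector"
#     config.signalElectronsRelativeA = 75000
#     config.plotNormalizedCovariancesNumberOfBins = 10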
75 
76 class PlotPhotonTransferCurveTaskGen2(pipeBase.CmdLineTask):
77  """A class to plot the dataset from MeasurePhotonTransferCurveTask.
78 
79  Parameters
80  ----------
81 
82  *args: `list`
83  Positional arguments passed to the Task constructor. None used at this
84  time.
85  **kwargs: `dict`
86  Keyword arguments passed on to the Task constructor. None used at this
87  time.
88 
89  Notes
90  -----
91  The plotting code in this file is almost identical to the code in
92  `plotPtc.py`. If further changes are implemented in this file,
93  `plotPtc.py` needs to be updated accordingly, and vice versa.
94  This file (`plotPtcGen2.py`) helps with maintaining backwards
95  compatibility with gen2 as we transition to gen3; the code
 96  duplication is meant to last only a few months from now
 97  (January 2021). At that point only the `plotPtc.py` file will
98  remain.
99  """
100 
101  ConfigClass = PlotPhotonTransferCurveTaskConfigGen2
102  _DefaultName = "plotPhotonTransferCurve"
103 
104  def __init__(self, *args, **kwargs):
105  super().__init__(*args, **kwargs)
106  plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too
107 
108  @classmethod
109  def _makeArgumentParser(cls):
110  """Augment argument parser for the MeasurePhotonTransferCurveTask."""
 111  parser = pipeBase.ArgumentParser(name=cls._DefaultName)
112  parser.add_id_argument("--id", datasetType="photonTransferCurveDataset",
113  ContainerClass=NonexistentDatasetTaskDataIdContainer,
114  help="The ccds to use, e.g. --id ccd=0..100")
115  return parser
116 
117  @pipeBase.timeMethod
118  def runDataRef(self, dataRef):
119  """Run the Photon Transfer Curve (PTC) plotting measurement task.
120 
121  Parameters
122  ----------
 123  dataRef : `lsst.daf.persistence.ButlerDataRef`
 124  Data reference for the detector whose PTC dataset is to be plotted.
125  """
126 
127  datasetFile = self.config.datasetFileName
128  datasetPtc = PhotonTransferCurveDataset.readFits(datasetFile)
129 
130  dirname = dataRef.getUri(datasetType='cpPipePlotRoot', write=True)
131  if not os.path.exists(dirname):
132  os.makedirs(dirname)
133 
134  detNum = dataRef.dataId[self.config.ccdKey]
135  filename = f"PTC_det{detNum}.pdf"
136  filenameFull = os.path.join(dirname, filename)
137 
138  if self.config.linearizerFileName:
139  linearizer = isr.linearize.Linearizer.readFits(self.config.linearizerFileName)
140  else:
141  linearizer = None
 142  self.run(filenameFull, datasetPtc, linearizer=linearizer, log=self.log)
143 
144  return pipeBase.Struct(exitStatus=0)
145 
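# A hypothetical invocation sketch (not part of the original file). Assuming a
# Gen2 Butler repository REPO that already holds a persisted
# "photonTransferCurveDataset" and a driver script named after _DefaultName,
# the task could be run from the command line roughly as:
#
#     plotPhotonTransferCurve.py REPO --rerun ptcPlots \
#         --id detector=0 \
#         --config datasetFileName=ptcDataset-det000.fits
#
# REPO, the rerun name, the detector id, and the file name are placeholders.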
146  def run(self, filenameFull, datasetPtc, linearizer=None, log=None):
147  """Make the plots for the PTC task"""
148  ptcFitType = datasetPtc.ptcFitType
149  with PdfPages(filenameFull) as pdfPages:
150  if ptcFitType in ["FULLCOVARIANCE", ]:
 151  self.covAstierMakeAllPlots(datasetPtc, pdfPages, log=log)
152  elif ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
 153  self._plotStandardPtc(datasetPtc, ptcFitType, pdfPages)
154  else:
155  raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
156  "Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.")
157  if linearizer:
 158  self._plotLinearizer(datasetPtc, linearizer, pdfPages)
159 
160  return
161 
162  def covAstierMakeAllPlots(self, dataset, pdfPages,
163  log=None):
164  """Make plots for MeasurePhotonTransferCurve task when doCovariancesAstier=True.
165 
 166  This function calls other functions that mostly reproduce the plots in Astier+19.
167  Most of the code is ported from Pierre Astier's repository https://github.com/PierreAstier/bfptc
168 
169  Parameters
170  ----------
171  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
172  The dataset containing the necessary information to produce the plots.
173 
174  pdfPages: `matplotlib.backends.backend_pdf.PdfPages`
175  PDF file where the plots will be saved.
176 
177  log : `lsst.log.Log`, optional
178  Logger to handle messages
179  """
180  mu = dataset.finalMeans
181  # dictionaries with ampNames as keys
182  fullCovs = dataset.covariances
183  fullCovsModel = dataset.covariancesModel
184  fullCovWeights = dataset.covariancesSqrtWeights
185  aDict = dataset.aMatrix
186  bDict = dataset.bMatrix
187  fullCovsNoB = dataset.covariances
188  fullCovsModelNoB = dataset.covariancesModelNoB
189  fullCovWeightsNoB = dataset.covariancesSqrtWeights
190  aDictNoB = dataset.aMatrixNoB
191  gainDict = dataset.gain
192  noiseDict = dataset.noise
193 
 194  self.plotCovariances(mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB, fullCovsModelNoB,
195  fullCovWeightsNoB, gainDict, noiseDict, aDict, bDict, pdfPages)
 196  self.plotNormalizedCovariances(0, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
197  fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
198  offset=0.01, topPlot=True,
199  numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins,
200  log=log)
 201  self.plotNormalizedCovariances(0, 1, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
202  fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
203  numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins,
204  log=log)
 205  self.plotNormalizedCovariances(1, 0, mu, fullCovs, fullCovsModel, fullCovWeights, fullCovsNoB,
206  fullCovsModelNoB, fullCovWeightsNoB, pdfPages,
207  numberOfBins=self.config.plotNormalizedCovariancesNumberOfBins,
208  log=log)
 209  self.plot_a_b(aDict, bDict, pdfPages)
 210  self.ab_vs_dist(aDict, bDict, pdfPages, bRange=4)
 211  self.plotAcoeffsSum(aDict, bDict, pdfPages)
 212  self.plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB,
213  self.config.signalElectronsRelativeA, gainDict, pdfPages, maxr=4)
214 
215  return
216 
217  @staticmethod
218  def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, covsWeightsNoB,
219  gainDict, noiseDict, aDict, bDict, pdfPages):
220  """Plot covariances and models: Cov00, Cov10, Cov01.
221 
222  Figs. 6 and 7 of Astier+19
223 
224  Parameters
225  ----------
226  mu : `dict`, [`str`, `list`]
227  Dictionary keyed by amp name with mean signal values.
228 
229  covs : `dict`, [`str`, `list`]
 230  Dictionary keyed by amp names containing a list of measured covariances per mean flux.
231 
232  covsModel : `dict`, [`str`, `list`]
 233  Dictionary keyed by amp names containing the covariance model (Eq. 20 of Astier+19) per mean flux.
234 
235  covsWeights : `dict`, [`str`, `list`]
 236  Dictionary keyed by amp names containing sqrt. of covariances weights.
237 
238  covsNoB : `dict`, [`str`, `list`]
 239  Dictionary keyed by amp names containing a list of measured covariances per mean flux ('b'=0 in
240  Astier+19).
241 
242  covsModelNoB : `dict`, [`str`, `list`]
243  Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of Astier+19)
244  per mean flux.
245 
246  covsWeightsNoB : `dict`, [`str`, `list`]
247  Dictionary keyed by amp names containing sqrt. of covariances weights ('b' = 0 in Eq. 20 of
248  Astier+19).
249 
250  gainDict : `dict`, [`str`, `float`]
251  Dictionary keyed by amp names containing the gains in e-/ADU.
252 
253  noiseDict : `dict`, [`str`, `float`]
 254  Dictionary keyed by amp names containing the rms readout noise in e-.
255 
256  aDict : `dict`, [`str`, `numpy.array`]
257  Dictionary keyed by amp names containing 'a' coefficients (Eq. 20 of Astier+19).
258 
259  bDict : `dict`, [`str`, `numpy.array`]
260  Dictionary keyed by amp names containing 'b' coefficients (Eq. 20 of Astier+19).
261 
262  pdfPages: `matplotlib.backends.backend_pdf.PdfPages`
263  PDF file where the plots will be saved.
264  """
265 
266  legendFontSize = 6.5
267  labelFontSize = 7
268  titleFontSize = 9
269  supTitleFontSize = 18
270  markerSize = 25
271 
272  nAmps = len(covs)
273  if nAmps == 2:
274  nRows, nCols = 2, 1
275  nRows = np.sqrt(nAmps)
276  mantissa, _ = np.modf(nRows)
277  if mantissa > 0:
278  nRows = int(nRows) + 1
279  nCols = nRows
280  else:
281  nRows = int(nRows)
282  nCols = nRows
283 
284  f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
285  f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
286  fResCov00, axResCov00 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row',
287  figsize=(13, 10))
288  fCov01, axCov01 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
289  fCov10, axCov10 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
290 
291  assert(len(covsModel) == nAmps)
292  assert(len(covsWeights) == nAmps)
293 
294  assert(len(covsNoB) == nAmps)
295  assert(len(covsModelNoB) == nAmps)
296  assert(len(covsWeightsNoB) == nAmps)
297 
298  for i, (amp, a, a2, aResVar, a3, a4) in enumerate(zip(covs, ax.flatten(),
299  ax2.flatten(), axResCov00.flatten(),
300  axCov01.flatten(), axCov10.flatten())):
301 
302  muAmp, cov, model, weight = mu[amp], covs[amp], covsModel[amp], covsWeights[amp]
303  if not np.isnan(np.array(cov)).all(): # If all the entries are np.nan, this is a bad amp.
304  aCoeffs, bCoeffs = np.array(aDict[amp]), np.array(bDict[amp])
305  gain, noise = gainDict[amp], noiseDict[amp]
306  (meanVecFinal, varVecFinal, varVecModelFinal,
307  varWeightsFinal, _) = getFitDataFromCovariances(0, 0, muAmp, cov, model, weight,
308  returnMasked=True)
309 
310  # Get weighted reduced chi2
311  chi2FullModelVar = calculateWeightedReducedChi2(varVecFinal, varVecModelFinal,
312  varWeightsFinal, len(meanVecFinal), 4)
313 
314  (meanVecFinalCov01, varVecFinalCov01, varVecModelFinalCov01,
 315  _, _) = getFitDataFromCovariances(0, 1, muAmp, cov, model, weight, returnMasked=True)
316 
317  (meanVecFinalCov10, varVecFinalCov10, varVecModelFinalCov10,
318  _, _) = getFitDataFromCovariances(1, 0, muAmp, cov, model, weight, returnMasked=True)
319 
 320  # quadratic fit for residuals below
321  par2 = np.polyfit(meanVecFinal, varVecFinal, 2, w=varWeightsFinal)
322  varModelFinalQuadratic = np.polyval(par2, meanVecFinal)
323  chi2QuadModelVar = calculateWeightedReducedChi2(varVecFinal, varModelFinalQuadratic,
324  varWeightsFinal, len(meanVecFinal), 3)
325 
326  # fit with no 'b' coefficient (c = a*b in Eq. 20 of Astier+19)
327  covNoB, modelNoB, weightNoB = covsNoB[amp], covsModelNoB[amp], covsWeightsNoB[amp]
328  (meanVecFinalNoB, varVecFinalNoB, varVecModelFinalNoB,
329  varWeightsFinalNoB, _) = getFitDataFromCovariances(0, 0, muAmp, covNoB, modelNoB,
330  weightNoB, returnMasked=True)
331 
332  chi2FullModelNoBVar = calculateWeightedReducedChi2(varVecFinalNoB, varVecModelFinalNoB,
333  varWeightsFinalNoB, len(meanVecFinalNoB),
334  3)
335  stringLegend = (f"Gain: {gain:.4} e/ADU \n"
336  f"Noise: {noise:.4} e \n"
337  + r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] + "\n"
338  + r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0]
339  + f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
340  minMeanVecFinal = np.nanmin(meanVecFinal)
341  maxMeanVecFinal = np.nanmax(meanVecFinal)
342  deltaXlim = maxMeanVecFinal - minMeanVecFinal
343 
344  a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
345  a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
346  a.tick_params(labelsize=11)
347  a.set_xscale('linear')
348  a.set_yscale('linear')
349  a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
350  a.plot(meanVecFinal, varVecModelFinal, color='red', linestyle='-')
351  a.text(0.03, 0.7, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
352  a.set_title(amp, fontsize=titleFontSize)
353  a.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
354 
355  # Same as above, but in log-scale
356  a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize)
357  a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
358  a2.tick_params(labelsize=11)
359  a2.set_xscale('log')
360  a2.set_yscale('log')
361  a2.plot(meanVecFinal, varVecModelFinal, color='red', linestyle='-')
362  a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
363  a2.text(0.03, 0.7, stringLegend, transform=a2.transAxes, fontsize=legendFontSize)
364  a2.set_title(amp, fontsize=titleFontSize)
365  a2.set_xlim([minMeanVecFinal, maxMeanVecFinal])
366 
367  # Residuals var - model
368  aResVar.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
369  aResVar.set_ylabel(r'Residuals (ADU$^2$)', fontsize=labelFontSize)
370  aResVar.tick_params(labelsize=11)
371  aResVar.set_xscale('linear')
372  aResVar.set_yscale('linear')
373  aResVar.plot(meanVecFinal, varVecFinal - varVecModelFinal, color='blue', linestyle='-',
374  label=r'Full fit ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelVar)
375  aResVar.plot(meanVecFinal, varVecFinal - varModelFinalQuadratic, color='red', linestyle='-',
376  label=r'Quadratic fit ($\chi_{\rm{red}}^2$: %g)'%chi2QuadModelVar)
377  aResVar.plot(meanVecFinalNoB, varVecFinalNoB - varVecModelFinalNoB, color='green',
378  linestyle='-',
379  label=r'Full fit (b=0) ($\chi_{\rm{red}}^2$: %g)'%chi2FullModelNoBVar)
380  aResVar.axhline(color='black')
381  aResVar.set_title(amp, fontsize=titleFontSize)
382  aResVar.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
383  aResVar.legend(fontsize=7)
384 
385  a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
386  a3.set_ylabel(r'Cov01 (ADU$^2$)', fontsize=labelFontSize)
387  a3.tick_params(labelsize=11)
388  a3.set_xscale('linear')
389  a3.set_yscale('linear')
390  a3.scatter(meanVecFinalCov01, varVecFinalCov01, c='blue', marker='o', s=markerSize)
391  a3.plot(meanVecFinalCov01, varVecModelFinalCov01, color='red', linestyle='-')
392  a3.set_title(amp, fontsize=titleFontSize)
393  a3.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
394 
395  a4.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
396  a4.set_ylabel(r'Cov10 (ADU$^2$)', fontsize=labelFontSize)
397  a4.tick_params(labelsize=11)
398  a4.set_xscale('linear')
399  a4.set_yscale('linear')
400  a4.scatter(meanVecFinalCov10, varVecFinalCov10, c='blue', marker='o', s=markerSize)
401  a4.plot(meanVecFinalCov10, varVecModelFinalCov10, color='red', linestyle='-')
402  a4.set_title(amp, fontsize=titleFontSize)
403  a4.set_xlim([minMeanVecFinal - 0.2*deltaXlim, maxMeanVecFinal + 0.2*deltaXlim])
404 
405  else:
406  a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
407  a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
408  a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
409  a4.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
410 
411  f.suptitle("PTC from covariances as in Astier+19 \n Fit: Eq. 20, Astier+19",
412  fontsize=supTitleFontSize)
413  pdfPages.savefig(f)
414  f2.suptitle("PTC from covariances as in Astier+19 (log-log) \n Fit: Eq. 20, Astier+19",
415  fontsize=supTitleFontSize)
416  pdfPages.savefig(f2)
417  fResCov00.suptitle("Residuals (data-model) for Cov00 (Var)", fontsize=supTitleFontSize)
418  pdfPages.savefig(fResCov00)
419  fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n"
420  " Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
421  pdfPages.savefig(fCov01)
422  fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n"
423  "Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
424  pdfPages.savefig(fCov10)
425 
426  return
427 
428  def plotNormalizedCovariances(self, i, j, inputMu, covs, covsModel, covsWeights, covsNoB, covsModelNoB,
429  covsWeightsNoB, pdfPages, offset=0.004,
430  numberOfBins=10, plotData=True, topPlot=False, log=None):
431  """Plot C_ij/mu vs mu.
432 
433  Figs. 8, 10, and 11 of Astier+19
434 
435  Parameters
436  ----------
437  i : `int`
 438  Covariance lag
439 
440  j : `int`
441  Covariance lag
442 
443  inputMu : `dict`, [`str`, `list`]
444  Dictionary keyed by amp name with mean signal values.
445 
446  covs : `dict`, [`str`, `list`]
 447  Dictionary keyed by amp names containing a list of measured covariances per mean flux.
448 
449  covsModel : `dict`, [`str`, `list`]
 450  Dictionary keyed by amp names containing the covariance model (Eq. 20 of Astier+19) per mean flux.
451 
452  covsWeights : `dict`, [`str`, `list`]
 453  Dictionary keyed by amp names containing sqrt. of covariances weights.
454 
455  covsNoB : `dict`, [`str`, `list`]
 456  Dictionary keyed by amp names containing a list of measured covariances per mean flux ('b'=0 in
457  Astier+19).
458 
459  covsModelNoB : `dict`, [`str`, `list`]
460  Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of Astier+19)
461  per mean flux.
462 
463  covsWeightsNoB : `dict`, [`str`, `list`]
464  Dictionary keyed by amp names containing sqrt. of covariances weights ('b' = 0 in Eq. 20 of
465  Astier+19).
466 
470  pdfPages: `matplotlib.backends.backend_pdf.PdfPages`
471  PDF file where the plots will be saved.
472 
473  offset : `float`, optional
474  Constant offset factor to plot covariances in same panel (so they don't overlap).
475 
476  numberOfBins : `int`, optional
477  Number of bins for top and bottom plot.
478 
479  plotData : `bool`, optional
480  Plot the data points?
481 
482  topPlot : `bool`, optional
483  Plot the top plot with the covariances, and the bottom plot with the model residuals?
484 
485  log : `lsst.log.Log`, optional
486  Logger to handle messages.
487  """
488  if not topPlot:
489  fig = plt.figure(figsize=(8, 10))
490  gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
491  gs.update(hspace=0)
492  ax0 = plt.subplot(gs[0])
493  plt.setp(ax0.get_xticklabels(), visible=False)
494  else:
495  fig = plt.figure(figsize=(8, 8))
496  ax0 = plt.subplot(111)
497  ax0.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
498  ax0.tick_params(axis='both', labelsize='x-large')
499  mue, rese, wce = [], [], []
500  mueNoB, reseNoB, wceNoB = [], [], []
501  for counter, amp in enumerate(covs):
502  muAmp, fullCov, fullCovModel, fullCovWeight = (inputMu[amp], covs[amp], covsModel[amp],
503  covsWeights[amp])
504  if len(fullCov) == 0:
505  continue
506  mu, cov, model, weightCov, _ = getFitDataFromCovariances(i, j, muAmp, fullCov, fullCovModel,
507  fullCovWeight, divideByMu=True,
508  returnMasked=True)
509 
510  mue += list(mu)
511  rese += list(cov - model)
512  wce += list(weightCov)
513 
514  fullCovNoB, fullCovModelNoB, fullCovWeightNoB = (covsNoB[amp], covsModelNoB[amp],
515  covsWeightsNoB[amp])
516  if len(fullCovNoB) == 0:
517  continue
518  (muNoB, covNoB, modelNoB,
519  weightCovNoB, _) = getFitDataFromCovariances(i, j, muAmp, fullCovNoB, fullCovModelNoB,
520  fullCovWeightNoB, divideByMu=True,
521  returnMasked=True)
522 
523  mueNoB += list(muNoB)
524  reseNoB += list(covNoB - modelNoB)
525  wceNoB += list(weightCovNoB)
526 
527  # the corresponding fit
528  fit_curve, = plt.plot(mu, model + counter*offset, '-', linewidth=4.0)
529  # bin plot. len(mu) = no binning
 530  gind = self.indexForBins(mu, numberOfBins)
531 
 532  xb, yb, wyb, sigyb = self.binData(mu, cov, gind, weightCov)
533  plt.errorbar(xb, yb+counter*offset, yerr=sigyb, marker='o', linestyle='none', markersize=6.5,
534  color=fit_curve.get_color(), label=f"{amp} (N: {len(mu)})")
535  # plot the data
536  if plotData:
537  points, = plt.plot(mu, cov + counter*offset, '.', color=fit_curve.get_color())
538  plt.legend(loc='upper right', fontsize=8)
539  # end loop on amps
540  mue = np.array(mue)
541  rese = np.array(rese)
542  wce = np.array(wce)
543  mueNoB = np.array(mueNoB)
544  reseNoB = np.array(reseNoB)
545  wceNoB = np.array(wceNoB)
546 
547  plt.xlabel(r"$\mu (el)$", fontsize='x-large')
548  plt.ylabel(r"$Cov{%d%d}/\mu + Cst (el)$"%(i, j), fontsize='x-large')
549  if (not topPlot):
 550  gind = self.indexForBins(mue, numberOfBins)
 551  xb, yb, wyb, sigyb = self.binData(mue, rese, gind, wce)
552 
553  ax1 = plt.subplot(gs[1], sharex=ax0)
554  ax1.errorbar(xb, yb, yerr=sigyb, marker='o', linestyle='none', label='Full fit')
 555  gindNoB = self.indexForBins(mueNoB, numberOfBins)
 556  xb2, yb2, wyb2, sigyb2 = self.binData(mueNoB, reseNoB, gindNoB, wceNoB)
557 
558  ax1.errorbar(xb2, yb2, yerr=sigyb2, marker='o', linestyle='none', label='b = 0')
559  ax1.tick_params(axis='both', labelsize='x-large')
560  plt.legend(loc='upper left', fontsize='large')
561  # horizontal line at zero
562  plt.plot(xb, [0]*len(xb), '--', color='k')
563  plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
564  plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
565  plt.xlabel(r'$\mu (el)$', fontsize='x-large')
566  plt.ylabel(r'$Cov{%d%d}/\mu$ -model (el)'%(i, j), fontsize='x-large')
567  plt.tight_layout()
568  plt.suptitle(f"Nbins: {numberOfBins}")
569  # overlapping y labels:
570  fig.canvas.draw()
571  labels0 = [item.get_text() for item in ax0.get_yticklabels()]
572  labels0[0] = u''
573  ax0.set_yticklabels(labels0)
574  pdfPages.savefig(fig)
575 
576  return
577 
578  @staticmethod
579  def plot_a_b(aDict, bDict, pdfPages, bRange=3):
580  """Fig. 12 of Astier+19
581 
582  Color display of a and b arrays fits, averaged over channels.
583 
584  Parameters
585  ----------
586  aDict : `dict`, [`numpy.array`]
587  Dictionary keyed by amp names containing the fitted 'a' coefficients from the model
588  in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).
589 
590  bDict : `dict`, [`numpy.array`]
591  Dictionary keyed by amp names containing the fitted 'b' coefficients from the model
592  in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).
593 
594  pdfPages: `matplotlib.backends.backend_pdf.PdfPages`
595  PDF file where the plots will be saved.
596 
597  bRange : `int`
598  Maximum lag for b arrays.
599  """
600  a, b = [], []
601  for amp in aDict:
602  if np.isnan(aDict[amp]).all():
603  continue
604  a.append(aDict[amp])
605  b.append(bDict[amp])
606  a = np.array(a).mean(axis=0)
607  b = np.array(b).mean(axis=0)
608  fig = plt.figure(figsize=(7, 11))
609  ax0 = fig.add_subplot(2, 1, 1)
610  im0 = ax0.imshow(np.abs(a.transpose()), origin='lower', norm=mpl.colors.LogNorm())
611  ax0.tick_params(axis='both', labelsize='x-large')
612  ax0.set_title(r'$|a|$', fontsize='x-large')
613  ax0.xaxis.set_ticks_position('bottom')
614  cb0 = plt.colorbar(im0)
615  cb0.ax.tick_params(labelsize='x-large')
616 
617  ax1 = fig.add_subplot(2, 1, 2)
618  ax1.tick_params(axis='both', labelsize='x-large')
619  ax1.yaxis.set_major_locator(MaxNLocator(integer=True))
620  ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
621  im1 = ax1.imshow(1e6*b[:bRange, :bRange].transpose(), origin='lower')
622  cb1 = plt.colorbar(im1)
623  cb1.ax.tick_params(labelsize='x-large')
624  ax1.set_title(r'$b \times 10^6$', fontsize='x-large')
625  ax1.xaxis.set_ticks_position('bottom')
626  plt.tight_layout()
627  pdfPages.savefig(fig)
628 
629  return
630 
631  @staticmethod
632  def ab_vs_dist(aDict, bDict, pdfPages, bRange=4):
633  """Fig. 13 of Astier+19.
634 
635  Values of a and b arrays fits, averaged over amplifiers, as a function of distance.
636 
637  Parameters
638  ----------
639  aDict : `dict`, [`numpy.array`]
640  Dictionary keyed by amp names containing the fitted 'a' coefficients from the model
641  in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).
642 
643  bDict : `dict`, [`numpy.array`]
644  Dictionary keyed by amp names containing the fitted 'b' coefficients from the model
645  in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).
646 
647  pdfPages: `matplotlib.backends.backend_pdf.PdfPages`
648  PDF file where the plots will be saved.
649 
650  bRange : `int`
651  Maximum lag for b arrays.
652  """
653  assert (len(aDict) == len(bDict))
654  a = []
655  for amp in aDict:
656  if np.isnan(aDict[amp]).all():
657  continue
658  a.append(aDict[amp])
659  a = np.array(a)
660  y = a.mean(axis=0)
661  sy = a.std(axis=0)/np.sqrt(len(aDict))
662  i, j = np.indices(y.shape)
663  upper = (i >= j).ravel()
664  r = np.sqrt(i**2 + j**2).ravel()
665  y = y.ravel()
666  sy = sy.ravel()
667  fig = plt.figure(figsize=(6, 9))
668  ax = fig.add_subplot(211)
669  ax.set_xlim([0.5, r.max()+1])
670  ax.errorbar(r[upper], y[upper], yerr=sy[upper], marker='o', linestyle='none', color='b',
671  label='$i>=j$')
672  ax.errorbar(r[~upper], y[~upper], yerr=sy[~upper], marker='o', linestyle='none', color='r',
673  label='$i<j$')
674  ax.legend(loc='upper center', fontsize='x-large')
675  ax.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large')
676  ax.set_ylabel(r'$a_{ij}$', fontsize='x-large')
677  ax.set_yscale('log')
678  ax.tick_params(axis='both', labelsize='x-large')
679 
680  #
681  axb = fig.add_subplot(212)
682  b = []
683  for amp in bDict:
684  if np.isnan(bDict[amp]).all():
685  continue
686  b.append(bDict[amp])
687  b = np.array(b)
688  yb = b.mean(axis=0)
689  syb = b.std(axis=0)/np.sqrt(len(bDict))
690  ib, jb = np.indices(yb.shape)
691  upper = (ib > jb).ravel()
 692  rb = np.sqrt(ib**2 + jb**2).ravel()
693  yb = yb.ravel()
694  syb = syb.ravel()
695  xmin = -0.2
696  xmax = bRange
697  axb.set_xlim([xmin, xmax+0.2])
 698  cutu = (rb > xmin) & (rb < xmax) & (upper)
 699  cutl = (rb > xmin) & (rb < xmax) & (~upper)
700  axb.errorbar(rb[cutu], yb[cutu], yerr=syb[cutu], marker='o', linestyle='none', color='b',
701  label='$i>=j$')
702  axb.errorbar(rb[cutl], yb[cutl], yerr=syb[cutl], marker='o', linestyle='none', color='r',
703  label='$i<j$')
704  plt.legend(loc='upper center', fontsize='x-large')
705  axb.set_xlabel(r'$\sqrt{i^2+j^2}$', fontsize='x-large')
706  axb.set_ylabel(r'$b_{ij}$', fontsize='x-large')
707  axb.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
708  axb.tick_params(axis='both', labelsize='x-large')
709  plt.tight_layout()
710  pdfPages.savefig(fig)
711 
712  return
713 
714  @staticmethod
715  def plotAcoeffsSum(aDict, bDict, pdfPages):
716  """Fig. 14. of Astier+19
717 
718  Cumulative sum of a_ij as a function of maximum separation. This plot displays the average over
719  channels.
720 
721  Parameters
722  ----------
723  aDict : `dict`, [`numpy.array`]
724  Dictionary keyed by amp names containing the fitted 'a' coefficients from the model
725  in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).
726 
727  bDict : `dict`, [`numpy.array`]
728  Dictionary keyed by amp names containing the fitted 'b' coefficients from the model
729  in Eq. 20 of Astier+19 (if `ptcFitType` is `FULLCOVARIANCE`).
730 
731  pdfPages: `matplotlib.backends.backend_pdf.PdfPages`
732  PDF file where the plots will be saved.
733  """
734  assert (len(aDict) == len(bDict))
735  a, b = [], []
736  for amp in aDict:
737  if np.isnan(aDict[amp]).all() or np.isnan(bDict[amp]).all():
738  continue
739  a.append(aDict[amp])
740  b.append(bDict[amp])
741  a = np.array(a).mean(axis=0)
742  b = np.array(b).mean(axis=0)
743  fig = plt.figure(figsize=(7, 6))
744  w = 4*np.ones_like(a)
745  w[0, 1:] = 2
746  w[1:, 0] = 2
747  w[0, 0] = 1
748  wa = w*a
749  indices = range(1, a.shape[0]+1)
750  sums = [wa[0:n, 0:n].sum() for n in indices]
751  ax = plt.subplot(111)
752  ax.plot(indices, sums/sums[0], 'o', color='b')
753  ax.set_yscale('log')
754  ax.set_xlim(indices[0]-0.5, indices[-1]+0.5)
755  ax.set_ylim(None, 1.2)
756  ax.set_ylabel(r'$[\sum_{|i|<n\ &\ |j|<n} a_{ij}] / |a_{00}|$', fontsize='x-large')
757  ax.set_xlabel('n', fontsize='x-large')
758  ax.tick_params(axis='both', labelsize='x-large')
759  plt.tight_layout()
760  pdfPages.savefig(fig)
761 
762  return
763 
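# A small worked example (not from the original file): the weights used in
# plotAcoeffsSum encode the fourfold symmetry of the lag plane
# (a_ij = a_-i,j = a_i,-j = a_-i,-j), so interior lags count four times, lags on
# an axis twice, and the (0, 0) lag once. A quick check with a constant 3x3 matrix:
#
#     a = np.full((3, 3), 1.0e-6)
#     w = 4*np.ones_like(a)
#     w[0, 1:] = 2
#     w[1:, 0] = 2
#     w[0, 0] = 1
#     total = (w*a)[0:2, 0:2].sum()  # ~9e-6: 1 + 2 + 2 + 4 copies of a_ij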
764  @staticmethod
765  def plotRelativeBiasACoeffs(aDict, aDictNoB, fullCovsModel, fullCovsModelNoB, signalElectrons,
766  gainDict, pdfPages, maxr=None):
767  """Fig. 15 in Astier+19.
768 
769  Illustrates systematic bias from estimating 'a'
770  coefficients from the slope of correlations as opposed to the
771  full model in Astier+19.
772 
773  Parameters
774  ----------
775  aDict: `dict`
776  Dictionary of 'a' matrices (Eq. 20, Astier+19), with amp names as keys.
777 
778  aDictNoB: `dict`
779  Dictionary of 'a' matrices ('b'= 0 in Eq. 20, Astier+19), with amp names as keys.
780 
781  fullCovsModel : `dict`, [`str`, `list`]
782  Dictionary keyed by amp names containing covariances model per mean flux.
783 
784  fullCovsModelNoB : `dict`, [`str`, `list`]
785  Dictionary keyed by amp names containing covariances model (with 'b'=0 in Eq. 20 of
786  Astier+19) per mean flux.
787 
788  signalElectrons : `float`
789  Signal at which to evaluate the a_ij coefficients.
790 
791  pdfPages: `matplotlib.backends.backend_pdf.PdfPages`
792  PDF file where the plots will be saved.
793 
794  gainDict : `dict`, [`str`, `float`]
 795  Dictionary keyed by amp names with the gains in e-/ADU.
796 
797  maxr : `int`, optional
798  Maximum lag.
799  """
800 
801  fig = plt.figure(figsize=(7, 11))
802  title = [f"'a' relative bias at {signalElectrons} e", "'a' relative bias (b=0)"]
803  data = [(aDict, fullCovsModel), (aDictNoB, fullCovsModelNoB)]
804 
805  for k, pair in enumerate(data):
806  diffs = []
807  amean = []
808  for amp in pair[0]:
809  covModel = np.array(pair[1][amp])
810  if np.isnan(covModel).all():
811  continue
812  # Compute the "a" coefficients of the Antilogus+14 (1402.0725) model as in
813  # Guyonnet+15 (1501.01577, eq. 16, the slope of cov/var at a given flux mu in
814  # electrons). Eq. 16 of 1501.01577 is an approximation to the more complete
815  # model in Astier+19 (1905.08677).
816  var = covModel[0, 0, 0] # ADU^2
817  # For a result in electrons^-1, we have to use mu in electrons
818  aOld = covModel[0, :, :]/(var*signalElectrons)
819  a = pair[0][amp]
820  amean.append(a)
821  diffs.append((aOld-a))
822  amean = np.array(amean).mean(axis=0)
823  diff = np.array(diffs).mean(axis=0)
824  diff = diff/amean
825  diff = diff[:]
826  # The difference should be close to zero
827  diff[0, 0] = 0
828  if maxr is None:
829  maxr = diff.shape[0]
830  diff = diff[:maxr, :maxr]
831  ax0 = fig.add_subplot(2, 1, k+1)
832  im0 = ax0.imshow(diff.transpose(), origin='lower')
833  ax0.yaxis.set_major_locator(MaxNLocator(integer=True))
834  ax0.xaxis.set_major_locator(MaxNLocator(integer=True))
835  ax0.tick_params(axis='both', labelsize='x-large')
836  plt.colorbar(im0)
837  ax0.set_title(title[k])
838 
839  plt.tight_layout()
840  pdfPages.savefig(fig)
841 
842  return
843 
844  def _plotStandardPtc(self, dataset, ptcFitType, pdfPages):
845  """Plot PTC, var/signal vs signal, linearity, and linearity residual per amplifier.
846 
847  Parameters
848  ----------
849  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
850  The dataset containing the means, variances, exposure times, and mask.
851 
852  ptcFitType : `str`
 853  Type of the model fit to the PTC. Options: 'FULLCOVARIANCE', 'EXPAPPROXIMATION', or 'POLYNOMIAL'.
854 
855  pdfPages: `matplotlib.backends.backend_pdf.PdfPages`
856  PDF file where the plots will be saved.
857  """
858 
859  if ptcFitType == 'EXPAPPROXIMATION':
860  ptcFunc = funcAstier
861  stringTitle = (r"Var = $\frac{1}{2g^2a_{00}}(\exp (2a_{00} \mu g) - 1) + \frac{n_{00}}{g^2}$ ")
862  elif ptcFitType == 'POLYNOMIAL':
863  ptcFunc = funcPolynomial
864  for key in dataset.ptcFitPars:
865  deg = len(dataset.ptcFitPars[key]) - 1
866  break
867  stringTitle = r"Polynomial (degree: %g)" % (deg)
868  else:
869  raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
870  "Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.")
871 
872  legendFontSize = 6.5
873  labelFontSize = 8
874  titleFontSize = 9
875  supTitleFontSize = 18
876  markerSize = 25
877 
878  # General determination of the size of the plot grid
879  nAmps = len(dataset.ampNames)
880  if nAmps == 2:
881  nRows, nCols = 2, 1
882  nRows = np.sqrt(nAmps)
883  mantissa, _ = np.modf(nRows)
884  if mantissa > 0:
885  nRows = int(nRows) + 1
886  nCols = nRows
887  else:
888  nRows = int(nRows)
889  nCols = nRows
890 
891  f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
892  f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
893  f3, ax3 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
894 
895  for i, (amp, a, a2, a3) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten(),
896  ax3.flatten())):
897  meanVecOriginal = np.ravel(np.array(dataset.rawMeans[amp]))
898  varVecOriginal = np.ravel(np.array(dataset.rawVars[amp]))
899  mask = np.ravel(np.array(dataset.expIdMask[amp]))
 900  if np.isnan(mask[0]): # If all entries are NaN, the whole amp is bad
901  a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
902  a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
903  a3.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
904  continue
905  else:
906  mask = mask.astype(bool)
907  meanVecFinal = meanVecOriginal[mask]
908  varVecFinal = varVecOriginal[mask]
909  meanVecOutliers = meanVecOriginal[np.invert(mask)]
910  varVecOutliers = varVecOriginal[np.invert(mask)]
911  pars, parsErr = np.array(dataset.ptcFitPars[amp]), np.array(dataset.ptcFitParsError[amp])
912  ptcRedChi2 = dataset.ptcFitChiSq[amp]
913  if ptcFitType == 'EXPAPPROXIMATION':
914  if len(meanVecFinal):
915  ptcA00, ptcA00error = pars[0], parsErr[0]
916  ptcGain, ptcGainError = pars[1], parsErr[1]
917  ptcNoise = np.sqrt((pars[2])) # pars[2] is in (e-)^2
918  ptcNoiseAdu = ptcNoise*(1./ptcGain)
919  ptcNoiseError = 0.5*(parsErr[2]/np.fabs(pars[2]))*np.sqrt(np.fabs(pars[2]))
920  stringLegend = (f"a00: {ptcA00:.2e}+/-{ptcA00error:.2e} 1/e"
921  f"\nGain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU"
922  f"\nNoise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n"
923  r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}"
924  f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
925 
926  if ptcFitType == 'POLYNOMIAL':
927  if len(meanVecFinal):
928  ptcGain, ptcGainError = 1./pars[1], np.fabs(1./pars[1])*(parsErr[1]/pars[1])
929  ptcNoiseAdu = np.sqrt((pars[0])) # pars[0] is in ADU^2
930  ptcNoise = ptcNoiseAdu*ptcGain
931  ptcNoiseError = (0.5*(parsErr[0]/np.fabs(pars[0]))*(np.sqrt(np.fabs(pars[0]))))*ptcGain
932  stringLegend = (f"Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/ADU\n"
933  f"Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e\n"
934  r"$\chi^2_{\rm{red}}$: " + f"{ptcRedChi2:.4}"
935  f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
936 
937  a.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
938  a.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
939  a.tick_params(labelsize=11)
940  a.set_xscale('linear')
941  a.set_yscale('linear')
942 
943  a2.set_xlabel(r'Mean Signal ($\mu$, ADU)', fontsize=labelFontSize)
944  a2.set_ylabel(r'Variance (ADU$^2$)', fontsize=labelFontSize)
945  a2.tick_params(labelsize=11)
946  a2.set_xscale('log')
947  a2.set_yscale('log')
948 
949  a3.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
950  a3.set_ylabel(r'Variance/$\mu$ (ADU)', fontsize=labelFontSize)
951  a3.tick_params(labelsize=11)
952  a3.set_xscale('log')
953  a3.set_yscale('linear')
954 
955  minMeanVecFinal = np.nanmin(meanVecFinal)
956  maxMeanVecFinal = np.nanmax(meanVecFinal)
957  meanVecFit = np.linspace(minMeanVecFinal, maxMeanVecFinal, 100*len(meanVecFinal))
958  minMeanVecOriginal = np.nanmin(meanVecOriginal)
959  maxMeanVecOriginal = np.nanmax(meanVecOriginal)
960  deltaXlim = maxMeanVecOriginal - minMeanVecOriginal
961  a.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red')
962  a.plot(meanVecFinal, ptcNoiseAdu**2 + (1./ptcGain)*meanVecFinal, color='green',
963  linestyle='--')
964  a.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
965  a.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize)
966  a.text(0.03, 0.66, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
967  a.set_title(amp, fontsize=titleFontSize)
968  a.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim])
969 
970  # Same, but in log-scale
971  a2.plot(meanVecFit, ptcFunc(pars, meanVecFit), color='red')
972  a2.scatter(meanVecFinal, varVecFinal, c='blue', marker='o', s=markerSize)
973  a2.scatter(meanVecOutliers, varVecOutliers, c='magenta', marker='s', s=markerSize)
974  a2.text(0.03, 0.66, stringLegend, transform=a2.transAxes, fontsize=legendFontSize)
975  a2.set_title(amp, fontsize=titleFontSize)
976  a2.set_xlim([minMeanVecOriginal, maxMeanVecOriginal])
977 
978  # Var/mu vs mu
979  a3.plot(meanVecFit, ptcFunc(pars, meanVecFit)/meanVecFit, color='red')
980  a3.scatter(meanVecFinal, varVecFinal/meanVecFinal, c='blue', marker='o', s=markerSize)
981  a3.scatter(meanVecOutliers, varVecOutliers/meanVecOutliers, c='magenta', marker='s',
982  s=markerSize)
983  a3.text(0.05, 0.1, stringLegend, transform=a3.transAxes, fontsize=legendFontSize)
984  a3.set_title(amp, fontsize=titleFontSize)
985  a3.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim])
986 
987  f.suptitle("PTC \n Fit: " + stringTitle, fontsize=supTitleFontSize)
988  pdfPages.savefig(f)
989  f2.suptitle("PTC (log-log)", fontsize=supTitleFontSize)
990  pdfPages.savefig(f2)
991  f3.suptitle(r"Var/$\mu$", fontsize=supTitleFontSize)
992  pdfPages.savefig(f3)
993 
994  return
995 
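# A small worked example (not from the original file) of the EXPAPPROXIMATION
# model used in the figure title above (Astier+19),
#
#     Var(mu) = 1/(2 g^2 a00) * (exp(2 a00 g mu) - 1) + n00/g^2,
#
# which for |2 a00 g mu| << 1 reduces to the usual Poisson PTC,
# Var(mu) ~ mu/g + n00/g^2. Evaluating it directly with hypothetical numbers
# (a00 in 1/e, g in e/ADU, n00 in e^2):
#
#     a00, g, n00, mu = -1.5e-6, 1.2, 25.0, 1000.0
#     var = 0.5/(a00*g**2)*(np.exp(2*a00*g*mu) - 1) + n00/g**2
#     # var is slightly below mu/g + n00/g**2 because a00 < 0 (brighter-fatter)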
996  def _plotLinearizer(self, dataset, linearizer, pdfPages):
997  """Plot linearity and linearity residual per amplifier
998 
999  Parameters
1000  ----------
1001  dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
1002  The dataset containing the means, variances, exposure times, and mask.
1003 
1004  linearizer : `lsst.ip.isr.Linearizer`
1005  Linearizer object
1006  """
1007  legendFontSize = 7
1008  labelFontSize = 7
1009  titleFontSize = 9
1010  supTitleFontSize = 18
1011 
1012  # General determination of the size of the plot grid
1013  nAmps = len(dataset.ampNames)
1014  if nAmps == 2:
1015  nRows, nCols = 2, 1
1016  nRows = np.sqrt(nAmps)
1017  mantissa, _ = np.modf(nRows)
1018  if mantissa > 0:
1019  nRows = int(nRows) + 1
1020  nCols = nRows
1021  else:
1022  nRows = int(nRows)
1023  nCols = nRows
1024 
1025  # Plot mean vs time (f1), and fractional residuals (f2)
1026  f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
1027  f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex='col', sharey='row', figsize=(13, 10))
1028  for i, (amp, a, a2) in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten())):
1029  mask = dataset.expIdMask[amp]
1030  if np.isnan(mask[0]):
1031  a.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
1032  a2.set_title(f"{amp} (BAD)", fontsize=titleFontSize)
1033  continue
1034  else:
1035  mask = mask.astype(bool)
1036  meanVecFinal = np.array(dataset.rawMeans[amp])[mask]
1037  timeVecFinal = np.array(dataset.rawExpTimes[amp])[mask]
1038 
1039  a.set_xlabel('Time (sec)', fontsize=labelFontSize)
1040  a.set_ylabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
1041  a.tick_params(labelsize=labelFontSize)
1042  a.set_xscale('linear')
1043  a.set_yscale('linear')
1044 
1045  a2.axhline(y=0, color='k')
1046  a2.axvline(x=0, color='k', linestyle='-')
1047  a2.set_xlabel(r'Mean signal ($\mu$, ADU)', fontsize=labelFontSize)
1048  a2.set_ylabel('Fractional nonlinearity (%)', fontsize=labelFontSize)
1049  a2.tick_params(labelsize=labelFontSize)
1050  a2.set_xscale('linear')
1051  a2.set_yscale('linear')
1052 
1053  pars, parsErr = linearizer.fitParams[amp], linearizer.fitParamsErr[amp]
1054  k0, k0Error = pars[0], parsErr[0]
1055  k1, k1Error = pars[1], parsErr[1]
1056  k2, k2Error = pars[2], parsErr[2]
1057  linRedChi2 = linearizer.fitChiSq[amp]
1058  stringLegend = (f"k0: {k0:.4}+/-{k0Error:.2e} ADU\nk1: {k1:.4}+/-{k1Error:.2e} ADU/t"
1059  f"\nk2: {k2:.2e}+/-{k2Error:.2e} ADU/t^2\n"
1060  r"$\chi^2_{\rm{red}}$: " + f"{linRedChi2:.4}")
1061  a.scatter(timeVecFinal, meanVecFinal)
1062  a.plot(timeVecFinal, funcPolynomial(pars, timeVecFinal), color='red')
1063  a.text(0.03, 0.75, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
1064  a.set_title(f"{amp}", fontsize=titleFontSize)
1065 
1066  linearPart = k0 + k1*timeVecFinal
1067  fracLinRes = 100*(linearPart - meanVecFinal)/linearPart
1068  a2.plot(meanVecFinal, fracLinRes, c='g')
1069  a2.set_title(f"{amp}", fontsize=titleFontSize)
1070 
1071  f.suptitle("Linearity \n Fit: Polynomial (degree: %g)"
1072  % (len(pars)-1),
1073  fontsize=supTitleFontSize)
1074  f2.suptitle(r"Fractional NL residual" "\n"
1075  r"$100\times \frac{(k_0 + k_1*Time-\mu)}{k_0+k_1*Time}$",
1076  fontsize=supTitleFontSize)
1077  pdfPages.savefig(f)
1078  pdfPages.savefig(f2)
1079 
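# A small numerical example (not from the original file) of the fractional
# nonlinearity plotted above, 100*(k0 + k1*t - mu)/(k0 + k1*t): with k0 = 10 ADU,
# k1 = 1000 ADU/s, t = 10 s, and a measured mean mu = 10050 ADU,
#
#     fracLinRes = 100*(10 + 1000*10 - 10050)/(10 + 1000*10)  # ~ -0.4 %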
1080  @staticmethod
1081  def findGroups(x, maxDiff):
1082  """Group data into bins, with at most maxDiff distance between bins.
1083 
1084  Parameters
1085  ----------
1086  x: `list`
1087  Data to bin.
1088 
1089  maxDiff: `int`
 1090  Maximum distance between a value and the running mean of its group.
1091 
1092  Returns
1093  -------
1094  index: `list`
1095  Bin indices.
1096  """
1097  ix = np.argsort(x)
1098  xsort = np.sort(x)
1099  index = np.zeros_like(x, dtype=np.int32)
1100  xc = xsort[0]
1101  group = 0
1102  ng = 1
1103 
1104  for i in range(1, len(ix)):
1105  xval = xsort[i]
1106  if (xval - xc < maxDiff):
1107  xc = (ng*xc + xval)/(ng+1)
1108  ng += 1
1109  index[ix[i]] = group
1110  else:
1111  group += 1
1112  ng = 1
1113  index[ix[i]] = group
1114  xc = xval
1115 
1116  return index
1117 
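# A hypothetical example (not from the original file): grouping nearby signal
# levels with findGroups.
#
#     x = [1.0, 1.2, 5.0, 5.1]
#     groups = PlotPhotonTransferCurveTaskGen2.findGroups(x, maxDiff=1.0)
#     # groups -> array([0, 0, 1, 1]): the two low and the two high values each
#     # share a group index, because they lie within maxDiff of the running mean.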
1118  @staticmethod
1119  def indexForBins(x, nBins):
1120  """Builds an index with regular binning. The result can be fed into binData.
1121 
1122  Parameters
1123  ----------
1124  x: `numpy.array`
1125  Data to bin.
1126  nBins: `int`
 1127  Number of bins.
1128 
1129  Returns
1130  -------
1131  np.digitize(x, bins): `numpy.array`
1132  Bin indices.
1133  """
1134 
1135  bins = np.linspace(x.min(), x.max() + abs(x.max() * 1e-7), nBins + 1)
1136  return np.digitize(x, bins)
1137 
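# A hypothetical example (not from the original file): building a regular binning
# index for the covariance plots.
#
#     x = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 12.0])
#     gind = PlotPhotonTransferCurveTaskGen2.indexForBins(x, nBins=2)
#     # gind -> array([1, 1, 1, 2, 2, 2]); np.digitize labels bins starting at 1.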
1138  @staticmethod
1139  def binData(x, y, binIndex, wy=None):
1140  """Bin data (usually for display purposes).
1141 
 1142  Parameters
 1143  ----------
1144  x: `numpy.array`
1145  Data to bin.
1146 
1147  y: `numpy.array`
1148  Data to bin.
1149 
 1150  binIndex: `list`
1151  Bin number of each datum.
1152 
1153  wy: `numpy.array`
1154  Inverse rms of each datum to use when averaging (the actual weight is wy**2).
1155 
 1156  Returns
 1157  -------
1158 
1159  xbin: `numpy.array`
1160  Binned data in x.
1161 
1162  ybin: `numpy.array`
1163  Binned data in y.
1164 
1165  wybin: `numpy.array`
1166  Binned weights in y, computed from wy's in each bin.
1167 
1168  sybin: `numpy.array`
1169  Uncertainty on the bin average, considering actual scatter, and ignoring weights.
1170  """
1171 
1172  if wy is None:
1173  wy = np.ones_like(x)
1174  binIndexSet = set(binIndex)
1175  w2 = wy*wy
1176  xw2 = x*(w2)
1177  xbin = np.array([xw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet])
1178 
1179  yw2 = y*w2
1180  ybin = np.array([yw2[binIndex == i].sum()/w2[binIndex == i].sum() for i in binIndexSet])
1181 
1182  wybin = np.sqrt(np.array([w2[binIndex == i].sum() for i in binIndexSet]))
1183  sybin = np.array([y[binIndex == i].std()/np.sqrt(np.array([binIndex == i]).sum())
1184  for i in binIndexSet])
1185 
1186  return xbin, ybin, wybin, sybin
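# A hypothetical example (not from the original file): binning a noiseless toy
# curve with the two helpers above.
#
#     x = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 12.0])
#     y = 2.0*x
#     gind = PlotPhotonTransferCurveTaskGen2.indexForBins(x, nBins=2)
#     xb, yb, wyb, sigyb = PlotPhotonTransferCurveTaskGen2.binData(x, y, gind)
#     # xb -> [2., 11.] and yb -> [4., 22.]: one (weighted-mean) point per occupied
#     # bin, with sigyb giving the scatter-based error on each bin average.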