From 7eda5bffe6e634e4a3cfd6771a0454859c5a26b9 Mon Sep 17 00:00:00 2001 From: Dan F-M Date: Mon, 6 Jun 2022 09:16:58 -0400 Subject: [PATCH] initial --- .copier-answers.yml | 8 + .github/workflows/build.yml | 13 + .gitignore | 1 + .pre-commit-config.yaml | 14 + LICENSE | 21 ++ README.md | 5 + matplotlibrc | 7 + metadata.yml | 5 + post.ipynb | 676 ++++++++++++++++++++++++++++++++++++ requirements.in | 7 + requirements.txt | 0 runtime.txt | 1 + 12 files changed, 758 insertions(+) create mode 100644 .copier-answers.yml create mode 100644 .github/workflows/build.yml create mode 100644 .gitignore create mode 100644 .pre-commit-config.yaml create mode 100644 LICENSE create mode 100644 README.md create mode 100644 matplotlibrc create mode 100644 metadata.yml create mode 100644 post.ipynb create mode 100644 requirements.in create mode 100644 requirements.txt create mode 100644 runtime.txt diff --git a/.copier-answers.yml b/.copier-answers.yml new file mode 100644 index 0000000..30b17e3 --- /dev/null +++ b/.copier-answers.yml @@ -0,0 +1,8 @@ +_commit: 9418c84 +_src_path: gh:dfm-io/template +day: 29 +month: 7 +python_version: '3.9' +slug: histogram1 +title: The Histogram (Part 1) +year: 2014 diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..95799fb --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,13 @@ +name: Build post + +on: + push: + branches: + - main + pull_request: + +jobs: + build: + uses: dfm-io/template/.github/workflows/post.yml@main + with: + python-version: "3.9" diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0a764a4 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +env diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..e9d317e --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,14 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.2.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - repo: https://github.com/psf/black + rev: "22.3.0" + hooks: + - id: black-jupyter + - repo: https://github.com/kynan/nbstripout + rev: "0.5.0" + hooks: + - id: nbstripout diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..98b366b --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014 Dan Foreman-Mackey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..ef88263 --- /dev/null +++ b/README.md @@ -0,0 +1,5 @@ +# The Histogram (Part 1) + +See the published version of this post at: https://dfm.io/posts/histogram1 + +Open an executable version on Binder at: https://mybinder.org/v2/gh/dfm-io/post--histogram1/executed?labpath=post.ipynb diff --git a/matplotlibrc b/matplotlibrc new file mode 100644 index 0000000..c06e600 --- /dev/null +++ b/matplotlibrc @@ -0,0 +1,7 @@ +savefig.dpi: 100 +figure.dpi: 100 +font.size: 14 +font.family: sans-serif +font.sans-serif: Liberation Sans +font.cursive: Liberation Sans +mathtext.fontset: custom diff --git a/metadata.yml b/metadata.yml new file mode 100644 index 0000000..39b49ac --- /dev/null +++ b/metadata.yml @@ -0,0 +1,5 @@ +title: "The Histogram (Part 1)" +date: 2014-07-29 +slug: "histogram1" +source: "post--histogram1" +math: true diff --git a/post.ipynb b/post.ipynb new file mode 100644 index 0000000..296b3f2 --- /dev/null +++ b/post.ipynb @@ -0,0 +1,676 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "brazilian-lawrence", + "metadata": {}, + "source": [ + "---\n", + "Title: The Histogram (Part 1)\n", + "Date: 2014-07-29\n", + "Category: Data Analysis\n", + "Slug: histogram1\n", + "Summary: Thousands of words about histograms (part 1)\n", + "Math: true\n", + "---" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "direct-vietnamese", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "%config InlineBackend.figure_format = \"retina\"\n", + "\n", + "from matplotlib import rcParams\n", + "\n", + "rcParams[\"savefig.dpi\"] = 100\n", + "rcParams[\"font.size\"] = 20" + ] + }, + { + "cell_type": "markdown", + "id": "excessive-contractor", + "metadata": {}, + "source": [ + "When I was writing [my recent paper](http://arxiv.org/abs/1406.3020) where I infer the population of exoplanets from an incomplete catalog of noisy measurements, I spent a lot of time thinking about histograms and I came across a few cute results that I wanted to share.\n", + "Histograms are commonly used as simple \"non-parametric\" estimates of probability distributions but they get something of a bad rap (*binning is sinning*) because it's easy to use them inappropriately!\n", + "For example, in astronomy, it's not uncommon for people to make a histogram of their data then fit a model to the bin heights (think luminosity functions, mass functions, *etc.*) instead of just modeling the points directly.\n", + "This definitely isn't the right thing to do in that case and maybe this should be the topic of a future post but there are some situations where making a histogram of your data isn't such a bad idea.\n", + "Even if a histogram *is* a good model for the data, we're often still left with the question of what we should do in the face of incompleteness and finite measurement uncertainties.\n", + "In this post, I derive the maximum-likelihood \"histogram\" when your data are censored by some (known) detection efficiency function and in a later post, I'll generalize this to include a treatment of measurement uncertainties (following [Hogg, Myers & Bovy 2010](http://arXiv.org/abs/1008.4146) and [DFM, Hogg & Morton](http://arxiv.org/abs/1406.3020)).\n", + "For all the results in this post, we will fix the bin locations and widths *a priori* but it would be interesting to generalize this derivation to include adaptive widths (see, for example, [Jake VanderPlas' excellent 
post](http://jakevdp.github.io/blog/2012/09/12/dynamic-programming-in-python/))." + ] + }, + { + "cell_type": "markdown", + "id": "biological-maple", + "metadata": {}, + "source": [ + "## The Maximum-Likelihood Histogram" + ] + }, + { + "cell_type": "markdown", + "id": "informal-cheat", + "metadata": {}, + "source": [ + "The act of making a histogram is often thought of as a (very intuitive) *procedure*: count the number of points that fall in each bin and normalize by the bin volume to get the bin heights.\n", + "It can be difficult to generalize a procedure like this to deal with more complicated models so I prefer to think of data analysis in terms of *probabilities*.\n", + "When we do this, it's easy to derive the above procedure as the *analytic maximum-likelihood result for a specific model!*\n", + "\n", + "To set this up, we have a set of $K$ data points $\\{ w_k \\}_{k=1}^K$ where each $w_k$ is a vector in an arbitrary number of dimensions $D$.\n", + "We're going to assume that these points are independent draws from the inhomogeneous Poisson process generated by the rate density\n", + "\n", + "$$ \\Gamma_\\theta (w) = \\frac{\\mathrm{d}N}{\\mathrm{d}w} $$\n", + "\n", + "where $\\theta$ is the set of parameters describing this function.\n", + "Under this assumption, the log-likelihood of the entire dataset is the commonly used result \n", + "\n", + "$$\\ln p(\\{ w_k \\}_{k=1}^K\\,|\\,\\theta) = \\ell (\\theta) = -\\int \\Gamma_\\theta (w)\\,\\mathrm{d}w + \\sum_{k=1}^K \\ln \\Gamma_\\theta (w_k) \\quad.\\label{eq:poisson}\n", + "$$\n", + "\n", + "This derivation is left to the interested reader… I've always wanted to say that!\n", + "A good starting place is Section 3.2 of [this PhD thesis](http://arxiv.org/abs/astro-ph/0202141)." + ] + }, + { + "cell_type": "markdown", + "id": "peripheral-lawrence", + "metadata": {}, + "source": [ + "Note that the derivation up to this point isn't specific to a histogram model and you can (and should) use a physical model for $\\Gamma_\\theta$ if you have one and fit for $\\theta$ numerically.\n", + "But now, for the purposes of today's discussion, let's model $\\Gamma_\\theta$ as a piecewise constant step function\n", + "\n", + "$$\n", + "\\Gamma_\\theta (w) = \\left\\{\\begin{array}{ll}\n", + " \\theta_1 & w \\in \\Delta_1 \\\\\n", + " \\theta_2 & w \\in \\Delta_2 \\\\\n", + " \\vdots & \\\\\n", + " \\theta_N & w \\in \\Delta_N \\\\\n", + " 0 & \\mathrm{otherwise}\n", + "\\end{array}\\right.\n", + "$$\n", + "\n", + "where the $N$ parameters $\\theta_n$ are the step heights and the step volumes $\\Delta_n$ have been fixed in advance.\n", + "When we use this model for the rate density in the likelihood expression above (Equation $\\ref{eq:poisson}$), we find\n", + "\n", + "$$ \\ell(\\theta) = -\\sum_{n=1}^N \\Delta_n\\,\\theta_n + \\sum_{k=1}^K \\,\\sum_{n=1}^N \\mathbf{1}[w_k \\in \\Delta_n]\\,\\ln \\theta_n \\quad.$$\n", + "\n", + "where $\\Delta_n$ is the volume of the $n$-th bin and the function $\\mathbf{1}[\\cdot]$ is one if its argument evaluates to true and zero otherwise." 
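+ ] + }, + { + "cell_type": "markdown", + "id": "added-loglike-text", + "metadata": {}, + "source": [ + "Just to make this concrete, here's a minimal sketch of that log-likelihood in code (a one-dimensional illustration with a made-up helper name, and it assumes every point falls inside one of the bins).\n", + "Note that the double sum over the indicator function reduces to the bin counts, so `np.histogram` does most of the work:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "added-loglike-code", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "\n", + "\n", + "def step_log_likelihood(theta, bin_edges, w):\n", + "    # theta: the step heights; bin_edges: fixed in advance; w: the samples.\n", + "    # The indicator sums reduce to the number of points in each bin.\n", + "    counts, _ = np.histogram(w, bin_edges)\n", + "    return -np.sum(np.diff(bin_edges) * theta) + np.sum(counts * np.log(theta))"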
+ ] + }, + { + "cell_type": "markdown", + "id": "searching-relevance", + "metadata": {}, + "source": [ + "The maximum-likelihood parameters $\\theta$ are the values that maximize the log-likelihood $\\ell(\\theta)$.\n", + "To find these, we can just compute the derivative of $\\ell(\\theta)$, set it to zero, and solve.\n", + "For a specific step $m$, this works out to give\n", + "\n", + "$$\\frac{\\mathrm{d}\\ell(\\theta)}{\\mathrm{d}\\theta_m} = -\\Delta_m + \\frac{N_m}{\\theta_m} \\to \\theta_m^* = \\frac{N_m}{\\Delta_m}$$\n", + "\n", + "where $N_m = \\sum_{k=1}^K \\mathbf{1}[w_k \\in \\Delta_m]$ is the number of points that fall in the $m$-th bin.\n", + "*This is exactly what you would get if you made a histogram!*" + ] + }, + { + "cell_type": "markdown", + "id": "cutting-maker", + "metadata": {}, + "source": [ + "Now we can go one step further and estimate the uncertainty $\\sigma_{\\theta_m^*}$ on $\\theta_m^*$ using the second derivative of $\\ell (\\theta)$\n", + "\n", + "$$\\frac{\\mathrm{d}^2\\ell(\\theta)}{{\\mathrm{d}\\theta_m}^2} = -\\frac{N_m}{{\\theta_m}^2}$$\n", + "\n", + "giving\n", + "\n", + "$$\n", + "\\begin{eqnarray}\n", + "\\sigma_{\\theta_m^*}^2 &=& -\\left[\\frac{\\mathrm{d}^2\\ell(\\theta)}{{\\mathrm{d}\\theta_m}^2}\\right]_{\\theta_m = \\theta_m^*}^{-1} \\\\\n", + "&=& \\frac{N_m}{\\Delta_m^2}\n", + "\\end{eqnarray}\n", + "$$\n", + "\n", + "the familiar $\\sqrt{N}$ counting statistics." + ] + }, + { + "cell_type": "markdown", + "id": "expressed-douglas", + "metadata": {}, + "source": [ + "Okay. Now that we've done all this work to derive this trivial and familiar result, let's get on to generalizing it!" + ] + }, + { + "cell_type": "markdown", + "id": "reverse-street", + "metadata": {}, + "source": [ + "## The Censored Histogram" + ] + }, + { + "cell_type": "markdown", + "id": "macro-aspect", + "metadata": {}, + "source": [ + "The generalization that we'll work through in this post is relevant for a lot of projects in astronomy and I expect that it occurs in many other fields too.\n", + "Suppose that you have a censored dataset where the completeness or detection efficiency $Q(w)$ is a function of the parameters $w$ and you want to determine the underlying population, not the observed distribution.\n", + "This occurs when studying the population of transiting exoplanets because smaller planets on longer periods are harder to detect than larger ones on shorter periods.\n", + "For the luminosity function of stars or quasars, this occurs because the detection efficiency scales with apparent brightness.\n", + "\n", + "In what follows, we'll assume that we know (from some other means) the completeness function $Q(w)$ of our data but you could relax this and infer the parameters of $Q(w)$ simultaneously with $\\theta$.\n", + "Unfortunately the result of this joint inference will no longer be analytic and, in practice, there will probably be large degeneracies." 
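+ ] + }, + { + "cell_type": "markdown", + "id": "added-sanity-text", + "metadata": {}, + "source": [ + "Before tackling the censored case, here's a quick numerical sanity check of the uncensored result (using the `step_log_likelihood` helper sketched above): an off-the-shelf optimizer should land on the counts-over-volumes histogram, up to its convergence tolerance." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "added-sanity-code", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "from scipy.optimize import minimize\n", + "\n", + "# A synthetic (complete) dataset and some fixed bins.\n", + "np.random.seed(42)\n", + "w = np.random.uniform(0, 2, size=200)\n", + "bin_edges = np.linspace(0, 2, 6)\n", + "counts, _ = np.histogram(w, bin_edges)\n", + "\n", + "# Maximize the log-likelihood numerically...\n", + "r = minimize(\n", + "    lambda t: -step_log_likelihood(t, bin_edges, w),\n", + "    np.ones(len(counts)),\n", + "    bounds=[(1e-8, None)] * len(counts),\n", + ")\n", + "\n", + "# ... and compare to the analytic result N_m / Delta_m.\n", + "print(r.x)\n", + "print(counts / np.diff(bin_edges))"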
+ ] + }, + { + "cell_type": "markdown", + "id": "banned-norwegian", + "metadata": {}, + "source": [ + "**Inverse detection efficiency (IDE)**  \n", + "The standard practice in these fields is to make the *weighted* histogram of the points where each point is counted with a weight inversely proportional to the detection efficiency at that point.\n", + "In other words, the estimated step height becomes\n", + "\n", + "$$ \theta_m^* = \frac{1}{\Delta_m} \, \sum_{k=1}^K \frac{\mathbf{1}[w_k \in \Delta_m]}{Q(w_k)} $$\n", + "\n", + "where $Q(w_k)$ is the absolute probability of detecting an object at $w_k$.\n", + "**Don't do this!**\n", + "As you'll see in a moment, while this procedure seems intuitively correct, it gives biased results and the maximum-likelihood result is actually *very simple*.\n", + "\n", + "To see qualitatively why this procedure will, in general, yield biased inferences, let's consider a slightly pathological example.\n", + "In this example, there is a single bin $(0 \le w < 1)$ where the completeness function is\n", + "\n", + "$$Q(w) = \frac{1}{1 + \exp((w - 0.5)/0.01)} \quad.$$\n", + "\n", + "The following figure shows a plot of this example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "signed-thesaurus", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as pl\n", + "\n", + "np.random.seed(12345)\n", + "\n", + "# Define the completeness function.\n", + "Q = lambda w: 1.0 / (1 + np.exp((w - 0.5) / 0.01))\n", + "\n", + "# Sample some points from a constant rate function.\n", + "K = np.random.poisson(50)\n", + "w = np.random.uniform(0, 1, size=K)\n", + "wobs = w[np.random.rand(K) < Q(w)]\n", + "\n", + "# Plot the observations.\n", + "pl.plot(w, 0.5 * np.ones(K), \".r\")\n", + "pl.plot(wobs, 0.5 * np.ones(len(wobs)), \".k\")\n", + "pl.text(0.25, 0.53, \"observed\", ha=\"center\")\n", + "pl.text(0.75, 0.53, \"not observed\", ha=\"center\", color=\"red\")\n", + "\n", + "# Plot the completeness function.\n", + "w0 = np.linspace(0, 1, 5000)\n", + "pl.plot(w0, Q(w0), \"k\")\n", + "\n", + "pl.xlim(-0.1, 1.1)\n", + "pl.ylim(-0.1, 1.1)\n", + "pl.ylabel(r\"$Q(w)$\")\n", + "pl.xlabel(r\"$w$\");" + ] + }, + { + "cell_type": "markdown", + "id": "indoor-america", + "metadata": {}, + "source": [ + "In this figure, the black points show the locations of the points that get detected and the red points indicate points that would have been observed with a perfectly complete sample but were missed because of $Q(w)$.\n", + "If you look at the code above, you'll see that the true step height should be 50 but, if you evaluate the IDE approximation, you'll find that it gives" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "empirical-hello", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "th = sum(1.0 / Q(wobs))\n", + "sigma_th = th / np.sqrt(len(wobs))\n", + "print(\"{0:.1f} ± {1:.1f}\".format(th, sigma_th))" + ] + }, + { + "cell_type": "markdown", + "id": "local-insert", + "metadata": {}, + "source": [ + "This result is about *half* of what it should be and if you look at the figure above, I think you'll see what went wrong: only about half of the points were observed and each of those points was in a region where $Q(w) \approx 1$, meaning that each point gets a weight of 1.\n", + "It's clear that in this case each point should get a weight of 2 (or one-over-a-half) but let's see if we
can derive this intuition more formally." + ] + }, + { + "cell_type": "markdown", + "id": "supreme-lexington", + "metadata": {}, + "source": [ + "**The maximum-likelihood (ML) solution**  \n", + "The first thing to note is that when we have an incomplete catalog, the likelihood of the data given the population is exactly the same as our previous expression (Equation $\ref{eq:poisson}$) but the rate $\Gamma_\theta (w)$ is replaced by the *observable rate* $\hat{\Gamma}_\theta(w) = Q(w)\,\Gamma_\theta (w)$.\n", + "Therefore, for a piecewise constant model of $\Gamma_\theta(w)$, the log-likelihood becomes\n", + "\n", + "$$ \ell(\theta) = -\sum_{n=1}^N \theta_n \int_{\Delta_n} Q(w)\,\mathrm{d}w + \sum_{k=1}^K \left[\ln Q(w_k) + \sum_{n=1}^N \mathbf{1}[w_k \in \Delta_n]\,\ln \theta_n \right] \quad.$$" + ] + }, + { + "cell_type": "markdown", + "id": "foster-feedback", + "metadata": {}, + "source": [ + "As before, let's take the derivative of this with respect to a specific step height $\theta_m$ to find\n", + "\n", + "$$\frac{\mathrm{d}\ell(\theta)}{\mathrm{d}\theta_m} = -\int_{\Delta_m} Q(w)\,\mathrm{d}w + \frac{N_m}{\theta_m} \to \theta_m^* = \frac{N_m}{\int_{\Delta_m}Q(w)\,\mathrm{d}w}$$\n", + "\n", + "with uncertainty\n", + "\n", + "$$\sigma_{\theta_m^*} = \frac{\sqrt{N_m}}{\int_{\Delta_m}Q(w)\,\mathrm{d}w} \quad.$$" + ] + }, + { + "cell_type": "markdown", + "id": "consistent-ecology", + "metadata": {}, + "source": [ + "This result shows that the maximum-likelihood result is also a weighted histogram but, instead of weighting each point by its inverse *per-object* completeness, every point in a bin gets the same weight: the inverse of the *mean completeness in that bin*.\n", + "In the limit of a constant completeness function, the IDE procedure will give the correct result but whenever the detection efficiency varies across the bin (or if there are latent dimensions; I'll come back to this in the next example), **the IDE result is just plain wrong!**\n", + "To make matters worse, as you collect more data, the IDE result gets more precise but it's still just as wrong." + ] + }, + { + "cell_type": "markdown", + "id": "advisory-consideration", + "metadata": {}, + "source": [ + "Luckily, the ML solution will probably be easy to compute, even if you need to compute the weighting integrals numerically.\n", + "For our extreme example from above, the integral becomes $\sim 0.5$ and the maximum-likelihood estimate of the step height is" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bulgarian-blood", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "# Evaluate the completeness integral numerically.\n", + "weight = np.mean(Q(np.random.rand(50000)))\n", + "\n", + "# Compute the maximum-likelihood step height.\n", + "th = len(wobs) / weight\n", + "sigma_th = th / np.sqrt(len(wobs))\n", + "print(\"{0:.1f} ± {1:.1f}\".format(th, sigma_th))" + ] + }, + { + "cell_type": "markdown", + "id": "civil-metabolism", + "metadata": {}, + "source": [ + "which is consistent with the correct result: 50."
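+ ] + }, + { + "cell_type": "markdown", + "id": "added-quad-text", + "metadata": {}, + "source": [ + "As an aside, in one dimension we don't need Monte Carlo for the weighting integral: `scipy.integrate.quad` gives the same answer (the `points` argument just warns the integrator about the sharp feature at $w = 0.5$):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "added-quad-code", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "from scipy.integrate import quad\n", + "\n", + "# Integrate the completeness function over the (unit volume) bin.\n", + "weight_quad, abserr = quad(Q, 0, 1, points=[0.5])\n", + "th = len(wobs) / weight_quad\n", + "sigma_th = th / np.sqrt(len(wobs))\n", + "print(\"integral = {0:.4f}\".format(weight_quad))\n", + "print(\"{0:.1f} ± {1:.1f}\".format(th, sigma_th))"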
+ ] + }, + { + "cell_type": "markdown", + "id": "senior-asbestos", + "metadata": {}, + "source": [ + "## A More Realistic Example" + ] + }, + { + "cell_type": "markdown", + "id": "likely-bidding", + "metadata": {}, + "source": [ + "Now that we've derived a formalism and developed some intuition for working with censored datasets, let's work through a more realistic example to see whether or not this machinery matters.\n", + "In this example, we'll add another level of complexity that is especially relevant in the study of exoplanets: latent dimensions.\n", + "\n", + "For transiting exoplanets, the detection efficiency scales approximately with the signal strength times the geometric transit probability\n", + "\n", + "$$Q(w) \propto \frac{R_\star}{P^{2/3}\,{M_\star}^{1/3}}\,\left[\frac{R}{R_\star}\right]^2\,\frac{1}{\sqrt{P}}$$\n", + "\n", + "where $R_\star$ is the star's radius, $M_\star$ is the stellar mass, $P$ is the planet's orbital period, and $R$ is the planet's radius.\n", + "Then, if you want to measure the radius distribution of exoplanets, the standard practice is to run the one-dimensional IDE on the observed data, computing the detection efficiency independently for each data point and ignoring the fact that the distribution is actually four-dimensional.\n", + "We won't work through exactly this example; instead, we'll use a simpler two-dimensional toy problem." + ] + }, + { + "cell_type": "markdown", + "id": "intense-portrait", + "metadata": {}, + "source": [ + "**The setup**  \n", + "For this example, we'll consider a two-dimensional problem with parameters $a \in [1,\,2]$ and $b \in [1,\,10]$ where the detection efficiency is given by\n", + "\n", + "$$Q(a,\,b) = 0.8 \, \left[ 1 + \exp \left(10\,\frac{a^2}{\sqrt{b}} - 10\right) \right]^{-1} \quad.$$\n", + "\n", + "Here's a plot of this function:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ranking-manner", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "# Define the detection efficiency function.\n", + "def detection_efficiency(a, b):\n", + "    return 0.8 / (1 + np.exp((a**2 / np.sqrt(b) - 1) / 0.1))\n", + "\n", + "\n", + "# Plot this function on a grid.\n", + "amn, amx, bmn, bmx = 1, 2, 1, 10\n", + "A, B = np.meshgrid(\n", + "    np.linspace(amn, amx, 101), np.linspace(bmn, bmx, 103), indexing=\"ij\"\n", + ")\n", + "pl.pcolor(A, B, detection_efficiency(A, B), cmap=\"gray\", vmin=0, vmax=1)\n", + "pl.xlim(amn, amx)\n", + "pl.ylim(bmn, bmx)\n", + "pl.xlabel(r\"$a$\")\n", + "pl.ylabel(r\"$b$\")\n", + "pl.colorbar(label=r\"$Q(a,\,b)$\");" + ] + }, + { + "cell_type": "markdown", + "id": "orange-scott", + "metadata": {}, + "source": [ + "**A synthetic dataset**  \n", + "Now let's generate a sample dataset and censor it using this completeness function.\n", + "For simplicity, we'll use a *uniform* population where a complete sample would be uniformly distributed in $a$ and $b$:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "unlimited-blake", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "# Generate the true (uncensored) samples.\n", + "np.random.seed(1234)\n", + "true_rate = 500\n", + "K = np.random.poisson(true_rate * (amx - amn) * (bmx - bmn))\n", + "samples = np.vstack(\n", + "    [\n", + "        np.random.uniform(amn, amx, size=K),\n", + "        np.random.uniform(bmn, bmx, size=K),\n", + "    ]\n", + ").T\n", + "\n", + "# Censor this dataset using the
completeness function.\n", + "q = detection_efficiency(samples[:, 0], samples[:, 1])\n", + "m = np.random.rand(K) < q\n", + "dataset = samples[m]\n", + "q = q[m]\n", + "\n", + "# Overplot the dataset on the completeness function.\n", + "pl.pcolor(A, B, detection_efficiency(A, B), cmap=\"gray\", vmin=0, vmax=1, alpha=0.5)\n", + "pl.plot(samples[:, 0], samples[:, 1], \".r\", ms=4)\n", + "pl.plot(dataset[:, 0], dataset[:, 1], \".b\", ms=4)\n", + "pl.xlim(amn, amx)\n", + "pl.ylim(bmn, bmx)\n", + "pl.xlabel(r\"$a$\")\n", + "pl.ylabel(r\"$b$\")\n", + "pl.colorbar(label=r\"$Q(a,\,b)$\");" + ] + }, + { + "cell_type": "markdown", + "id": "elegant-warrior", + "metadata": {}, + "source": [ + "In this figure, the blue points are objects that passed the completeness cut and made it into the catalog, and the red points were missed even though they would have been detected in a complete catalog.\n", + "\n", + "**Using inverse detection efficiency**  \n", + "Now, let's say that we're only interested in the $b$ distribution and even though we know that it should be a flat line, we'll model it as a histogram with a few bins.\n", + "To start, let's look at the IDE result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "located-remove", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "# Define the bins.\n", + "bin_edges = np.linspace(bmn, bmx, 11)\n", + "\n", + "# Compute the inverse detection efficiency model.\n", + "n, _ = np.histogram(dataset[:, 1], bin_edges)\n", + "n_ide, _ = np.histogram(dataset[:, 1], bin_edges, weights=1.0 / q)\n", + "\n", + "# This function makes a histogram plot given a set of step heights.\n", + "def plot_hist(bin_edges, bin_heights, reluncert=None, offset=0, **kwargs):\n", + "    x = np.array(list(zip(bin_edges[:-1], bin_edges[1:]))).flatten()\n", + "    bh = bin_heights / np.diff(bin_edges)\n", + "    y = np.array(list(zip(bh, bh))).flatten()\n", + "    pl.plot(x, y, **kwargs)\n", + "    if reluncert is not None:\n", + "        pl.errorbar(\n", + "            0.5 * (bin_edges[:-1] + bin_edges[1:]) + offset,\n", + "            bh,\n", + "            yerr=bh * reluncert,\n", + "            fmt=\"+\",\n", + "            capsize=0,\n", + "            color=kwargs.get(\"color\", \"k\"),\n", + "        )\n", + "\n", + "\n", + "pl.gca().axhline(true_rate, lw=3, color=\"k\", alpha=0.1)\n", + "plot_hist(bin_edges, n_ide, reluncert=1 / np.sqrt(n), color=\"k\")\n", + "pl.xlabel(r\"$b$\")\n", + "pl.ylabel(r\"$\Gamma(b)$\")\n", + "pl.ylim(0, 800);" + ] + }, + { + "cell_type": "markdown", + "id": "pregnant-monday", + "metadata": {}, + "source": [ + "In this figure, the *correct* result (the rate that was used to generate the dataset) is shown as the thick gray line and the IDE estimate of this function is shown as the step function with error bars.\n", + "As expected, the estimate is noisy and the error bars seem to be overly confident (especially in the regions of lower detection efficiency)."
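+ ] + }, + { + "cell_type": "markdown", + "id": "added-pulls-text", + "metadata": {}, + "source": [ + "We can put a rough number on that impression using the \"pull\" of each bin: the difference between the estimate and the truth, measured in units of the quoted error bar.\n", + "For honest error bars, the root-mean-square pull should be about one (this quick check assumes every bin has at least one detected point):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "added-pulls-code", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "# The quoted uncertainty per bin is est / sqrt(n), matching the error bars above.\n", + "est = n_ide / np.diff(bin_edges)\n", + "pulls = (est - true_rate) / (est / np.sqrt(n))\n", + "print(np.round(pulls, 1))\n", + "print(\"rms pull: {0:.1f}\".format(np.sqrt(np.mean(pulls**2))))"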
+ ] + }, + { + "cell_type": "markdown", + "id": "horizontal-scenario", + "metadata": {}, + "source": [ + "**The maximum-likelihood**  \n", + "For comparison, let's use the same dataset and apply the maximum-likelihood method that we derived above.\n", + "First, we need to integrate (numerically) the completeness function over the bins, where the $n$-th bin is the two-dimensional area defined by\n", + "\n", + "$$b_n \le b < b_{n+1} \quad \mathrm{and} \quad 1 \le a < 2 \quad.$$" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "suburban-rental", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "# Loop over the bins and estimate the mean of Q(a, b) over each 2-D bin\n", + "# using Monte Carlo samples in a and b.\n", + "N = 50000\n", + "bq = np.empty(len(bin_edges) - 1)\n", + "a0 = np.random.uniform(amn, amx, N)\n", + "for i in range(len(bq)):\n", + "    b0 = np.random.uniform(bin_edges[i], bin_edges[i + 1], N)\n", + "    bq[i] = np.mean(detection_efficiency(a0, b0))\n", + "\n", + "# Plot the integrated completeness function.\n", + "plot_hist(bin_edges, bq, color=\"k\")\n", + "pl.xlabel(r\"$b$\")\n", + "pl.ylabel(r\"$Q_\mathrm{int}(b)$\")\n", + "pl.ylim(0, 1);" + ] + }, + { + "cell_type": "markdown", + "id": "union-heath", + "metadata": {}, + "source": [ + "Using this integrated detection efficiency function, we can easily compute the ML step function given the prescription derived above and compare it to the IDE result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "pointed-locator", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "pl.gca().axhline(true_rate, lw=3, color=\"k\", alpha=0.1)\n", + "plot_hist(\n", + "    bin_edges, n_ide, reluncert=1 / np.sqrt(n), offset=-0.05, ls=\"dotted\", color=\"k\"\n", + ")\n", + "plot_hist(bin_edges, n / bq, reluncert=1 / np.sqrt(n), offset=0.05, color=\"k\")\n", + "pl.xlabel(r\"$b$\")\n", + "pl.ylabel(r\"$\Gamma(b)$\")\n", + "pl.ylim(0, 800);" + ] + }, + { + "cell_type": "markdown", + "id": "tropical-particle", + "metadata": {}, + "source": [ + "In this figure, the IDE result is shown as a dotted line and the ML result is given by the solid black line with error bars.\n", + "As before, the correct result is shown as the thick gray line.\n", + "It's clear from this comparison that the ML result is *much* better than the IDE computation—the results are consistent with the truth!"
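+ ] + }, + { + "cell_type": "markdown", + "id": "added-pulls-ml-text", + "metadata": {}, + "source": [ + "Repeating the pull computation from above for the ML heights makes the comparison quantitative; the root-mean-square pull should now be much closer to one:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "added-pulls-ml-code", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "est_ml = n / bq / np.diff(bin_edges)\n", + "pulls_ml = (est_ml - true_rate) / (est_ml / np.sqrt(n))\n", + "print(\"IDE rms pull: {0:.1f}\".format(np.sqrt(np.mean(pulls**2))))\n", + "print(\"ML rms pull: {0:.1f}\".format(np.sqrt(np.mean(pulls_ml**2))))"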
+ ] + }, + { + "cell_type": "markdown", + "id": "cutting-silicon", + "metadata": {}, + "source": [ + "## Summary & Conclusions" + ] + }, + { + "cell_type": "markdown", + "id": "billion-miami", + "metadata": {}, + "source": [ + "In this post, we've derived a simple, maximum-likelihood method for making histograms of incomplete datasets.\n", + "This method should *always* give less biased results—at minimal extra computational cost—than the standard techniques used in astronomy and in some cases the results will be *drastically different*.\n", + "How much the two methods' results differ is going to be very problem dependent and it's possible that the existing results in the literature aren't very wrong because of this effect.\n", + "That being said, the \"correct\" method isn't really any more complicated than what we're doing already so I hope that someone will read this and do The Right Thing™ next time!\n", + "\n", + "Another huge benefit of casting the histogram procedure as a problem in probabilistic data analysis is that it forces us to think explicitly about our assumptions.\n", + "The result derived here is going to be the best possible result under the following assumptions:\n", + "\n", + "1. The data are independent samples from an inhomogeneous Poisson process where the rate function is a piecewise constant step function,\n", + "2. the measurement uncertainties on the data are negligible, and\n", + "3. the detection efficiency function $Q(w)$ is known but it doesn't need to be constant.\n", + "\n", + "In contrast, the IDE procedure is *only* correct if the detection efficiency is *constant* across each bin!\n", + "These assumptions are slightly less restrictive than those of the standard histogram procedure but assumption (2) is still a fairly glaring simplification and it is rarely satisfied in real astronomical datasets.\n", + "Therefore, in my next post, I'll relax the assumption of negligible uncertainties and demonstrate where and how this becomes a problem.\n", + "\n", + "Let me know if you have any comments by leaving one below, [tweeting at me](https://twitter.com/exoplaneteer), and/or [opening a GitHub issue](https://github.com/dfm/dfm.io/issues)." + ] + }, + { + "cell_type": "markdown", + "id": "noticed-hardware", + "metadata": {}, + "source": [ + "**Acknowledgments**  \n", + "*I'd like to thank [Ruth Angus](http://ruthang.us) and [Morgan Fouesneau](https://twitter.com/mfouesneau) for useful comments.*" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "interpreted-nelson", + "metadata": { + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/requirements.in b/requirements.in new file mode 100644 index 0000000..82081ab --- /dev/null +++ b/requirements.in @@ -0,0 +1,7 @@ +notebook +jupyter +ipykernel +nbconvert +numpy +scipy +matplotlib diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..e69de29 diff --git a/runtime.txt b/runtime.txt new file mode 100644 index 0000000..8fdd907 --- /dev/null +++ b/runtime.txt @@ -0,0 +1 @@ +python-3.9