From eeaf6e86d505acc74f0c5a6dab7269f5333b824c Mon Sep 17 00:00:00 2001 From: Sebastien Courroux Date: Wed, 13 Dec 2023 10:35:09 +0100 Subject: [PATCH] Add support for Panchro camera series --- .gitignore | 2 - Alignment v2.ipynb | 853 + Alignment-10Band.ipynb | 25 +- Alignment-RigRelatives.ipynb | 32 +- Alignment.ipynb | 39 +- Batch Processing v2.ipynb | 355 + Batch Processing-10Band.ipynb | 699 +- Batch Processing.ipynb | 61 +- Captures.ipynb | 10 +- ImageSets.ipynb | 10 +- Images.ipynb | 8 +- MicaSense Image Processing Setup.ipynb | 70 +- MicaSense Image Processing Tutorial 1.ipynb | 30 +- MicaSense Image Processing Tutorial 2.ipynb | 22 +- MicaSense Image Processing Tutorial 3.ipynb | 20 +- Panels.ipynb | 19 +- README.md | 109 +- batch_processing_script.py | 166 + data/0000SET/000/IMG_0000_1.tif | 3 - data/0000SET/000/IMG_0000_2.tif | 3 - data/0000SET/000/IMG_0000_3.tif | 3 - data/0000SET/000/IMG_0000_4.tif | 3 - data/0000SET/000/IMG_0000_5.tif | 3 - data/0000SET/000/IMG_0001_1.tif | 3 - data/0000SET/000/IMG_0001_2.tif | 3 - data/0000SET/000/IMG_0001_3.tif | 3 - data/0000SET/000/IMG_0001_4.tif | 3 - data/0000SET/000/IMG_0001_5.tif | 3 - data/0001SET/000/IMG_0002_4.tif | 3 - data/0001SET/000/IMG_0003_1.tif | 3 - data/0002SET/000/IMG_0000_1.tif | 3 - data/0002SET/000/IMG_0000_2.tif | 3 - data/0002SET/000/IMG_0000_3.tif | 3 - data/0002SET/000/IMG_0000_4.tif | 3 - data/0002SET/000/IMG_0000_5.tif | 3 - data/10BANDSET/000/IMG_0000_1.tif | 3 - data/10BANDSET/000/IMG_0000_10.tif | 3 - data/10BANDSET/000/IMG_0000_2.tif | 3 - data/10BANDSET/000/IMG_0000_3.tif | 3 - data/10BANDSET/000/IMG_0000_4.tif | 3 - data/10BANDSET/000/IMG_0000_5.tif | 3 - data/10BANDSET/000/IMG_0000_6.tif | 3 - data/10BANDSET/000/IMG_0000_7.tif | 3 - data/10BANDSET/000/IMG_0000_8.tif | 3 - data/10BANDSET/000/IMG_0000_9.tif | 3 - data/10BANDSET/000/IMG_0431_1.tif | 3 - data/10BANDSET/000/IMG_0431_10.tif | 3 - data/10BANDSET/000/IMG_0431_2.tif | 3 - data/10BANDSET/000/IMG_0431_3.tif | 3 - data/10BANDSET/000/IMG_0431_4.tif | 3 - data/10BANDSET/000/IMG_0431_5.tif | 3 - data/10BANDSET/000/IMG_0431_6.tif | 3 - data/10BANDSET/000/IMG_0431_7.tif | 3 - data/10BANDSET/000/IMG_0431_8.tif | 3 - data/10BANDSET/000/IMG_0431_9.tif | 3 - data/ALTUM-PT/IMG_0000_1.tif | 3 + data/ALTUM-PT/IMG_0000_2.tif | 3 + data/ALTUM-PT/IMG_0000_3.tif | 3 + data/ALTUM-PT/IMG_0000_4.tif | 3 + data/ALTUM-PT/IMG_0000_5.tif | 3 + data/ALTUM-PT/IMG_0000_6.tif | 3 + data/ALTUM-PT/IMG_0000_7.tif | 3 + data/ALTUM-PT/IMG_0010_1.tif | 3 + data/ALTUM-PT/IMG_0010_2.tif | 3 + data/ALTUM-PT/IMG_0010_3.tif | 3 + data/ALTUM-PT/IMG_0010_4.tif | 3 + data/ALTUM-PT/IMG_0010_5.tif | 3 + data/ALTUM-PT/IMG_0010_6.tif | 3 + data/ALTUM-PT/IMG_0010_7.tif | 3 + data/ALTUM/IMG_0000_1.tif | 3 + data/ALTUM/IMG_0000_2.tif | 3 + data/ALTUM/IMG_0000_3.tif | 3 + data/ALTUM/IMG_0000_4.tif | 3 + data/ALTUM/IMG_0000_5.tif | 3 + data/ALTUM/IMG_0000_6.tif | 3 + data/ALTUM/IMG_0021_1.tif | 3 + data/ALTUM/IMG_0021_2.tif | 3 + data/ALTUM/IMG_0021_3.tif | 3 + data/ALTUM/IMG_0021_4.tif | 3 + data/ALTUM/IMG_0021_5.tif | 3 + data/ALTUM/IMG_0021_6.tif | 3 + data/ALTUM0SET/000/IMG_0000_1.tif | 3 - data/ALTUM1SET/000/IMG_0000_1.tif | 3 - data/ALTUM1SET/000/IMG_0000_2.tif | 3 - data/ALTUM1SET/000/IMG_0000_3.tif | 3 - data/ALTUM1SET/000/IMG_0000_4.tif | 3 - data/ALTUM1SET/000/IMG_0000_5.tif | 3 - data/ALTUM1SET/000/IMG_0000_6.tif | 3 - data/ALTUM1SET/000/IMG_0008_1.tif | 3 - data/ALTUM1SET/000/IMG_0008_2.tif | 3 - data/ALTUM1SET/000/IMG_0008_3.tif | 3 - data/ALTUM1SET/000/IMG_0008_4.tif | 3 - 
data/ALTUM1SET/000/IMG_0008_5.tif | 3 - data/ALTUM1SET/000/IMG_0008_6.tif | 3 - data/ALTUM1SET/000/IMG_0245_1.tif | 3 - data/ALTUM1SET/000/IMG_0245_2.tif | 3 - data/ALTUM1SET/000/IMG_0245_3.tif | 3 - data/ALTUM1SET/000/IMG_0245_4.tif | 3 - data/ALTUM1SET/000/IMG_0245_5.tif | 3 - data/ALTUM1SET/000/IMG_0245_6.tif | 3 - data/REDEDGE-MX-DUAL/IMG_0001_1.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0001_10.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0001_2.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0001_3.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0001_4.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0001_5.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0001_6.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0001_7.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0001_8.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0001_9.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0007_1.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0007_10.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0007_2.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0007_3.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0007_4.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0007_5.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0007_6.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0007_7.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0007_8.tif | 3 + data/REDEDGE-MX-DUAL/IMG_0007_9.tif | 3 + data/REDEDGE-MX/IMG_0001_1.tif | 3 + data/REDEDGE-MX/IMG_0001_2.tif | 3 + data/REDEDGE-MX/IMG_0001_3.tif | 3 + data/REDEDGE-MX/IMG_0001_4.tif | 3 + data/REDEDGE-MX/IMG_0001_5.tif | 3 + data/REDEDGE-MX/IMG_0020_1.tif | 3 + data/REDEDGE-MX/IMG_0020_2.tif | 3 + data/REDEDGE-MX/IMG_0020_3.tif | 3 + data/REDEDGE-MX/IMG_0020_4.tif | 3 + data/REDEDGE-MX/IMG_0020_5.tif | 3 + data/REDEDGE-P/IMG_0000_1.tif | 3 + data/REDEDGE-P/IMG_0000_2.tif | 3 + data/REDEDGE-P/IMG_0000_3.tif | 3 + data/REDEDGE-P/IMG_0000_4.tif | 3 + data/REDEDGE-P/IMG_0000_5.tif | 3 + data/REDEDGE-P/IMG_0000_6.tif | 3 + data/REDEDGE-P/IMG_0011_1.tif | 3 + data/REDEDGE-P/IMG_0011_2.tif | 3 + data/REDEDGE-P/IMG_0011_3.tif | 3 + data/REDEDGE-P/IMG_0011_4.tif | 3 + data/REDEDGE-P/IMG_0011_5.tif | 3 + data/REDEDGE-P/IMG_0011_6.tif | 3 + docs/Alignment v2.html | 16037 ++++++++++++++++ docs/Alignment-10Band.html | 15702 +++++++++++++++ docs/Alignment-RigRelatives.html | 15397 +++++++++++++++ docs/Alignment.html | 15551 +++++++++++++++ docs/Batch Processing v2.html | 15707 +++++++++++++++ docs/Batch Processing.html | 15747 +++++++++++++++ docs/Captures.html | 14760 ++++++++++++++ docs/ImageSets.html | 15632 +++++++++++++++ docs/Images.html | 14746 ++++++++++++++ docs/MicaSense Image Processing Setup.html | 14901 ++++++++++++++ ...MicaSense Image Processing Tutorial 1.html | 15372 +++++++++++++++ ...MicaSense Image Processing Tutorial 2.html | 15115 +++++++++++++++ ...MicaSense Image Processing Tutorial 3.html | 14927 ++++++++++++++ docs/Panels.html | 14797 ++++++++++++++ index.ipynb | 8 +- micasense/capture.py | 438 +- micasense/dls.py | 76 +- micasense/image.py | 372 +- micasense/imageset.py | 236 +- micasense/imageutils.py | 487 +- micasense/metadata.py | 194 +- micasense/panel.py | 143 +- micasense/plotutils.py | 67 +- micasense/utils.py | 81 +- micasense_conda_env.yml | 26 +- setup.py | 8 +- tests/conftest.py | 102 +- tests/test_capture.py | 150 +- tests/test_dls.py | 134 +- tests/test_image.py | 56 +- tests/test_imageset.py | 27 +- tests/test_imageutils.py | 129 +- tests/test_metadata.py | 77 +- tests/test_panel.py | 160 +- 176 files changed, 218676 insertions(+), 1588 deletions(-) create mode 100644 Alignment v2.ipynb create mode 100644 Batch Processing v2.ipynb mode change 100755 => 100644 MicaSense Image Processing Setup.ipynb mode change 100755 => 100644 MicaSense Image Processing Tutorial 
1.ipynb create mode 100644 batch_processing_script.py delete mode 100644 data/0000SET/000/IMG_0000_1.tif delete mode 100644 data/0000SET/000/IMG_0000_2.tif delete mode 100644 data/0000SET/000/IMG_0000_3.tif delete mode 100644 data/0000SET/000/IMG_0000_4.tif delete mode 100644 data/0000SET/000/IMG_0000_5.tif delete mode 100644 data/0000SET/000/IMG_0001_1.tif delete mode 100644 data/0000SET/000/IMG_0001_2.tif delete mode 100644 data/0000SET/000/IMG_0001_3.tif delete mode 100644 data/0000SET/000/IMG_0001_4.tif delete mode 100644 data/0000SET/000/IMG_0001_5.tif delete mode 100755 data/0001SET/000/IMG_0002_4.tif delete mode 100755 data/0001SET/000/IMG_0003_1.tif delete mode 100644 data/0002SET/000/IMG_0000_1.tif delete mode 100644 data/0002SET/000/IMG_0000_2.tif delete mode 100644 data/0002SET/000/IMG_0000_3.tif delete mode 100644 data/0002SET/000/IMG_0000_4.tif delete mode 100644 data/0002SET/000/IMG_0000_5.tif delete mode 100644 data/10BANDSET/000/IMG_0000_1.tif delete mode 100644 data/10BANDSET/000/IMG_0000_10.tif delete mode 100644 data/10BANDSET/000/IMG_0000_2.tif delete mode 100644 data/10BANDSET/000/IMG_0000_3.tif delete mode 100644 data/10BANDSET/000/IMG_0000_4.tif delete mode 100644 data/10BANDSET/000/IMG_0000_5.tif delete mode 100644 data/10BANDSET/000/IMG_0000_6.tif delete mode 100644 data/10BANDSET/000/IMG_0000_7.tif delete mode 100644 data/10BANDSET/000/IMG_0000_8.tif delete mode 100644 data/10BANDSET/000/IMG_0000_9.tif delete mode 100644 data/10BANDSET/000/IMG_0431_1.tif delete mode 100644 data/10BANDSET/000/IMG_0431_10.tif delete mode 100644 data/10BANDSET/000/IMG_0431_2.tif delete mode 100644 data/10BANDSET/000/IMG_0431_3.tif delete mode 100644 data/10BANDSET/000/IMG_0431_4.tif delete mode 100644 data/10BANDSET/000/IMG_0431_5.tif delete mode 100644 data/10BANDSET/000/IMG_0431_6.tif delete mode 100644 data/10BANDSET/000/IMG_0431_7.tif delete mode 100644 data/10BANDSET/000/IMG_0431_8.tif delete mode 100644 data/10BANDSET/000/IMG_0431_9.tif create mode 100644 data/ALTUM-PT/IMG_0000_1.tif create mode 100644 data/ALTUM-PT/IMG_0000_2.tif create mode 100644 data/ALTUM-PT/IMG_0000_3.tif create mode 100644 data/ALTUM-PT/IMG_0000_4.tif create mode 100644 data/ALTUM-PT/IMG_0000_5.tif create mode 100644 data/ALTUM-PT/IMG_0000_6.tif create mode 100644 data/ALTUM-PT/IMG_0000_7.tif create mode 100644 data/ALTUM-PT/IMG_0010_1.tif create mode 100644 data/ALTUM-PT/IMG_0010_2.tif create mode 100644 data/ALTUM-PT/IMG_0010_3.tif create mode 100644 data/ALTUM-PT/IMG_0010_4.tif create mode 100644 data/ALTUM-PT/IMG_0010_5.tif create mode 100644 data/ALTUM-PT/IMG_0010_6.tif create mode 100644 data/ALTUM-PT/IMG_0010_7.tif create mode 100644 data/ALTUM/IMG_0000_1.tif create mode 100644 data/ALTUM/IMG_0000_2.tif create mode 100644 data/ALTUM/IMG_0000_3.tif create mode 100644 data/ALTUM/IMG_0000_4.tif create mode 100644 data/ALTUM/IMG_0000_5.tif create mode 100644 data/ALTUM/IMG_0000_6.tif create mode 100644 data/ALTUM/IMG_0021_1.tif create mode 100644 data/ALTUM/IMG_0021_2.tif create mode 100644 data/ALTUM/IMG_0021_3.tif create mode 100644 data/ALTUM/IMG_0021_4.tif create mode 100644 data/ALTUM/IMG_0021_5.tif create mode 100644 data/ALTUM/IMG_0021_6.tif delete mode 100644 data/ALTUM0SET/000/IMG_0000_1.tif delete mode 100644 data/ALTUM1SET/000/IMG_0000_1.tif delete mode 100644 data/ALTUM1SET/000/IMG_0000_2.tif delete mode 100644 data/ALTUM1SET/000/IMG_0000_3.tif delete mode 100644 data/ALTUM1SET/000/IMG_0000_4.tif delete mode 100644 data/ALTUM1SET/000/IMG_0000_5.tif delete mode 100644 
data/ALTUM1SET/000/IMG_0000_6.tif delete mode 100644 data/ALTUM1SET/000/IMG_0008_1.tif delete mode 100644 data/ALTUM1SET/000/IMG_0008_2.tif delete mode 100644 data/ALTUM1SET/000/IMG_0008_3.tif delete mode 100644 data/ALTUM1SET/000/IMG_0008_4.tif delete mode 100644 data/ALTUM1SET/000/IMG_0008_5.tif delete mode 100644 data/ALTUM1SET/000/IMG_0008_6.tif delete mode 100644 data/ALTUM1SET/000/IMG_0245_1.tif delete mode 100644 data/ALTUM1SET/000/IMG_0245_2.tif delete mode 100644 data/ALTUM1SET/000/IMG_0245_3.tif delete mode 100644 data/ALTUM1SET/000/IMG_0245_4.tif delete mode 100644 data/ALTUM1SET/000/IMG_0245_5.tif delete mode 100644 data/ALTUM1SET/000/IMG_0245_6.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0001_1.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0001_10.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0001_2.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0001_3.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0001_4.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0001_5.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0001_6.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0001_7.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0001_8.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0001_9.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0007_1.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0007_10.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0007_2.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0007_3.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0007_4.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0007_5.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0007_6.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0007_7.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0007_8.tif create mode 100644 data/REDEDGE-MX-DUAL/IMG_0007_9.tif create mode 100644 data/REDEDGE-MX/IMG_0001_1.tif create mode 100644 data/REDEDGE-MX/IMG_0001_2.tif create mode 100644 data/REDEDGE-MX/IMG_0001_3.tif create mode 100644 data/REDEDGE-MX/IMG_0001_4.tif create mode 100644 data/REDEDGE-MX/IMG_0001_5.tif create mode 100644 data/REDEDGE-MX/IMG_0020_1.tif create mode 100644 data/REDEDGE-MX/IMG_0020_2.tif create mode 100644 data/REDEDGE-MX/IMG_0020_3.tif create mode 100644 data/REDEDGE-MX/IMG_0020_4.tif create mode 100644 data/REDEDGE-MX/IMG_0020_5.tif create mode 100644 data/REDEDGE-P/IMG_0000_1.tif create mode 100644 data/REDEDGE-P/IMG_0000_2.tif create mode 100644 data/REDEDGE-P/IMG_0000_3.tif create mode 100644 data/REDEDGE-P/IMG_0000_4.tif create mode 100644 data/REDEDGE-P/IMG_0000_5.tif create mode 100644 data/REDEDGE-P/IMG_0000_6.tif create mode 100644 data/REDEDGE-P/IMG_0011_1.tif create mode 100644 data/REDEDGE-P/IMG_0011_2.tif create mode 100644 data/REDEDGE-P/IMG_0011_3.tif create mode 100644 data/REDEDGE-P/IMG_0011_4.tif create mode 100644 data/REDEDGE-P/IMG_0011_5.tif create mode 100644 data/REDEDGE-P/IMG_0011_6.tif create mode 100644 docs/Alignment v2.html create mode 100644 docs/Alignment-10Band.html create mode 100644 docs/Alignment-RigRelatives.html create mode 100644 docs/Alignment.html create mode 100644 docs/Batch Processing v2.html create mode 100644 docs/Batch Processing.html create mode 100644 docs/Captures.html create mode 100644 docs/ImageSets.html create mode 100644 docs/Images.html create mode 100644 docs/MicaSense Image Processing Setup.html create mode 100644 docs/MicaSense Image Processing Tutorial 1.html create mode 100644 docs/MicaSense Image Processing Tutorial 2.html create mode 100644 docs/MicaSense Image Processing Tutorial 3.html create mode 100644 docs/Panels.html mode 
change 100644 => 100755 micasense/utils.py diff --git a/.gitignore b/.gitignore index b8fb8868..22acdfca 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,6 @@ # Add any directories, files, or patterns you don't want to be tracked by version control *.pyc .ipynb_checkpoints -.vscode -/.idea __pycache__ /dist/ /*.egg-info \ No newline at end of file diff --git a/Alignment v2.ipynb b/Alignment v2.ipynb new file mode 100644 index 00000000..f82ea663 --- /dev/null +++ b/Alignment v2.ipynb @@ -0,0 +1,853 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "573422ea", + "metadata": {}, + "source": [ + "# Active Image Alignment\n", + "For most use cases, each band of a multispectral capture must be aligned with the other bands in order to create meaningful data. In this tutorial, we show how to align the bands to each other using open source OpenCV utilities.\n", + "\n", + "Image alignment allows the combination of images into true-color (RGB) and false color (such as CIR) composites, useful for scouting using single images as well as for display and management uses. In addition to composite images, alignment allows the calculation of pixel-accurate indices such as NDVI or NDRE at the single image level which can be very useful for applications like plant counting and coverage estimations, where mosaicing artifacts may otherwise skew analysis results.\n", + "\n", + "The image alignment method described below tends to work well on images with abundant image features, or areas of significant contrast. Cars, buildings, parking lots, and roads tend to provide the best results. This approach may not work well on images which contain few features or very repetitive features, such as full canopy row crops or fields of repetitive small crops such as lettuce or strawberries. We will discuss the advantages and disadvantages of these methods below.\n", + "\n", + "The functions behind this alignment process can work with most versions of RedEdge and Altum firmware. They will work best with RedEdge (3,M,MX) versions above 3.2.0 which include the \"RigRelatives\" tags, and all RedEdge-P/Altum/Altum-PT imagery. These tags provide a starting point for the image transformation and can help to ensure convergence of the algorithm.\n", + "\n", + "# Opening Images\n", + "As we have done in previous examples, we use the micasense.capture class to open, radiometrically correct, and visualize all the bands of a MicaSense capture.\n", + "\n", + "First, we'll load the `autoreload` extension. This lets us change underlying code (such as library functions) without having to reload the entire workbook and kernel. This is useful in this workbook because the cell that runs the alignment can take a long time to run, so with the `autoreload` extension we can update the code after the alignment step for analysis and visualization without needing to re-compute the alignments each time." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b09cd3b1", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7426068b", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "import os, glob\n", + "import micasense.capture as capture\n", + "%matplotlib inline\n", + "from pathlib import Path\n", + "import matplotlib.pyplot as plt\n", + "plt.rcParams[\"figure.facecolor\"] = \"w\"\n", + "\n", + "panelNames = None\n", + "\n", + "# set your image paths here. 
See more here: https://docs.python.org/3/library/pathlib.html\n", + "# if using Windows, you need to add an \"r\" to the path like this: Path(r\"C:\\Files\") \n", + "\n", + "# imagePath = Path(\"./data/REDEDGE-MX-DUAL\")\n", + "\n", + "# # these will return lists of image paths as strings \n", + "# imageNames = list(imagePath.glob('IMG_0007_*.tif'))\n", + "# imageNames = [x.as_posix() for x in imageNames]\n", + "\n", + "# panelNames = list(imagePath.glob('IMG_0001_*.tif'))\n", + "# panelNames = [x.as_posix() for x in panelNames]\n", + "\n", + "# imagePath = Path(\"./data/REDEDGE-MX\")\n", + "\n", + "# # these will return lists of image paths as strings \n", + "# imageNames = list(imagePath.glob('IMG_0020_*.tif'))\n", + "# imageNames = [x.as_posix() for x in imageNames]\n", + "\n", + "# panelNames = list(imagePath.glob('IMG_0001_*.tif'))\n", + "# panelNames = [x.as_posix() for x in panelNames]\n", + "\n", + "# imagePath = Path(\"./data/ALTUM\")\n", + "\n", + "# # these will return lists of image paths as strings \n", + "# imageNames = list(imagePath.glob('IMG_0021_*.tif'))\n", + "# imageNames = [x.as_posix() for x in imageNames]\n", + "\n", + "# panelNames = list(imagePath.glob('IMG_0000_*.tif'))\n", + "# panelNames = [x.as_posix() for x in panelNames]\n", + "\n", + "# imagePath = Path(\"./data/REDEDGE-P\")\n", + "\n", + "# # these will return lists of image paths as strings \n", + "# imageNames = list(imagePath.glob('IMG_0011_*.tif'))\n", + "# imageNames = [x.as_posix() for x in imageNames]\n", + "\n", + "# panelNames = list(imagePath.glob('IMG_0000_*.tif'))\n", + "# panelNames = [x.as_posix() for x in panelNames]\n", + "\n", + "imagePath = Path(\"./data/ALTUM-PT\")\n", + "\n", + "# these will return lists of image paths as strings \n", + "imageNames = list(imagePath.glob('IMG_0010_*.tif'))\n", + "imageNames = [x.as_posix() for x in imageNames]\n", + "\n", + "panelNames = list(imagePath.glob('IMG_0000_*.tif'))\n", + "panelNames = [x.as_posix() for x in panelNames]\n", + "\n", + "\n", + "if panelNames is not None:\n", + " panelCap = capture.Capture.from_filelist(panelNames)\n", + "else:\n", + " panelCap = None\n", + "\n", + "thecapture = capture.Capture.from_filelist(imageNames)\n", + "\n", + "# get camera model for future use \n", + "cam_model = thecapture.camera_model\n", + "# if this is a multicamera system like the RedEdge-MX Dual,\n", + "# we can combine the two serial numbers to help identify \n", + "# this camera system later. 
\n", + "if len(thecapture.camera_serials) > 1:\n", + " cam_serial = \"_\".join(thecapture.camera_serials)\n", + " print(cam_serial)\n", + "else:\n", + " cam_serial = thecapture.camera_serial\n", + " \n", + "print(\"Camera model:\",cam_model)\n", + "print(\"Bit depth:\", thecapture.bits_per_pixel)\n", + "print(\"Camera serial number:\", cam_serial)\n", + "print(\"Capture ID:\",thecapture.uuid)\n", + "\n", + "# determine if this sensor has a panchromatic band \n", + "if cam_model == 'RedEdge-P' or cam_model == 'Altum-PT':\n", + " panchroCam = True\n", + "else:\n", + " panchroCam = False\n", + " panSharpen = False \n", + "\n", + "if panelCap is not None:\n", + " if panelCap.panel_albedo() is not None:\n", + " panel_reflectance_by_band = panelCap.panel_albedo()\n", + " else:\n", + " panel_reflectance_by_band = [0.49]*len(thecapture.eo_band_names()) #RedEdge band_index order\n", + " panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band) \n", + " irradiance_list = panelCap.panel_irradiance(panel_reflectance_by_band) + [0] # add to account for uncalibrated LWIR band, if applicable\n", + " img_type = \"reflectance\"\n", + " thecapture.plot_undistorted_reflectance(panel_irradiance)\n", + "else:\n", + " if thecapture.dls_present():\n", + " img_type='reflectance'\n", + " irradiance_list = thecapture.dls_irradiance() + [0]\n", + " thecapture.plot_undistorted_reflectance(thecapture.dls_irradiance())\n", + " else:\n", + " img_type = \"radiance\"\n", + " thecapture.plot_undistorted_radiance() \n", + " irradiance_list = None" + ] + }, + { + "cell_type": "markdown", + "id": "8de5a774", + "metadata": {}, + "source": [ + "# Check for existing warp matrices \n", + "If we have already successfully aligned captures from this specific camera, we can typically save some time and use the alignment warp matrices for other captures from the same camera" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35de1be0", + "metadata": {}, + "outputs": [], + "source": [ + "from skimage.transform import ProjectiveTransform\n", + "import numpy as np\n", + "\n", + "if panchroCam:\n", + " warp_matrices_filename = cam_serial + \"_warp_matrices_SIFT.npy\"\n", + "else:\n", + " warp_matrices_filename = cam_serial + \"_warp_matrices_opencv.npy\"\n", + "\n", + "if Path('./' + warp_matrices_filename).is_file():\n", + " print(\"Found existing warp matrices for camera\", cam_serial)\n", + " load_warp_matrices = np.load(warp_matrices_filename, allow_pickle=True)\n", + " loaded_warp_matrices = []\n", + " for matrix in load_warp_matrices: \n", + " if panchroCam:\n", + " transform = ProjectiveTransform(matrix=matrix.astype('float64'))\n", + " loaded_warp_matrices.append(transform)\n", + " else:\n", + " loaded_warp_matrices.append(matrix.astype('float32'))\n", + " print(\"Warp matrices successfully loaded.\")\n", + "\n", + " if panchroCam:\n", + " warp_matrices_SIFT = loaded_warp_matrices\n", + " else:\n", + " warp_matrices = loaded_warp_matrices\n", + "else:\n", + " print(\"No existing warp matrices found. 
Create them later in the notebook.\")\n", + " warp_matrices_SIFT = False\n", + " warp_matrices = False" + ] + }, + { + "cell_type": "markdown", + "id": "185627ff", + "metadata": {}, + "source": [ + "# Unwarp and Align (OpenCV method for RedEdge3/M/MX/Dual and original Altum)\n", + "Alignment is a three-step process:\n", + "\n", + "1. Images are unwarped using the built-in lens calibration\n", + "2. A transformation is found to align each band to a common band\n", + "3. The aligned images are combined and cropped, removing pixels which don't overlap in all bands.\n", + "\n", + "We provide utilities to find the alignment transformations within a single capture. Our experience shows that once a good alignment transformation is found, it tends to be stable over a flight and, in most cases, over many flights. The transformation may change if the camera undergoes a shock event (such as a hard landing or drop) or if the temperature changes substantially between flights. In these events, a new transformation may need to be found.\n", + "\n", + "Further, since this approach finds a 2-dimensional (affine) transformation between images, it won't work when the parallax between bands results in a 3-dimensional depth field. This can happen when the camera is very close to the target or when targets are visible at significantly different ranges, such as a nearby tree or building against a background much farther away. In these cases, it will be necessary to use photogrammetry techniques to find a 3-dimensional mapping between images.\n", + "\n", + "For best alignment results it's good to select a capture which has features visible in all bands. Man-made objects such as cars, roads, and buildings tend to work very well, while captures of only repeating crop rows tend to work poorly. Remember, once a good transformation has been found for a flight, it can generally be applied across all of the images.\n", + "\n", + "It's also good to use an image for alignment which is taken near the same level above ground as the rest of the flights. Above approximately 35m AGL, the alignment will be consistent. However, if images taken closer to the ground are used, such as panel images, the same alignment transformation will not work for the flight data.",
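+ "\n", + "As a minimal sketch of the per-band operation (illustrative placeholder values, not the library's internals; `align_capture` and `create_aligned_capture` do this work for you), applying one 3x3 warp matrix to a single band with OpenCV looks like:\n", + "\n", + "```python\n", + "import cv2\n", + "import numpy as np\n", + "\n", + "band = np.zeros((960, 1280), dtype=np.float32)  # placeholder for one undistorted band\n", + "H = np.eye(3, dtype=np.float32)  # one warp matrix, as returned by align_capture\n", + "\n", + "# resample the band into the reference band's pixel frame\n", + "aligned = cv2.warpPerspective(band, H, (band.shape[1], band.shape[0]))\n", + "```"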
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e154cdd1", + "metadata": {}, + "outputs": [], + "source": [ + "import cv2\n", + "import time\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import micasense.imageutils as imageutils\n", + "import micasense.plotutils as plotutils\n", + "\n", + "# We use a different alignment method for RedEdge-P and Altum-PT, \n", + "# so if the imagery is from this kind of camera, skip this step \n", + "if not panchroCam:\n", + " st = time.time()\n", + " # set to True if you'd like to ignore existing warp matrices and create new ones\n", + " regenerate = True \n", + " pyramid_levels = 0 # for images with RigRelatives, setting this to 0 or 1 may improve alignment\n", + " max_alignment_iterations = 10\n", + "\n", + " # match_index: \n", + " # for non-panchromatic cameras we want to use band 1, which is green \n", + " # the green band has zero rig relative offsets \n", + " # NOTE: These band numbers are zero-indexed \n", + " # special parameters for RedEdge-MX dual camera system \n", + " if len(thecapture.eo_band_names()) == 10:\n", + " print(\"is 10 band\")\n", + " pyramid_levels = 3\n", + " match_index = 4\n", + " max_alignment_iterations = 20\n", + " else: \n", + " match_index = 1\n", + "\n", + " warp_mode = cv2.MOTION_HOMOGRAPHY # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY\n", + "\n", + " print(\"Aligning images. Depending on settings this can take from a few seconds to many minutes\")\n", + " # Can potentially increase max_iterations for better results, but longer runtimes\n", + " if warp_matrices and not regenerate:\n", + " print(\"Using existing warp matrices...\")\n", + " try:\n", + " irradiance = panel_irradiance+[0]\n", + " except NameError:\n", + " irradiance = None\n", + " else:\n", + " warp_matrices, alignment_pairs = imageutils.align_capture(thecapture,\n", + " ref_index = match_index,\n", + " max_iterations = max_alignment_iterations,\n", + " warp_mode = warp_mode,\n", + " pyramid_levels = pyramid_levels)\n", + "\n", + " print(\"Finished Aligning\")\n", + " et = time.time()\n", + " elapsed_time = et - st\n", + " print('Alignment time:', int(elapsed_time), 'seconds')" + ] + }, + { + "cell_type": "markdown", + "id": "c06bfdf3", + "metadata": {}, + "source": [ + "# Crop Aligned Images and create aligned capture stack \n", + "After finding image alignments, we may need to remove pixels around the edges which aren't present in every image in the capture. To do this we use the affine transforms found above and the image distortions from the image metadata. OpenCV provides a couple of handy helpers for this task in the cv2.undistortPoints() and cv2.transform() methods. These methods take a set of pixel coordinates and apply our undistortion matrix and our affine transform, respectively. So, just as we did when registering the images, we first apply the undistortion process to the coordinates of the image borders, then we apply the affine transformation to that result. The resulting pixel coordinates tell us where the image borders end up after this pair of transformations, and we can then crop the resultant image to these coordinates." 
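+ "\n", + "A minimal sketch of that corner-mapping idea (illustrative values; `find_crop_bounds` performs the real computation from the image metadata):\n", + "\n", + "```python\n", + "import cv2\n", + "import numpy as np\n", + "\n", + "h, w = 960, 1280    # illustrative band size\n", + "K = np.eye(3)       # camera matrix (taken from image metadata in practice)\n", + "dist = np.zeros(5)  # lens distortion coefficients\n", + "A = np.eye(2, 3)    # a 2x3 affine transform for this band\n", + "\n", + "corners = np.array([[[0, 0]], [[w - 1, 0]], [[w - 1, h - 1]], [[0, h - 1]]], dtype=np.float64)\n", + "undistorted = cv2.undistortPoints(corners, K, dist, P=K)  # undo the lens model\n", + "warped = cv2.transform(undistorted, A)                    # then apply the band transform\n", + "# the min/max of the warped corners bound the region common to all bands\n", + "```"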
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5fbfb346", + "metadata": {}, + "outputs": [], + "source": [ + "if not panchroCam:\n", + " cropped_dimensions, edges = imageutils.find_crop_bounds(thecapture, warp_matrices, warp_mode=warp_mode, reference_band=match_index)\n", + " print(\"Cropped dimensions:\",cropped_dimensions)\n", + " im_aligned = thecapture.create_aligned_capture(warp_matrices=warp_matrices, motion_type=warp_mode, img_type=img_type, match_index=match_index)\n" + ] + }, + { + "cell_type": "markdown", + "id": "c362623a", + "metadata": {}, + "source": [ + "# Alignment and pan-sharpening for RedEdge-P and Altum-PT\n", + "For older cameras we use OpenCV for capture alignment. For RedEdge-P and Altum-PT, we use SIFT (scale-invariant feature transform). We will use SIFT to create the warp matrices, then use these warp matrices to align the capture. See more here: \n", + "https://scikit-image.org/docs/stable/auto_examples/features_detection/plot_sift.html\n", + "https://en.wikipedia.org/wiki/Scale-invariant_feature_transform\n", + "\n", + "For sensors with a panchromatic band (Altum-PT or RedEdge-P), we may wish to create a pan-sharpened stack. This example uses a linear interpolation method for pan-sharpening.\n", + "\n", + "The `radiometric_pan_sharpen` function takes in the SIFT warp matrices and outputs an upsampled image stack (all bands are changed to the resolution of the panchromatic sensor) and a pan-sharpened stack. \n", + "\n", + "Note: it is important to choose an initial alignment image that has a lot of straight, man-made features, such as roads or buildings. Trying to align a capture that is strictly vegetation will take a long time and may not produce good results. This process can take a while, depending on how feature-rich the capture is. We have seen some Altum-PT captures align after 2 minutes, and some can take upwards of 30 minutes. Be patient! Once an alignment has been found, it can be used on other captures from the same camera, and the alignment process will be much faster. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12c2a278", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# from micasense.imageutils import brovey_pan_sharpen,radiometric_pan_sharpen\n", + "from skimage.transform import ProjectiveTransform\n", + "import time\n", + "\n", + "if panchroCam: \n", + " # set to True if you'd like to ignore existing warp matrices and create new ones\n", + " regenerate = True\n", + " st = time.time()\n", + " if not warp_matrices_SIFT or regenerate:\n", + " print(\"Generating new warp matrices...\")\n", + " warp_matrices_SIFT = thecapture.SIFT_align_capture(min_matches = 10)\n", + " \n", + " sharpened_stack, upsampled = thecapture.radiometric_pan_sharpened_aligned_capture(warp_matrices=warp_matrices_SIFT, irradiance_list=irradiance_list, img_type=img_type)\n", + " \n", + "# we can also use the Rig Relatives from the image metadata to do a quick, rudimentary alignment \n", + "# warp_matrices0=thecapture.get_warp_matrices(ref_index=5)\n", + "# sharpened_stack,upsampled = radiometric_pan_sharpen(thecapture,warp_matrices=warp_matrices0)\n", + "\n", + " print(\"Pansharpened shape:\", sharpened_stack.shape)\n", + " print(\"Upsampled shape:\", upsampled.shape)\n", + " # re-assign to im_aligned to match rest of code \n", + " im_aligned = upsampled\n", + " et = time.time()\n", + " elapsed_time = et - st\n", + " print('Alignment and pan-sharpening time:', int(elapsed_time), 'seconds')" + ] + }, + { + "cell_type": "markdown", + "id": "3e9d51b1", + "metadata": {}, + "source": [ + "# Save warp matrices\n", + "Once an alignment for your camera has been found, it can be saved to a file for later use with this notebook. It can also be used on the Batch Alignment notebook for aligning all of the captures from an entire flight." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "84855db4", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy\n", + "import skimage\n", + "from skimage.transform import warp,matrix_transform,resize,FundamentalMatrixTransform,estimate_transform,ProjectiveTransform\n", + "\n", + "if panchroCam:\n", + " working_wm = warp_matrices_SIFT\n", + "else:\n", + " working_wm = warp_matrices\n", + "if not Path('./' + warp_matrices_filename).is_file() or regenerate:\n", + " temp_matrices = []\n", + " for x in working_wm:\n", + " if isinstance(x, numpy.ndarray):\n", + " temp_matrices.append(x)\n", + " if isinstance(x, skimage.transform._geometric.ProjectiveTransform):\n", + " temp_matrices.append(x.params)\n", + " np.save(warp_matrices_filename, np.array(temp_matrices, dtype=object), allow_pickle=True)\n", + " print(\"Saved to\", Path('./' + warp_matrices_filename).resolve())\n", + "else:\n", + " print(\"Matrices already exist at\",Path('./' + warp_matrices_filename).resolve())" + ] + }, + { + "cell_type": "markdown", + "id": "47d3fb77", + "metadata": {}, + "source": [ + "# Multispectral band histogram \n", + "We can compare the radiance between multispectral bands, and between the upsampled and pan-sharpened stacks in the case of RedEdge-P and Altum-PT." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8e036b45", + "metadata": {}, + "outputs": [], + "source": [ + "theColors = {'Blue': 'blue', 'Green': 'green', 'Red': 'red', \\\n", + " 'Red edge': 'maroon', 'NIR': 'purple', 'Panchro': 'yellow', 'PanchroB': 'orange',\\\n", + " 'Red edge-740': 'salmon', 'Red Edge': 'maroon', 'Blue-444': 'aqua', \\\n", + " 'Green-531': 'lime', 'Red-650': 'lightcoral', 'Red edge-705':'brown'}\n", + "\n", + "eo_count = len(thecapture.eo_indices())\n", + "multispec_min = np.min(np.percentile(im_aligned[:,:,1:eo_count].flatten(),0.01))\n", + "multispec_max = np.max(np.percentile(im_aligned[:,:,1:eo_count].flatten(), 99.99))\n", + "\n", + "theRange = (multispec_min,multispec_max)\n", + "\n", + "fig, axis = plt.subplots(1, 1, figsize=(10,4))\n", + "for x,y in zip(thecapture.eo_indices(),thecapture.eo_band_names()):\n", + " axis.hist(im_aligned[:,:,x].ravel(), bins=512, range=theRange, \\\n", + " histtype=\"step\", label=y, color=theColors[y], linewidth=1.5)\n", + "plt.title(\"Multispectral histogram (radiance)\")\n", + "axis.legend()\n", + "plt.show()\n", + "\n", + "if panchroCam:\n", + " eo_count = len(thecapture.eo_indices())\n", + " multispec_min = np.min(np.percentile(sharpened_stack[:,:,1:eo_count].flatten(),0.01))\n", + " multispec_max = np.max(np.percentile(sharpened_stack[:,:,1:eo_count].flatten(), 99.99))\n", + "\n", + " theRange = (multispec_min,multispec_max)\n", + "\n", + " fig, axis = plt.subplots(1, 1, figsize=(10,4))\n", + " for x,y in zip(thecapture.eo_indices(),thecapture.eo_band_names()):\n", + " axis.hist(sharpened_stack[:,:,x].ravel(), bins=512, range=theRange, \\\n", + " histtype=\"step\", label=y, color=theColors[y], linewidth=1.5)\n", + " plt.title(\"Pan-sharpened multispectral histogram (radiance)\")\n", + " axis.legend()\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "3ccb819b", + "metadata": {}, + "source": [ + "# Visualize Aligned Images\n", + "Once the transformation has been found, it can be verified by compositing the aligned images to check alignment. The image 'stack' containing all bands can also be exported to a multi-band TIFF file for viewing in external software such as QGIS. 
Useful composites are a naturally colored RGB as well as color infrared, or CIR." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8626c90b", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# figsize=(30,23) # use this size for full-image-resolution display\n", + "figsize=(16,13) # use this size for export-sized display\n", + "\n", + "rgb_band_indices = [thecapture.band_names_lower().index('red'),\n", + " thecapture.band_names_lower().index('green'),\n", + " thecapture.band_names_lower().index('blue')]\n", + "cir_band_indices = [thecapture.band_names_lower().index('nir'),\n", + " thecapture.band_names_lower().index('red'),\n", + " thecapture.band_names_lower().index('green')]\n", + "\n", + "# Create normalized stacks for viewing\n", + "im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],im_aligned.shape[2]), dtype=np.float32)\n", + "im_min = np.percentile(im_aligned[:,:,rgb_band_indices].flatten(), 0.5) # modify these percentiles to adjust contrast\n", + "im_max = np.percentile(im_aligned[:,:,rgb_band_indices].flatten(), 99.5) # for many images, 0.5 and 99.5 are good values\n", + "\n", + "if panchroCam:\n", + " im_display_sharp = np.zeros((sharpened_stack.shape[0],sharpened_stack.shape[1],sharpened_stack.shape[2]), dtype=np.float32 )\n", + " im_min_sharp = np.percentile(sharpened_stack[:,:,rgb_band_indices].flatten(), 0.5) # modify these percentiles to adjust contrast\n", + " im_max_sharp = np.percentile(sharpened_stack[:,:,rgb_band_indices].flatten(), 99.5) # for many images, 0.5 and 99.5 are good values\n", + "\n", + "\n", + "# for rgb true color, we use the same min and max scaling across the 3 bands to \n", + "# maintain the \"white balance\" of the calibrated image\n", + "for i in rgb_band_indices:\n", + " im_display[:,:,i] = imageutils.normalize(im_aligned[:,:,i], im_min, im_max)\n", + " if panchroCam: \n", + " im_display_sharp[:,:,i] = imageutils.normalize(sharpened_stack[:,:,i], im_min_sharp, im_max_sharp)\n", + "\n", + "rgb = im_display[:,:,rgb_band_indices]\n", + "\n", + "if panchroCam:\n", + " rgb_sharp = im_display_sharp[:,:,rgb_band_indices]\n", + "\n", + "nir_band = thecapture.band_names_lower().index('nir')\n", + "red_band = thecapture.band_names_lower().index('red')\n", + "\n", + "ndvi = (im_aligned[:,:,nir_band] - im_aligned[:,:,red_band]) / (im_aligned[:,:,nir_band] + im_aligned[:,:,red_band])\n", + "\n", + "# for cir false color imagery, we normalize the NIR,R,G bands within themselves, which provides\n", + "# the classical CIR rendering where plants are red and soil takes on a blue tint\n", + "for i in cir_band_indices:\n", + " im_display[:,:,i] = imageutils.normalize(im_aligned[:,:,i])\n", + "\n", + "cir = im_display[:,:,cir_band_indices]\n", + "if panchroCam:\n", + " fig, (ax1,ax2) = plt.subplots(1, 2, figsize=figsize)\n", + "else:\n", + " fig, ax1 = plt.subplots(1, 1, figsize=figsize)\n", + "ax1.set_title(\"Red-Green-Blue Composite\")\n", + "ax1.imshow(rgb)\n", + "if panchroCam:\n", + " ax2.set_title(\"Red-Green-Blue Composite (pan-sharpened)\")\n", + " ax2.imshow(rgb_sharp)\n", + "\n", + "fig, (ax3,ax4) = plt.subplots(1, 2, figsize=figsize)\n", + "ax3.set_title(\"NDVI\")\n", + "ax3.imshow(ndvi)\n", + "ax4.set_title(\"Color Infrared (CIR) Composite\")\n", + "ax4.imshow(cir)\n", + "\n", + "# set custom lims if you want to zoom in to image to see more detail \n", + "# this is useful for comparing upsampled and pan-sharpened stacks \n", + "# custom_xlim=(1500,2000)\n", + "# custom_ylim=(2000,1500)\n", + "# 
plt.setp([ax1,ax2,ax3,ax4], xlim=custom_xlim, ylim=custom_ylim)\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "17f71519", + "metadata": {}, + "source": [ + "# Image Enhancement\n", + "There are many techniques for image enhancement, but one which is commonly used to improve the visual sharpness of imagery is the unsharp mask. Here, we apply an unsharp mask to the RGB image to improve the visualization, and then apply a gamma curve to make the darkest areas brighter." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1185fd16", + "metadata": {}, + "outputs": [], + "source": [ + "if panchroCam:\n", + " rgb = rgb_sharp\n", + "# Create an enhanced version of the RGB render using an unsharp mask\n", + "gaussian_rgb = cv2.GaussianBlur(rgb, (9,9), 10.0)\n", + "gaussian_rgb[gaussian_rgb<0] = 0\n", + "gaussian_rgb[gaussian_rgb>1] = 1\n", + "unsharp_rgb = cv2.addWeighted(rgb, 1.5, gaussian_rgb, -0.5, 0)\n", + "unsharp_rgb[unsharp_rgb<0] = 0\n", + "unsharp_rgb[unsharp_rgb>1] = 1\n", + "\n", + "# Apply a gamma correction to make the render appear closer to what our eyes would see\n", + "gamma = 1.4\n", + "gamma_corr_rgb = unsharp_rgb**(1.0/gamma)\n", + "fig = plt.figure(figsize=figsize)\n", + "plt.imshow(gamma_corr_rgb, aspect='equal')\n", + "plt.axis('off')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "6d1f6189", + "metadata": {}, + "source": [ + "# Stack Export\n", + "We can easily export the stacks to an image file using the GDAL library (https://www.gdal.org). Once exported, these image stacks can be opened in software such as QGIS and raster operations such as NDVI or NDRE computation can be done in that software. The stacks include geographic information. \n", + "\n", + "If you prefer, you may set `sort_by_wavelength` to `True` in the `save_capture_as_stack` function.\n", + "\n", + "Unless otherwise specified, this will save in your working folder, that is, your `imageprocessing` directory." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a2d716be", + "metadata": {}, + "outputs": [], + "source": [ + "#set output name to unique capture ID, e.g. FWoNSvgDNBX63Xv378qs\n", + "outputName = thecapture.uuid\n", + "\n", + "st = time.time()\n", + "if panchroCam:\n", + " # in this example, we can export both a pan-sharpened stack and an upsampled stack\n", + " # so you can compare them in GIS. In practice, you would typically only output the pansharpened stack \n", + " thecapture.save_capture_as_stack(outputName+\"-pansharpened.tif\", sort_by_wavelength=True,pansharpen=True,img_type=img_type)\n", + " thecapture.save_capture_as_stack(outputName+\"-upsampled.tif\", sort_by_wavelength=True,pansharpen=False,img_type=img_type)\n", + "else:\n", + " thecapture.save_capture_as_stack(outputName+\"-noPanels.tif\", sort_by_wavelength=True,img_type=img_type)\n", + "\n", + "et = time.time()\n", + "elapsed_time = et - st\n", + "print(\"Time to save stacks:\", int(elapsed_time), \"seconds.\")" + ] + }, + { + "cell_type": "markdown", + "id": "f4d9cd64", + "metadata": {}, + "source": [ + "# NDVI Computation\n", + "For raw index computation on single images, the `numpy` package provides a simple way to do math and simple visualization on images. 
Below, we compute and visualize an image histogram, and then use that to pick a color map range for visualizing the NDVI of an image.\n", + "\n", + "## Plant Classification\n", + "After computing the NDVI and prior to displaying it, we use a very rudimentary method for focusing on the plants and removing the soil and shadow information from our images and histograms. Below, we remove non-plant pixels by setting to zero any pixels in the image where the NIR reflectance is less than 20%. This helps to ensure that the NDVI and NDRE histograms aren't skewed substantially by soil noise." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a572a55", + "metadata": {}, + "outputs": [], + "source": [ + "from micasense import plotutils\n", + "import matplotlib.pyplot as plt\n", + "\n", + "nir_band = thecapture.band_names_lower().index('nir')\n", + "red_band = thecapture.band_names_lower().index('red')\n", + "\n", + "thelayer = im_aligned\n", + "if panchroCam:\n", + " thelayer = sharpened_stack\n", + "np.seterr(divide='ignore', invalid='ignore') # ignore divide by zero errors in the index calculation\n", + "\n", + "# Compute Normalized Difference Vegetation Index (NDVI) from the NIR and Red bands\n", + "ndvi = (thelayer[:,:,nir_band] - thelayer[:,:,red_band]) / (thelayer[:,:,nir_band] + thelayer[:,:,red_band])\n", + "print(\"Image type:\",img_type)\n", + "\n", + "# remove shadowed areas (mask pixels with NIR reflectance < 20%)\n", + "# this does not seem to work on panchro stacks \n", + "if img_type == 'reflectance':\n", + " ndvi = np.ma.masked_where(thelayer[:,:,nir_band] < 0.20, ndvi) \n", + "elif img_type == 'radiance':\n", + " lower_pct_radiance = np.percentile(thelayer[:,:,nir_band], 10.0)\n", + " ndvi = np.ma.masked_where(thelayer[:,:,nir_band] < lower_pct_radiance, ndvi) \n", + "# Compute and display a histogram\n", + "# ndvi_hist_min = np.min(ndvi) \n", + "# ndvi_hist_max = np.max(ndvi) \n", + "ndvi_hist_min = np.min(np.percentile(ndvi,0.5))\n", + "ndvi_hist_max = np.max(np.percentile(ndvi,99.5))\n", + "fig, axis = plt.subplots(1, 1, figsize=(10,4))\n", + "axis.hist(ndvi.ravel(), bins=512, range=(ndvi_hist_min, ndvi_hist_max))\n", + "plt.title(\"NDVI Histogram\")\n", + "plt.show()\n", + "\n", + "min_display_ndvi = 0.45 # further mask soil by removing low-ndvi values\n", + "#min_display_ndvi = np.percentile(ndvi.flatten(), 5.0) # modify these percentiles to adjust contrast\n", + "max_display_ndvi = np.percentile(ndvi.flatten(), 99.5) # for many images, 0.5 and 99.5 are good values\n", + "masked_ndvi = np.ma.masked_where(ndvi < min_display_ndvi, ndvi)\n", + "\n", + "#reduce the figure size to account for colorbar\n", + "figsize=np.asarray(figsize) - np.array([3,2])\n", + "\n", + "#plot NDVI over an RGB basemap, with a colorbar showing the NDVI scale\n", + "fig, axis = plotutils.plot_overlay_withcolorbar(gamma_corr_rgb, \n", + " masked_ndvi, \n", + " figsize = (14,7), \n", + " title = 'NDVI filtered to only plants over RGB base layer',\n", + " vmin = min_display_ndvi,\n", + " vmax = max_display_ndvi)\n", + "fig.savefig(thecapture.uuid+'_ndvi_over_rgb.png')" + ] + }, + { + "cell_type": "markdown", + "id": "d8b4cf19", + "metadata": {}, + "source": [ + "# NDRE Computation\n", + "In the same manner, we can compute, filter, and display another index useful for MicaSense cameras, the Normalized Difference Red Edge (NDRE) index. We also filter out shadows and soil to ensure our display focuses only on the plant health.",
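+ "\n", + "For reference, the two indices computed in this notebook are:\n", + "\n", + "$$NDVI = \\frac{NIR - Red}{NIR + Red} \\qquad NDRE = \\frac{NIR - RedEdge}{NIR + RedEdge}$$"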
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b3fe7a83", + "metadata": {}, + "outputs": [], + "source": [ + "# Compute Normalized Difference Red Edge Index from the NIR and Red edge bands\n", + "rededge_band = thecapture.band_names_lower().index('red edge')\n", + "ndre = (thelayer[:,:,nir_band] - thelayer[:,:,rededge_band]) / (thelayer[:,:,nir_band] + thelayer[:,:,rededge_band])\n", + "\n", + "# Mask areas with shadows and low NDVI to remove soil\n", + "masked_ndre = np.ma.masked_where(ndvi < min_display_ndvi, ndre)\n", + "\n", + "# Compute a histogram\n", + "ndre_hist_min = np.min(np.percentile(masked_ndre,0.5))\n", + "ndre_hist_max = np.max(np.percentile(masked_ndre,99.5))\n", + "fig, axis = plt.subplots(1, 1, figsize=(10,4))\n", + "axis.hist(masked_ndre.ravel(), bins=512, range=(ndre_hist_min, ndre_hist_max))\n", + "plt.title(\"NDRE Histogram (filtered to only plants)\")\n", + "plt.show()\n", + "\n", + "min_display_ndre = np.percentile(masked_ndre, 5)\n", + "max_display_ndre = np.percentile(masked_ndre, 99.5)\n", + "\n", + "fig, axis = plotutils.plot_overlay_withcolorbar(gamma_corr_rgb, \n", + " masked_ndre, \n", + " figsize=(14,7), \n", + " title='NDRE filtered to only plants over RGB base layer',\n", + " vmin=min_display_ndre,vmax=max_display_ndre)\n", + "fig.savefig(thecapture.uuid+'_ndre_over_rgb.png')" + ] + }, + { + "cell_type": "markdown", + "id": "898cc28a", + "metadata": {}, + "source": [ + "# Thermal Imagery\n", + "If our image is from an Altum or Altum-PT and includes a thermal band, we can display the re-sampled and aligned thermal data over the RGB data to maintain the context of the thermal information.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "176e2c34", + "metadata": {}, + "outputs": [], + "source": [ + "if len(thecapture.lw_indices()) > 0:\n", + " lwir_band = thecapture.band_names_lower().index('lwir')\n", + "\n", + " # by default we don't mask the thermal, since its native resolution is much lower than the MS\n", + " if panchroCam:\n", + " masked_thermal = sharpened_stack[:,:,lwir_band]\n", + " else:\n", + " masked_thermal = im_aligned[:,:,lwir_band]\n", + " # Alternatively we can mask the thermal only to plants here, which is useful for large contiguous areas\n", + " # masked_thermal = np.ma.masked_where(ndvi < 0.45, im_aligned[:,:,5])\n", + "\n", + "\n", + " # Compute a histogram\n", + " fig, axis = plt.subplots(1, 1, figsize=(10,4))\n", + " axis.hist(masked_thermal.ravel(), bins=512, range=(np.min(np.percentile(masked_thermal,1)), np.max(np.percentile(masked_thermal,99))))\n", + " plt.title(\"Thermal Histogram\")\n", + " plt.show()\n", + "\n", + " min_display_therm = np.percentile(masked_thermal, 1)\n", + " max_display_therm = np.percentile(masked_thermal, 99)\n", + "\n", + " fig, axis = plotutils.plot_overlay_withcolorbar(gamma_corr_rgb,\n", + " masked_thermal, \n", + " figsize=(14,7), \n", + " title='Temperature over True Color',\n", + " vmin=min_display_therm,vmax=max_display_therm,\n", + " overlay_alpha=0.25,\n", + " overlay_colormap='jet',\n", + " overlay_steps=16,\n", + " display_contours=True,\n", + " contour_steps=16,\n", + " contour_alpha=.4,\n", + " contour_fmt=\"%.0fC\")\n", + " fig.savefig(thecapture.uuid+'_thermal_over_rgb.png')" + ] + }, + { + "cell_type": "markdown", + "id": "bc9b29ec", + "metadata": {}, + "source": [ + "# Red vs NIR Reflectance\n", + "Finally, we show a classic agricultural remote sensing output in the tasseled cap plot. 
This plot can be useful for visualizing row crops; it plots the red reflectance channel on the X-axis against the NIR reflectance channel on the Y-axis, and clearly shows the line of the soil in that space. The tasseled cap view isn't very useful for this arid data set; however, we can see the \"badge of trees\" of high NIR reflectance and relatively low red reflectance. This provides an example of one of the uses of aligned images for single capture analysis." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c0250872", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "x_band = red_band\n", + "y_band = nir_band\n", + "x_max = np.max(np.percentile(im_aligned[:,:,x_band],99.99))\n", + "y_max = np.max(np.percentile(im_aligned[:,:,y_band],99.99))\n", + "\n", + "fig = plt.figure(figsize=(12,12))\n", + "plt.hexbin(im_aligned[:,:,x_band],im_aligned[:,:,y_band],gridsize=640,bins='log',extent=(0,x_max,0,y_max))\n", + "ax = fig.gca()\n", + "ax.set_xlim([0,x_max])\n", + "ax.set_ylim([0,y_max])\n", + "plt.xlabel(\"{} Reflectance\".format(thecapture.band_names()[x_band]))\n", + "plt.ylabel(\"{} Reflectance\".format(thecapture.band_names()[y_band]))\n", + "plt.show()\n", + "\n", + "if panchroCam:\n", + " x_band = red_band\n", + " y_band = nir_band\n", + " x_max = np.max(np.percentile(sharpened_stack[:,:,x_band],99.99))\n", + " y_max = np.max(np.percentile(sharpened_stack[:,:,y_band],99.99))\n", + "\n", + " fig = plt.figure(figsize=(12,12))\n", + " plt.hexbin(sharpened_stack[:,:,x_band],sharpened_stack[:,:,y_band],gridsize=640,bins='log',extent=(0,x_max,0,y_max))\n", + " ax = fig.gca()\n", + " ax.set_xlim([0,x_max])\n", + " ax.set_ylim([0,y_max])\n", + " plt.xlabel(\"{} Reflectance (pan-sharpened)\".format(thecapture.band_names()[x_band]))\n", + " plt.ylabel(\"{} Reflectance (pan-sharpened)\".format(thecapture.band_names()[y_band]))\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "4ec2d106", + "metadata": {}, + "source": [ + "# Print warp_matrices for usage elsewhere, such as Batch Processing\n", + "Lastly, we output the `warp_matrices` that we got for this image stack for usage elsewhere. Currently, these can be used in the Batch Processing.ipynb notebook to save reflectance-compensated stacks of images to a directory.",
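+ "\n", + "As a minimal sketch of that reuse (the file name and capture variables are the ones defined earlier in this notebook; for RedEdge-P/Altum-PT, rebuild ProjectiveTransform objects from the saved arrays first, as at the top of this notebook):\n", + "\n", + "```python\n", + "import numpy as np\n", + "\n", + "wm = list(np.load(warp_matrices_filename, allow_pickle=True))  # saved above\n", + "# then pass the matrices straight to the alignment step for another capture, e.g.\n", + "# thecapture.create_aligned_capture(warp_matrices=wm, img_type=img_type)\n", + "```"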
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c7cf8f48", + "metadata": {}, + "outputs": [], + "source": [ + "if panchroCam:\n", + " print(warp_matrices_SIFT)\n", + "else:\n", + " print(warp_matrices)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/Alignment-10Band.ipynb b/Alignment-10Band.ipynb index 87d05a87..a9ead889 100644 --- a/Alignment-10Band.ipynb +++ b/Alignment-10Band.ipynb @@ -39,9 +39,9 @@ "panelNames = None\n", "paneCap = None\n", "\n", - "imagePath = os.path.join('.','data','10BANDSET','000')\n", - "imageNames = glob.glob(os.path.join(imagePath,'IMG_0431_*.tif'))\n", - "panelNames = glob.glob(os.path.join(imagePath,'IMG_0000_*.tif'))\n", + "imagePath = os.path.join('.','data','REDEDGE-MX-DUAL')\n", + "imageNames = glob.glob(os.path.join(imagePath,'IMG_0007_*.tif'))\n", + "panelNames = glob.glob(os.path.join(imagePath,'IMG_0001_*.tif'))\n", "\n", "# Allow this code to align both radiance and reflectance images; bu excluding\n", "# a definition for panelNames above, radiance images will be used\n", @@ -115,7 +115,7 @@ "warp_mode = cv2.MOTION_HOMOGRAPHY # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY\n", "pyramid_levels = 3 # for 10-band imagery we use a 3-level pyramid. In some cases\n", "\n", - "print(\"Alinging images. Depending on settings this can take from a few seconds to many minutes\")\n", + "print(\"Aligning images. 
Depending on settings this can take from a few seconds to many minutes\")\n", "# Can potentially increase max_iterations for better results, but longer runtimes\n", "warp_matrices, alignment_pairs = imageutils.align_capture(capture,\n", " ref_index = match_index,\n", @@ -137,11 +137,13 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [], "source": [ "cropped_dimensions, edges = imageutils.find_crop_bounds(capture, warp_matrices, warp_mode=warp_mode)\n", - "im_aligned = imageutils.aligned_capture(capture, warp_matrices, warp_mode, cropped_dimensions, match_index, img_type=img_type)" + "im_aligned = capture.create_aligned_capture(warp_matrices=warp_matrices, motion_type=warp_mode, img_type=img_type, match_index=match_index)" ] }, { @@ -481,7 +483,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -495,7 +497,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.7.12" }, "toc": { "nav_menu": {}, diff --git a/Alignment-RigRelatives.ipynb b/Alignment-RigRelatives.ipynb index 215318b3..aa0e80a7 100644 --- a/Alignment-RigRelatives.ipynb +++ b/Alignment-RigRelatives.ipynb @@ -38,13 +38,20 @@ "import os, glob\n", "import micasense.capture as capture\n", "%matplotlib inline\n", + "from pathlib import Path\n", "\n", "panelNames = None\n", "\n", - "# This is an altum image with RigRelatives and a thermal band\n", - "imagePath = os.path.join('.','data','ALTUM1SET','000')\n", - "imageNames = glob.glob(os.path.join(imagePath,'IMG_0008_*.tif'))\n", - "panelNames = glob.glob(os.path.join(imagePath,'IMG_0000_*.tif'))\n", + "# This is an Altum image with RigRelatives and a thermal band\n", + "\n", + "imagePath = Path(\"./data/ALTUM\")\n", + "\n", + "# these will return lists of image paths as strings \n", + "imageNames = list(imagePath.glob('IMG_0021_*.tif'))\n", + "imageNames = [x.as_posix() for x in imageNames]\n", + "\n", + "panelNames = list(imagePath.glob('IMG_0000_*.tif'))\n", + "panelNames = [x.as_posix() for x in panelNames]\n", "\n", "if panelNames is not None:\n", " panelCap = capture.Capture.from_filelist(panelNames)\n", @@ -61,7 +68,7 @@ " if panelCap.panel_albedo() is not None:\n", " panel_reflectance_by_band = panelCap.panel_albedo()\n", " else:\n", - " panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67] #RedEdge band_index order\n", + " panel_reflectance_by_band = [0.49, 0.49, 0.49, 0.49, 0.49] #RedEdge band_index order\n", " panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band) \n", " img_type = \"reflectance\"\n", " capture.plot_undistorted_reflectance(panel_irradiance)\n", @@ -97,11 +104,14 @@ "import 
micasense.imageutils as imageutils\n", "import micasense.plotutils as plotutils\n", "\n", + "reference_band = 5\n", "warp_mode = cv2.MOTION_HOMOGRAPHY\n", - "warp_matrices = capture.get_warp_matrices()\n", + "warp_matrices = capture.get_warp_matrices(ref_index=reference_band)\n", + "\n", + "cropped_dimensions,edges = imageutils.find_crop_bounds(capture,warp_matrices,reference_band=reference_band)\n", + "im_aligned = imageutils.aligned_capture(capture, warp_matrices, warp_mode, cropped_dimensions, reference_band, img_type=img_type)\n", "\n", - "cropped_dimensions,edges = imageutils.find_crop_bounds(capture,warp_matrices)\n", - "im_aligned = imageutils.aligned_capture(capture, warp_matrices, warp_mode, cropped_dimensions, None, img_type=img_type)\n", + "print(im_aligned.shape)\n", "\n", "print(\"warp_matrices={}\".format(warp_matrices))" ] @@ -128,7 +138,7 @@ "cir_band_indices = [3,2,1]\n", "\n", "# Create an empty normalized stack for viewing\n", - "im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],capture.num_bands+1), dtype=np.float32 )\n", + "im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],im_aligned.shape[2]), dtype=np.float32 )\n", "\n", "im_min = np.percentile(im_aligned[:,:,0:2].flatten(), 0.1) # modify with these percentilse to adjust contrast\n", "im_max = np.percentile(im_aligned[:,:,0:2].flatten(), 99.9) # for many images, 0.5 and 99.5 are good values\n", @@ -451,7 +461,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -465,7 +475,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.7.12" }, "toc": { "nav_menu": {}, diff --git a/Alignment.ipynb b/Alignment.ipynb index 9e416d38..ecc51d26 100644 --- a/Alignment.ipynb +++ b/Alignment.ipynb @@ -34,30 +34,28 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [], "source": [ "import os, glob\n", "import micasense.capture as capture\n", "%matplotlib inline\n", + "from pathlib import Path\n", + "import matplotlib.pyplot as plt\n", + "plt.rcParams[\"figure.facecolor\"] = \"w\"\n", "\n", "panelNames = None\n", "\n", - "# # This is an older RedEdge image without RigRelatives\n", - "# imagePath = os.path.join(os.path.abspath('.'),'data','0000SET','000')\n", - "# imageNames = glob.glob(os.path.join(imagePath,'IMG_0001_*.tif'))\n", - "# panelNames = glob.glob(os.path.join(imagePath,'IMG_0000_*.tif'))\n", - "\n", - "# # Image from the example RedEdge imageSet (see the ImageSet notebook) without RigRelatives.\n", - "# imagePath = os.path.expanduser(os.path.join('~','Downloads','RedEdgeImageSet','0000SET'))\n", - "# imageNames = glob.glob(os.path.join(imagePath,'000','IMG_0013_*.tif'))\n", - "# panelNames = glob.glob(os.path.join(imagePath,'000','IMG_0000_*.tif'))\n", + "imagePath = Path(\"./data/ALTUM\")\n", "\n", - "# This is an altum image with RigRelatives and a thermal band\n", - "imagePath = os.path.join('.','data','ALTUM1SET','000')\n", - "imageNames = glob.glob(os.path.join(imagePath,'IMG_0245_*.tif'))\n", - "panelNames = glob.glob(os.path.join(imagePath,'IMG_0000_*.tif'))\n", + "# these will return lists of image paths as strings \n", + "imageNames = list(imagePath.glob('IMG_0021_*.tif'))\n", + "imageNames = [x.as_posix() for x in imageNames]\n", "\n", + "panelNames = list(imagePath.glob('IMG_0000_*.tif'))\n", + "panelNames = [x.as_posix() for x in panelNames]\n", "\n", 
"# Allow this code to align both radiance and reflectance images; bu excluding\n", "# a definition for panelNames above, radiance images will be used\n", @@ -76,7 +74,7 @@ " if panelCap.panel_albedo() is not None:\n", " panel_reflectance_by_band = panelCap.panel_albedo()\n", " else:\n", - " panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67] #RedEdge band_index order\n", + " panel_reflectance_by_band = [0.49, 0.49, 0.49, 0.49, 0.49] #RedEdge band_index order\n", " panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band) \n", " img_type = \"reflectance\"\n", " capture.plot_undistorted_reflectance(panel_irradiance)\n", @@ -130,7 +128,7 @@ "warp_mode = cv2.MOTION_HOMOGRAPHY # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY\n", "pyramid_levels = 0 # for images with RigRelatives, setting this to 0 or 1 may improve alignment\n", "\n", - "print(\"Alinging images. Depending on settings this can take from a few seconds to many minutes\")\n", + "print(\"Aligning images. Depending on settings this can take from a few seconds to many minutes\")\n", "# Can potentially increase max_iterations for better results, but longer runtimes\n", "warp_matrices, alignment_pairs = imageutils.align_capture(capture,\n", " ref_index = match_index,\n", @@ -155,7 +153,8 @@ "metadata": {}, "outputs": [], "source": [ - "cropped_dimensions, edges = imageutils.find_crop_bounds(capture, warp_matrices, warp_mode=warp_mode)\n", + "cropped_dimensions, edges = imageutils.find_crop_bounds(capture, warp_matrices, warp_mode=warp_mode, reference_band=match_index)\n", + "print(cropped_dimensions)\n", "im_aligned = imageutils.aligned_capture(capture, warp_matrices, warp_mode, cropped_dimensions, match_index, img_type=img_type)" ] }, @@ -288,7 +287,7 @@ "\n", "if im_aligned.shape[2] == 6:\n", " filename = filename + \"t\" #thermal\n", - "outRaster = driver.Create(filename+\".tiff\", cols, rows, im_aligned.shape[2], gdal.GDT_UInt16)\n", + "outRaster = driver.Create(filename+\"B.tiff\", cols, rows, im_aligned.shape[2], gdal.GDT_UInt16)\n", "\n", "normalize = (img_type == 'radiance') # normalize radiance images to fit with in UInt16\n", "\n", @@ -559,7 +558,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -573,7 +572,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.7.12" }, "toc": { "nav_menu": {}, diff --git a/Batch Processing v2.ipynb b/Batch Processing v2.ipynb new file mode 100644 index 00000000..20268c9f --- /dev/null +++ b/Batch Processing v2.ipynb @@ -0,0 +1,355 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "20e37d9b", + "metadata": {}, + "source": [ + "# Batch Processing Example\n", + "In this example, we use the `micasense.imageset` class to load a set of directories of images into a list of `micasense.capture` objects, and we iterate over that list, saving out each image as an aligned stack of images as separate bands in a single tiff file each. Part of this process (via `imageutils.write_exif_to_stack`) injects that the GPS, capture datetime, camera model, etc into the processed images, allowing us to stitch those images using commercial software such as Pix4DMapper or Agisoft Metashape.\n", + "\n", + "Note: for this example to work, the images must have a valid RigRelatives tag. 
This requires a RedEdge (3/M/MX) firmware version of at least 3.4.0, or any version of RedEdge-P/Altum-PT/Altum/RedEdge-MX Dual. If your images don't meet that spec, you can also follow this support article to add the RigRelatives tag to your imagery: https://support.micasense.com/hc/en-us/articles/360006368574-Modifying-older-collections-for-Pix4Dfields-support" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c64ead96", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "markdown", + "id": "80646655", + "metadata": {}, + "source": [ + "# Load Images into ImageSet\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2081b06b", + "metadata": {}, + "outputs": [], + "source": [ + "from ipywidgets import FloatProgress, Layout\n", + "from IPython.display import display\n", + "import micasense.imageset as imageset\n", + "import micasense.capture as capture\n", + "import os, glob\n", + "import multiprocessing\n", + "from pathlib import Path\n", + "\n", + "# set to True if you have an Altum-PT\n", + "# or RedEdge-P and wish to output pan-sharpened stacks \n", + "panSharpen = True \n", + "\n", + "# If creating a lot of stacks, it is more efficient to save the metadata\n", + "# and then write all of the exif to the images after the stacks are created\n", + "write_exif_to_individual_stacks = False\n", + "\n", + "panelNames = None\n", + "useDLS = True\n", + "\n", + "# set your image path here. See more here: https://docs.python.org/3/library/pathlib.html\n", + "imagePath = Path(\"./data/REDEDGE-MX\")\n", + "\n", + "# these will return lists of image paths as strings. Comment these out if you aren't using panels. \n", + "panelNames = list(imagePath.glob('IMG_0001_*.tif'))\n", + "panelNames = [x.as_posix() for x in panelNames]\n", + "\n", + "if panelNames:\n", + " panelCap = capture.Capture.from_filelist(panelNames)\n", + "\n", + "# destinations on your computer to put the stacks\n", + "# and RGB thumbnails\n", + "outputPath = imagePath / '..' / 'stacks'\n", + "thumbnailPath = outputPath / 'thumbnails'\n", + "\n", + "cam_model = panelCap.camera_model\n", + "cam_serial = panelCap.camera_serial\n", + "\n", + "# determine if this sensor has a panchromatic band \n", + "if cam_model == 'RedEdge-P' or cam_model == 'Altum-PT':\n", + " panchroCam = True\n", + "else:\n", + " panchroCam = False\n", + " panSharpen = False \n", + " \n", + "# if this is a multicamera system like the RedEdge-MX Dual,\n", + "# we can combine the two serial numbers to help identify \n", + "# this camera system later. 
\n", + "if len(panelCap.camera_serials) > 1:\n", + " cam_serial = \"_\".join(panelCap.camera_serials)\n", + " print(\"Serial number:\",cam_serial)\n", + "else:\n", + " cam_serial = panelCap.camera_serial\n", + " print(\"Serial number:\",cam_serial)\n", + " \n", + "overwrite = False # can be set to set to False to continue interrupted processing\n", + "generateThumbnails = True\n", + "\n", + "# Allow this code to align both radiance and reflectance images; but excluding\n", + "# a definition for panelNames above, radiance images will be used\n", + "# For panel images, efforts will be made to automatically extract the panel information\n", + "# but if the panel/firmware is before Altum 1.3.5, RedEdge 5.1.7 the panel reflectance\n", + "# will need to be set in the panel_reflectance_by_band variable.\n", + "# Note: radiance images will not be used to properly create NDVI/NDRE images below.\n", + "if panelNames is not None:\n", + " panelCap = capture.Capture.from_filelist(panelNames)\n", + "else:\n", + " panelCap = None\n", + "\n", + "if panelCap is not None:\n", + " if panelCap.panel_albedo() is not None and not any(v is None for v in panelCap.panel_albedo()):\n", + " panel_reflectance_by_band = panelCap.panel_albedo()\n", + " else:\n", + " panel_reflectance_by_band = [0.49]*len(panelCap.eo_band_names()) #RedEdge band_index order\n", + " \n", + " panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band) \n", + " img_type = \"reflectance\"\n", + "else:\n", + " if useDLS:\n", + " img_type='reflectance'\n", + " else:\n", + " img_type = \"radiance\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60736009", + "metadata": {}, + "outputs": [], + "source": [ + "## This progress widget is used for display of the long-running process\n", + "f = FloatProgress(min=0, max=1, layout=Layout(width='100%'), description=\"Loading\")\n", + "display(f)\n", + "def update_f(val):\n", + " if (val - f.value) > 0.005 or val == 1: #reduces cpu usage from updating the progressbar by 10x\n", + " f.value=val\n", + "\n", + "%time imgset = imageset.ImageSet.from_directory(imagePath, progress_callback=update_f)\n", + "update_f(1.0)" + ] + }, + { + "cell_type": "markdown", + "id": "6c0a234d", + "metadata": {}, + "source": [ + "# Capture map\n", + "We can map out the capture GPS locations to ensure we are processing the right data. A GeoJSON of the captures will later be saved to the outputPath." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9e9c437c", + "metadata": {}, + "outputs": [], + "source": [ + "import math\n", + "import numpy as np\n", + "from mapboxgl.viz import *\n", + "from mapboxgl.utils import df_to_geojson, create_radius_stops, scale_between\n", + "from mapboxgl.utils import create_color_stops\n", + "import pandas as pd\n", + "\n", + "data, columns = imgset.as_nested_lists()\n", + "df = pd.DataFrame.from_records(data, index='timestamp', columns=columns)\n", + "\n", + "#Insert your mapbox token here\n", + "token = 'pk.eyJ1Ijoic3RlcGhlbm1hbmd1bTIiLCJhIjoiY2xmOXdnYzF1MDFqejNvdGE0YW13aTN5ZyJ9.AG_ckhUqTBjuGC2LuWCfQQ'\n", + "color_property = 'dls-yaw'\n", + "num_color_classes = 8\n", + "\n", + "min_val = df[color_property].min()\n", + "max_val = df[color_property].max()\n", + "\n", + "import jenkspy\n", + "geojson_data = df_to_geojson(df,columns[3:],lat='latitude',lon='longitude')\n", + "breaks = jenkspy.jenks_breaks(df[color_property], nb_class=num_color_classes)\n", + "color_stops = create_color_stops(breaks,colors='YlOrRd')\n", + "\n", + "viz = CircleViz(geojson_data, access_token=token, color_property=color_property,\n", + " color_stops=color_stops,\n", + " center=[df['longitude'].median(),df['latitude'].median()], \n", + " zoom=16, height='600px',\n", + " style='mapbox://styles/mapbox/satellite-streets-v9')\n", + "viz.show()" + ] + }, + { + "cell_type": "markdown", + "id": "e540a655", + "metadata": {}, + "source": [ + "# Define which warp method to use\n", + "For newer data sets with RigRelatives tags (images captured with RedEdge (3/M/MX) version 3.4.0 or greater with a valid calibration load, see https://support.micasense.com/hc/en-us/articles/360005428953-Updating-RedEdge-for-Pix4Dfields), we can use the RigRelatives for a simple alignment. To use this simple alignment, simply set `warp_matrices=None` \n", + "\n", + "For sets without those tags, or sets that require a RigRelatives optimization, we can go through the Alignment.ipynb notebook and get a set of warp_matrices that we can use here to align." 
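The cell below only loads previously saved warp matrices; the save side is not part of this patch. A minimal sketch of how such a file could be produced after running one of the Alignment notebooks, where `save_warp_matrices` is a hypothetical helper, assuming the OpenCV path yields plain numpy arrays and the SIFT path yields `skimage` `ProjectiveTransform` objects (whose 3x3 matrices live in `.params`):

```python
import numpy as np

def save_warp_matrices(filename, matrices, panchro=False):
    # ProjectiveTransform objects store their 3x3 matrix in .params;
    # OpenCV warp matrices are already plain numpy arrays.
    raw = [m.params if panchro else m for m in matrices]
    np.save(filename, np.array(raw), allow_pickle=True)

# e.g., after the SIFT-based alignment in "Alignment v2.ipynb":
# save_warp_matrices(cam_serial + "_warp_matrices_SIFT.npy", warp_matrices_SIFT, panchro=True)
# or after the OpenCV-based alignment in "Alignment.ipynb":
# save_warp_matrices(cam_serial + "_warp_matrices_opencv.npy", warp_matrices)
```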
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "635ad8d8", + "metadata": {}, + "outputs": [], + "source": [ + "from numpy import array\n", + "from numpy import float32\n", + "from skimage.transform import ProjectiveTransform\n", + "\n", + "if panchroCam:\n", + " warp_matrices_filename = cam_serial + \"_warp_matrices_SIFT.npy\"\n", + "else:\n", + " warp_matrices_filename = cam_serial + \"_warp_matrices_opencv.npy\"\n", + "\n", + "if Path('./' + warp_matrices_filename).is_file():\n", + " print(\"Found existing warp matrices for camera\", cam_serial)\n", + " load_warp_matrices = np.load(warp_matrices_filename, allow_pickle=True)\n", + " loaded_warp_matrices = []\n", + " for matrix in load_warp_matrices: \n", + " if panchroCam:\n", + " transform = ProjectiveTransform(matrix=matrix.astype('float64'))\n", + " loaded_warp_matrices.append(transform)\n", + " else:\n", + " loaded_warp_matrices.append(matrix.astype('float32'))\n", + "\n", + " if panchroCam:\n", + " warp_matrices_SIFT = loaded_warp_matrices\n", + " else:\n", + " warp_matrices = loaded_warp_matrices\n", + " print(\"Loaded warp matrices from\",Path('./' + warp_matrices_filename).resolve())\n", + "else:\n", + " print(\"No warp matrices found at expected location:\",warp_matrices_filename)\n", + " \n" + ] + }, + { + "cell_type": "markdown", + "id": "3aae7f09", + "metadata": {}, + "source": [ + "## Align images and save each capture to a layered TIFF file" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cdc195da", + "metadata": {}, + "outputs": [], + "source": [ + "import exiftool\n", + "import datetime\n", + "import micasense.imageutils as imageutils\n", + "exif_list = []\n", + "## This progress widget is used for display of the long-running process\n", + "f2 = FloatProgress(min=0, max=1, layout=Layout(width='100%'), description=\"Saving\")\n", + "display(f2)\n", + "def update_f2(val):\n", + " f2.value=val\n", + "\n", + "if not os.path.exists(outputPath):\n", + " os.makedirs(outputPath)\n", + "if generateThumbnails and not os.path.exists(thumbnailPath):\n", + " os.makedirs(thumbnailPath)\n", + "\n", + "# Save out geojson data so we can open the image capture locations in our GIS\n", + "with open(os.path.join(outputPath,'imageSet.json'),'w') as f:\n", + " f.write(str(geojson_data))\n", + " \n", + "try:\n", + " irradiance = panel_irradiance+[0]\n", + "except NameError:\n", + " irradiance = None\n", + "\n", + "start = datetime.datetime.now()\n", + "for i,capture in enumerate(imgset.captures):\n", + " outputFilename = str(i).zfill(4) + \"_\" + capture.uuid+'.tif'\n", + " thumbnailFilename = str(i).zfill(4) + \"_\" + capture.uuid+'.jpg'\n", + " fullOutputPath = os.path.join(outputPath, outputFilename)\n", + " fullThumbnailPath= os.path.join(thumbnailPath, thumbnailFilename)\n", + " if (not os.path.exists(fullOutputPath)) or overwrite:\n", + " if(len(capture.images) == len(imgset.captures[0].images)):\n", + " if panchroCam:\n", + " capture.radiometric_pan_sharpened_aligned_capture(warp_matrices=warp_matrices_SIFT,irradiance_list=capture.dls_irradiance(), img_type=img_type, write_exif=write_exif_to_individual_stacks)\n", + " else:\n", + " capture.create_aligned_capture(irradiance_list=irradiance, warp_matrices=warp_matrices)\n", + " exif_list.append(imageutils.prepare_exif_for_stacks(capture,fullOutputPath))\n", + " capture.save_capture_as_stack(fullOutputPath, pansharpen=panSharpen,sort_by_wavelength=True, write_exif=write_exif_to_individual_stacks)\n", + " if generateThumbnails:\n", + " 
capture.save_capture_as_rgb(fullThumbnailPath)\n", + " capture.clear_image_data()\n", + " update_f2(float(i)/float(len(imgset.captures)))\n", + "update_f2(1.0)\n", + "end = datetime.datetime.now()\n", + "\n", + "print(\"Saving time: {}\".format(end-start))\n", + "print(\"Alignment+Saving rate: {:.2f} images per second\".format(float(len(imgset.captures))/float((end-start).total_seconds())))" + ] + }, + { + "cell_type": "markdown", + "id": "f4b88c80", + "metadata": {}, + "source": [ + "# Write EXIF data to stacks\n", + "As mentioned above, it is more time intensive to write the exif data to each image as it is created. Here, we write the exif data after all of the TIFF files have been created. This should take a few seconds per stack." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fac88b6a", + "metadata": {}, + "outputs": [], + "source": [ + "if not write_exif_to_individual_stacks:\n", + " start = datetime.datetime.now()\n", + " for exif in exif_list:\n", + " imageutils.write_exif_to_stack(existing_exif_list=exif)\n", + " end = datetime.datetime.now()\n", + " print(\"Saving time: {}\".format(end-start))\n", + " print(\"EXIF writing rate: {:.2f} images per second\".format(float(len(exif_list))/float((end-start).total_seconds())))\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea464070", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/Batch Processing-10Band.ipynb b/Batch Processing-10Band.ipynb index 4f083b79..0fc0e062 100644 --- a/Batch Processing-10Band.ipynb +++ b/Batch Processing-10Band.ipynb @@ -13,7 +13,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -30,7 +30,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "metadata": {}, "outputs": [], "source": [ @@ -40,16 +40,19 @@ "import micasense.capture as capture\n", "import os, glob\n", "import multiprocessing\n", + "from pathlib import Path\n", "\n", "panelNames = None\n", "useDLS = True\n", "\n", - "imagePath = os.path.expanduser(os.path.join('~','Downloads','DualCam-Farm','farm_only'))\n", - "panelNames = glob.glob(os.path.join(imagePath,'IMG_0002_*.tif'))\n", + "imagePath = os.path.expanduser(os.path.join('.','data','REDEDGE-MX-DUAL'))\n", + "panelNames = glob.glob(os.path.join(imagePath,'IMG_0001_*.tif'))\n", "\n", "outputPath = os.path.join(imagePath,'..','stacks')\n", "thumbnailPath = os.path.join(outputPath, '..', 'thumbnails')\n", "\n", + "\n", + "\n", "overwrite = False # Set to False to continue interrupted processing\n", "generateThumbnails = True\n", "\n", @@ -80,9 +83,32 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "279fc40758f9463b8d8c7def1f4bb514", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FloatProgress(value=0.0, description='Loading', layout=Layout(width='100%'), max=1.0)" + ] + }, + "metadata": {}, + "output_type": 
"display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "CPU times: user 60.7 ms, sys: 12.4 ms, total: 73.1 ms\n", + "Wall time: 471 ms\n" + ] + } + ], "source": [ "## This progress widget is used for display of the long-running process\n", "f = FloatProgress(min=0, max=1, layout=Layout(width='100%'), description=\"Loading\")\n", @@ -97,11 +123,593 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, "metadata": { "scrolled": false }, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/stephen/anaconda3/envs/micasense/lib/python3.7/site-packages/IPython/core/display.py:724: UserWarning: Consider using IPython.display.IFrame instead\n", + " warnings.warn(\"Consider using IPython.display.IFrame instead\")\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "import math\n", "import numpy as np\n", @@ -123,13 +731,13 @@ "max_val = df[color_property].max()\n", "\n", "import jenkspy\n", - "breaks = jenkspy.jenks_breaks(df[color_property], nb_class=num_color_classes)\n", + "# breaks = jenkspy.jenks_breaks(df[color_property], nb_class=num_color_classes)\n", "\n", - "color_stops = create_color_stops(breaks,colors='YlOrRd')\n", + "# color_stops = create_color_stops(breaks,colors='YlOrRd')\n", "geojson_data = df_to_geojson(df,columns[3:],lat='latitude',lon='longitude')\n", "\n", "viz = CircleViz(geojson_data, access_token=token, color_property=color_property,\n", - " color_stops=color_stops,\n", + "# color_stops=color_stops,\n", " center=[df['longitude'].median(),df['latitude'].median()], \n", " zoom=16, height='600px',\n", " style='mapbox://styles/mapbox/satellite-streets-v9')\n", @@ -148,7 +756,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -156,27 +764,27 @@ "from numpy import float32\n", "\n", "# Use the warp_matrices derived from the Alignment Tutorial for this RedEdge set without RigRelatives\n", - "warp_matrices = [array([[ 1.0020243e+00, -3.7388311e-04, 2.4971788e+01],\n", - " [ 6.7297497e-04, 1.0005866e+00, 1.7188536e+01],\n", - " [ 2.4259109e-06, -9.2373267e-07, 1.0000000e+00]], dtype=float32), array([[ 9.9140632e-01, -4.6332614e-05, 4.8500401e+01],\n", - " [ 3.2340995e-05, 9.9200422e-01, -1.0915921e+01],\n", - " [-7.3704086e-07, 5.0890253e-07, 1.0000000e+00]], dtype=float32), array([[ 1.0018263e+00, -2.1731904e-04, 5.5316315e+00],\n", - " [ 7.2411756e-04, 1.0021795e+00, 5.8745198e+00],\n", - " [-1.9047379e-08, 9.7758209e-07, 1.0000000e+00]], dtype=float32), array([[ 9.9152303e-01, -5.4825414e-03, 4.1536880e+01],\n", - " [ 3.8441001e-03, 9.9495757e-01, 1.7250452e+01],\n", - " [-3.2921032e-06, -2.4233820e-08, 1.0000000e+00]], dtype=float32), array([[ 1.0006192e+00, -3.0658240e-04, -2.5816131e-01],\n", - " [ 7.8755329e-05, 9.9954307e-01, 2.9809377e-01],\n", - " [ 9.1640561e-07, -1.0784843e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9773926e-01, -6.3800282e-04, 5.2199936e+01],\n", - " [-3.4246168e-03, 9.9601907e-01, 2.0550659e+01],\n", - " [-4.6251063e-07, -4.8716843e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9622118e-01, 3.1637053e-03, 3.7498917e+01],\n", - " [-6.7951437e-03, 9.9743211e-01, 8.9517927e+00],\n", - " [-3.6472218e-06, -2.4649705e-06, 1.0000000e+00]], dtype=float32), array([[ 9.8943901e-01, 3.7658634e-04, 9.4948044e+00],\n", - " [-4.0384033e-03, 9.8851675e-01, 
1.5366467e+01],\n", - " [-2.4371677e-06, -3.8438825e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9749213e-01, 1.6272087e-03, 4.3243721e-01],\n", - " [-7.3282972e-05, 9.9533182e-01, 3.5523354e+01],\n", - " [ 3.8597086e-06, -4.0187538e-07, 1.0000000e+00]], dtype=float32), array([[ 9.9992698e-01, 6.6664284e-03, -9.0784521e+00],\n", - " [-9.0053231e-03, 9.9836856e-01, 1.5190173e+01],\n", - " [-1.6761204e-07, -3.6131762e-06, 1.0000000e+00]], dtype=float32)]" + "warp_matrices = [array([[ 9.7928989e-01, 1.3615261e-04, 1.9574374e-02],\n", + " [-3.7023663e-03, 9.9245304e-01, 2.8355631e+01],\n", + " [-1.0651237e-05, 2.6095395e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9525303e-01, -7.0169556e-04, -6.3409243e+00],\n", + " [-7.1914408e-05, 9.9811482e-01, -6.2239196e-02],\n", + " [-3.6460631e-06, 1.8568957e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9596852e-01, -2.5820474e-03, -1.2950540e+01],\n", + " [ 2.8751660e-03, 9.9919146e-01, 2.6645420e+01],\n", + " [-1.8280600e-06, 2.3902323e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9592507e-01, 4.5777867e-03, 1.4982515e+01],\n", + " [-3.4031910e-03, 9.9773425e-01, 2.2611160e+01],\n", + " [-9.0995320e-07, 2.3299665e-06, 1.0000000e+00]], dtype=float32), array([[ 1.0000000e+00, 2.8202797e-20, 8.9091027e-15],\n", + " [-5.6938062e-19, 1.0000000e+00, 6.4872178e-15],\n", + " [-1.2599002e-21, 1.3424955e-23, 1.0000000e+00]], dtype=float32), array([[ 9.9348122e-01, -1.8129036e-02, 1.1003100e+00],\n", + " [ 1.2132729e-02, 9.9091125e-01, 2.8742294e+01],\n", + " [-2.9285566e-06, -6.2816152e-06, 1.0000000e+00]], dtype=float32), array([[ 9.8954433e-01, -1.8810771e-02, 1.1892141e+01],\n", + " [ 1.3862181e-02, 9.8975760e-01, 2.0044521e+01],\n", + " [-4.0683108e-06, -4.2664565e-06, 1.0000000e+00]], dtype=float32), array([[ 9.8794365e-01, -1.2153405e-02, 1.8298512e+01],\n", + " [ 8.4343022e-03, 9.8829275e-01, 2.8633160e+01],\n", + " [-2.5430195e-06, -3.1491891e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9505156e-01, -1.7393423e-02, 3.6898847e+00],\n", + " [ 1.3367594e-02, 9.9287426e-01, 1.8703876e+01],\n", + " [-1.3000300e-06, -4.7085673e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9182397e-01, -1.2042225e-02, 1.2686370e+01],\n", + " [ 7.6934961e-03, 9.8897797e-01, 2.7970253e+01],\n", + " [-9.2655773e-07, -6.0620891e-06, 1.0000000e+00]], dtype=float32)]" ] }, { @@ -190,7 +798,22 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e7142fb6b14c4be980bee622e12b7cc3", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FloatProgress(value=0.0, description='Saving', layout=Layout(width='100%'), max=1.0)" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "import exiftool\n", "import datetime\n", @@ -208,11 +831,11 @@ " os.makedirs(outputPath)\n", "if generateThumbnails and not os.path.exists(thumbnailPath):\n", " os.makedirs(thumbnailPath)\n", - "\n", + " \n", "# Save out geojson data so we can open the image capture locations in our GIS\n", "with open(os.path.join(outputPath,'imageSet.json'),'w') as f:\n", " f.write(str(geojson_data))\n", - "\n", + " \n", "# If we didn't provide a panel above, irradiance set to None will cause DLS data to be used\n", "try:\n", " irradiance = panel_irradiance+[0]\n", @@ -329,7 +952,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -343,7 +966,7 @@ 
"name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.12" } }, "nbformat": 4, diff --git a/Batch Processing.ipynb b/Batch Processing.ipynb index 2c5557e7..0b41cbfa 100644 --- a/Batch Processing.ipynb +++ b/Batch Processing.ipynb @@ -40,16 +40,24 @@ "import micasense.capture as capture\n", "import os, glob\n", "import multiprocessing\n", + "from pathlib import Path\n", "\n", "panelNames = None\n", "useDLS = True\n", "\n", - "imagePath = os.path.expanduser(os.path.join('~','Downloads','RedEdgeImageSet','0000SET'))\n", - "panelNames = glob.glob(os.path.join(imagePath,'000','IMG_0000_*.tif'))\n", + "# set your image path here. See more here: https://docs.python.org/3/library/pathlib.html\n", + "imagePath = Path(\"./data/REDEDGE-MX\")\n", + "\n", + "# these will return lists of image paths as strings \n", + "panelNames = list(imagePath.glob('IMG_0001_*.tif'))\n", + "panelNames = [x.as_posix() for x in panelNames]\n", + "\n", "panelCap = capture.Capture.from_filelist(panelNames)\n", "\n", - "outputPath = os.path.join(imagePath,'..','stacks')\n", - "thumbnailPath = os.path.join(outputPath, '..', 'thumbnails')\n", + "# destinations on your computer to put the stacks\n", + "# and RGB thumbnails\n", + "outputPath = imagePath / '..' / 'stacks'\n", + "thumbnailPath = outputPath / 'thumbnails'\n", "\n", "overwrite = False # can be set to set to False to continue interrupted processing\n", "generateThumbnails = True\n", @@ -69,7 +77,7 @@ " if panelCap.panel_albedo() is not None and not any(v is None for v in panelCap.panel_albedo()):\n", " panel_reflectance_by_band = panelCap.panel_albedo()\n", " else:\n", - " panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67] #RedEdge band_index order\n", + " panel_reflectance_by_band = [0.49, 0.49, 0.49, 0.49, 0.49] #RedEdge band_index order\n", " \n", " panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band) \n", " img_type = \"reflectance\"\n", @@ -124,13 +132,13 @@ "max_val = df[color_property].max()\n", "\n", "import jenkspy\n", - "breaks = jenkspy.jenks_breaks(df[color_property], nb_class=num_color_classes)\n", + "# breaks = jenkspy.jenks_breaks(df[color_property], nb_class=num_color_classes)\n", "\n", - "color_stops = create_color_stops(breaks,colors='YlOrRd')\n", + "# color_stops = create_color_stops(breaks,colors='YlOrRd')\n", "geojson_data = df_to_geojson(df,columns[3:],lat='latitude',lon='longitude')\n", "\n", "viz = CircleViz(geojson_data, access_token=token, color_property=color_property,\n", - " color_stops=color_stops,\n", + "# color_stops=color_stops,\n", " center=[df['longitude'].median(),df['latitude'].median()], \n", " zoom=16, height='600px',\n", " style='mapbox://styles/mapbox/satellite-streets-v9')\n", @@ -159,17 +167,17 @@ "# Set warp_matrices to none to align using RigRelatives\n", "# Or\n", "# Use the warp_matrices derived from the Alignment Tutorial for this RedEdge set without RigRelatives\n", - "warp_matrices = [array([[ 1.0022864e+00, -2.5218755e-03, -7.8898020e+00],\n", - " [ 2.3614739e-03, 1.0036649e+00, -1.3134377e+01],\n", - " [-1.7785899e-06, 1.1343118e-06, 1.0000000e+00]], dtype=float32), array([[1., 0., 0.],\n", - " [0., 1., 0.],\n", - " [0., 0., 1.]], dtype=float32), array([[ 9.9724638e-01, -1.5535230e-03, 1.2301294e+00],\n", - " [ 8.6745428e-04, 9.9738181e-01, -1.6499169e+00],\n", - " [-8.2816513e-07, -3.4488804e-07, 1.0000000e+00]], dtype=float32), array([[ 1.0007139e+00, -8.4427800e-03, 1.6312805e+01],\n", - " [ 6.2834378e-03, 9.9977130e-01, 
-1.6011697e+00],\n", - " [-1.9520389e-06, -6.3762940e-07, 1.0000000e+00]], dtype=float32), array([[ 9.9284178e-01, 9.2155562e-04, 1.6069822e+01],\n", - " [-3.2895457e-03, 9.9262553e-01, -5.0333548e-01],\n", - " [-1.5845577e-06, -1.7680986e-06, 1.0000000e+00]], dtype=float32)]" + "warp_matrices = [array([[ 1.0109243e+00, 1.3997733e-03, 7.1472993e+00],\n", + " [ 2.3857178e-03, 1.0134553e+00, -1.1758372e+01],\n", + " [ 1.2079964e-06, 4.8187062e-06, 1.0000000e+00]], dtype=float32), array([[1.0000000e+00, 0.0000000e+00, 1.1368684e-13],\n", + " [0.0000000e+00, 1.0000000e+00, 0.0000000e+00],\n", + " [0.0000000e+00, 0.0000000e+00, 1.0000000e+00]], dtype=float32), array([[ 1.0093619e+00, 3.3198819e-03, -3.2410549e+01],\n", + " [ 8.4111997e-04, 1.0132477e+00, 1.3615667e+01],\n", + " [-4.9676191e-07, 7.0111369e-06, 1.0000000e+00]], dtype=float32), array([[ 1.0142691e+00, 6.4295256e-03, -2.3179104e+01],\n", + " [ 4.6660731e-05, 1.0160753e+00, -1.2280207e+01],\n", + " [ 2.7585847e-06, 7.7613631e-06, 1.0000000e+00]], dtype=float32), array([[ 1.0161121e+00, 2.1327983e-03, -2.0565905e+01],\n", + " [ 1.3823286e-03, 1.0168507e+00, -6.6128030e+00],\n", + " [ 2.2218899e-06, 3.5776138e-06, 1.0000000e+00]], dtype=float32)]" ] }, { @@ -311,15 +319,22 @@ "else:\n", " exiftool_cmd = 'exiftool'\n", " \n", - "cmd = '{} -csv=\"{}\" -overwrite_original {}'.format(exiftool_cmd, fullCsvPath, outputPath)\n", + "cmd = '{} -csv=\"{}\" -overwrite_original \"{}\"'.format(exiftool_cmd, fullCsvPath, outputPath)\n", "print(cmd)\n", - "subprocess.check_call(cmd)" + "subprocess.check_call(cmd, shell=True)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -333,7 +348,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.7.12" } }, "nbformat": 4, diff --git a/Captures.ipynb b/Captures.ipynb index 9b02d519..86417c1c 100644 --- a/Captures.ipynb +++ b/Captures.ipynb @@ -5,7 +5,7 @@ "metadata": {}, "source": [ "# Captures\n", - "A `micasense.capture.Capture` object holds a set of 5 (or in the case of Altum 6) images which are captured at the same moment together in a MicaSense camera. Files which meet this criteria will tend to have the same filename except for the suffix, but that is not required to load a captures. Captures can be loaded by starting with one image by calling Capture.from_file('file_name.tif') and adding others using the append_file, or by providing a list of filenames or images. See capture.py for more creation methods." + "A `micasense.capture.Capture` object holds a set of images (one per band) which are captured at the same moment together in a MicaSense camera. Files which meet these criteria will tend to have the same filename except for the suffix, but that is not required to load a capture. Captures can be loaded by starting with one image by calling Capture.from_file('file_name.tif') and adding others using the append_file method, or by providing a list of filenames or images. See capture.py for more creation methods."
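The Captures text above also describes building a capture one image at a time; the cell below only shows `from_filelist`. A minimal sketch of the incremental path, assuming `append_file` accepts a file path as the text indicates (the sample paths exist under this repository's `data` folder):

```python
import micasense.capture as capture

# Build the same capture incrementally instead of with from_filelist()
cap = capture.Capture.from_file('./data/REDEDGE-MX/IMG_0001_1.tif')
for band in range(2, 6):  # remaining RedEdge-MX band suffixes
    cap.append_file('./data/REDEDGE-MX/IMG_0001_{}.tif'.format(band))
print(cap.num_bands)  # expect 5 for a RedEdge-MX
```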
] }, { @@ -18,8 +18,8 @@ "import micasense.capture as capture\n", "%matplotlib inline\n", "\n", - "imagePath = os.path.join('.','data','0000SET','000')\n", - "imageNames = glob.glob(os.path.join(imagePath,'IMG_0000_*.tif'))\n", + "imagePath = os.path.join('.','data','REDEDGE-MX')\n", + "imageNames = glob.glob(os.path.join(imagePath,'IMG_0001_*.tif'))\n", "\n", "capture = capture.Capture.from_filelist(imageNames)\n", "capture.plot_raw()" @@ -56,7 +56,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -70,7 +70,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.1" + "version": "3.7.12" }, "toc": { "nav_menu": {}, diff --git a/ImageSets.ipynb b/ImageSets.ipynb index f0b9d4af..91ab038b 100644 --- a/ImageSets.ipynb +++ b/ImageSets.ipynb @@ -43,7 +43,7 @@ " if (val - f.value) > 0.005 or val == 1: #reduces cpu usage from updating the progressbar by 10x\n", " f.value=val\n", "\n", - "images_dir = os.path.expanduser(os.path.join('~','Downloads','RedEdgeImageSet','0000SET'))\n", + "images_dir = os.path.expanduser(os.path.join('.','data','REDEDGE-MX'))\n", "%time imgset = imageset.ImageSet.from_directory(images_dir, progress_callback=update_f)" ] }, @@ -181,12 +181,12 @@ " panel_timestamps.append(cap.utc_time())\n", " panel_radiances.append(cap.panel_radiance())\n", " dls_irradiances.append(cap.dls_irradiance())\n", - "\n", + " \n", "dls_irradiances = np.asarray(dls_irradiances)\n", "panel_radiances = np.asarray(panel_radiances)\n", "\n", "###\n", - "panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67] #RedEdge band_index order\n", + "panel_reflectance_by_band = [0.49, 0.49, 0.49, 0.49, 0.49] #RedEdge band_index order\n", "panel_irradiance = ground_captures[0].panel_irradiance(panel_reflectance_by_band)\n", "plt.figure()\n", "plt.scatter(ground_captures[0].center_wavelengths(), panel_irradiance);\n", @@ -229,7 +229,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -243,7 +243,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.1" + "version": "3.7.12" }, "toc": { "nav_menu": {}, diff --git a/Images.ipynb b/Images.ipynb index efb29c99..c90c2d99 100644 --- a/Images.ipynb +++ b/Images.ipynb @@ -20,8 +20,8 @@ "import os, glob\n", "%matplotlib inline\n", "\n", - "imagePath = os.path.join('.','data','0000SET','000')\n", - "imageName = glob.glob(os.path.join(imagePath,'IMG_0000_1.tif'))[0]\n", + "imagePath = os.path.join('.','data','ALTUM')\n", + "imageName = glob.glob(os.path.join(imagePath,'IMG_0021_1.tif'))[0]\n", "\n", "img = Image(imageName)\n", "img.plot_all();" @@ -77,7 +77,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -91,7 +91,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.1" + "version": "3.7.12" }, "toc": { "nav_menu": {}, diff --git a/MicaSense Image Processing Setup.ipynb b/MicaSense Image Processing Setup.ipynb old mode 100755 new mode 100644 index bdf8d3ea..cf2ec33d --- a/MicaSense Image Processing Setup.ipynb +++ b/MicaSense Image Processing Setup.ipynb @@ -2,21 +2,19 @@ "cells": [ { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "# MicaSense Image Processing Setup\n", 
"\n", "## Overview\n", "\n", - "This series of tutorials will be a walk through on how to process RedEdge data from raw images through conversion to reflectance. In this first tutorial, we will cover the tools required to do this, get them installed, and verify that the installation works.\n", + "This series of tutorials will be a walk through on how to process RedEdge data from raw images through conversion to reflectance. In this first tutorial, we will cover the tools required to do this, get them installed, and verify that the installation works. \n", "\n", "## System Requirements\n", "\n", - "Our tutorials are written using Python3. Python has great library support for image processing through libraries such as OpenCV, SciKit Image, and others. In this tutorial, we'll use Python, OpenCV, numpy, and matplotlib, as well as the standalone exiftool and it's Python wrapper to open and manipulate RedEdge images to transform raw digital number values into quantitative reflectance. Python 2.7 can work for this tutorial, but we only test with Python3 and later tutorials use some extra libraries that are best supported in Python3 (specifically pysolar), so we recommend that if you're starting with Python from scratch to install Python3.\n", + "Our tutorials are written using Python3. Python has great library support for image processing through libraries such as OpenCV, SciKit Image, and others. In this tutorial, we'll use python, OpenCV, numpy, and matplotlib, as well as the standalone exiftool and it's python wrapper to open and manipulate RedEdge images to transform raw digital number values into quantitative reflectance.\n", "\n", - "This tutorial has been tested on Windows, MacOS, and Linux. It is likely to work on other platforms, especially unix-based platforms like macOS, but you will have to do the legwork to get the required software installed and working.\n", + "This tutorial has been tested on Windows, MacOS, and Linux. It is likely to work on other platforms, especially unix-based platforms like OSX, but you will have to do the legwork to get the required software installed and working.\n", "\n", "### Software/Libraries Overview\n", "\n", @@ -26,18 +24,18 @@ "* [numpy](https://www.numpy.org/)\n", "* [openCV](https://opencv.org/releases.html)\n", "* [matplotlib](https://matplotlib.org/users/installing.html)\n", - "* [exiftool](https://exiftool.org/) + [pyexiftool](https://github.com/sylikc/pyexiftool)\n", + "* [exiftool](https://exiftool.org/) + [pyexiftool](https://github.com/smarnach/pyexiftool) (version 0.4.13 only)\n", "* [scikit-image](https://scikit-image.org/)\n", - "* [zbar](http://zbar.sourceforge.net/) + [pyzbar](https://github.com/NaturalHistoryMuseum/pyzbar)\n", + "* [zbar](https://zbar.sourceforge.net/) + [pyzbar](https://github.com/NaturalHistoryMuseum/pyzbar)\n", "* [pysolar](http://pysolar.org/)\n", "* [pandas](https://pandas.pydata.org/)\n", "* [mapboxgl](https://github.com/mapbox/mapboxgl-jupyter)\n", "\n", - "Below, we go through the options to download and install a full working Python environment with these tools (and their dependencies). 
We're using the [Anaconda](https://www.anaconda.com/download/) or [miniconda](https://conda.io/miniconda.html) environments where possible to ease installation, but if you're already a Python package management guru, you can use `git` to checkout this code repository and look at the `micasense_conda_env.yml` file for the dependencies you'll need in your virtual environment.\n", + "Below, we go through the options to download and install a full working python environment with these tools (and their dependencies). We're using the [Anaconda](https://www.anaconda.com/download/) or [miniconda](https://conda.io/miniconda.html) environments where possible to ease installation, but if you're already a python package management guru, you can use `git` to checkout this code repository and look at the `micasense_conda_env.yml` file for the dependencies you'll need in your virtual environment.\n", "\n", "### Linux (Debian/Ubuntu)\n", "\n", - "For linux (and Mac, to some extent) you can either install the libraries directly using `pip` or install `miniconda` or `anaconda` to create completely separate environments. We have had success installing `miniconda` locally -- it's a smaller install than `anaconda` and can be installed without using `sudo` and doesn't impact the system-installed Python or Python libraries. You will likely still need to use `sudo` to install \n", + "For linux (and Mac, to some extent) you can either install the libraries directly using `pip` or install `miniconda` or `anaconda` to create completely separate environments. We have had success installing `miniconda` locally -- it's a smaller install than `anaconda` and can be installed without using `sudo` and doesn't impact the system-installed python or python libraries. You will likely still need to use `sudo` to install \n", "\n", "The following is what we had to do on a fresh Ubuntu 18.04 image to install the library. First we installed some system tools and libraries:\n", "\n", @@ -47,9 +45,9 @@ " \n", "Next we installed [exiftool](https://exiftool.org/):\n", "\n", - " wget https://cpan.metacpan.org/authors/id/E/EX/EXIFTOOL/Image-ExifTool-12.15.tar.gz\n", - " tar -xvzf Image-ExifTool-12.15.tar.gz \n", - " cd Image-ExifTool-12.15/\n", + " wget https://exiftool.org/Image-ExifTool-12.57.tar.gz\n", + " tar -xvzf Image-ExifTool-12.57.tar.gz \n", + " cd Image-ExifTool-12.57/\n", " perl Makefile.PL \n", " make test\n", " sudo make install\n", @@ -77,13 +75,13 @@ "\n", "### Windows setup\n", "\n", - "When installing on Windows we rely on the [Anaconda](https://www.anaconda.com/download/) Python environment to do most of the heavy lifting for us.\n", + "When installing on Windows we rely on the [Anaconda](https://www.anaconda.com/download/) python environment to do most of the heavy lifting for us.\n", "\n", - "* Install [Anaconda](https://www.anaconda.com/download/) for your system by downloading the **Python 3.6** version\n", + "* Install [Anaconda](https://www.anaconda.com/download/) for your system by downloading the **Python 3.7** version\n", "\n", " * When installing Anaconda, choose **\"install for only me\"** instead of \"install for all users,\" as this simplifies installation of other packages\n", "\n", - "* Download the [exiftool windows package](https://exiftool.org/) and unzip it to a permanent location such as `c:\\exiftool\\`. 
Now we need to tell the Python code where to find exiftool (so we don't have to set it up in every script we write), and we do that by adding the path to exiftool as an environment variable.\n", + "* Download the [exiftool windows package](https://exiftool.org/) and unzip it to a permanent location such as `c:\\exiftool\\`. Now we need to tell the python code where to find exiftool (so we don't have to set it up in every script we write), and we do that by adding the path to exiftool as an environment variable.\n", " * Create an environment variable called `exiftoolpath` with a value of the full path to exiftool. For example, `c:\\exiftool\\exiftool.exe`\n", " * To do this on Windows 10, press Start button or the Windows key, then type `Path` and click `Edit Environment Variables for Your Account`\n", " * Click `New`\n", @@ -98,32 +96,30 @@ " * When it's done, run `activate micasense` to activate the environment configured\n", " * Each time you run start a new anaconda prompt, you'll need to run `activate micasense`\n", " \n", - "### macOS setup\n", + "### MacOS setup\n", "\n", - "First, install [Homebrew](https://brew.sh/) - The Missing Package Manager for macOS (or Linux). If not already installed, Homebrew will also install macOS `xcode` developer tools for you. Open your Terminal and:\n", + "The following steps to get going on MacOS worked for us. \n", "\n", - "`/bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\"`\n", + "First we installed `git` by installing the `xcode` developer tools, or you can follow the instructions at the [git site](https://git-scm.com/downloads). \n", "\n", - "Next, using Homebrew, install the dependencies like:\n", + "Next, we [downloaded and installed exiftool](https://exiftool.org/) using the MacOS installer.\n", "\n", - "```\n", - "brew install cask\n", - "brew install exiftool\n", - "brew install zbar\n", - "brew install miniconda # or brew install anaconda \n", - "brew install git\n", - "brew install git-lfs\n", - "```\n", + "Third, we installed [Homebrew](https://brew.sh/) and used it to install `zbar`:\n", + "\n", + " /usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"\n", + " brew install zbar\n", + " \n", + "Then we installed miniconda. If you're comfortable on the command line, navigate to the [miniconda download page](https://conda.io/miniconda.html) and download the installer for your system. Open an iTerm and follow the [installation instructions](https://conda.io/docs/user-guide/install/index.html).\n", "\n", "If instead you're more comfortable with graphical installers, the [Anaconda](https://www.anaconda.com/download/) version for **Python 3.7** may be right for you.\n", "\n", - "Once these tools are installed, you can check out this repository and create the `micasense conda` environment by opening a Terminal and running the following commands:\n", + "Once these tools are installed, you can check out this repository and create the `micasense conda` environment by opening an iTerm and running the following commands:\n", " \n", " git clone https://github.com/micasense/imageprocessing.git\n", " cd imageprocessing\n", " conda env create -f micasense_conda_env.yml\n", " \n", - "This will take a while (5-10 minutes isn't uncommon). Once it's done, one way to verify our install by running the built-in tests:\n", + "This will take a while (5-10 minutes isn't uncommon). 
Once it's done, one way to verify our install is by running the built-in tests:\n", "\n", " conda activate micasense\n", " pytest .\n", @@ -143,14 +139,14 @@ "\n", "## Testing Installation\n", "\n", - "The following Python snippet can be run from a jupyter notebook, inside iPython, or by saving to a script and running from the command line. If you're on windows, make sure you have set the location of exiftool in the `exiftoolpath` environment variable. If this script succeeds, your system is ready to go! If not, check the installation documentation for the module import that is having issues.\n" + "The following python snippet can be run from a jupyter notebook, inside iPython, or by saving to a script and running from the command line. If you're on windows, make sure you have set the location of exiftool in the `exiftoolpath` environment variable. If this script succeeds, your system is ready to go! If not, check the installation documentation for the module import that is having issues.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "tags": [] + "scrolled": false }, "outputs": [], "source": [ @@ -200,8 +196,8 @@ "outputs": [], "source": [ "from micasense.image import Image\n", - "imagePath = os.path.join('.','data','0000SET','000')\n", - "imageName = glob.glob(os.path.join(imagePath,'IMG_0000_1.tif'))[0]\n", + "imagePath = os.path.join('.','data','REDEDGE-MX')\n", + "imageName = glob.glob(os.path.join(imagePath,'IMG_0001_1.tif'))[0]\n", "\n", "img = Image(imageName)\n", "img.plot_raw(figsize=(8.73,8.73));\n", @@ -227,7 +223,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -241,7 +237,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.7.12" }, "toc": { "nav_menu": {}, @@ -284,5 +280,5 @@ } }, "nbformat": 4, - "nbformat_minor": 4 + "nbformat_minor": 2 } diff --git a/MicaSense Image Processing Tutorial 1.ipynb b/MicaSense Image Processing Tutorial 1.ipynb old mode 100755 new mode 100644 index 1cc0c578..57c50463 --- a/MicaSense Image Processing Tutorial 1.ipynb +++ b/MicaSense Image Processing Tutorial 1.ipynb @@ -32,11 +32,11 @@ "import math\n", "%matplotlib inline\n", "\n", - "imagePath = os.path.join('.','data','0000SET','000')\n", - "imageName = os.path.join(imagePath,'IMG_0000_4.tif')\n", + "imagePath = os.path.join('.','data','REDEDGE-MX')\n", + "imageName = os.path.join(imagePath,'IMG_0001_4.tif')\n", "\n", "# Read raw image DN values\n", - "# reads 16 bit tif - this will likely not work for 12 bit images\n", + "# reads 16 bit tif - converts 12 bit to 16 bit if necessary \n", "imageRaw=plt.imread(imageName)\n", "\n", "# Display the image\n", @@ -196,7 +196,7 @@ "\n", "Now that we have a flat and calibrated radiance image, we can convert into reflectance. To do this, we will use the radiance values of the panel image of known reflectance to determine a scale factor between radiance and reflectance.\n", "\n", - "In this case, we have our MicaSense calibrated reflectance panel and it's known reflectance of 62% in the band of interest. We will extract the area of the image containing the lambertian panel, determine it's radiance to reflectance scale factor, and then scale the whole image by that factor to get a reflectance image." + "In this case, we have our MicaSense calibrated reflectance panel and its known reflectance of 49% in the band of interest. 
We will extract the area of the image containing the Lambertian panel, determine its radiance to reflectance scale factor, and then scale the whole image by that factor to get a reflectance image." ] }, { @@ -208,19 +208,19 @@ "outputs": [], "source": [ "markedImg = radianceImage.copy()\n", - "ulx = 660 # upper left column (x coordinate) of panel area\n", - "uly = 490 # upper left row (y coordinate) of panel area\n", - "lrx = 840 # lower right column (x coordinate) of panel area\n", - "lry = 670 # lower right row (y coordinate) of panel area\n", + "ulx = 610 # upper left column (x coordinate) of panel area\n", + "uly = 610 # upper left row (y coordinate) of panel area\n", + "lrx = 770 # lower right column (x coordinate) of panel area\n", + "lry = 760 # lower right row (y coordinate) of panel area\n", "cv2.rectangle(markedImg,(ulx,uly),(lrx,lry),(0,255,0),3)\n", "\n", "# Our panel calibration by band (from MicaSense for our specific panel)\n", "panelCalibration = { \n", - " \"Blue\": 0.67, \n", - " \"Green\": 0.69, \n", - " \"Red\": 0.68, \n", - " \"Red edge\": 0.67, \n", - " \"NIR\": 0.61 \n", + " \"Blue\": 0.49, \n", + " \"Green\": 0.49, \n", + " \"Red\": 0.49, \n", + " \"Red edge\": 0.49, \n", + " \"NIR\": 0.49 \n", "}\n", "\n", "# Select panel region from radiance image\n", @@ -337,7 +337,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -351,7 +351,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.1" + "version": "3.7.12" }, "toc": { "nav_menu": {}, diff --git a/MicaSense Image Processing Tutorial 2.ipynb b/MicaSense Image Processing Tutorial 2.ipynb index 5c15319a..82e191c8 100644 --- a/MicaSense Image Processing Tutorial 2.ipynb +++ b/MicaSense Image Processing Tutorial 2.ipynb @@ -40,7 +40,7 @@ "import micasense.image as image\n", "%matplotlib inline\n", "\n", - "image_path = os.path.join('.','data','0000SET','000','IMG_0000_1.tif')\n", + "image_path = os.path.join('.','data','ALTUM','IMG_0000_1.tif')\n", "img = image.Image(image_path)\n", "img.plot_raw();" ] @@ -94,7 +94,7 @@ "import os, glob\n", "import micasense.capture as capture\n", "\n", - "images_path = os.path.join('.','data','0000SET','000')\n", + "images_path = os.path.join('.','data','ALTUM')\n", "image_names = glob.glob(os.path.join(images_path,'IMG_0000_*.tif'))\n", "cap = capture.Capture.from_filelist(image_names)\n", "cap.plot_radiance();" @@ -169,16 +169,10 @@ "import micasense.image as image\n", "import micasense.panel as panel\n", "\n", - "image_path = os.path.join('.','data','0000SET','000','IMG_0000_1.tif')\n", + "image_path = os.path.join('.','data','ALTUM','IMG_0000_1.tif')\n", "img = image.Image(image_path)\n", - "# panelCorners - if we dont have zbar installed to scan the QR codes, detect panel manually and \n", - "panelCorners = [[[809,613],[648,615],[646,454],[808,452]],\n", - " [[772,623],[613,625],[610,464],[770,462]],\n", - " [[771,651],[611,653],[610,492],[770,490]],\n", - " [[829,658],[668,659],[668,496],[829,496]],\n", - " [[807,632],[648,634],[645,473],[805,471]]]\n", - "\n", - "pnl = panel.Panel(img,panelCorners = panelCorners[0])\n", + "\n", + "pnl = panel.Panel(img)\n", "print(\"Panel found: {}\".format(pnl.panel_detected()))\n", "print(\"Panel serial: {}\".format(pnl.serial))\n", "print(\"QR Code Corners:\\n{}\".format(pnl.qr_corners()))\n", @@ -224,7 +218,7 @@ "\n", "import micasense.imageset as imageset\n", "import os\n", - "images_dir = 
os.path.join('.','data','0000SET')\n", + "images_dir = os.path.join('.','data','ALTUM')\n", "\n", "imgset = imageset.ImageSet.from_directory(images_dir, progress_callback=update_f)\n", "\n", @@ -260,7 +254,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -274,7 +268,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.1" + "version": "3.7.12" }, "toc": { "nav_menu": {}, diff --git a/MicaSense Image Processing Tutorial 3.ipynb b/MicaSense Image Processing Tutorial 3.ipynb index 74718c30..36c02fe0 100644 --- a/MicaSense Image Processing Tutorial 3.ipynb +++ b/MicaSense Image Processing Tutorial 3.ipynb @@ -16,7 +16,7 @@ "source": [ "## Update for DLS2\n", "\n", - "This tutorial was originally written as a guide to basic processing of DLS1 data for RedEdge-3 and RedeEdge-M. Since October 2018, Altum and RedEdge-MX have been shipped standard with DLS2. DLS1 is a small, red square with a single diffuser on top. DLS2 is a larger, thinner black rectangle with two small diffusers on top and 8 small diffusers on different up-looking faces.\n", + "This tutorial was originally written as a guide to basic processing of DLS1 data for RedEdge-3 and RedEdge-M. Since October 2018, Altum and RedEdge-MX have been shipped standard with DLS2. DLS1 is a small, red square with a single diffuser on top. DLS2 is a larger, thinner black rectangle with two small diffusers on top and 8 small diffusers on different up-looking faces. \n", "\n", "For DLS2 data, we recommend using the `Capture` class to access the `Capture.dls_irradiance()` method. This will provide a compensated horizontal irradiance useful for radiometrically correcting imagery. We leave the below intact for legacy users and as a tutorial on remote sensing calibrations and terms. 
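Since the recommended DLS2 path is only described in prose here, a minimal sketch of it, using the RedEdge-P sample images added by this patch (the method names are the ones used elsewhere in these notebooks; exact units and compensation details live in `micasense/dls.py` and `micasense/capture.py`):

```python
import os, glob
import micasense.capture as capture

# Recommended DLS2 workflow: let the Capture class supply a compensated
# horizontal irradiance per band instead of the legacy DLS1 math below.
image_names = glob.glob(os.path.join('.', 'data', 'REDEDGE-P', 'IMG_0000_*.tif'))
cap = capture.Capture.from_filelist(image_names)
dls_irradiance = cap.dls_irradiance()  # one horizontal irradiance value per band
cap.plot_undistorted_reflectance(dls_irradiance)
```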
\n", "\n", @@ -101,17 +101,9 @@ "import os, glob\n", "import micasense.capture as capture\n", "\n", - "images_path = os.path.join('.','data','0000SET','000')\n", + "images_path = os.path.join('.','data','REDEDGE-P')\n", "image_names = glob.glob(os.path.join(images_path,'IMG_0000_*.tif'))\n", - "cap = capture.Capture.from_filelist(image_names)\n", - "# set panel corners manually if zbar is not installed\n", - "panelCorners = [[[809,613],[648,615],[646,454],[808,452]],\n", - " [[772,623],[613,625],[610,464],[770,462]],\n", - " [[771,651],[611,653],[610,492],[770,490]],\n", - " [[829,658],[668,659],[668,496],[829,496]],\n", - " [[807,632],[648,634],[645,473],[805,471]]]\n", - "\n", - "cap.set_panelCorners(panelCorners)" + "cap = capture.Capture.from_filelist(image_names)" ] }, { @@ -199,7 +191,7 @@ "source": [ "import math\n", "\n", - "panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67] #RedEdge band_index order\n", + "panel_reflectance_by_band = [0.49] * len(cap.eo_band_names()) # This panel has an average of 49% reflectance for each EO band\n", "\n", "panel_radiances = np.array(cap.panel_radiance())\n", "irr_from_panel = math.pi * panel_radiances / panel_reflectance_by_band\n", @@ -234,7 +226,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -248,7 +240,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.1" + "version": "3.7.12" }, "toc": { "nav_menu": {}, diff --git a/Panels.ipynb b/Panels.ipynb index 6568f554..b519a690 100644 --- a/Panels.ipynb +++ b/Panels.ipynb @@ -31,8 +31,8 @@ "from micasense.panel import Panel\n", "%matplotlib inline\n", "\n", - "imagePath = os.path.join('.','data','0000SET','000')\n", - "imageName = glob.glob(os.path.join(imagePath,'IMG_0000_1.tif'))[0]\n", + "imagePath = os.path.join('.','data','ALTUM')\n", + "imageName = glob.glob(os.path.join(imagePath,'IMG_0000_4.tif'))[0]\n", "\n", "img = Image(imageName)\n", "panel = Panel(img)\n", @@ -54,11 +54,16 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [], "source": [ - "imagePath = os.path.join('.','data','ALTUM1SET','000')\n", - "imageName = glob.glob(os.path.join(imagePath,'IMG_0000_1.tif'))[0]\n", + "# imagePath = os.path.join('.','data','ALTUM1SET','000')\n", + "# imageName = glob.glob(os.path.join(imagePath,'IMG_0000_1.tif'))[0]\n", + "\n", + "imagePath = os.path.join('.','data','ALTUM')\n", + "imageName = glob.glob(os.path.join(imagePath,'IMG_0000_4.tif'))[0]\n", "\n", "img = Image(imageName)\n", "if img.auto_calibration_image:\n", @@ -90,7 +95,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -104,7 +109,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.1" + "version": "3.7.12" }, "toc": { "nav_menu": {}, diff --git a/README.md b/README.md index 78b08db0..78c1f9bd 100644 --- a/README.md +++ b/README.md @@ -2,74 +2,108 @@ ## MicaSense RedEdge and Altum Image Processing Tutorials -This repository includes tutorials and examples for processing MicaSense RedEdge and Altum images into usable information using the Python programming language. RedEdge images captured with firmware 2.1.0 (released June 2017) or newer are required. Altum images captured with all firmware versions are supported. 
Dual-camera (10-band) capture are also included. - -The intended audience is researchers and developers with some software development experience that want to do their own image processing. While a number of commercial tools fully support processing RedEdge data into reflectance maps, there are a number of reasons to process your own data, including controlling the entire radiometric workflow (for academic or publication reasons), pre-processing images to be used in a non-radiometric photogrammetry suite, or processing single sets of images without building a larger map. +This repository includes tutorials and examples for processing MicaSense RedEdge and Altum images into usable +information using the Python programming language. RedEdge images captured with firmware 2.1.0 (released June 2017) or +newer are required. Altum images captured with all firmware versions are supported. Dual-camera (10-band) captures are +also included. *As of 2023, RedEdge-P and Altum-PT are also supported in the "v2" notebooks.* Previous notebooks have +been updated to refer to newer images in this repository. + +The intended audience is researchers and developers with some software development experience that want to do their own +image processing. While a number of commercial tools fully support processing MicaSense data into reflectance maps, +there are a number of reasons to process your own data, including controlling the entire radiometric workflow (for +academic or publication reasons), pre-processing images to be used in a non-radiometric photogrammetry suite, or +processing single sets of images without building a larger map. ### What do I need to succeed? -A working knowledge of running Python software on your system and using the command line are both very helpful. We've worked hard to make these tutorials straightforward to run and understand, but the target audience is someone that's looking to learn more about how to process their own imagery and write software to perform more powerful analysis. +A working knowledge of running Python software on your system and using the command line are both very helpful. We've +worked hard to make these tutorials straightforward to run and understand, but the target audience is someone that's +looking to learn more about how to process their own imagery and write software to perform more powerful analysis. -You can start today even if you don't have your own RedEdge or Altum. We provide example images, including full flight datasets. +You can start today even if you don't have your own RedEdge or Altum. We provide example images, including full flight +datasets. -For a user of RedEdge or Altum that wants a turnkey processing solution, this repository probably is not the best place to start. Instead, consider one of the MicaSense processing partners who provide turnkey software for processing and analysis. +For a user of RedEdge or Altum that wants a turnkey processing solution, this repository probably is not the best place +to start. Instead, consider one of the MicaSense processing partners who provide turnkey software for processing and +analysis. -### How do I get set up? +### Tutorial Articles -First, [check out the setup tutorial](https://micasense.github.io/imageprocessing/MicaSense%20Image%20Processing%20Setup.html) which will walk you through installing and checking the necessary tools to run the remaining tutorials. +[Click here to view the tutorial articles](https://micasense.github.io/imageprocessing/index.html). 
The set of example +notebooks and their outputs can be viewed in your browser without downloading anything or running any code. -Next, [click here to view the tutorial articles](https://micasense.github.io/imageprocessing/index.html). The set of example notebooks and their outputs can be viewed in your browser without downloading anything or running any code. +### How do I get set up? -For a quick start, make sure you have [git](https://git-scm.com/downloads), [git-lfs](https://git-lfs.github.com/), and [Anaconda](https://www.anaconda.com/) installed. +First, you'll need to install [git](https://git-scm.com/downloads) and [git-lfs](https://git-lfs.github.com/). Install +both before running `git clone` or you may have issues with the example data files included. -And then: -``` -git clone https://github.com/micasense/imageprocessing -cd imageprocessing -conda env create -f micasense_conda_env.yml # or pip install . -conda activate micasense -jupyter notebook . -``` +Next, `git clone` this repository, as it has all the code and examples you'll need. + +Once you have git installed and the repository cloned, you are ready to start with the first tutorial. Check out +the [setup tutorial](https://micasense.github.io/imageprocessing/MicaSense%20Image%20Processing%20Setup.html) which will +walk you through installing and checking the necessary tools to run the remaining tutorials. ### MicaSense Library Usage -In addition to the tutorials, we've created library code that shows some common transformations, usages, and applications of RedEdge and Altum imagery. In general, these are intended for developers that are familiar with installing and managing python packages and third party software. The purpose of this code is readability and clarity to help others develop processing workflows, therefore performance may not be optimal. +In addition to the tutorials, we've created library code that shows some common transformations, usages, and +applications of RedEdge and Altum imagery. In general, these are intended for developers that are familiar with installing and +managing Python packages and third-party software. The purpose of this code is readability and clarity to help others +develop processing workflows; therefore, performance may not be optimal. -While this code is similar to an installable Python library (and supports the `python setup.py install` process) the main purpose of this library is one of documentation and education. For this reason, we expect most users to be looking at the source code for understanding or improvement, so they will run the notebooks from the directory that the library was `git clone`d it into. +While this code is similar to an installable Python library (and supports the `python setup.py install` process), the +main purpose of this library is one of documentation and education. For this reason, we expect most users to be looking +at the source code for understanding or improvement, so they will run the notebooks from the directory that the library +was `git clone`d into. ### Running this code -The code in these tutorials consists of two parts. First, the tutorials generally end in `.ipynb` and are the Jupyter notebooks that were used to create the web page tutorials linked above. You can run this code by opening a terminal/iTerm (Linux/macOS) or Anaconda Command Prompt (Windows), navigating to the folder you cloned the git repository into, and running +The code in these tutorials consists of two parts. 
First, the tutorials generally end in `.ipynb` and are the Jupyter +notebooks that were used to create the web page tutorials linked above. You can run this code by opening a +terminal/iTerm (Linux/macOS) or Anaconda Command Prompt (Windows), navigating to the folder you cloned the git repository +into, and running ```bash jupyter notebook . ``` -That command should open a web browser window showing the set of files and folder in the repository. Click the `...Setup.ipynb` notebook to get started. +That command should open a web browser window showing the set of files and folders in the repository. Click +the `...Setup.ipynb` notebook to get started. -Second, a set of helper utilities is available in the `micasense` folder that can be used both with these tutorials as well as separtely. +Second, a set of helper utilities is available in the `micasense` folder that can be used both with these tutorials as +well as separately. -Note that some of the hyperlinks in the notebooks may give you a 404 Not Found error. This is because the links are setup to allow the list of files above to be accessed on the github.io site. When running the notebooks, use your jupyter "home" tab to open the different notebooks. +Note that some of the hyperlinks in the notebooks may give you a 404 Not Found error. This is because the links are +set up to allow the list of files above to be accessed on the github.io site. When running the notebooks, use your +Jupyter "home" tab to open the different notebooks. ### Contribution guidelines -Find a problem with the tutorial? Please look through the existing issues (open and closed) and if it's new, [create an issue on github](https://github.com/micasense/imageprocessing/issues). +Find a problem with the tutorial? Please look through the existing issues (open and closed) and if it's +new, [create an issue on GitHub](https://github.com/micasense/imageprocessing/issues). -Want to correct an issue or expand library functionality? Fork the repository, make your fix, and submit a pull request on github. +Want to correct an issue or expand library functionality? Fork the repository, make your fix, and submit a pull request +on GitHub. -Have a question? Please double-check that you're able to run the setup notebook successfully, and resolve any issues with that first. If you're pulling newer code, it may be necessary in some cases to delete and re-create your `micasense` conda environment to make sure you have all of the expected packages. +Have a question? Please double-check that you're able to run the setup notebook successfully, and resolve any issues +with that first. If you're pulling newer code, it may be necessary in some cases to delete and re-create +your `micasense` conda environment to make sure you have all of the expected packages. -This code is a community effort and is not supported by MicaSense support. Please don't reach out to MicaSense support for issues with this codebase; instead, work through the above troubleshooting steps and then [create an issue on github](https://github.com/micasense/imageprocessing/issues). +This code is a community effort and is not supported by MicaSense support. Please don't reach out to MicaSense support +for issues with this codebase; instead, work through the above troubleshooting steps and +then [create an issue on GitHub](https://github.com/micasense/imageprocessing/issues). ### Tests -Tests for many library functions are included in the `tests` diretory. Install the `pytest` module through your package manager (e.g. 
`pip install pytest`) and then tests can be run from the main directory using the command: +Tests for many library functions are included in the `tests` directory. Install the `pytest` module through your package +manager (e.g. `pip install pytest`) and then tests can be run from the main directory using the command: ```bash pytest ``` -Test execution can be relatively slow (2-3 minutes) as there is a lot of image processing occuring in some of the tests, and quite a bit of re-used IO. To speed up tests, install the `pytest-xdist` plugin using `conda` or `pip` and achieve a significant speed up by running tests in parallel. +Test execution can be relatively slow (2-3 minutes) as there is a lot of image processing occurring in some of the tests, +and quite a bit of re-used IO. To speed up tests, install the `pytest-xdist` plugin using `conda` or `pip` and achieve a +significant speed-up by running tests in parallel. ```bash pytest -n auto @@ -77,7 +111,7 @@ Data used by the tests is included in the `data` folder. -### For (Tutorial) Developers +### For (Tutorial) Developers  To generate the HTML pages after updating the jupyter notebooks, run the following command in the repository directory: @@ -91,8 +125,15 @@ The MIT License (MIT) Copyright (c) 2017-2019 MicaSense, Inc. -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
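The `batch_processing_script.py` added below wraps the batch-processing workflow in a command-line tool. A hypothetical invocation, assuming the REDEDGE-MX sample data from this patch and that `IMG_0001_*` is the panel capture (shell globbing supplies the file list that `--panelpath` expects); note the script also looks for a previously saved `<serial>_warp_matrices_*.npy` alignment file in the working directory:

```bash
python batch_processing_script.py \
    --imagepath ./data/REDEDGE-MX \
    --outputpath ./stacks \
    --panelpath ./data/REDEDGE-MX/IMG_0001_*.tif
```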
diff --git a/batch_processing_script.py b/batch_processing_script.py new file mode 100644 index 00000000..7708a946 --- /dev/null +++ b/batch_processing_script.py @@ -0,0 +1,166 @@ +import argparse +import os +import time +from pathlib import Path + +import numpy as np +import pandas as pd +from mapboxgl.utils import df_to_geojson +from skimage.transform import ProjectiveTransform + +import micasense.capture as capture +import micasense.imageset as imageset + +parser = argparse.ArgumentParser( + prog='MicaSenseBatchProcessing', + description='Create aligned, radiometrically corrected image stacks from raw MicaSense imagery' +) + +parser.add_argument('--imagepath', required=True, type=Path) +parser.add_argument('--outputpath', required=True, type=Path) +parser.add_argument('--panelpath', required=True, nargs='*') +parser.add_argument('--alignmentimage') + +args = parser.parse_args() + +print(args.imagepath) +print(args.panelpath) + +pan_sharpen = True + +use_dls = True + +image_path = args.imagepath + +# these will return lists of image paths as strings +# panelNames = list(imagePath.glob('IMG_0000_*.tif')) +# panelNames = [x.as_posix() for x in panelNames] + +panel_names = args.panelpath +panelCap = capture.Capture.from_filelist(panel_names) + +# destinations on your computer to put the stacks +# and RGB thumbnails +outputPath = args.outputpath.resolve().as_posix() +print(outputPath) +thumbnailPath = args.outputpath / 'thumbnails' +thumbnailPath = thumbnailPath.resolve().as_posix() +print(thumbnailPath) + +cam_model = panelCap.camera_model +cam_serial = panelCap.camera_serial + +# determine if this sensor has a panchromatic band +if cam_model == 'RedEdge-P' or cam_model == 'Altum-PT': + panchro_cam = True +else: + panchro_cam = False + pan_sharpen = False + +# if this is a multicamera system like the RedEdge-MX Dual, +# we can combine the two serial numbers to help identify +# this camera system later. +if len(panelCap.camera_serials) > 1: + cam_serial = "_".join(panelCap.camera_serials) + print("Serial number:", cam_serial) +else: + cam_serial = panelCap.camera_serial + print("Serial number:", cam_serial) + +overwrite = False # set to True to re-process existing stacks; False continues interrupted processing +generateThumbnails = True + +# This code can align either radiance or reflectance images; if panel images are +# not provided, radiance images will be used. +# For panel images, efforts will be made to automatically extract the panel information, +# but if the panel/firmware predates Altum 1.3.5 or RedEdge 5.1.7, the panel reflectance +# will need to be set in the panel_reflectance_by_band variable. +# Note: radiance images cannot be used to properly create the NDVI/NDRE images below. 
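+# For reference, panel_irradiance() below converts the mean panel radiance L +# in each band into a horizontal irradiance estimate via E = pi * L / albedo, +# the Lambertian-panel relation used in Tutorial 3. 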
+if panel_names is not None: + panelCap = capture.Capture.from_filelist(panel_names) +else: + panelCap = None + +if panelCap is not None: + if panelCap.panel_albedo() is not None and not any(v is None for v in panelCap.panel_albedo()): + panel_reflectance_by_band = panelCap.panel_albedo() + else: + panel_reflectance_by_band = [0.49] * len(panelCap.eo_band_names()) # assume 49% reflectance for each EO band + + panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band) + img_type = "reflectance" +else: + if use_dls: + img_type = 'reflectance' + else: + img_type = "radiance" + +imgset = imageset.ImageSet.from_directory(image_path) + +data, columns = imgset.as_nested_lists() +df = pd.DataFrame.from_records(data, index='timestamp', columns=columns) + +geojson_data = df_to_geojson(df, columns[3:], lat='latitude', lon='longitude') + +if panchro_cam: + warp_matrices_filename = cam_serial + "_warp_matrices_SIFT.npy" +else: + warp_matrices_filename = cam_serial + "_warp_matrices_opencv.npy" + +if Path('./' + warp_matrices_filename).is_file(): + print("Found existing warp matrices for camera", cam_serial) + load_warp_matrices = np.load(warp_matrices_filename, allow_pickle=True) + loaded_warp_matrices = [] + for matrix in load_warp_matrices: + if panchro_cam: + transform = ProjectiveTransform(matrix=matrix.astype('float64')) + loaded_warp_matrices.append(transform) + else: + loaded_warp_matrices.append(matrix.astype('float32')) + + if panchro_cam: + warp_matrices_SIFT = loaded_warp_matrices + else: + warp_matrices = loaded_warp_matrices + print("Loaded warp matrices from", Path('./' + warp_matrices_filename).resolve()) +else: + # without saved warp matrices the aligned-capture calls below would fail + # with a NameError, so stop with a clear message instead + raise SystemExit("No warp matrices found at expected location: " + warp_matrices_filename) + +if not os.path.exists(outputPath): + os.makedirs(outputPath) +if generateThumbnails and not os.path.exists(thumbnailPath): + os.makedirs(thumbnailPath) + +# Save out geojson data, so we can open the image capture locations in our GIS +with open(os.path.join(outputPath, 'imageSet.json'), 'w') as f: + f.write(str(geojson_data)) + +try: + irradiance = panel_irradiance + [0] # extra 0 covers the non-EO (thermal) band, if any +except NameError: + irradiance = None + +start = time.time() +for i, cap in enumerate(imgset.captures): # 'cap', not 'capture', to avoid shadowing the micasense.capture module + outputFilename = cap.uuid + '.tif' + thumbnailFilename = cap.uuid + '.jpg' + fullOutputPath = os.path.join(outputPath, outputFilename) + fullThumbnailPath = os.path.join(thumbnailPath, thumbnailFilename) + if (not os.path.exists(fullOutputPath)) or overwrite: + if (len(cap.images) == len(imgset.captures[0].images)): + if panchro_cam: + cap.radiometric_pan_sharpened_aligned_capture(warp_matrices=warp_matrices_SIFT, + irradiance_list=irradiance) + else: + cap.create_aligned_capture(irradiance_list=irradiance, warp_matrices=warp_matrices) + cap.save_capture_as_stack(fullOutputPath, pansharpen=pan_sharpen, sort_by_wavelength=False) + if generateThumbnails: + cap.save_capture_as_rgb(fullThumbnailPath) + current = time.time() + diff = current - start + print("Saved stack", str(i), "of", str(len(imgset.captures)), "in", str(int(diff)), "seconds", end="\r") + cap.clear_image_data() +end = time.time() + +print("Saving time:", end - start) diff --git a/data/0000SET/000/IMG_0000_1.tif b/data/0000SET/000/IMG_0000_1.tif deleted file mode 100644 index c64d7a57..00000000 --- a/data/0000SET/000/IMG_0000_1.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:248f42023cda80cae83944027b96d284d9b5f4573892dc597e99677ffe472eef -size 2465274 diff --git 
a/data/0000SET/000/IMG_0000_2.tif b/data/0000SET/000/IMG_0000_2.tif deleted file mode 100644 index a347f9a1..00000000 --- a/data/0000SET/000/IMG_0000_2.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e521e805839c129ea6b23c98e785a9f63486db042b05e6fdc9c248723909ecea -size 2465278 diff --git a/data/0000SET/000/IMG_0000_3.tif b/data/0000SET/000/IMG_0000_3.tif deleted file mode 100644 index e7625663..00000000 --- a/data/0000SET/000/IMG_0000_3.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:110d36022643142501a04f8ceac1309abf43bd03fe39cde9ee0101676986e0eb -size 2465274 diff --git a/data/0000SET/000/IMG_0000_4.tif b/data/0000SET/000/IMG_0000_4.tif deleted file mode 100644 index 8d691162..00000000 --- a/data/0000SET/000/IMG_0000_4.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:488fcc857be9583e69f533c010a82eee7e96dc690e266678f896f78f8d4e84ea -size 2465274 diff --git a/data/0000SET/000/IMG_0000_5.tif b/data/0000SET/000/IMG_0000_5.tif deleted file mode 100644 index 1d2d4c56..00000000 --- a/data/0000SET/000/IMG_0000_5.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:47d7a1a0c2eb289ce8ee1069c861e2a9838c7441d5038d6e4c9c9e37af1299fc -size 2465280 diff --git a/data/0000SET/000/IMG_0001_1.tif b/data/0000SET/000/IMG_0001_1.tif deleted file mode 100644 index 56d43656..00000000 --- a/data/0000SET/000/IMG_0001_1.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ee210953eefc85e3551b143f91b65c058743afdf4a2ff5a8cac98350cb8b7c8e -size 2465280 diff --git a/data/0000SET/000/IMG_0001_2.tif b/data/0000SET/000/IMG_0001_2.tif deleted file mode 100644 index 19d2f54b..00000000 --- a/data/0000SET/000/IMG_0001_2.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d1b25a7317b45ae5d64c8111048faac74ed77ddb2b29533b3b50218a6c1eccac -size 2465282 diff --git a/data/0000SET/000/IMG_0001_3.tif b/data/0000SET/000/IMG_0001_3.tif deleted file mode 100644 index d0564132..00000000 --- a/data/0000SET/000/IMG_0001_3.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:048230b498e5f8d8e68935e4ecc64de99a3899b7900e721dcd876034fdd0ad93 -size 2465276 diff --git a/data/0000SET/000/IMG_0001_4.tif b/data/0000SET/000/IMG_0001_4.tif deleted file mode 100644 index 4ee5e1aa..00000000 --- a/data/0000SET/000/IMG_0001_4.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3581de2646fcf28b42a059551a1938ddda3ba744f954d6246748a4af85e8ecf2 -size 2465278 diff --git a/data/0000SET/000/IMG_0001_5.tif b/data/0000SET/000/IMG_0001_5.tif deleted file mode 100644 index ebd96c48..00000000 --- a/data/0000SET/000/IMG_0001_5.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6240d3c9fd8e89ee57c6d06ff837a38b40e12c7a3456dc19ac6bbcb5bf4bd833 -size 2465282 diff --git a/data/0001SET/000/IMG_0002_4.tif b/data/0001SET/000/IMG_0002_4.tif deleted file mode 100755 index 732f64f4..00000000 --- a/data/0001SET/000/IMG_0002_4.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1187b500f0196ce91e3b88a110e154b848a68d2a29bae2d9f69a30bc968bc88e -size 2465342 diff --git a/data/0001SET/000/IMG_0003_1.tif b/data/0001SET/000/IMG_0003_1.tif deleted file mode 100755 index 83c26d36..00000000 --- a/data/0001SET/000/IMG_0003_1.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:ad25ef1f833b633a6652ed798d324efa1b7220569f71dc2419bcfe23108c2a4b -size 2464016 diff --git a/data/0002SET/000/IMG_0000_1.tif b/data/0002SET/000/IMG_0000_1.tif deleted file mode 100644 index b2128f10..00000000 --- a/data/0002SET/000/IMG_0000_1.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e0ef38734a94aae45347f96512d1d084d470956e805c0907a8d7aa659d289ce6 -size 2466318 diff --git a/data/0002SET/000/IMG_0000_2.tif b/data/0002SET/000/IMG_0000_2.tif deleted file mode 100644 index 421c52e0..00000000 --- a/data/0002SET/000/IMG_0000_2.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aa00bc26b2037d67f1de7c4618b9d3ac6fc1bea5e22851dcf13a3cdcdad8a400 -size 2466308 diff --git a/data/0002SET/000/IMG_0000_3.tif b/data/0002SET/000/IMG_0000_3.tif deleted file mode 100644 index b4915698..00000000 --- a/data/0002SET/000/IMG_0000_3.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e41e98bea9994eff7959f4e93c134f0185f8287e2cda1bd19493528de3610b6f -size 2466300 diff --git a/data/0002SET/000/IMG_0000_4.tif b/data/0002SET/000/IMG_0000_4.tif deleted file mode 100644 index 0e2702e2..00000000 --- a/data/0002SET/000/IMG_0000_4.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:008b19f85db896219d662eb88434103e8393af09e43fbbe00f0be0fa40e848b8 -size 2466336 diff --git a/data/0002SET/000/IMG_0000_5.tif b/data/0002SET/000/IMG_0000_5.tif deleted file mode 100644 index df248a6b..00000000 --- a/data/0002SET/000/IMG_0000_5.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1c149e3bc4b0edc16c351c46922aec0aa42c14a820e00de818edf7c9a6252169 -size 2466332 diff --git a/data/10BANDSET/000/IMG_0000_1.tif b/data/10BANDSET/000/IMG_0000_1.tif deleted file mode 100644 index 6aed3457..00000000 --- a/data/10BANDSET/000/IMG_0000_1.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a40134121e1b10440279b37da6f489a70046ee8bb1b570ab32e486882f2b3848 -size 2466196 diff --git a/data/10BANDSET/000/IMG_0000_10.tif b/data/10BANDSET/000/IMG_0000_10.tif deleted file mode 100644 index 7f6a9b04..00000000 --- a/data/10BANDSET/000/IMG_0000_10.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:600a305204cbab2b7fe0dc5c6398ee68e8c7b77605aa4de3d300124ff09d30de -size 2465694 diff --git a/data/10BANDSET/000/IMG_0000_2.tif b/data/10BANDSET/000/IMG_0000_2.tif deleted file mode 100644 index 8a6c1347..00000000 --- a/data/10BANDSET/000/IMG_0000_2.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:72991a75c5e0804744cb16b97ec88da24391602203e19dd0a05140b059ef2694 -size 2466188 diff --git a/data/10BANDSET/000/IMG_0000_3.tif b/data/10BANDSET/000/IMG_0000_3.tif deleted file mode 100644 index 5026afcc..00000000 --- a/data/10BANDSET/000/IMG_0000_3.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2c5bfcf7596d76a853ff5e945fa83b98713f03d1e4ec61f0a693c42c81cf9a17 -size 2466188 diff --git a/data/10BANDSET/000/IMG_0000_4.tif b/data/10BANDSET/000/IMG_0000_4.tif deleted file mode 100644 index ce9b1242..00000000 --- a/data/10BANDSET/000/IMG_0000_4.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6dd4f58759ef4b2627728eb4c28adca711ece017223f8cca64eb6bfd2940cc5f -size 2466196 diff --git a/data/10BANDSET/000/IMG_0000_5.tif b/data/10BANDSET/000/IMG_0000_5.tif deleted file mode 100644 index 
e2ea0e58..00000000 --- a/data/10BANDSET/000/IMG_0000_5.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8f6d0e3802c78a4ce5e107f26c97cc4ad1e5fe6a87c78955a4fb41e19382f735 -size 2466198 diff --git a/data/10BANDSET/000/IMG_0000_6.tif b/data/10BANDSET/000/IMG_0000_6.tif deleted file mode 100644 index 0ddb9965..00000000 --- a/data/10BANDSET/000/IMG_0000_6.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1e37f9786c8a51f313a6437e6ddd3fcee6f198bedeaf53ffb53cb4210ca6b1b0 -size 2465698 diff --git a/data/10BANDSET/000/IMG_0000_7.tif b/data/10BANDSET/000/IMG_0000_7.tif deleted file mode 100644 index 6cba9be3..00000000 --- a/data/10BANDSET/000/IMG_0000_7.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:493bdd84bba111dd35ddc094d620c3acc01af24907a65191b0625c237352accf -size 2465714 diff --git a/data/10BANDSET/000/IMG_0000_8.tif b/data/10BANDSET/000/IMG_0000_8.tif deleted file mode 100644 index 52440229..00000000 --- a/data/10BANDSET/000/IMG_0000_8.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:69387338b89dfab6723f0b5d017087fa566697563a7691d51527a80f93165935 -size 2465648 diff --git a/data/10BANDSET/000/IMG_0000_9.tif b/data/10BANDSET/000/IMG_0000_9.tif deleted file mode 100644 index f45e9a6d..00000000 --- a/data/10BANDSET/000/IMG_0000_9.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:500d7d89c4621373b6c7ca8a4779b4171f505da41543c301d49cf1d5a50157cd -size 2465674 diff --git a/data/10BANDSET/000/IMG_0431_1.tif b/data/10BANDSET/000/IMG_0431_1.tif deleted file mode 100644 index a53a4b3a..00000000 --- a/data/10BANDSET/000/IMG_0431_1.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:20ae60bea30d14fcaa51b6ed6e729f5e4e47ecb9540e22d5c9ace3d3faa96074 -size 2465850 diff --git a/data/10BANDSET/000/IMG_0431_10.tif b/data/10BANDSET/000/IMG_0431_10.tif deleted file mode 100644 index 6555f46a..00000000 --- a/data/10BANDSET/000/IMG_0431_10.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1a21d4c66d8c0630bac9d959298cd3f9e29393d57c80a82715c52eee43247e3c -size 2465332 diff --git a/data/10BANDSET/000/IMG_0431_2.tif b/data/10BANDSET/000/IMG_0431_2.tif deleted file mode 100644 index 7ec56290..00000000 --- a/data/10BANDSET/000/IMG_0431_2.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ac3b8d2d9a45f27e18c85db103cc95558e1deb03be813d25b3a210700c30a506 -size 2465830 diff --git a/data/10BANDSET/000/IMG_0431_3.tif b/data/10BANDSET/000/IMG_0431_3.tif deleted file mode 100644 index c80d0651..00000000 --- a/data/10BANDSET/000/IMG_0431_3.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:74008850326fc703f54ca46b227e8f106c461e966c9f8e963380cac3cc667ae2 -size 2465832 diff --git a/data/10BANDSET/000/IMG_0431_4.tif b/data/10BANDSET/000/IMG_0431_4.tif deleted file mode 100644 index 7c930105..00000000 --- a/data/10BANDSET/000/IMG_0431_4.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:56dbbb7dddb15c10f2ed8beef4e5896be69bfd726d2f235ab5ab26485ed1c0e8 -size 2465840 diff --git a/data/10BANDSET/000/IMG_0431_5.tif b/data/10BANDSET/000/IMG_0431_5.tif deleted file mode 100644 index 9ce7c07f..00000000 --- a/data/10BANDSET/000/IMG_0431_5.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:8cd66d530bdf11b68cc41d1caddfdba8abf4c4ff31f36da9ae5971f335e17989 -size 2465844 diff --git a/data/10BANDSET/000/IMG_0431_6.tif b/data/10BANDSET/000/IMG_0431_6.tif deleted file mode 100644 index 2d5bc074..00000000 --- a/data/10BANDSET/000/IMG_0431_6.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3d2e89fbea0515aca8c49302e889064981baa418f38f9910e9ba184707bb2e01 -size 2465338 diff --git a/data/10BANDSET/000/IMG_0431_7.tif b/data/10BANDSET/000/IMG_0431_7.tif deleted file mode 100644 index 73ca853d..00000000 --- a/data/10BANDSET/000/IMG_0431_7.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ba4699bfd1dd07946b7d87da81b6fb0bcfa9dd03e1233e5f47e8e3c9890de26c -size 2465350 diff --git a/data/10BANDSET/000/IMG_0431_8.tif b/data/10BANDSET/000/IMG_0431_8.tif deleted file mode 100644 index 73fe1fe0..00000000 --- a/data/10BANDSET/000/IMG_0431_8.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:24f6f8657e2ef4b4589764044eecc608992bfe5950f984bd7cb27c1346678382 -size 2465286 diff --git a/data/10BANDSET/000/IMG_0431_9.tif b/data/10BANDSET/000/IMG_0431_9.tif deleted file mode 100644 index d660227b..00000000 --- a/data/10BANDSET/000/IMG_0431_9.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0fb1196332670b30083c6bf468fa79e4fb15eaf0f7af5c24c7d9db9656b8818c -size 2465314 diff --git a/data/ALTUM-PT/IMG_0000_1.tif b/data/ALTUM-PT/IMG_0000_1.tif new file mode 100644 index 00000000..a26a6554 --- /dev/null +++ b/data/ALTUM-PT/IMG_0000_1.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af3a4a1334f25d54c3d4d4d33bdc90c01729a3c777ecc119477e5893c9d3fa08 +size 4789586 diff --git a/data/ALTUM-PT/IMG_0000_2.tif b/data/ALTUM-PT/IMG_0000_2.tif new file mode 100644 index 00000000..002327a3 --- /dev/null +++ b/data/ALTUM-PT/IMG_0000_2.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ac873f5037e43991899f28414ae67e92d770c7760a6f16c45f02113a6a9b640 +size 4789522 diff --git a/data/ALTUM-PT/IMG_0000_3.tif b/data/ALTUM-PT/IMG_0000_3.tif new file mode 100644 index 00000000..50eb8d1f --- /dev/null +++ b/data/ALTUM-PT/IMG_0000_3.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd75de6a4e7f58b9c6a32ad72996a76787d02f07e33a43cfe3be5162844ff1db +size 4789584 diff --git a/data/ALTUM-PT/IMG_0000_4.tif b/data/ALTUM-PT/IMG_0000_4.tif new file mode 100644 index 00000000..6df4458d --- /dev/null +++ b/data/ALTUM-PT/IMG_0000_4.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a7c41d538c42a6b84ddd1851a3a5e3baf0fd33858e6273882bab774b350aa82 +size 4789578 diff --git a/data/ALTUM-PT/IMG_0000_5.tif b/data/ALTUM-PT/IMG_0000_5.tif new file mode 100644 index 00000000..3c73f404 --- /dev/null +++ b/data/ALTUM-PT/IMG_0000_5.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99400536205e381e1bcfb0bd79919d9e318155ca4931cbb6bec95bb5cb15b98f +size 4789600 diff --git a/data/ALTUM-PT/IMG_0000_6.tif b/data/ALTUM-PT/IMG_0000_6.tif new file mode 100644 index 00000000..cf164dfb --- /dev/null +++ b/data/ALTUM-PT/IMG_0000_6.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f070aa47a43527c3bef8e02e67c794d01d34cc7210fa3af3c57ab1eb29e01e73 +size 18563244 diff --git a/data/ALTUM-PT/IMG_0000_7.tif b/data/ALTUM-PT/IMG_0000_7.tif new file mode 100644 index 00000000..aa3f9efa --- /dev/null +++ b/data/ALTUM-PT/IMG_0000_7.tif @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:b438220325849cb552a2ea4d8725d54f917a4665158a0e8859f3be6c1ae73bbc +size 171482 diff --git a/data/ALTUM-PT/IMG_0010_1.tif b/data/ALTUM-PT/IMG_0010_1.tif new file mode 100644 index 00000000..a4fe7245 --- /dev/null +++ b/data/ALTUM-PT/IMG_0010_1.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d0945f10238129a60535bf7e30d91f3797427022168f24faee061219941d525 +size 4789258 diff --git a/data/ALTUM-PT/IMG_0010_2.tif b/data/ALTUM-PT/IMG_0010_2.tif new file mode 100644 index 00000000..c38be52f --- /dev/null +++ b/data/ALTUM-PT/IMG_0010_2.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78da4cb67a0a57382a676df5c6627f06eb3e83f31266cf1bfeb6a77cbfeb9f03 +size 4789180 diff --git a/data/ALTUM-PT/IMG_0010_3.tif b/data/ALTUM-PT/IMG_0010_3.tif new file mode 100644 index 00000000..55f724f5 --- /dev/null +++ b/data/ALTUM-PT/IMG_0010_3.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d4a8c7affd562d948ee1a3affd12ee4e92c3ec2edda7a1ea279dd3dfbe9774f +size 4789244 diff --git a/data/ALTUM-PT/IMG_0010_4.tif b/data/ALTUM-PT/IMG_0010_4.tif new file mode 100644 index 00000000..c6bdc5c4 --- /dev/null +++ b/data/ALTUM-PT/IMG_0010_4.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:604c969b0369fe25afbcff4f70d9924fa7a199e8a01c599f481e4f60e6517094 +size 4789236 diff --git a/data/ALTUM-PT/IMG_0010_5.tif b/data/ALTUM-PT/IMG_0010_5.tif new file mode 100644 index 00000000..4c18f25c --- /dev/null +++ b/data/ALTUM-PT/IMG_0010_5.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e59be8cae1b85deae167d4c39dc9e2ef66e6c9fcc1bd42a1cd7a19b029245d26 +size 4789246 diff --git a/data/ALTUM-PT/IMG_0010_6.tif b/data/ALTUM-PT/IMG_0010_6.tif new file mode 100644 index 00000000..00251f46 --- /dev/null +++ b/data/ALTUM-PT/IMG_0010_6.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebadf851d7d8c6229ddfd8e404ada1b04aa13d13a1142220a889c1580889344e +size 18562896 diff --git a/data/ALTUM-PT/IMG_0010_7.tif b/data/ALTUM-PT/IMG_0010_7.tif new file mode 100644 index 00000000..04695827 --- /dev/null +++ b/data/ALTUM-PT/IMG_0010_7.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf37fab62036cc1a806e01e36cbcf4936f3583cb87eed3fb42d1fd0ec9f89ca9 +size 171484 diff --git a/data/ALTUM/IMG_0000_1.tif b/data/ALTUM/IMG_0000_1.tif new file mode 100644 index 00000000..f367f598 --- /dev/null +++ b/data/ALTUM/IMG_0000_1.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31ce49bd918828d6bcebb506625e81a6679865693fca23ed8e469d8f8ddc626a +size 6382434 diff --git a/data/ALTUM/IMG_0000_2.tif b/data/ALTUM/IMG_0000_2.tif new file mode 100644 index 00000000..70abb167 --- /dev/null +++ b/data/ALTUM/IMG_0000_2.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad12ca29a45e880952b7dbe9b4f680faac0c6495927f0428eac0966d4aed8f0b +size 6382074 diff --git a/data/ALTUM/IMG_0000_3.tif b/data/ALTUM/IMG_0000_3.tif new file mode 100644 index 00000000..9b6a2d4e --- /dev/null +++ b/data/ALTUM/IMG_0000_3.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbf2f2a8a9efb3dc9369b39841c0121e71474021a89a251a8c4d9b9ca5c3f435 +size 6382466 diff --git a/data/ALTUM/IMG_0000_4.tif b/data/ALTUM/IMG_0000_4.tif new file mode 100644 index 00000000..d97df2de --- /dev/null +++ b/data/ALTUM/IMG_0000_4.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:cfcfd9d3c19428b893bc29918bb3fb745ef907532c1dcd723465c2bb3dc0ca31 +size 6382454 diff --git a/data/ALTUM/IMG_0000_5.tif b/data/ALTUM/IMG_0000_5.tif new file mode 100644 index 00000000..ac72b69f --- /dev/null +++ b/data/ALTUM/IMG_0000_5.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04295e6e0df92370a34ff089e002b8a81d8ff09ef9a4926d499d786931eba11b +size 6382488 diff --git a/data/ALTUM/IMG_0000_6.tif b/data/ALTUM/IMG_0000_6.tif new file mode 100644 index 00000000..057d2960 --- /dev/null +++ b/data/ALTUM/IMG_0000_6.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a5eed0f544ff0146cfc138a9f65a85c76d65e68b4d728388f71c284f7c1ea72 +size 45114 diff --git a/data/ALTUM/IMG_0021_1.tif b/data/ALTUM/IMG_0021_1.tif new file mode 100644 index 00000000..004fa2c9 --- /dev/null +++ b/data/ALTUM/IMG_0021_1.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:639999a7d2c6235924f6046c639d7aaf775bfb913b5917a43c31f7cc896d4ce2 +size 6382262 diff --git a/data/ALTUM/IMG_0021_2.tif b/data/ALTUM/IMG_0021_2.tif new file mode 100644 index 00000000..4f02dc2d --- /dev/null +++ b/data/ALTUM/IMG_0021_2.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba160df1469ae3264e4a2f9b1751b01dc49724a28d1ebf4f2881945e66fbf2cf +size 6382262 diff --git a/data/ALTUM/IMG_0021_3.tif b/data/ALTUM/IMG_0021_3.tif new file mode 100644 index 00000000..ae157a9c --- /dev/null +++ b/data/ALTUM/IMG_0021_3.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71f8ec3867f31a1867c5940844626f204556c3f183eb2ed32f7c810a013bc606 +size 6382268 diff --git a/data/ALTUM/IMG_0021_4.tif b/data/ALTUM/IMG_0021_4.tif new file mode 100644 index 00000000..bf5fdc45 --- /dev/null +++ b/data/ALTUM/IMG_0021_4.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:088a03a108e982d2d4f9124069703ed4720c19e73dcdb85bc5fa25178535121f +size 6382272 diff --git a/data/ALTUM/IMG_0021_5.tif b/data/ALTUM/IMG_0021_5.tif new file mode 100644 index 00000000..3f95d3a6 --- /dev/null +++ b/data/ALTUM/IMG_0021_5.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c66dfa9b4651541da3be577e46ee0d0175f9b7e5e536a3ba3f18341b13644f93 +size 6382306 diff --git a/data/ALTUM/IMG_0021_6.tif b/data/ALTUM/IMG_0021_6.tif new file mode 100644 index 00000000..7daa7bd6 --- /dev/null +++ b/data/ALTUM/IMG_0021_6.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fe2dc5ad9548d15e39eadcdfe1da76e5ecba9ecfe130a0796e78cf7bcc903fb +size 45306 diff --git a/data/ALTUM0SET/000/IMG_0000_1.tif b/data/ALTUM0SET/000/IMG_0000_1.tif deleted file mode 100644 index 8511c057..00000000 --- a/data/ALTUM0SET/000/IMG_0000_1.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3410bf21c9fd553937f5e0cd87e57fe7c145b1b7e3dbe8ccf4d4ee0063cc2d3b -size 6381820 diff --git a/data/ALTUM1SET/000/IMG_0000_1.tif b/data/ALTUM1SET/000/IMG_0000_1.tif deleted file mode 100644 index 77dbd14c..00000000 --- a/data/ALTUM1SET/000/IMG_0000_1.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9a2abd9511ec95130dc1550dae07f49fa350a26db98d8596ec245a2a5a036e17 -size 6382362 diff --git a/data/ALTUM1SET/000/IMG_0000_2.tif b/data/ALTUM1SET/000/IMG_0000_2.tif deleted file mode 100644 index af4d833c..00000000 --- a/data/ALTUM1SET/000/IMG_0000_2.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:02ae3e49b8a7c04537e6e8a73e574cbe0f1a5d2ac429b7afb06f4ffbfebaf6d9 -size 6382386 diff --git a/data/ALTUM1SET/000/IMG_0000_3.tif b/data/ALTUM1SET/000/IMG_0000_3.tif deleted file mode 100644 index c130c9a2..00000000 --- a/data/ALTUM1SET/000/IMG_0000_3.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bf09f520f8c79c5e4dd946cb62c4ae75ef729cb4874d60392aac09923734a2a4 -size 6382404 diff --git a/data/ALTUM1SET/000/IMG_0000_4.tif b/data/ALTUM1SET/000/IMG_0000_4.tif deleted file mode 100644 index 1d1fde24..00000000 --- a/data/ALTUM1SET/000/IMG_0000_4.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:deb3e1f4af5a03b8335db264cfb86640605b0cff33847545463f57a480a5a5fc -size 6382362 diff --git a/data/ALTUM1SET/000/IMG_0000_5.tif b/data/ALTUM1SET/000/IMG_0000_5.tif deleted file mode 100644 index 1dde1d6d..00000000 --- a/data/ALTUM1SET/000/IMG_0000_5.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:768cf0987000cdcd7eee684f659ef9e230fe817f14dc682ff18a82c093786315 -size 6382376 diff --git a/data/ALTUM1SET/000/IMG_0000_6.tif b/data/ALTUM1SET/000/IMG_0000_6.tif deleted file mode 100644 index 7876a307..00000000 --- a/data/ALTUM1SET/000/IMG_0000_6.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7ac6cec517accf2618fc57b9096a39d78fa7810b805ea6e75ce05ad3d6fcdeae -size 44730 diff --git a/data/ALTUM1SET/000/IMG_0008_1.tif b/data/ALTUM1SET/000/IMG_0008_1.tif deleted file mode 100644 index 0424f862..00000000 --- a/data/ALTUM1SET/000/IMG_0008_1.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3df18c9843f0dd4fcf5174a876988d72a977bc3dd9e14f7c69aa28e158fc18f8 -size 6382004 diff --git a/data/ALTUM1SET/000/IMG_0008_2.tif b/data/ALTUM1SET/000/IMG_0008_2.tif deleted file mode 100644 index 109c72ea..00000000 --- a/data/ALTUM1SET/000/IMG_0008_2.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:29a268948de0a4ca87d27d5ca4e98a8abf6dc8a58ced04f438bd5b9805f98a30 -size 6382002 diff --git a/data/ALTUM1SET/000/IMG_0008_3.tif b/data/ALTUM1SET/000/IMG_0008_3.tif deleted file mode 100644 index 79c2a06c..00000000 --- a/data/ALTUM1SET/000/IMG_0008_3.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a7da92c38d7722a06c6d301273407ce2a08a72d4d95e0f1f6372ba732dbb3f8f -size 6382034 diff --git a/data/ALTUM1SET/000/IMG_0008_4.tif b/data/ALTUM1SET/000/IMG_0008_4.tif deleted file mode 100644 index 9a66e91d..00000000 --- a/data/ALTUM1SET/000/IMG_0008_4.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8bb77e029f50a7600f4ff719ab6f8b034a55e19dca617780ddf753d323f16ac8 -size 6381992 diff --git a/data/ALTUM1SET/000/IMG_0008_5.tif b/data/ALTUM1SET/000/IMG_0008_5.tif deleted file mode 100644 index 8714b94c..00000000 --- a/data/ALTUM1SET/000/IMG_0008_5.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ff8f028694602573268c97e025203596c5feca5435f982826bba1abf1e08b16c -size 6382006 diff --git a/data/ALTUM1SET/000/IMG_0008_6.tif b/data/ALTUM1SET/000/IMG_0008_6.tif deleted file mode 100644 index a2ee8687..00000000 --- a/data/ALTUM1SET/000/IMG_0008_6.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cf26fac7549bd740b68ad43c19eb8a65be0360e4391e6762635cbbec7752266b -size 44730 diff --git a/data/ALTUM1SET/000/IMG_0245_1.tif b/data/ALTUM1SET/000/IMG_0245_1.tif deleted 
file mode 100644 index 3697e4e8..00000000 --- a/data/ALTUM1SET/000/IMG_0245_1.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:35aedd3a3700eedf6ffd5b6c4cd0c6469d4c1ca4801041ca79d9765d87123616 -size 6381990 diff --git a/data/ALTUM1SET/000/IMG_0245_2.tif b/data/ALTUM1SET/000/IMG_0245_2.tif deleted file mode 100644 index 23a12050..00000000 --- a/data/ALTUM1SET/000/IMG_0245_2.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:51cab65288a589b383050a829fba01e027ae66bb3aecbc6926b17882a8601c37 -size 6382002 diff --git a/data/ALTUM1SET/000/IMG_0245_3.tif b/data/ALTUM1SET/000/IMG_0245_3.tif deleted file mode 100644 index 89a8e2e5..00000000 --- a/data/ALTUM1SET/000/IMG_0245_3.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d34872e318624b168882b3180b69f16bb0b625fbf89176f0a49d1255a29de21d -size 6382032 diff --git a/data/ALTUM1SET/000/IMG_0245_4.tif b/data/ALTUM1SET/000/IMG_0245_4.tif deleted file mode 100644 index 51a46e6d..00000000 --- a/data/ALTUM1SET/000/IMG_0245_4.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5cefd019c1f034e0f812f4b933a9f8973ec2df66e093e3c8370d53231479bac8 -size 6381992 diff --git a/data/ALTUM1SET/000/IMG_0245_5.tif b/data/ALTUM1SET/000/IMG_0245_5.tif deleted file mode 100644 index a60d81a4..00000000 --- a/data/ALTUM1SET/000/IMG_0245_5.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:55429bfe492a88a2e9aec9275b422e3f27cf9d9c34d6562c71c57cdca3973099 -size 6382006 diff --git a/data/ALTUM1SET/000/IMG_0245_6.tif b/data/ALTUM1SET/000/IMG_0245_6.tif deleted file mode 100644 index 735be2fb..00000000 --- a/data/ALTUM1SET/000/IMG_0245_6.tif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a85d954314f9fa8dd6dc9175220f030d8f6ffc1c3d4fcf322b79a7fecd6b85f8 -size 44728 diff --git a/data/REDEDGE-MX-DUAL/IMG_0001_1.tif b/data/REDEDGE-MX-DUAL/IMG_0001_1.tif new file mode 100644 index 00000000..6051de5c --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0001_1.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d55bb5a42c6435f9654527f016e4fd0ad71455a39965b5de5b78706f758e9e0 +size 1851732 diff --git a/data/REDEDGE-MX-DUAL/IMG_0001_10.tif b/data/REDEDGE-MX-DUAL/IMG_0001_10.tif new file mode 100644 index 00000000..70ec2fce --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0001_10.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1db3d28c890b6c81e87c2992361502489ac1be2589caa5c41396ed33683ed2d8 +size 1851988 diff --git a/data/REDEDGE-MX-DUAL/IMG_0001_2.tif b/data/REDEDGE-MX-DUAL/IMG_0001_2.tif new file mode 100644 index 00000000..cbbf56dc --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0001_2.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca9ec643a2919392a5fac5a3e7fa74895f644a61e32b1aa1878724d68d875530 +size 1852100 diff --git a/data/REDEDGE-MX-DUAL/IMG_0001_3.tif b/data/REDEDGE-MX-DUAL/IMG_0001_3.tif new file mode 100644 index 00000000..1bdcc0c1 --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0001_3.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:999093d38af3cc49fab1d00ada16aaa00b3daefaa12d20a0d15f27a930e9ed16 +size 1852100 diff --git a/data/REDEDGE-MX-DUAL/IMG_0001_4.tif b/data/REDEDGE-MX-DUAL/IMG_0001_4.tif new file mode 100644 index 00000000..202c566e --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0001_4.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:de28082a709bac306c681c3e050372433047d7ae3b6f22440946f3d14d1699fe +size 1852098 diff --git a/data/REDEDGE-MX-DUAL/IMG_0001_5.tif b/data/REDEDGE-MX-DUAL/IMG_0001_5.tif new file mode 100644 index 00000000..9b40b87a --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0001_5.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1203eed0e10eca7e56edf16ae7a3c9bd0ce28290aa0d115d0e1c6ef94f57ac1 +size 1852112 diff --git a/data/REDEDGE-MX-DUAL/IMG_0001_6.tif b/data/REDEDGE-MX-DUAL/IMG_0001_6.tif new file mode 100644 index 00000000..4f21b032 --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0001_6.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6aa785f7da34b3fa8bf11abb8215d21cf19fc506cb70ee6e3b46c44fd2defe2 +size 1851960 diff --git a/data/REDEDGE-MX-DUAL/IMG_0001_7.tif b/data/REDEDGE-MX-DUAL/IMG_0001_7.tif new file mode 100644 index 00000000..ca504aa6 --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0001_7.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc7b4c55cfed4fae67fc45095efb68be9760fc9556910dd011f1b0e411f296be +size 1851982 diff --git a/data/REDEDGE-MX-DUAL/IMG_0001_8.tif b/data/REDEDGE-MX-DUAL/IMG_0001_8.tif new file mode 100644 index 00000000..dfdfeaba --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0001_8.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:572ab297292801c94ed96d59c054ae92c073d64ed3de76bada8bba3819e7474d +size 1852000 diff --git a/data/REDEDGE-MX-DUAL/IMG_0001_9.tif b/data/REDEDGE-MX-DUAL/IMG_0001_9.tif new file mode 100644 index 00000000..8262f70a --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0001_9.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6b1e87456387ea5e396fab33c066154ddd32f4b3e5545ae42cba2a033c824dc +size 1851604 diff --git a/data/REDEDGE-MX-DUAL/IMG_0007_1.tif b/data/REDEDGE-MX-DUAL/IMG_0007_1.tif new file mode 100644 index 00000000..d91c76be --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0007_1.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7729ee4c7b5fe1ee39802534ce34b744e6e485a0975bad934eacda116a4a040 +size 1851734 diff --git a/data/REDEDGE-MX-DUAL/IMG_0007_10.tif b/data/REDEDGE-MX-DUAL/IMG_0007_10.tif new file mode 100644 index 00000000..bd1590a0 --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0007_10.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:567f5ce79b72242a3f66fa4814b1f1dceaba6b8b7f4bcd1ce7995c7d64fe9c0b +size 1851620 diff --git a/data/REDEDGE-MX-DUAL/IMG_0007_2.tif b/data/REDEDGE-MX-DUAL/IMG_0007_2.tif new file mode 100644 index 00000000..144783a4 --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0007_2.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34c9c07ded0d5ef7a6c8d2254726f0316bb70424aa69bb8133d5821cf6a9c8cb +size 1851736 diff --git a/data/REDEDGE-MX-DUAL/IMG_0007_3.tif b/data/REDEDGE-MX-DUAL/IMG_0007_3.tif new file mode 100644 index 00000000..4ffda4c1 --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0007_3.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a65d5a25e379833e1e09ff3953d1b7194f53653d0e96f4adaa6daef048d30fa +size 1851738 diff --git a/data/REDEDGE-MX-DUAL/IMG_0007_4.tif b/data/REDEDGE-MX-DUAL/IMG_0007_4.tif new file mode 100644 index 00000000..c75a06eb --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0007_4.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a33bbaf9e42289e9522ae3c58d0028fd45132d25591ce6193202d7ec65ca548 +size 1851732 diff --git a/data/REDEDGE-MX-DUAL/IMG_0007_5.tif 
b/data/REDEDGE-MX-DUAL/IMG_0007_5.tif new file mode 100644 index 00000000..404f13f4 --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0007_5.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3bd64bb63e457ca1031081c2c31a7db8691e85ab176cfaac48f574ff3ce762d +size 1851746 diff --git a/data/REDEDGE-MX-DUAL/IMG_0007_6.tif b/data/REDEDGE-MX-DUAL/IMG_0007_6.tif new file mode 100644 index 00000000..de9af805 --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0007_6.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53f063db3c86324b4e5a3d83b6c006b2cb76ccad68676d8807acc3b521516399 +size 1851604 diff --git a/data/REDEDGE-MX-DUAL/IMG_0007_7.tif b/data/REDEDGE-MX-DUAL/IMG_0007_7.tif new file mode 100644 index 00000000..425a7dcc --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0007_7.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6a20328e24cbc07f6ace785f5cf1801b6c7d0e5263b233d6ca0db2e605a001f +size 1851610 diff --git a/data/REDEDGE-MX-DUAL/IMG_0007_8.tif b/data/REDEDGE-MX-DUAL/IMG_0007_8.tif new file mode 100644 index 00000000..99b4b6d3 --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0007_8.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5433041b64c44e69c6e41d3f245939dd8d74c42a16cbca11bb2a24d1c3687937 +size 1851636 diff --git a/data/REDEDGE-MX-DUAL/IMG_0007_9.tif b/data/REDEDGE-MX-DUAL/IMG_0007_9.tif new file mode 100644 index 00000000..d257b341 --- /dev/null +++ b/data/REDEDGE-MX-DUAL/IMG_0007_9.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26baaaa1a4974a69eb01e51010706ff19ca137b99d40f25488b60e4fc9110a32 +size 1851610 diff --git a/data/REDEDGE-MX/IMG_0001_1.tif b/data/REDEDGE-MX/IMG_0001_1.tif new file mode 100644 index 00000000..ce5bdc96 --- /dev/null +++ b/data/REDEDGE-MX/IMG_0001_1.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c480152b76d8e8d631521e2f3414b48e6e2cc1bd10997b00c163bb8aae9de73 +size 2466316 diff --git a/data/REDEDGE-MX/IMG_0001_2.tif b/data/REDEDGE-MX/IMG_0001_2.tif new file mode 100644 index 00000000..4001e1c8 --- /dev/null +++ b/data/REDEDGE-MX/IMG_0001_2.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f232cc3f1775ec9afeba4d3b7000e543d091c7edbbec4b1842d1361a14d89ae4 +size 2466304 diff --git a/data/REDEDGE-MX/IMG_0001_3.tif b/data/REDEDGE-MX/IMG_0001_3.tif new file mode 100644 index 00000000..8d12190b --- /dev/null +++ b/data/REDEDGE-MX/IMG_0001_3.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:911e3d06f3534910f1b2c2c07a71d7984d4d506f8cab8725d3f2f9edc7dead27 +size 2466298 diff --git a/data/REDEDGE-MX/IMG_0001_4.tif b/data/REDEDGE-MX/IMG_0001_4.tif new file mode 100644 index 00000000..175400b2 --- /dev/null +++ b/data/REDEDGE-MX/IMG_0001_4.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4aa4bc51637f4279bd560135b4ceb09a14131fbb88b0585481cae4627e0644a3 +size 2466334 diff --git a/data/REDEDGE-MX/IMG_0001_5.tif b/data/REDEDGE-MX/IMG_0001_5.tif new file mode 100644 index 00000000..eb3e2b2e --- /dev/null +++ b/data/REDEDGE-MX/IMG_0001_5.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba9d6c6b70d62a60fe132cbf3883f4ee22142f219bc9a19ba7ca0bdbfa70f802 +size 2466332 diff --git a/data/REDEDGE-MX/IMG_0020_1.tif b/data/REDEDGE-MX/IMG_0020_1.tif new file mode 100644 index 00000000..e3bf29e8 --- /dev/null +++ b/data/REDEDGE-MX/IMG_0020_1.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:86a55d68cf2ac059f22a307dbc23ee486f2fb99597259d5bea7efdf8028afbeb +size 2466136 diff --git a/data/REDEDGE-MX/IMG_0020_2.tif b/data/REDEDGE-MX/IMG_0020_2.tif new file mode 100644 index 00000000..b188bf2d --- /dev/null +++ b/data/REDEDGE-MX/IMG_0020_2.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cef96e8203518f89ca608162889aedb48dbec24371f30b3a33aa777260041fd +size 2466116 diff --git a/data/REDEDGE-MX/IMG_0020_3.tif b/data/REDEDGE-MX/IMG_0020_3.tif new file mode 100644 index 00000000..069228c2 --- /dev/null +++ b/data/REDEDGE-MX/IMG_0020_3.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ec6895c68b04e00269c05d2c1e476c4c3b32d6be077a0d40ab576ff48728e07 +size 2466104 diff --git a/data/REDEDGE-MX/IMG_0020_4.tif b/data/REDEDGE-MX/IMG_0020_4.tif new file mode 100644 index 00000000..9780afff --- /dev/null +++ b/data/REDEDGE-MX/IMG_0020_4.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:383561f275b567c4b594bbf80c322a2130a15f67a66783c42b0e8e21faf7f1f7 +size 2466144 diff --git a/data/REDEDGE-MX/IMG_0020_5.tif b/data/REDEDGE-MX/IMG_0020_5.tif new file mode 100644 index 00000000..3a48d7b2 --- /dev/null +++ b/data/REDEDGE-MX/IMG_0020_5.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45ecc31b90c31a43f9c86d43bae2c5bf49f1897c37850921571045408984f29a +size 2466138 diff --git a/data/REDEDGE-P/IMG_0000_1.tif b/data/REDEDGE-P/IMG_0000_1.tif new file mode 100644 index 00000000..f3762c05 --- /dev/null +++ b/data/REDEDGE-P/IMG_0000_1.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:795ad0878829a5f3046f5180f99fb566c745922db88c2e54ad8ed365d0572266 +size 2385510 diff --git a/data/REDEDGE-P/IMG_0000_2.tif b/data/REDEDGE-P/IMG_0000_2.tif new file mode 100644 index 00000000..a6a42293 --- /dev/null +++ b/data/REDEDGE-P/IMG_0000_2.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4b878d8b4cab56243a706153220fc96d8a2d71d2bf1b289648c62cb14a5081c +size 2385490 diff --git a/data/REDEDGE-P/IMG_0000_3.tif b/data/REDEDGE-P/IMG_0000_3.tif new file mode 100644 index 00000000..6af13325 --- /dev/null +++ b/data/REDEDGE-P/IMG_0000_3.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4132a816146166c16b8504385c4b7c7f43cc980a133f1da4ea8d7b12218bf01 +size 2385524 diff --git a/data/REDEDGE-P/IMG_0000_4.tif b/data/REDEDGE-P/IMG_0000_4.tif new file mode 100644 index 00000000..63b914ff --- /dev/null +++ b/data/REDEDGE-P/IMG_0000_4.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c9cee06c72ac28b0868e662e979f570b2058a2cef1d5e5a99d614000abbb6f5 +size 2385538 diff --git a/data/REDEDGE-P/IMG_0000_5.tif b/data/REDEDGE-P/IMG_0000_5.tif new file mode 100644 index 00000000..ec30b62e --- /dev/null +++ b/data/REDEDGE-P/IMG_0000_5.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba9529e8af6aeeb84512bf5a3a642900b418f47aa9b6bad7cbe94f588bba3405 +size 2385536 diff --git a/data/REDEDGE-P/IMG_0000_6.tif b/data/REDEDGE-P/IMG_0000_6.tif new file mode 100644 index 00000000..4ba1e9b0 --- /dev/null +++ b/data/REDEDGE-P/IMG_0000_6.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd1ff395970aac9313c19620ce5d14cd058c5739b7b9b0df7dd73e897707addf +size 7608824 diff --git a/data/REDEDGE-P/IMG_0011_1.tif b/data/REDEDGE-P/IMG_0011_1.tif new file mode 100644 index 00000000..e95a6b8f --- /dev/null +++ b/data/REDEDGE-P/IMG_0011_1.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:727ad2fbb11872379fb105f43482da43c019142af2aed164678ddfa1097f6719 +size 2385174
diff --git a/data/REDEDGE-P/IMG_0011_2.tif b/data/REDEDGE-P/IMG_0011_2.tif new file mode 100644 index 00000000..4033e35a --- /dev/null +++ b/data/REDEDGE-P/IMG_0011_2.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ffc41e15c9424df477846417e1e2f85bf2131803ebbf912fd8b56b20fd89a61 +size 2385130
diff --git a/data/REDEDGE-P/IMG_0011_3.tif b/data/REDEDGE-P/IMG_0011_3.tif new file mode 100644 index 00000000..90e2f801 --- /dev/null +++ b/data/REDEDGE-P/IMG_0011_3.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:940d629af610de608db820f2cfd60df27d138f531d3ddcf1c49355e13a3f062a +size 2385174
diff --git a/data/REDEDGE-P/IMG_0011_4.tif b/data/REDEDGE-P/IMG_0011_4.tif new file mode 100644 index 00000000..a96e67c2 --- /dev/null +++ b/data/REDEDGE-P/IMG_0011_4.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8708d59e37616c208757431ce0dfba9a6381588ce81a8c77c4713fe392d62ebd +size 2385178
diff --git a/data/REDEDGE-P/IMG_0011_5.tif b/data/REDEDGE-P/IMG_0011_5.tif new file mode 100644 index 00000000..c8f05b30 --- /dev/null +++ b/data/REDEDGE-P/IMG_0011_5.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c683565fd25373a60ffbd82b7620f0ebb259e8e9d40c941cc256b73ac113685d +size 2385176
diff --git a/data/REDEDGE-P/IMG_0011_6.tif b/data/REDEDGE-P/IMG_0011_6.tif new file mode 100644 index 00000000..4067cfbf --- /dev/null +++ b/data/REDEDGE-P/IMG_0011_6.tif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:689dc61c8198d68e9124fd385f0b8bec68fe369d3cb077230a39c66f5f5df837 +size 7608474
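The sample imagery above is stored with Git LFS, so each .tif committed here is a three-line pointer file (version, oid, size) rather than the image itself; `git lfs pull` fetches the real TIFFs. A minimal sketch of reading one of these pointers, purely for illustration (the library itself never parses them; `read_lfs_pointer` is a hypothetical helper):

# Read a Git LFS pointer file and report the object id and payload size.
from pathlib import Path

def read_lfs_pointer(path):
    fields = dict(line.split(' ', 1) for line in Path(path).read_text().splitlines())
    return fields['oid'].strip(), int(fields['size'])

oid, size = read_lfs_pointer('data/REDEDGE-P/IMG_0011_6.tif')
print(oid, size)  # prints sha256:689dc6... 7608474, matching the pointer above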
diff --git a/docs/Alignment v2.html b/docs/Alignment v2.html new file mode 100644 index 00000000..4caa9899 --- /dev/null +++ b/docs/Alignment v2.html @@ -0,0 +1,16037 @@ [16037 lines of exported notebook HTML omitted; title: Alignment v2]
diff --git a/docs/Alignment-10Band.html b/docs/Alignment-10Band.html new file mode 100644 index 00000000..727a7a45 --- /dev/null +++ b/docs/Alignment-10Band.html @@ -0,0 +1,15702 @@ [15702 lines of exported notebook HTML omitted; title: Alignment-10Band]
diff --git a/docs/Alignment-RigRelatives.html b/docs/Alignment-RigRelatives.html new file mode 100644 index 00000000..28d0d21a --- /dev/null +++ b/docs/Alignment-RigRelatives.html @@ -0,0 +1,15397 @@ [15397 lines of exported notebook HTML omitted; title: Alignment-RigRelatives]
diff --git a/docs/Alignment.html b/docs/Alignment.html new file mode 100644 index 00000000..1de3d1c8 --- /dev/null +++ b/docs/Alignment.html @@ -0,0 +1,15551 @@ [15551 lines of exported notebook HTML omitted; title: Alignment]
diff --git a/docs/Batch Processing v2.html b/docs/Batch Processing v2.html new file mode 100644 index 00000000..a0577a64 --- /dev/null +++ b/docs/Batch Processing v2.html @@ -0,0 +1,15707 @@ [15707 lines of exported notebook HTML omitted; title: Batch Processing v2]
diff --git a/docs/Batch Processing.html b/docs/Batch Processing.html new file mode 100644 index 00000000..9bd8a875 --- /dev/null +++ b/docs/Batch Processing.html @@ -0,0 +1,15747 @@ [15747 lines of exported notebook HTML omitted; title: Batch Processing]
diff --git a/docs/Captures.html b/docs/Captures.html new file mode 100644 index 00000000..070d6fcf --- /dev/null +++ b/docs/Captures.html @@ -0,0 +1,14760 @@ [14760 lines of exported notebook HTML omitted; title: Captures]
diff --git a/docs/ImageSets.html b/docs/ImageSets.html new file mode 100644 index 00000000..02c74b0f --- /dev/null +++ b/docs/ImageSets.html @@ -0,0 +1,15632 @@ [15632 lines of exported notebook HTML omitted; title: ImageSets]
diff --git a/docs/Images.html b/docs/Images.html new file mode 100644 index 00000000..01c51198 --- /dev/null +++ b/docs/Images.html @@ -0,0 +1,14746 @@ [14746 lines of exported notebook HTML omitted; title: Images]
diff --git a/docs/MicaSense Image Processing Setup.html b/docs/MicaSense Image Processing Setup.html new file mode 100644 index 00000000..b4c47a70 --- /dev/null +++ b/docs/MicaSense Image Processing Setup.html @@ -0,0 +1,14901 @@ [14901 lines of exported notebook HTML omitted; title: MicaSense Image Processing Setup]
diff --git a/docs/MicaSense Image Processing Tutorial 1.html b/docs/MicaSense Image Processing Tutorial 1.html new file mode 100644 index 00000000..eec2cd4c --- /dev/null +++ b/docs/MicaSense Image Processing Tutorial 1.html @@ -0,0 +1,15372 @@ [15372 lines of exported notebook HTML omitted; title: MicaSense Image Processing Tutorial 1]
diff --git a/docs/MicaSense Image Processing Tutorial 2.html b/docs/MicaSense Image Processing Tutorial 2.html new file mode 100644 index 00000000..697a6cad --- /dev/null +++ b/docs/MicaSense Image Processing Tutorial 2.html @@ -0,0 +1,15115 @@ [15115 lines of exported notebook HTML omitted; title: MicaSense Image Processing Tutorial 2]
diff --git a/docs/MicaSense Image Processing Tutorial 3.html b/docs/MicaSense Image Processing Tutorial 3.html new file mode 100644 index 00000000..7020afc1 --- /dev/null +++ b/docs/MicaSense Image Processing Tutorial 3.html @@ -0,0 +1,14927 @@ [14927 lines of exported notebook HTML omitted; title: MicaSense Image Processing Tutorial 3]
diff --git a/docs/Panels.html b/docs/Panels.html new file mode 100644 index 00000000..df8e8dca --- /dev/null +++ b/docs/Panels.html @@ -0,0 +1,14797 @@ [14797 lines of exported notebook HTML omitted; title: Panels]
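Each docs/*.html page is a static export of the notebook of the same name. They are presumably regenerated with nbconvert whenever the notebooks change; a sketch of that step (the exact invocation is an assumption, not part of this patch):

# Re-export the updated notebooks to docs/ as standalone HTML.
import subprocess

for nb in ('Alignment v2.ipynb', 'Batch Processing v2.ipynb'):
    subprocess.run(['jupyter', 'nbconvert', '--to', 'html', '--output-dir', 'docs', nb], check=True)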
diff --git a/index.ipynb b/index.ipynb index c1ed80ce..75fbf6b5 100644 --- a/index.ipynb +++ b/index.ipynb @@ -6,7 +6,7 @@ "source": [ "# MicaSense Image Processing Tutorials \n", "\n", - "This repository includes tutorials and examples for processing MicaSense RedEdge and MicaSense Altum images into usable information using the python programming language. The intended audience is researchers and engineers with some software development experience that want to do their own image processing. While a number of commercial tools fully support processing RedEdge data into reflectance maps, there are a number of reasons to process your own data, including controlling the entire radiometric workflow (for academic or publication reasons), pre-processing images to be used in a non-radiometric photogrammetry suite, or processing single sets of 5 images without building a larger map.\n", + "This repository includes tutorials and examples for processing MicaSense RedEdge-M/MX, Altum, RedEdge-P, and Altum-PT images into usable information using the Python programming language. The intended audience is researchers and engineers with some software development experience who want to do their own image processing. While a number of commercial tools fully support processing MicaSense data into reflectance maps, there are several reasons to process your own data, including controlling the entire radiometric workflow (for academic or publication reasons), pre-processing images to be used in a non-radiometric photogrammetry suite, or processing single sets of images (captures) without building a larger map.\n", "\n", "To get started, browse the examples below, and then head over to the [git repository](https://github.com/micasense/imageprocessing) to check out the code.\n", "\n", @@ -35,6 +35,8 @@ "* [Capture Alignment and Analysis Examples](Alignment.html)\n", "* [Capture Alignment Using Rig Relatives](Alignment-RigRelatives.html)\n", "* [Batch processing into stacked TIFFs](Batch%20Processing.html)\n", + "* [Capture Alignment and Analysis Examples (Updated 2023 for Panchromatic cameras)](Alignment%20v2.html)\n", + "* [Batch processing into stacked TIFFs (2023 version)](Batch%20Processing%20v2.html)\n", "\n", "### 10-Band (Dual Camera) Examples\n", "* [10-band Capture Alignment](Alignment-10Band.html)\n", @@ -52,7 +54,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -66,7 +68,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.7.12" } }, "nbformat": 4, diff --git a/micasense/capture.py b/micasense/capture.py index aa6254c0..59357fe0 100644 --- a/micasense/capture.py +++ b/micasense/capture.py @@ -3,8 +3,9 @@ """ MicaSense Capture Class - A Capture is a set of Images taken by one camera which share the same unique capture identifier (capture_id). - Generally these images will be found in the same folder and also share the same filename prefix, such + A Capture is a set of Images taken by one camera which share + the same unique capture identifier (capture_id). Generally these images will be + found in the same folder and also share the same filename prefix, such as IMG_0000_*.tif, but this is not required. Copyright 2017 MicaSense, Inc. @@ -26,13 +27,18 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ - +import logging import math import os +from collections import namedtuple import cv2 import imageio import numpy as np +from skimage.feature import match_descriptors, SIFT +from skimage.measure import ransac +from skimage.transform import estimate_transform, FundamentalMatrixTransform, ProjectiveTransform, \ + resize import micasense.image as image import micasense.imageutils as imageutils @@ -46,6 +52,7 @@ class Capture(object): found in the same folder and also share the same filename prefix, such as IMG_0000_*.tif, but this is not required. """ + def __init__(self, images, panel_corners=None): """ :param images: str or List of str system file paths.
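Before the class internals that follow, a hedged sketch of the Capture entry points in action, using the RedEdge-P sample data added by this patch (the glob pattern and output are illustrative only):

# Build a Capture from the six RedEdge-P band files of one trigger event.
import glob
import micasense.capture as capture

image_names = sorted(glob.glob('data/REDEDGE-P/IMG_0000_*.tif'))
cap = capture.Capture.from_filelist(image_names)
print(cap.band_names())   # RedEdge-P captures include a 'Panchro' band
print(cap.camera_model)   # e.g. 'RedEdge-P', newly exposed on the Capture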
@@ -73,14 +80,21 @@ def __init__(self, images, panel_corners=None): if len(set(capture_ids)) != 1: raise RuntimeError("Images provided must have the same capture_id.") self.uuid = self.images[0].capture_id + self.flightid = self.images[0].flight_id + self.camera_model = self.images[0].camera_model + self.camera_serial = self.images[0].camera_serial + self.camera_serials = set([img.camera_serial for img in self.images]) + self.bits_per_pixel = self.images[0].bits_per_pixel self.panels = None self.detected_panel_count = 0 if panel_corners is None: - self.panel_corners = [None] * len(self.eo_indices()) + self.panelCorners = [None] * len(self.eo_indices()) else: - self.panel_corners = panel_corners + self.panelCorners = panel_corners self.__aligned_capture = None + self.__aligned_radiometric_pan_sharpened_capture = None + self.__sift_warp_matrices = None def set_panel_corners(self, panel_corners): """ @@ -119,16 +133,16 @@ def append_file(self, file_name): self.append_image(image.Image(file_name)) @classmethod - def from_file(cls, file_name): + def from_file(cls, file_name, allow_uncalibrated=False): """ Create Capture instance from file path. :param file_name: str system file path :return: Capture object. """ - return cls(image.Image(file_name)) + return cls(image.Image(file_name, allow_uncalibrated=allow_uncalibrated)) @classmethod - def from_filelist(cls, file_list): + def from_filelist(cls, file_list, allow_uncalibrated=False): """ Create Capture instance from List of file paths. :param file_list: List of str system file paths. @@ -136,10 +150,10 @@ def from_filelist(cls, file_list): """ if len(file_list) == 0: raise IOError("No files provided. Check your file paths.") - for file in file_list: - if not os.path.isfile(file): - raise IOError(f"All files in file list must be a file. The following file is not:\n{file}") - images = [image.Image(file) for file in file_list] + for fle in file_list: + if not os.path.isfile(fle): + raise IOError(f"All files in file list must be a file. The following file is not:\n{fle}") + images = [image.Image(fle, allow_uncalibrated=allow_uncalibrated) for fle in file_list] return cls(images) def __get_reference_index(self): @@ -149,7 +163,7 @@ def __get_reference_index(self): """ return np.argmin((np.array([i.rig_xy_offset_in_px() for i in self.images]) ** 2).sum(1)) - def __plot(self, images, num_cols=2, plot_type=None, color_bar=True, fig_size=(14, 14)): + def __plot(self, imgs, num_cols=2, plot_type=None, colorbar=True, figsize=(14, 14)): """ Plot the Images from the Capture. :param images: List of Image objects @@ -159,7 +173,7 @@ def __plot(self, images, num_cols=2, plot_type=None, color_bar=True, fig_size=(1 :param fig_size: Tuple size of the figure :return: plotutils result. matplotlib Figure and Axis in both cases. 
""" - if plot_type == None: + if plot_type is None: plot_type = '' else: titles = [ @@ -169,10 +183,10 @@ def __plot(self, images, num_cols=2, plot_type=None, color_bar=True, fig_size=(1 in self.images ] num_rows = int(math.ceil(float(len(self.images)) / float(num_cols))) - if color_bar: - return plotutils.subplotwithcolorbar(num_rows, num_cols, images, titles, fig_size) + if colorbar: + return plotutils.subplotwithcolorbar(num_rows, num_cols, imgs, titles, figsize) else: - return plotutils.subplot(num_rows, num_cols, images, titles, fig_size) + return plotutils.subplot(num_rows, num_cols, imgs, titles, figsize) def __lt__(self, other): return self.utc_time() < other.utc_time() @@ -185,7 +199,6 @@ def __eq__(self, other): def location(self): """(lat, lon, alt) tuple of WGS-84 location units are radians, meters msl""" - # TODO: These units are "signed decimal degrees" per metadata.py comments? return self.images[0].location def utc_time(self): @@ -201,6 +214,7 @@ def clear_image_data(self): for img in self.images: img.clear_image_data() self.__aligned_capture = None + self.__aligned_radiometric_pan_sharpened_capture = None def center_wavelengths(self): """Returns a list of the image center wavelengths in nanometers.""" @@ -238,6 +252,13 @@ def dls_pose(self): """Returns (yaw, pitch, roll) tuples in radians of the earth-fixed DLS pose.""" return self.images[0].dls_yaw, self.images[0].dls_pitch, self.images[0].dls_roll + def focal_length(self): + """Returns focal length of multispectral bands or of panchromatic band if applicable.""" + if 'Panchro' in self.eo_band_names(): + return self.images[self.eo_band_names().index('Panchro')].focal_length + else: + return self.images[0].focal_length + def plot_raw(self): """Plot raw images as the data came from the camera.""" self.__plot([img.raw() for img in self.images], @@ -319,6 +340,9 @@ def eo_indices(self): """Returns a list of the indexes of the EO Images in the Capture.""" return [index for index, img in enumerate(self.images) if img.band_name != 'LWIR'] + def eo_band_names(self): + return [band for band in self.band_names() if band != 'LWIR'] + def lw_indices(self): """Returns a list of the indexes of the longwave infrared Images in the Capture.""" return [index for index, img in enumerate(self.images) if img.band_name == 'LWIR'] @@ -388,7 +412,7 @@ def panel_irradiance(self, reflectances=None): irradiance_list.append(mean_irr) return irradiance_list - def panel_reflectance(self, panel_refl_by_band=None): # FIXME: panel_refl_by_band parameter isn't used? 
+ def panel_reflectance(self): """Return a list of mean panel reflectance values.""" if self.panels is None: if not self.panels_in_all_expected_images(): @@ -415,14 +439,14 @@ def detect_panels(self): from micasense.panel import Panel if self.panels is not None and self.detected_panel_count == len(self.images): return self.detected_panel_count - self.panels = [Panel(img, panelCorners=pc) for img, pc in zip(self.images, self.panel_corners)] + self.panels = [Panel(img, panel_corners=pc) for img, pc in zip(self.images, self.panelCorners)] self.detected_panel_count = 0 for p in self.panels: if p.panel_detected(): self.detected_panel_count += 1 - # if panel_corners are defined by hand - if self.panel_corners is not None and all(corner is not None for corner in self.panel_corners): - self.detected_panel_count = len(self.panel_corners) + # if panelCorners are defined by hand + if self.panelCorners is not None and all(corner is not None for corner in self.panelCorners): + self.detected_panel_count = len(self.panelCorners) return self.detected_panel_count def plot_panels(self): @@ -433,7 +457,7 @@ self.__plot( [p.plot_image() for p in self.panels], plot_type='Panels', - color_bar=False + colorbar=False ) def set_external_rig_relatives(self, external_rig_relatives): @@ -468,7 +492,7 @@ warp_matrices = [np.linalg.inv(im.get_homography(ref)) for im in self.images] return [w / w[2, 2] for w in warp_matrices] - def create_aligned_capture(self, irradiance_list=None, warp_matrices=None, normalize=False, img_type=None, + def create_aligned_capture(self, irradiance_list=None, warp_matrices=None, img_type=None, motion_type=cv2.MOTION_HOMOGRAPHY): """ Creates aligned Capture. Computes undistorted radiance or reflectance images if necessary.
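Taken together, the panel and alignment methods above support the classic reflectance workflow. A hedged end-to-end sketch for a non-panchromatic camera, assuming IMG_0001 is the panel capture and IMG_0020 a flight capture from the sample data (paths illustrative):

# Panel capture -> irradiance -> aligned reflectance stack for a RedEdge-MX.
import glob
import micasense.capture as capture

panel_cap = capture.Capture.from_filelist(sorted(glob.glob('data/REDEDGE-MX/IMG_0001_*.tif')))
flight_cap = capture.Capture.from_filelist(sorted(glob.glob('data/REDEDGE-MX/IMG_0020_*.tif')))

if panel_cap.detect_panels() == len(panel_cap.images):
    irradiance = panel_cap.panel_irradiance()   # mean panel-region irradiance per band
    flight_cap.create_aligned_capture(irradiance_list=irradiance)
    flight_cap.save_capture_as_stack('IMG_0020_stack.tif')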
@@ -490,12 +514,25 @@ def create_aligned_capture(self, irradiance_list=None, warp_matrices=None, norma img_type = 'reflectance' if warp_matrices is None: warp_matrices = self.get_warp_matrices() - cropped_dimensions, _ = imageutils.find_crop_bounds(self, warp_matrices, warp_mode=motion_type) + if self.camera_model in ('RedEdge-P', 'Altum-PT'): + match_index = 5 + reference_band = 5 + logging.warning( + "For RedEdge-P or Altum-PT, you should use SIFT_align_capture instead of create_aligned_capture") + # for RedEdge-MX Dual Camera System + elif len(self.eo_band_names()) == 10: + match_index = 4 + reference_band = 0 + else: + match_index = 1 + reference_band = 0 + cropped_dimensions, _ = imageutils.find_crop_bounds(self, warp_matrices, warp_mode=motion_type, + reference_band=reference_band) self.__aligned_capture = imageutils.aligned_capture(self, warp_matrices, motion_type, cropped_dimensions, - None, + match_index, img_type=img_type) return self.__aligned_capture @@ -508,7 +545,23 @@ def aligned_shape(self): raise RuntimeError("Call Capture.create_aligned_capture() prior to saving as stack.") return self.__aligned_capture.shape - def save_capture_as_stack(self, out_file_name, sort_by_wavelength=False, photometric='MINISBLACK'): + def radiometric_pan_sharpened_aligned_capture(self, warp_matrices=None, irradiance_list=None, img_type: str = ''): + if irradiance_list is None and self.dls_irradiance() is None: + self.compute_undistorted_radiance() + img_type = 'radiance' + elif img_type == 'reflectance' and irradiance_list is not None: + self.compute_undistorted_reflectance(irradiance_list) + elif irradiance_list is None: + irradiance_list = self.dls_irradiance() + [0] + self.compute_undistorted_reflectance(irradiance_list) + img_type = 'reflectance' + self.__aligned_radiometric_pan_sharpened_capture = imageutils.radiometric_pan_sharpen(self, + warp_matrices=warp_matrices, + irradiance_list=irradiance_list) + return self.__aligned_radiometric_pan_sharpened_capture + + def save_capture_as_stack(self, outfilename, sort_by_wavelength=False, photometric='MINISBLACK', pansharpen=False, + write_exif=True): """ Output the Images in the Capture object as GTiff image stack. 
:param out_file_name: str system file path @@ -516,44 +569,66 @@ :param photometric: str GDAL argument for GTiff color matching """ from osgeo.gdal import GetDriverByName, GDT_UInt16 - if self.__aligned_capture is None: - raise RuntimeError("Call Capture.create_aligned_capture() prior to saving as stack.") - - rows, cols, bands = self.__aligned_capture.shape + if self.__aligned_capture is None and self.__aligned_radiometric_pan_sharpened_capture is None: + raise RuntimeError( + "Call Capture.create_aligned_capture() prior to saving as stack.") + band_names = self.band_names() + if "Panchro" in band_names and pansharpen: + aligned_cap = self.__aligned_radiometric_pan_sharpened_capture[0] + if "Panchro" in band_names and not pansharpen: + aligned_cap = self.__aligned_radiometric_pan_sharpened_capture[1] + if "Panchro" not in band_names: + aligned_cap = self.__aligned_capture + + rows, cols, bands = aligned_cap.shape driver = GetDriverByName('GTiff') - out_raster = driver.Create(out_file_name, cols, rows, bands, GDT_UInt16, - options=['INTERLEAVE=BAND', 'COMPRESS=DEFLATE', f'PHOTOMETRIC={photometric}']) + outRaster = driver.Create(outfilename, cols, rows, bands, GDT_UInt16, + options=['INTERLEAVE=BAND', 'COMPRESS=DEFLATE', f'PHOTOMETRIC={photometric}']) try: - if out_raster is None: + if outRaster is None: raise IOError("could not load gdal GeoTiff driver") if sort_by_wavelength: eo_list = list(np.argsort(np.array(self.center_wavelengths())[self.eo_indices()])) + eo_bands = list(np.array(self.eo_band_names())[np.array(eo_list)]) + + else: eo_list = self.eo_indices() - - for out_band, in_band in enumerate(eo_list): - out_band = out_raster.GetRasterBand(out_band + 1) - out_data = self.__aligned_capture[:, :, in_band] - out_data[out_data < 0] = 0 - out_data[out_data > 2] = 2 # limit reflectance data to 200% to allow some specular reflections - out_band.WriteArray(out_data * 32768) # scale reflectance images so 100% = 32768 - out_band.FlushCache() - - for out_band, in_band in enumerate(self.lw_indices()): - out_band = out_raster.GetRasterBand(len(eo_list) + out_band + 1) - # scale data from float degC to back to centi-Kelvin to fit into uint16 - out_data = (self.__aligned_capture[:, :, in_band] + 273.15) * 100 - out_data[out_data < 0] = 0 - out_data[out_data > 65535] = 65535 - out_band.WriteArray(out_data) - out_band.FlushCache() + eo_bands = list(np.array(self.eo_band_names())[np.array(eo_list)]) + + eo_count = len(eo_list) + + multispec_min = np.min(np.percentile(aligned_cap[:, :, 1:eo_count].flatten(), 0.01)) + multispec_max = np.max(np.percentile(aligned_cap[:, :, 1:eo_count].flatten(), 99.99)) + + for outband_count, inband in enumerate(eo_list): + outband = outRaster.GetRasterBand(outband_count + 1) + outdata = imageutils.normalize(aligned_cap[:, :, inband], multispec_min, multispec_max) + outdata[outdata < 0] = 0 + outdata[outdata > 2] = 2 # limit reflectance data to 200% to allow some specular reflections + outdata = outdata * 32767 # scale reflectance images so 100% = 32767 + outdata[outdata < 0] = 0 + outdata[outdata > 65535] = 65535 + outband.SetDescription(eo_bands[outband_count]) + outband.WriteArray(outdata) + outband.FlushCache() + + for outband_count, inband in enumerate(self.lw_indices()): + outband = outRaster.GetRasterBand(len(eo_bands) + outband_count + 1) + outdata = (aligned_cap[:, :, + inband] + 273.15) * 100 # scale data from float degC to back to centi-Kelvin to fit into uint16 +
outband.SetDescription('LWIR') + outdata[outdata < 0] = 0 + outdata[outdata > 65535] = 65535 + outband.WriteArray(outdata) + outband.FlushCache() finally: - out_raster = None + if write_exif: + imageutils.write_exif_to_stack(self, outfilename) - def save_capture_as_rgb(self, out_file_name, gamma=1.4, downsample=1, white_balance='norm', hist_min_percent=0.5, - hist_max_percent=99.5, sharpen=True, rgb_band_indices=(2, 1, 0)): + def save_capture_as_rgb(self, outfilename, gamma=1.4, downsample=1, white_balance='norm', hist_min_percent=0.5, + hist_max_percent=99.5, sharpen=True, rgb_band_indices=None): """ Output the Images in the Capture object as RGB. :param out_file_name: str system file path @@ -566,23 +641,30 @@ def save_capture_as_rgb(self, out_file_name, gamma=1.4, downsample=1, white_bala :param sharpen: boolean :param rgb_band_indices: List band order """ - if self.__aligned_capture is None: - raise RuntimeError("Call Capture.create_aligned_capture() prior to saving as RGB.") - im_display = np.zeros( - (self.__aligned_capture.shape[0], self.__aligned_capture.shape[1], self.__aligned_capture.shape[2]), - dtype=np.float32) + if rgb_band_indices is None: + rgb_band_indices = [2, 1, 0] + if self.__aligned_capture is None and self.__aligned_radiometric_pan_sharpened_capture is None: + raise RuntimeError( + "Call Capture.create_aligned_capture or Capture.radiometric_pan_sharpened_aligned_capture prior to saving as RGB.") + if self.__aligned_radiometric_pan_sharpened_capture: + aligned_capture = self.__aligned_radiometric_pan_sharpened_capture[0] + else: + aligned_capture = self.__aligned_capture + im_display = np.zeros((aligned_capture.shape[0], aligned_capture.shape[1], aligned_capture.shape[2]), + dtype=np.float32) - # modify these percentiles to adjust contrast. 
for many images, 0.5 and 99.5 are good values - im_min = np.percentile(self.__aligned_capture[:, :, rgb_band_indices].flatten(), hist_min_percent) - im_max = np.percentile(self.__aligned_capture[:, :, rgb_band_indices].flatten(), hist_max_percent) + im_min = np.percentile(aligned_capture[:, :, rgb_band_indices].flatten(), + hist_min_percent) # modify these percentiles to adjust contrast + im_max = np.percentile(aligned_capture[:, :, rgb_band_indices].flatten(), + hist_max_percent) # for many images, 0.5 and 99.5 are good values for i in rgb_band_indices: # for rgb true color, we usually want to use the same min and max scaling across the 3 bands to # maintain the "white balance" of the calibrated image if white_balance == 'norm': - im_display[:, :, i] = imageutils.normalize(self.__aligned_capture[:, :, i], im_min, im_max) + im_display[:, :, i] = imageutils.normalize(aligned_capture[:, :, i], im_min, im_max) else: - im_display[:, :, i] = imageutils.normalize(self.__aligned_capture[:, :, i]) + im_display[:, :, i] = imageutils.normalize(aligned_capture[:, :, i]) rgb = im_display[:, :, rgb_band_indices] rgb = cv2.resize(rgb, None, fx=1 / downsample, fy=1 / downsample, interpolation=cv2.INTER_AREA) @@ -600,11 +682,11 @@ def save_capture_as_rgb(self, out_file_name, gamma=1.4, downsample=1, white_bala # Apply a gamma correction to make the render appear closer to what our eyes would see if gamma != 0: gamma_corr_rgb = unsharp_rgb ** (1.0 / gamma) - imageio.imwrite(out_file_name, (255 * gamma_corr_rgb).astype('uint8')) + imageio.imwrite(outfilename, (255 * gamma_corr_rgb).astype('uint8')) else: - imageio.imwrite(out_file_name, (255 * unsharp_rgb).astype('uint8')) + imageio.imwrite(outfilename, (255 * unsharp_rgb).astype('uint8')) - def save_thermal_over_rgb(self, out_file_name, fig_size=(30, 23), lw_index=None, hist_min_percent=0.2, + def save_thermal_over_rgb(self, outfilename, figsize=(30, 23), lw_index=None, hist_min_percent=0.2, hist_max_percent=99.8): """ Output the Images in the Capture object as thermal over RGB. @@ -614,27 +696,32 @@ def save_thermal_over_rgb(self, out_file_name, fig_size=(30, 23), lw_index=None, :param hist_min_percent: float Minimum histogram percentile. :param hist_max_percent: float Maximum histogram percentile. 
""" - if self.__aligned_capture is None: - raise RuntimeError("Call Capture.create_aligned_capture() prior to saving as RGB.") - - # by default we don't mask the thermal, since it's native resolution is much lower than the MS + if self.__aligned_capture is None and self.__aligned_radiometric_pan_sharpened_capture is None: + raise RuntimeError( + "Call Capture.create_aligned_capture or Capture.radiometric_pan_sharpened_aligned_capture prior to " + "saving as RGB.") + if self.__aligned_radiometric_pan_sharpened_capture: + aligned_capture = self.__aligned_radiometric_pan_sharpened_capture[0] + else: + aligned_capture = self.__aligned_capture + # by default, we don't mask the thermal, since it's native resolution is much lower than the MS if lw_index is None: lw_index = self.lw_indices()[0] - masked_thermal = self.__aligned_capture[:, :, lw_index] + masked_thermal = aligned_capture[:, :, lw_index] - im_display = np.zeros((self.__aligned_capture.shape[0], self.__aligned_capture.shape[1], 3), dtype=np.float32) + im_display = np.zeros((aligned_capture.shape[0], aligned_capture.shape[1], 3), dtype=np.float32) rgb_band_indices = [self.band_names_lower().index('red'), self.band_names_lower().index('green'), self.band_names_lower().index('blue')] # for rgb true color, we usually want to use the same min and max scaling across the 3 bands to # maintain the "white balance" of the calibrated image - im_min = np.percentile(self.__aligned_capture[:, :, rgb_band_indices].flatten(), + im_min = np.percentile(aligned_capture[:, :, rgb_band_indices].flatten(), hist_min_percent) # modify these percentiles to adjust contrast - im_max = np.percentile(self.__aligned_capture[:, :, rgb_band_indices].flatten(), + im_max = np.percentile(aligned_capture[:, :, rgb_band_indices].flatten(), hist_max_percent) # for many images, 0.5 and 99.5 are good values for dst_band, src_band in enumerate(rgb_band_indices): - im_display[:, :, dst_band] = imageutils.normalize(self.__aligned_capture[:, :, src_band], im_min, im_max) + im_display[:, :, dst_band] = imageutils.normalize(aligned_capture[:, :, src_band], im_min, im_max) # Compute a histogram min_display_therm = np.percentile(masked_thermal, hist_min_percent) @@ -642,7 +729,7 @@ def save_thermal_over_rgb(self, out_file_name, fig_size=(30, 23), lw_index=None, fig, _ = plotutils.plot_overlay_withcolorbar(im_display, masked_thermal, - figsize=fig_size, + figsize=figsize, title='Temperature over True Color', vmin=min_display_therm, vmax=max_display_therm, overlay_alpha=0.25, @@ -653,4 +740,197 @@ def save_thermal_over_rgb(self, out_file_name, fig_size=(30, 23), lw_index=None, contour_alpha=.4, contour_fmt="%.0fC", show=False) - fig.savefig(out_file_name) + fig.savefig(outfilename) + + def output(stack, gamma, channel_order=None): + if channel_order is None: + channel_order = [2, 1, 0] + out = stack[:, :, channel_order] + out -= out.min() + out /= out.max() + out = out ** gamma + scale = out.max() + return out / scale + + @staticmethod + def find_inliers(kp_image, kp_ref, matches, *, random_seed: int = 9): + + rng = np.random.default_rng(random_seed) + model, inliers = ransac((kp_image[matches[:, 0]], + kp_ref[matches[:, 1]]), + FundamentalMatrixTransform, min_samples=8, + residual_threshold=.25, max_trials=5000, + random_state=rng) + inlier_keypoints_image = kp_image[matches[inliers, 0]] + inlier_keypoints_ref = kp_ref[matches[inliers, 1]] + n = len(inlier_keypoints_ref) + return inlier_keypoints_image, inlier_keypoints_ref, np.array([np.arange(n), np.arange(n)]).T, model + + 
KeyPoints = namedtuple('KeyPoints', ['kpi', 'kpr', 'match', 'err']) + + @staticmethod + def filter_keypoints(kp_image, kp_ref, match, w, scale, scale_i, threshold: float = 1.0) -> KeyPoints: + err = [] + P0 = ProjectiveTransform(matrix=w) + new_kpi = [] + new_kpr = [] + new_match = [] + cnt = 0 + for m in match: + # unfortunately the coordinates between skimage and our images are reversed + a = (kp_ref[m[1]] * scale)[::-1] + b = (kp_image[m[0]] * scale_i)[::-1] + e = (np.linalg.norm(P0(a) - b)) # error in pixels + if e < threshold: + new_kpi.append(kp_image[m[0]]) + new_kpr.append(kp_ref[m[1]]) + new_match.append([cnt, cnt]) + cnt += 1 + err.append(e) + return np.array(new_kpi), np.array(new_kpr), np.array(new_match), np.array(err) + + def SIFT_align_capture(self, ref=5, min_matches=10, verbose=0, err_red=10.0, err_blue=12.0, + err_LWIR=12.): + descriptor_extractor = SIFT() + keypoints = [] + descriptors = [] + img_index = list(range(len(self.images))) + img_index.pop(ref) + ref_shape = self.images[ref].raw().shape + rest_shape = self.images[img_index[0]].raw().shape + scale = np.array(ref_shape) / np.array(rest_shape) + + # use the calibrated warp matrices to verify keypoints + warp_matrices_calibrated = self.get_warp_matrices(ref_index=ref) + + if not rest_shape == ref_shape: + ref_image_SIFT = resize(self.images[ref].undistorted( + self.images[ref].raw()), rest_shape) + ref_image_SIFT = (ref_image_SIFT / ref_image_SIFT.max() + * 65535).astype(np.uint16) + else: + # guard against an undefined reference when all bands share one resolution + ref_image_SIFT = self.images[ref].undistorted(self.images[ref].raw()) + + descriptor_extractor.detect_and_extract(ref_image_SIFT) + keypoints_ref = descriptor_extractor.keypoints + descriptor_ref = descriptor_extractor.descriptors + if verbose > 1: + print('found {:d} keypoints in the reference image'.format(len(keypoints_ref))) + match_images = [] + ratio = [] + filter_tr = [] + img_index = np.array(img_index) + # extract keypoints & descriptors + for ix in img_index: + img = self.images[ix].undistorted(self.images[ix].raw()) + if not img.shape == rest_shape: + # if we have a thermal image, upsample to match the resolution of the multispec images + img_base = self.images[ix].raw()[self.images[ix].raw() > 0].min() + img = img.astype(float) + img[img > 0] = img[img > 0] - img_base + img = resize(img, rest_shape) + img = (img / img.max() * 65535).astype(np.uint16) + ratio.append(1) + filter_tr.append(err_LWIR) + else: + ratio.append(0.8) + if ix <= 5: + filter_tr.append(err_red) + else: + # less strict filtering for the BLUE images + filter_tr.append(err_blue) + match_images.append(img) + descriptor_extractor.detect_and_extract(img) + keypoints.append(descriptor_extractor.keypoints) + descriptors.append(descriptor_extractor.descriptors) + if verbose > 1: + for k, ix in zip(keypoints, img_index): + print('found {:d} keypoints for band {:} '.format(len(k), self.images[ix].band_name)) + print(' in the remaining stack') + + matches = [match_descriptors(d, descriptor_ref, max_ratio=r) + for d, r in zip(descriptors, ratio)] + + # do we have dual camera capture? 
+ if len(img_index) > 9: + # if yes, we first match the first channel to the reference + img_index_Blue = img_index[img_index > 5] + iBlueREF = img_index_Blue[0] + # get the warp matrices for the BLUE reference + warpBLUE = self.get_warp_matrices(ref_index=iBlueREF) + # this is clunky, since we made our keypoint list WITHOUT the reference - consider refactoring that + posBLUE = np.where(img_index == iBlueREF)[0][0] + # we don't know the rig relatives between iBlueREF and ref + # so we have to rely on SIFT to find the correct transform + kpi, kpr, imatch, model = self.find_inliers( + keypoints[posBLUE], keypoints_ref, matches[posBLUE]) + # we trust this match to work + if len(kpi) < min_matches: + print('we have just {:d} matching keypoints - the match of the BLUE camera to the RED camera failed!'.format(len(kpi))) + # scale the match and get the transform + scale_i = np.array(self.images[iBlueREF].raw().shape) / np.array(rest_shape) + P = estimate_transform('projective', (scale * kpr)[:, ::-1], (scale_i * kpi)[:, ::-1]) + warp_blue_ref = P.params + # now modify the BLUE warp matrices + for ix in img_index_Blue: + warp_matrices_calibrated[ix] = np.dot(warp_blue_ref, warpBLUE[ix]) + + models = [] + kp_image = [] + kp_ref = [] + for m, k, ix, t in zip(matches, keypoints, img_index, filter_tr): + # we need to down scale the thermal image for the proper transform + scale_i = np.array(self.images[ix].raw().shape) / np.array(rest_shape) + + filtered_kpi, filtered_kpr, filtered_match, err = self.filter_keypoints(k, + keypoints_ref, + m, + warp_matrices_calibrated[ix], + scale, + scale_i, + threshold=t) + if verbose > 0: + print('found {:d} matching keypoints for index {:d}'.format(len(filtered_match), ix)) + # if we have enough SIFT matches that actually correspond, compute a model + if len(filtered_match) > min_matches: + kpi, kpr, imatch, model = self.find_inliers(filtered_kpi, + filtered_kpr, + filtered_match) + + P = estimate_transform( + 'projective', (scale * kpr)[:, ::-1], (scale_i * kpi)[:, ::-1]) + # otherwise, use the calibrated matrix + # most of the time this will occur for the thermal image, as we have a hard time + # finding good matches between panchro & thermal in most cases + else: + P = ProjectiveTransform(matrix=warp_matrices_calibrated[ix]) + if verbose > 0: + print('no match for index {:d}'.format(ix)) + models.append(P) + kp_image.append(kpi) + kp_ref.append(kpr) + img = self.images[ix].undistorted(self.images[ix].raw()) + + # no need for the upsampled stacks here + if verbose > 0: + print("Finished aligning band", ix) + + self.__sift_warp_matrices = [np.eye(3)] * len(self.images) + for ix, m in zip(img_index, models): + self.__sift_warp_matrices[ix] = m.params + + return self.__sift_warp_matrices + + def adjust_transform(self, ref_index): + warp_matrices = self.get_warp_matrices(ref_index=ref_index) + t_matrices = [] + CR = self.images[ref_index].cv2_camera_matrix() + for i in self.images: + z = i.location[2] * 1e3 + C = i.cv2_camera_matrix() + + T = (np.array(i.rig_translations) - np.array(self.images[ref_index].rig_translations)) + tm = np.eye(3) + tm[0, 2] = C[0, 0] * T[0] / z + tm[1, 2] = C[1, 1] * T[1] / z + t_matrices.append(tm) + warp_new = [np.dot(t, w) for w, t in zip(warp_matrices, t_matrices)] + return warp_new diff --git a/micasense/dls.py b/micasense/dls.py index 44a45191..e8a154fd 100644 --- a/micasense/dls.py +++ b/micasense/dls.py @@ -23,36 +23,41 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""" - import numpy as np + # for DLS correction, we need the sun position at the time the image was taken # this can be computed using the pysolar package (ver 0.6) # https://pypi.python.org/pypi/Pysolar/0.6 # we import multiple times with checking here because the case of Pysolar is # different depending on the python version :( -import imp +# import imp havePysolar = False try: import pysolar.solar as pysolar + havePysolar = True except ImportError: try: import Pysolar.solar as pysolar + havePysolar = True except ImportError: import pysolar.solar as pysolar - havePysolar = True -finally: + + havePysolar = True +finally: if not havePysolar: print("Unable to import pysolar") + def fresnel(phi): - return __multilayer_transmission(phi, n=[1.000277,1.6,1.38]) + return __multilayer_transmission(phi, n=[1.000277, 1.6, 1.38]) + # define functions to compute the DLS-Sun angle: -def __fresnel_transmission(phi, n1=1.000277, n2=1.38, polarization=[.5, .5]): +def __fresnel_transmission(phi, n1=1.000277, n2=1.38, polarization=None): """compute fresnel transmission between media with refractive indices n1 and n2""" # computes the reflection and transmittance # for incidence angles phi for transition from medium @@ -62,36 +67,46 @@ def __fresnel_transmission(phi, n1=1.000277, n2=1.38, polarization=[.5, .5]): # polarization=[.5,.5] - unpolarized light # polarization=[1.,0] - s-polarized light - perpendicular to plane of incidence # polarization=[0,1.] - p-polarized light - parallel to plane of incidence + if polarization is None: + polarization = [.5, .5] f1 = np.cos(phi) - f2 = np.sqrt(1-(n1/n2*np.sin(phi))**2) - Rs = ((n1*f1-n2*f2)/(n1*f1+n2*f2))**2 - Rp = ((n1*f2-n2*f1)/(n1*f2+n2*f1))**2 - T = 1.-polarization[0]*Rs-polarization[1]*Rp - if T > 1: T= 0. - if T < 0: T = 0. - if np.isnan(T): T = 0. + f2 = np.sqrt(1 - (n1 / n2 * np.sin(phi)) ** 2) + Rs = ((n1 * f1 - n2 * f2) / (n1 * f1 + n2 * f2)) ** 2 + Rp = ((n1 * f2 - n2 * f1) / (n1 * f2 + n2 * f1)) ** 2 + T = 1. - polarization[0] * Rs - polarization[1] * Rp + if T > 1: + T = 0. + if T < 0: + T = 0. + if np.isnan(T): + T = 0. 
return T -def __multilayer_transmission(phi, n, polarization=[.5, .5]): + +def __multilayer_transmission(phi, n, polarization=None): + if polarization is None: + polarization = [.5, .5] T = 1.0 phi_eff = np.copy(phi) - for i in range(0,len(n)-1): + for i in range(0, len(n) - 1): n1 = n[i] - n2 = n[i+1] - phi_eff = np.arcsin(np.sin(phi_eff)/n1) + n2 = n[i + 1] + phi_eff = np.arcsin(np.sin(phi_eff) / n1) T *= __fresnel_transmission(phi_eff, n1, n2, polarization=polarization) return T + # get the position of the sun in North-East-Down (NED) coordinate system -def ned_from_pysolar(sunAzimuth, sunAltitude): +def ned_from_pysolar(sun_azimuth, sun_altitude): """Convert pysolar coordinates to NED coordinates.""" elements = ( - np.cos(sunAzimuth) * np.cos(sunAltitude), - np.sin(sunAzimuth) * np.cos(sunAltitude), - -np.sin(sunAltitude), + np.cos(sun_azimuth) * np.cos(sun_altitude), + np.sin(sun_azimuth) * np.cos(sun_altitude), + -np.sin(sun_altitude), ) return np.array(elements).transpose() + # get the sensor orientation in North-East-Down coordinates # pose is a yaw/pitch/roll tuple of angles measured for the DLS # ori is the 3D orientation vector of the DLS in body coordinates (typically [0,0,-1]) @@ -111,6 +126,7 @@ def get_orientation(pose, ori): n = np.dot(R, ori) return n + # from the current position (lat,lon,alt) tuple # and time (UTC), as well as the sensor orientation (yaw,pitch,roll) tuple # compute a sensor sun angle - this is needed as the actual sun irradiance @@ -121,26 +137,24 @@ def get_orientation(pose, ori): # I_measured = I_direct * (cos (sun_sensor_angle) + 1/6) def compute_sun_angle( - position, - pose, - utc_datetime, - sensor_orientation, + position, + pose, + utc_datetime, + sensor_orientation, ): """ compute the sun angle using pysolar functions""" - altitude = 0 - azimuth = 0 import warnings - with warnings.catch_warnings(): # Ignore pysolar leap seconds offset warning + with warnings.catch_warnings(): # Ignore pysolar leap seconds offset warning warnings.simplefilter("ignore") try: altitude = pysolar.get_altitude(position[0], position[1], utc_datetime) azimuth = pysolar.get_azimuth(position[0], position[1], utc_datetime) - except AttributeError: # catch 0.6 version of pysolar required for python 2.7 support + except AttributeError: # catch 0.6 version of pysolar required for python 2.7 support altitude = pysolar.GetAltitude(position[0], position[1], utc_datetime) - azimuth = 180-pysolar.GetAzimuth(position[0], position[1], utc_datetime) + azimuth = 180 - pysolar.GetAzimuth(position[0], position[1], utc_datetime) sunAltitude = np.radians(np.array(altitude)) sunAzimuth = np.radians(np.array(azimuth)) - sunAzimuth = sunAzimuth % (2 * np.pi ) #wrap range 0 to 2*pi + sunAzimuth = sunAzimuth % (2 * np.pi) # wrap range 0 to 2*pi nSun = ned_from_pysolar(sunAzimuth, sunAltitude) nSensor = np.array(get_orientation(pose, sensor_orientation)) angle = np.arccos(np.dot(nSun, nSensor)) diff --git a/micasense/image.py b/micasense/image.py index ffb94799..dd31625d 100644 --- a/micasense/image.py +++ b/micasense/image.py @@ -26,43 +26,46 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" +import math import os + import cv2 -import math import numpy as np -import matplotlib.pyplot as plt -import micasense.plotutils as plotutils -import micasense.metadata as metadata import micasense.dls as dls +import micasense.metadata as metadata +import micasense.plotutils as plotutils + -#helper function to convert euler angles to a rotation matrix +# helper function to convert euler angles to a rotation matrix def rotations_degrees_to_rotation_matrix(rotation_degrees): - cx = np.cos(np.deg2rad(rotation_degrees[0])) - cy = np.cos(np.deg2rad(rotation_degrees[1])) - cz = np.cos(np.deg2rad(rotation_degrees[2])) - sx = np.sin(np.deg2rad(rotation_degrees[0])) - sy = np.sin(np.deg2rad(rotation_degrees[1])) - sz = np.sin(np.deg2rad(rotation_degrees[2])) - - Rx = np.mat([ 1, 0, 0, - 0, cx,-sx, - 0, sx, cx]).reshape(3,3) - Ry = np.mat([ cy, 0, sy, - 0, 1, 0, - -sy, 0, cy]).reshape(3,3) - Rz = np.mat([ cz,-sz, 0, - sz, cz, 0, - 0, 0, 1]).reshape(3,3) - R = Rx*Ry*Rz - return R + cx = np.cos(np.deg2rad(rotation_degrees[0])) + cy = np.cos(np.deg2rad(rotation_degrees[1])) + cz = np.cos(np.deg2rad(rotation_degrees[2])) + sx = np.sin(np.deg2rad(rotation_degrees[0])) + sy = np.sin(np.deg2rad(rotation_degrees[1])) + sz = np.sin(np.deg2rad(rotation_degrees[2])) + + Rx = np.mat([1, 0, 0, + 0, cx, -sx, + 0, sx, cx]).reshape(3, 3) + Ry = np.mat([cy, 0, sy, + 0, 1, 0, + -sy, 0, cy]).reshape(3, 3) + Rz = np.mat([cz, -sz, 0, + sz, cz, 0, + 0, 0, 1]).reshape(3, 3) + R = Rx * Ry * Rz + return R + class Image(object): """ An Image is a single file taken by a RedEdge camera representing one band of multispectral information """ - def __init__(self, image_path, exiftool_obj=None): + + def __init__(self, image_path, exiftool_obj=None, allow_uncalibrated=False): if not os.path.isfile(image_path): raise IOError("Provided path is not a file: {}".format(image_path)) self.path = image_path @@ -70,16 +73,20 @@ def __init__(self, image_path, exiftool_obj=None): if self.meta.band_name() is None: raise ValueError("Provided file path does not have a band name: {}".format(image_path)) - if self.meta.band_name().upper() != 'LWIR' and not self.meta.supports_radiometric_calibration(): - raise ValueError('Library requires images taken with RedEdge-(3/M/MX) camera firmware v2.1.0 or later. ' + - 'Upgrade your camera firmware to at least version 2.1.0 to use this library with RedEdge-(3/M/MX) cameras.') - + if not allow_uncalibrated: + if self.meta.band_name().upper() != 'LWIR' and not self.meta.supports_radiometric_calibration(): + raise ValueError( + 'Library requires images taken with RedEdge-(3/M/MX) camera firmware v2.1.0 or later. 
' + + 'Upgrade your camera firmware to at least version 2.1.0 to use this library with RedEdge-(3/M/MX) cameras.') + self.allow_uncalibrated = allow_uncalibrated self.utc_time = self.meta.utc_time() self.latitude, self.longitude, self.altitude = self.meta.position() self.location = (self.latitude, self.longitude, self.altitude) self.dls_present = self.meta.dls_present() self.dls_yaw, self.dls_pitch, self.dls_roll = self.meta.dls_pose() self.capture_id = self.meta.capture_id() + self.camera_model = self.meta.camera_model() + self.camera_serial = self.meta.camera_serial() self.flight_id = self.meta.flight_id() self.band_name = self.meta.band_name() self.band_index = self.meta.band_index() @@ -90,8 +97,13 @@ def __init__(self, image_path, exiftool_obj=None): self.gain = self.meta.gain() self.bits_per_pixel = self.meta.bits_per_pixel() + # vignette can be either a radial poly or a 2D polynomial self.vignette_center = self.meta.vignette_center() self.vignette_polynomial = self.meta.vignette_polynomial() + + self.vignette_polynomial2Dexponents = self.meta.vignette_polynomial2Dexponents() + self.vignette_polynomial2D = self.meta.vignette_polynomial2D() + self.distortion_parameters = self.meta.distortion_parameters() self.principal_point = self.meta.principal_point() self.focal_plane_resolution_px_per_mm = self.meta.focal_plane_resolution_px_per_mm() @@ -100,6 +112,7 @@ def __init__(self, image_path, exiftool_obj=None): self.center_wavelength = self.meta.center_wavelength() self.bandwidth = self.meta.bandwidth() self.rig_relatives = self.meta.rig_relatives() + self.rig_translations = self.meta.rig_translations() self.spectral_irradiance = self.meta.spectral_irradiance() self.auto_calibration_image = self.meta.auto_calibration_image() @@ -108,15 +121,15 @@ def __init__(self, image_path, exiftool_obj=None): self.panel_serial = self.meta.panel_serial() if self.dls_present: - self.dls_orientation_vector = np.array([0,0,-1]) + self.dls_orientation_vector = np.array([0, 0, -1]) self.sun_vector_ned, \ - self.sensor_vector_ned, \ - self.sun_sensor_angle, \ - self.solar_elevation, \ - self.solar_azimuth=dls.compute_sun_angle(self.location, - self.meta.dls_pose(), - self.utc_time, - self.dls_orientation_vector) + self.sensor_vector_ned, \ + self.sun_sensor_angle, \ + self.solar_elevation, \ + self.solar_azimuth = dls.compute_sun_angle(self.location, + self.meta.dls_pose(), + self.utc_time, + self.dls_orientation_vector) self.angular_correction = dls.fresnel(self.sun_sensor_angle) # when we have good horizontal irradiance the camera provides the solar az and el also @@ -129,63 +142,63 @@ def __init__(self, image_path, exiftool_obj=None): self.estimated_direct_vector = self.meta.estimated_direct_vector() if self.meta.horizontal_irradiance_valid(): self.horizontal_irradiance = self.meta.horizontal_irradiance() - else: + else: self.horizontal_irradiance = self.compute_horizontal_irradiance_dls2() else: - self.direct_to_diffuse_ratio = 6.0 # assumption + self.direct_to_diffuse_ratio = 6.0 # assumption self.horizontal_irradiance = self.compute_horizontal_irradiance_dls1() self.spectral_irradiance = self.meta.spectral_irradiance() - else: # no dls present or LWIR band: compute what we can, set the rest to 0 - self.dls_orientation_vector = np.array([0,0,-1]) + else: # no dls present or LWIR band: compute what we can, set the rest to 0 + self.dls_orientation_vector = np.array([0, 0, -1]) self.sun_vector_ned, \ - self.sensor_vector_ned, \ - self.sun_sensor_angle, \ - self.solar_elevation, \ - 
self.solar_azimuth=dls.compute_sun_angle(self.location, - (0,0,0), - self.utc_time, - self.dls_orientation_vector) + self.sensor_vector_ned, \ + self.sun_sensor_angle, \ + self.solar_elevation, \ + self.solar_azimuth = dls.compute_sun_angle(self.location, + (0, 0, 0), + self.utc_time, + self.dls_orientation_vector) self.angular_correction = dls.fresnel(self.sun_sensor_angle) self.horizontal_irradiance = 0 self.scattered_irradiance = 0 self.direct_irradiance = 0 self.direct_to_diffuse_ratio = 0 - + # Internal image containers; these can use a lot of memory, clear with Image.clear_images - self.__raw_image = None # pure raw pixels - self.__intensity_image = None # black level and gain-exposure/radiometric compensated - self.__radiance_image = None # calibrated to radiance - self.__reflectance_image = None # calibrated to reflectance (0-1) + self.__raw_image = None # pure raw pixels + self.__intensity_image = None # black level and gain-exposure/radiometric compensated + self.__radiance_image = None # calibrated to radiance + self.__reflectance_image = None # calibrated to reflectance (0-1) self.__reflectance_irradiance = None - self.__undistorted_source = None # can be any of raw, intensity, radiance - self.__undistorted_image = None # current undistorted image, depdining on source + self.__undistorted_source = None # can be any of raw, intensity, radiance + self.__undistorted_image = None # current undistorted image, depending on source - # solar elevation is defined as the angle betwee the horizon and the sun, so it is 0 when the + # solar elevation is defined as the angle between the horizon and the sun, so it is 0 when the # sun is at the horizon and pi/2 when the sun is directly overhead def horizontal_irradiance_from_direct_scattered(self): return self.direct_irradiance * np.sin(self.solar_elevation) + self.scattered_irradiance def compute_horizontal_irradiance_dls1(self): - percent_diffuse = 1.0/self.direct_to_diffuse_ratio - #percent_diffuse = 5e4/(img.center_wavelength**2) + percent_diffuse = 1.0 / self.direct_to_diffuse_ratio + # percent_diffuse = 5e4/(img.center_wavelength**2) sensor_irradiance = self.spectral_irradiance / self.angular_correction # find direct irradiance in the plane normal to the sun untilted_direct_irr = sensor_irradiance / (percent_diffuse + np.cos(self.sun_sensor_angle)) self.direct_irradiance = untilted_direct_irr - self.scattered_irradiance = untilted_direct_irr*percent_diffuse + self.scattered_irradiance = untilted_direct_irr * percent_diffuse # compute irradiance on the ground using the solar altitude angle return self.horizontal_irradiance_from_direct_scattered() - + def compute_horizontal_irradiance_dls2(self): - ''' Compute the proper solar elevation, solar azimuth, and horizontal irradiance - for cases where the camera system did not do it correctly ''' - _,_,_, \ - self.solar_elevation, \ - self.solar_azimuth=dls.compute_sun_angle(self.location, - (0,0,0), - self.utc_time, - np.array([0,0,-1])) + """ Compute the proper solar elevation, solar azimuth, and horizontal irradiance + for cases where the camera system did not do it correctly """ + _, _, _, \ + self.solar_elevation, \ + self.solar_azimuth = dls.compute_sun_angle(self.location, + (0, 0, 0), + self.utc_time, + np.array([0, 0, -1])) return self.horizontal_irradiance_from_direct_scattered()
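A worked numeric example of compute_horizontal_irradiance_dls1() above, with made-up inputs (assumed 6:1 direct-to-diffuse ratio, sun 25 degrees off the DLS normal, 50 degree solar elevation):

# DLS1 horizontal-irradiance model, one scalar evaluation.
import numpy as np

spectral_irradiance = 1.10           # DLS reading (illustrative value)
angular_correction = 0.97            # fresnel(sun_sensor_angle), close to 1
sun_sensor_angle = np.deg2rad(25.0)
solar_elevation = np.deg2rad(50.0)
percent_diffuse = 1.0 / 6.0          # direct_to_diffuse_ratio = 6.0 assumption

sensor_irradiance = spectral_irradiance / angular_correction
untilted_direct = sensor_irradiance / (percent_diffuse + np.cos(sun_sensor_angle))
scattered = untilted_direct * percent_diffuse
horizontal = untilted_direct * np.sin(solar_elevation) + scattered
print(round(horizontal, 3))          # 0.986: direct term projected to horizontal, plus diffuse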
def __lt__(self, other): @@ -196,47 +209,51 @@ def __gt__(self, other): def __eq__(self, other): return (self.band_index == other.band_index) and \ - (self.capture_id == other.capture_id) + (self.capture_id == other.capture_id) def __ne__(self, other): return (self.band_index != other.band_index) or \ - (self.capture_id != other.capture_id) + (self.capture_id != other.capture_id) def raw(self): - ''' Lazy load the raw image once neecessary ''' + """ Lazily load the raw image when first needed """ if self.__raw_image is None: try: import rawpy - self.__raw_image = rawpy.imread(self.path).raw_image + # to support 12-bit DNG files, otherwise we get "SIFT found no features" error + if self.bits_per_pixel == 12: + self.__raw_image = rawpy.imread(self.path).raw_image * 16 + else: + self.__raw_image = rawpy.imread(self.path).raw_image except ImportError: - self.__raw_image = cv2.imread(self.path,-1) + self.__raw_image = cv2.imread(self.path, -1) except IOError: - print("Could not open image at path {}".format(self.path)) + print("Could not open image at path {}".format(self.path)) raise return self.__raw_image - - def set_raw(self,img): - ''' set raw image from input img''' + + def set_raw(self, img): + """ set raw image from input img""" self.__raw_image = img.astype(np.uint16) - - def set_undistorted(self,img): - ''' set undistorted image from input img''' + + def set_undistorted(self, img): + """ set undistorted image from input img""" self.__undistorted_image = img.astype(np.uint16) - - def set_external_rig_relatives(self,external_rig_relatives): + + def set_external_rig_relatives(self, external_rig_relatives): self.rig_translations = external_rig_relatives['rig_translations'] - #external rig relatives are in rad + # external rig relatives are in rad self.rig_relatives = [np.rad2deg(a) for a in external_rig_relatives['rig_relatives']] - px,py = external_rig_relatives['cx'],external_rig_relatives['cy'] - fx,fy = external_rig_relatives['fx'],external_rig_relatives['fy'] + px, py = external_rig_relatives['cx'], external_rig_relatives['cy'] + fx, fy = external_rig_relatives['fx'], external_rig_relatives['fy'] rx = self.focal_plane_resolution_px_per_mm[0] ry = self.focal_plane_resolution_px_per_mm[1] - self.principal_point = [px/rx,py/ry] - self.focal_length = (fx+fy)*.5/rx - #to do - set the distortion etc. + self.principal_point = [px / rx, py / ry] + self.focal_length = (fx + fy) * .5 / rx + # to do - set the distortion etc. def clear_image_data(self): - ''' clear all computed images to reduce memory overhead ''' + """ clear all computed images to reduce memory overhead """ self.__raw_image = None self.__intensity_image = None self.__radiance_image = None @@ -250,10 +267,10 @@ return width, height def reflectance(self, irradiance=None, force_recompute=False): - ''' Lazy-compute and return a reflectance image provided an irradiance reference ''' + """ Lazy-compute and return a reflectance image provided an irradiance reference """ if self.__reflectance_image is not None \ - and force_recompute == False \ - and (self.__reflectance_irradiance == irradiance or irradiance == None): + and not force_recompute \ + and (self.__reflectance_irradiance == irradiance or irradiance is None): return self.__reflectance_image if irradiance is None and self.band_name != 'LWIR': if self.horizontal_irradiance != 0.0: @@ -268,10 +285,10 @@ return self.__reflectance_image def intensity(self, force_recompute=False): - ''' Lazy=computes and returns the intensity image after black level, + """ Lazily computes and returns the intensity image after black level, vignette, and row correction applied.

     def intensity(self, force_recompute=False):
-        ''' Lazy=computes and returns the intensity image after black level,
+        """ Lazy-computes and returns the intensity image after black level,
             vignette, and row correction applied.
-            Intensity is in units of DN*Seconds without a radiance correction '''
-        if self.__intensity_image is not None and force_recompute == False:
+            Intensity is in units of DN*Seconds without a radiance correction """
+        if self.__intensity_image is not None and not force_recompute:
             return self.__intensity_image

         # get image dimensions
@@ -285,22 +302,22 @@ def intensity(self, force_recompute=False):
         R = 1.0 / (1.0 + a2 * y / self.exposure_time - a3 * y)
         L = V * R * (image_raw - self.black_level)
         L[L < 0] = 0
-        max_raw_dn = float(2**self.bits_per_pixel)
-        intensity_image = L.astype(float)/(self.gain * self.exposure_time * max_raw_dn)
+        max_raw_dn = float(2 ** self.bits_per_pixel)
+        intensity_image = L.astype(float) / (self.gain * self.exposure_time * max_raw_dn)

         self.__intensity_image = intensity_image.T

         return self.__intensity_image

     def radiance(self, force_recompute=False):
-        ''' Lazy=computes and returns the radiance image after all radiometric
-            corrections have been applied '''
-        if self.__radiance_image is not None and force_recompute == False:
+        """ Lazy-computes and returns the radiance image after all radiometric
+            corrections have been applied """
+        if self.__radiance_image is not None and not force_recompute:
             return self.__radiance_image

         # get image dimensions
         image_raw = np.copy(self.raw()).T

-        if(self.band_name != 'LWIR'):
+        if self.band_name != 'LWIR':
             # get radiometric calibration factors
             a1, a2, a3 = self.radiometric_cal[0], self.radiometric_cal[1], self.radiometric_cal[2]
             # apply image correction methods to raw image
@@ -308,46 +325,60 @@ def radiance(self, force_recompute=False):
             R = 1.0 / (1.0 + a2 * y / self.exposure_time - a3 * y)
             L = V * R * (image_raw - self.black_level)
             L[L < 0] = 0
-            max_raw_dn = float(2**self.bits_per_pixel)
-            radiance_image = L.astype(float)/(self.gain * self.exposure_time)*a1/max_raw_dn
+            max_raw_dn = float(2 ** self.bits_per_pixel)
+            radiance_image = L.astype(float) / (self.gain * self.exposure_time) * a1 / max_raw_dn
         else:
-            L = image_raw - (273.15*100.0) # convert to C from K
+            L = image_raw - (273.15 * 100.0)  # convert to C from K
             radiance_image = L.astype(float) * 0.01

         self.__radiance_image = radiance_image.T

         return self.__radiance_image
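
The same radiometric model written out for a single pixel; a sketch only, with V the vignette factor at that pixel and a1, a2, a3 the camera's XMP:RadiometricCalibration coefficients used above:

    def pixel_radiance(p_raw, y, V, a1, a2, a3, black_level, gain, exposure_time, bits_per_pixel):
        # row-gradient (R) and vignette (V) corrected, black-level-subtracted DN
        R = 1.0 / (1.0 + a2 * y / exposure_time - a3 * y)
        L = max(V * R * (p_raw - black_level), 0.0)
        # normalize by gain, exposure and full-scale DN, then scale by a1
        return L / (gain * exposure_time) * a1 / float(2 ** bits_per_pixel)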

     def vignette(self):
-        ''' Get a numpy array which defines the value to multiply each pixel by to correct
+        """ Get a numpy array which defines the value to multiply each pixel by to correct
         for optical vignetting effects.
         Note: this array is transposed from normal image orientation and comes as part
         of a three-tuple, the other parts of which are also used by the radiance method.
-        '''
-        # get vignette center
-        vignette_center_x, vignette_center_y = self.vignette_center
-
-        # get a copy of the vignette polynomial because we want to modify it here
-        v_poly_list = list(self.vignette_polynomial)
-
-        # reverse list and append 1., so that we can call with numpy polyval
-        v_poly_list.reverse()
-        v_poly_list.append(1.)
-        v_polynomial = np.array(v_poly_list)
-
-        # perform vignette correction
-        # get coordinate grid across image, seem swapped because of transposed vignette
+        """
         x_dim, y_dim = self.raw().shape[1], self.raw().shape[0]
+        # get coordinate grid across image, seem swapped because of transposed vignette
         x, y = np.meshgrid(np.arange(x_dim), np.arange(y_dim))
-
-        #meshgrid returns transposed arrays
+        # meshgrid returns transposed arrays
         x = x.T
         y = y.T
-
-        # compute matrix of distances from image center
-        r = np.hypot((x-vignette_center_x), (y-vignette_center_y))
-
-        # compute the vignette polynomial for each distance - we divide by the polynomial so that the
-        # corrected image is image_corrected = image_original * vignetteCorrection
-        vignette = 1./np.polyval(v_polynomial, r)
+        # if we have a radial poly
+        if len(self.vignette_center) > 0:
+            # get vignette center
+            vignette_center_x, vignette_center_y = self.vignette_center
+            # get a copy of the vignette polynomial because we want to modify it here
+            v_poly_list = list(self.vignette_polynomial)
+            # reverse list and append 1., so that we can call with numpy polyval
+            v_poly_list.reverse()
+            v_poly_list.append(1.)
+            v_polynomial = np.array(v_poly_list)
+            # perform vignette correction
+            # compute matrix of distances from image center
+            r = np.hypot((x - vignette_center_x), (y - vignette_center_y))
+            # compute the vignette polynomial for each distance - we divide by the polynomial so that the
+            # corrected image is image_corrected = image_original * vignetteCorrection
+            vignette = 1. / np.polyval(v_polynomial, r)
+        elif len(self.vignette_polynomial2D) > 0:
+            xv = x.T / x_dim
+            yv = y.T / y_dim
+            k = self.vignette_polynomial2D
+            e = self.vignette_polynomial2Dexponents
+            p2 = np.zeros_like(xv, dtype=np.float32)
+            for i, c in enumerate(k):
+                ex = e[2 * i]
+                ey = e[2 * i + 1]
+                p2 += c * xv ** ex * yv ** ey
+            vignette = (1. / p2).T
+        else:
+            # no vignette model in the metadata; fall back to no correction
+            vignette = np.ones_like(x, dtype=np.float32)
         return vignette, x, y
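
The new 2D branch evaluates p(x, y) = sum_i k[i] * x**e[2i] * y**e[2i+1] on normalized coordinates and divides the image by it. A standalone sketch of the same evaluation:

    import numpy as np

    def vignette_2d(k, e, width, height):
        # k: XMP:VignettingPolynomial2D coefficients; e: paired exponents for x and y
        x, y = np.meshgrid(np.arange(width) / width, np.arange(height) / height)
        p = np.zeros_like(x)
        for i, c in enumerate(k):
            p += c * x ** e[2 * i] * y ** e[2 * i + 1]
        return 1.0 / p   # multiply the raw image by this to correct vignetting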

     def undistorted_radiance(self, force_recompute=False):
@@ -360,14 +391,14 @@ def plottable_vignette(self):
         return self.vignette()[0].T

     def cv2_distortion_coeff(self):
-        #dist_coeffs = np.array(k[0],k[1],p[0],p[1],k[2]])
+        # dist_coeffs = np.array([k[0], k[1], p[0], p[1], k[2]])
         return np.array(self.distortion_parameters)[[0, 1, 3, 4, 2]]

     # values in pp are in [mm], rescale to pixels
     def principal_point_px(self):
         center_x = self.principal_point[0] * self.focal_plane_resolution_px_per_mm[0]
         center_y = self.principal_point[1] * self.focal_plane_resolution_px_per_mm[1]
-        return (center_x, center_y)
+        return center_x, center_y

     def cv2_camera_matrix(self):
         center_x, center_y = self.principal_point_px()
@@ -384,16 +415,16 @@ def cv2_camera_matrix(self):
         return cam_mat

     def rig_xy_offset_in_px(self):
-        pixel_pitch_mm_x = 1.0/self.focal_plane_resolution_px_per_mm[0]
-        pixel_pitch_mm_y = 1.0/self.focal_plane_resolution_px_per_mm[1]
-        px_fov_x = 2.0 * math.atan2(pixel_pitch_mm_x/2.0, self.focal_length)
-        px_fov_y = 2.0 * math.atan2(pixel_pitch_mm_y/2.0, self.focal_length)
+        pixel_pitch_mm_x = 1.0 / self.focal_plane_resolution_px_per_mm[0]
+        pixel_pitch_mm_y = 1.0 / self.focal_plane_resolution_px_per_mm[1]
+        px_fov_x = 2.0 * math.atan2(pixel_pitch_mm_x / 2.0, self.focal_length)
+        px_fov_y = 2.0 * math.atan2(pixel_pitch_mm_y / 2.0, self.focal_length)
         t_x = math.radians(self.rig_relatives[0]) / px_fov_x
         t_y = math.radians(self.rig_relatives[1]) / px_fov_y
-        return (t_x, t_y)
+        return t_x, t_y

     def undistorted(self, image):
-        ''' return the undistorted image from input image '''
+        """ return the undistorted image from input image """
         # If we have already undistorted the same source, just return that here
         # otherwise, lazy compute the undistorted image
         if self.__undistorted_source is not None and image.data == self.__undistorted_source.data:
             return self.__undistorted_image
@@ -406,81 +437,84 @@ def undistorted(self, image):
                                                         self.size(),
                                                         1)
         map1, map2 = cv2.initUndistortRectifyMap(self.cv2_camera_matrix(),
-                                            self.cv2_distortion_coeff(),
-                                            np.eye(3),
-                                            new_cam_mat,
-                                            self.size(),
-                                            cv2.CV_32F) # cv2.CV_32F for 32 bit floats
+                                                 self.cv2_distortion_coeff(),
+                                                 np.eye(3),
+                                                 new_cam_mat,
+                                                 self.size(),
+                                                 cv2.CV_32F)  # cv2.CV_32F for 32 bit floats
         # compute the undistorted 16 bit image
         self.__undistorted_image = cv2.remap(image, map1, map2, cv2.INTER_LINEAR)
         return self.__undistorted_image
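
The undistortion path above, condensed for a standalone frame; camera_matrix and dist_coeffs would come from cv2_camera_matrix() and cv2_distortion_coeff() on a real Image:

    import cv2
    import numpy as np

    def undistort(img, camera_matrix, dist_coeffs):
        h, w = img.shape[:2]
        new_mat, _ = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coeffs, (w, h), 1)
        map1, map2 = cv2.initUndistortRectifyMap(camera_matrix, dist_coeffs, np.eye(3),
                                                 new_mat, (w, h), cv2.CV_32F)
        return cv2.remap(img, map1, map2, cv2.INTER_LINEAR)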

     def plot_raw(self, title=None, figsize=None):
-        ''' Create a single plot of the raw image '''
+        """ Create a single plot of the raw image """
         if title is None:
             title = '{} Band {} Raw DN'.format(self.band_name, self.band_index)
         return plotutils.plotwithcolorbar(self.raw(), title=title, figsize=figsize)

     def plot_intensity(self, title=None, figsize=None):
-        ''' Create a single plot of the image converted to uncalibrated intensity '''
+        """ Create a single plot of the image converted to uncalibrated intensity """
         if title is None:
             title = '{} Band {} Intensity (DN*sec)'.format(self.band_name, self.band_index)
         return plotutils.plotwithcolorbar(self.intensity(), title=title, figsize=figsize)

     def plot_radiance(self, title=None, figsize=None):
-        ''' Create a single plot of the image converted to radiance '''
+        """ Create a single plot of the image converted to radiance """
         if title is None:
             title = '{} Band {} Radiance'.format(self.band_name, self.band_index)
         return plotutils.plotwithcolorbar(self.radiance(), title=title, figsize=figsize)

     def plot_vignette(self, title=None, figsize=None):
-        ''' Create a single plot of the vignette '''
+        """ Create a single plot of the vignette """
         if title is None:
             title = '{} Band {} Vignette'.format(self.band_name, self.band_index)
         return plotutils.plotwithcolorbar(self.plottable_vignette(), title=title, figsize=figsize)

     def plot_undistorted_radiance(self, title=None, figsize=None):
-        ''' Create a single plot of the undistorted radiance '''
+        """ Create a single plot of the undistorted radiance """
         if title is None:
             title = '{} Band {} Undistorted Radiance'.format(self.band_name, self.band_index)
         return plotutils.plotwithcolorbar(self.undistorted(self.radiance()), title=title, figsize=figsize)

-    def plot_all(self, figsize=(13,10)):
+    def plot_all(self, figsize=(13, 10)):
         plots = [self.raw(), self.plottable_vignette(), self.radiance(), self.undistorted(self.radiance())]
         plot_types = ['Raw', 'Vignette', 'Radiance', 'Undistorted Radiance']
         titles = ['{} Band {} {}'.format(str(self.band_name), str(self.band_index), tpe)
-                    for tpe in plot_types]
+                  for tpe in plot_types]
         plotutils.subplotwithcolorbar(2, 2, plots, titles, figsize=figsize)

-    #get the homography that maps from this image to the reference image
-    def get_homography(self,ref,R=None,T=None):
-        #if we have externally supplied rotations/translations for the rig use these
+    # get the homography that maps from this image to the reference image
+    def get_homography(self, ref, R=None, T=None):
+        # if we have externally supplied rotations/translations for the rig use these
         # otherwise use the rig-relatives intrinsic to the image
         if R is None:
             R = rotations_degrees_to_rotation_matrix(self.rig_relatives)
         if T is None:
-            T =np.zeros(3)
+            T = np.zeros(3)
+        # don't want to use this here
+        # if self.rig_translations is not None:
+        #     T = np.array(self.rig_translations) - np.array(ref.rig_translations)
         R_ref = rotations_degrees_to_rotation_matrix(ref.rig_relatives)
-        A = np.zeros((4,4))
-        A[0:3,0:3]=np.dot(R_ref.T,R)
-        A[0:3,3]=T
-        A[3,3]=1.
+        A = np.zeros((4, 4))
+        A[0:3, 0:3] = np.dot(R_ref.T, R)
+        A[0:3, 3] = T
+        A[3, 3] = 1.
         C, _ = cv2.getOptimalNewCameraMatrix(self.cv2_camera_matrix(),
                                              self.cv2_distortion_coeff(),
-                                             self.size(),1)
+                                             self.size(), 1)
         Cr, _ = cv2.getOptimalNewCameraMatrix(ref.cv2_camera_matrix(),
                                               ref.cv2_distortion_coeff(),
-                                              ref.size(),1)
-        CC = np.zeros((4,4))
-        CC[0:3,0:3] = C
-        CC[3,3]=1.
-        CCr = np.zeros((4,4))
-        CCr[0:3,0:3] = Cr
-        CCr[3,3]=1.
-
-        B = np.array(np.dot(CCr,np.dot(A,np.linalg.inv(CC))))
-        B[:,2]=B[:,2]-B[:,3]
-        B = B[0:3,0:3]
-        B = B/B[2,2]
+                                              ref.size(), 1)
+        CC = np.zeros((4, 4))
+        CC[0:3, 0:3] = C
+        CC[3, 3] = 1.
+        CCr = np.zeros((4, 4))
+        CCr[0:3, 0:3] = Cr
+        CCr[3, 3] = 1.
+
+        B = np.array(np.dot(CCr, np.dot(A, np.linalg.inv(CC))))
+        B[:, 2] = B[:, 2] - B[:, 3]
+        B = B[0:3, 0:3]
+        B = B / B[2, 2]
         return np.array(B)
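
Applying the homography returned by get_homography() is a single warp; a hedged usage sketch with img and ref_img as micasense Image objects:

    import cv2

    B = img.get_homography(ref_img)
    w, h = ref_img.size()
    registered = cv2.warpPerspective(img.undistorted(img.radiance()), B, (w, h))

diff --git a/micasense/imageset.py b/micasense/imageset.py
index f9f02401..6b5a08d8 100644
--- a/micasense/imageset.py
+++ b/micasense/imageset.py
@@ -1,10 +1,12 @@
 #!/usr/bin/env python
 # coding: utf-8
 """
 MicaSense ImageSet Class

     An ImageSet contains a group of Captures. The Captures can be loaded from Image objects, from a list of files,
     or by recursively searching a directory for images.

 Copyright 2017 MicaSense, Inc.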
@@ -25,148 +27,54 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 """
-
 import fnmatch
+import multiprocessing
 import os
-import warnings
-from concurrent.futures import ProcessPoolExecutor, as_completed
-from functools import partial
-from pprint import pprint

 import exiftool
-from tqdm import tqdm

 import micasense.capture as capture
 import micasense.image as image
-
-warnings.simplefilter(action="once")
+from micasense.imageutils import save_capture as save_capture


-# FIXME: mirrors Capture.append_file(). Not used. Does this still belong here?
 def image_from_file(filename):
     return image.Image(filename)


-def parallel_process(function, iterable, parameters, progress_callback=None, use_tqdm=False):
+class ImageSet(object):
     """
-    Multiprocessing Pool handler.
-    :param function: function used in multiprocessing call
-    :param iterable: iterable holding objects passed to function for each process
-    :param parameters: dict of any function parameters other than the iterable object
-    :param use_tqdm: boolean True to use tqdm progress bar
-    :param progress_callback: function to report progress to
-    :return: None
+    An ImageSet is a container for a group of captures that are processed together
     """
-    with ProcessPoolExecutor() as pool:
-        # run multiprocessing
-        futures = [pool.submit(partial(function, parameters), i) for i in iterable]
-
-        if use_tqdm:
-            # kwargs for tqdm
-            kwargs = {
-                'total': len(futures),
-                'unit': 'Capture',
-                'unit_scale': False,
-                'leave': True
-            }
-
-            # Receive Future objects as they complete. Print out the progress as tasks complete
-            for _ in tqdm(iterable=as_completed(futures), desc='Processing ImageSet', **kwargs):
-                pass
-        elif progress_callback is not None:
-            futures_len = float(len(futures))
-            for i, _ in enumerate(as_completed(futures)):
-                progress_callback(float(i) / futures_len)
-
-
-def save_capture(params, cap):
-    """
-    Process an ImageSet according to program parameters. Saves rgb
-    :param params: dict of program parameters from ImageSet.process_imageset()
-    :param cap: micasense.capture.Capture object
-    """
-    try:
-        # align capture
-        if len(cap.images) == params['capture_len']:
-            cap.create_aligned_capture(
-                irradiance_list=params['irradiance'],
-                warp_matrices=params['warp_matrices'],
-                img_type=params['img_type']
-            )
-        else:
-            print(f"\tCapture {cap.uuid} only has {len(cap.images)} Images. Should have {params['capture_len']}. "
-                  f"Skipping...")
-            return
" - f"Skipping...") - return - - if params['output_stack_dir']: - output_stack_file_path = os.path.join(params['output_stack_dir'], cap.uuid + '.tif') - if params['overwrite'] or not os.path.exists(output_stack_file_path): - cap.save_capture_as_stack(output_stack_file_path) - if params['output_rgb_dir']: - output_rgb_file_path = os.path.join(params['output_rgb_dir'], cap.uuid + '.jpg') - if params['overwrite'] or not os.path.exists(output_rgb_file_path): - cap.save_capture_as_rgb(output_rgb_file_path) - - cap.clear_image_data() - except Exception as e: - print(e) - pprint(params) - quit() - - -class ImageSet(object): - """An ImageSet is a container for a group of Captures that are processed together.""" - def __init__(self, captures): self.captures = captures captures.sort() @classmethod - def from_directory(cls, directory, progress_callback=None, use_tqdm=False, exiftool_path=None): + def from_directory(cls, directory, progress_callback=None, exiftool_path=None, allow_uncalibrated=False): """ - Create an ImageSet recursively from the files in a directory. - :param directory: str system file path - :param progress_callback: function to report progress to - :param use_tqdm: boolean True to use tqdm progress bar - :param exiftool_path: str system file path to exiftool location - :return: ImageSet instance + Create and ImageSet recursively from the files in a directory """ - - # progress_callback deprecation warning - if progress_callback is not None: - warnings.warn(message='The progress_callback parameter will be deprecated in favor of use_tqdm', - category=PendingDeprecationWarning) - - # ensure exiftoolpath is found per MicaSense setup instructions - if exiftool_path is None and os.environ.get('exiftoolpath') is not None: - exiftool_path = os.path.normpath(os.environ.get('exiftoolpath')) - cls.basedir = directory matches = [] - for root, _, filenames in os.walk(directory): - [matches.append(os.path.join(root, filename)) for filename in fnmatch.filter(filenames, '*.tif')] + for root, dirnames, filenames in os.walk(directory): + for filename in fnmatch.filter(filenames, '*.tif'): + matches.append(os.path.join(root, filename)) images = [] + if exiftool_path is None and os.environ.get('exiftoolpath') is not None: + exiftool_path = os.path.normpath(os.environ.get('exiftoolpath')) + with exiftool.ExifTool(exiftool_path) as exift: - if use_tqdm: # to use tqdm progress bar instead of progress_callback - kwargs = { - 'total': len(matches), - 'unit': ' Files', - 'unit_scale': False, - 'leave': True - } - for path in tqdm(iterable=matches, desc='Loading ImageSet', **kwargs): - images.append(image.Image(path, exiftool_obj=exift)) - else: - print('Loading ImageSet from: {}'.format(directory)) - for i, path in enumerate(matches): - images.append(image.Image(path, exiftool_obj=exift)) - if progress_callback is not None: - progress_callback(float(i) / float(len(matches))) + for i, path in enumerate(matches): + images.append(image.Image(path, exiftool_obj=exift, allow_uncalibrated=allow_uncalibrated)) + if progress_callback is not None: + progress_callback(float(i) / float(len(matches))) - # create a dictionary to index the images so we can sort them into captures + # create a dictionary to index the images, so we can sort them + # into captures # { # "capture_id": [img1, img2, ...] 
@@ -221,78 +129,34 @@ def dls_irradiance(self):
             dat = cap.utc_time().isoformat()
             irr = cap.dls_irradiance()
             series[dat] = irr
-
         return series

-    def process_imageset(self,
-                         output_stack_directory=None,
-                         output_rgb_directory=None,
-                         warp_matrices=None,
-                         irradiance=None,
-                         img_type=None,
-                         multiprocess=True,
-                         overwrite=False,
-                         progress_callback=None,
-                         use_tqdm=False):
-        """
-        Write band stacks and rgb thumbnails to disk.
-        :param warp_matrices: 2d List of warp matrices derived from Capture.get_warp_matrices()
-        :param output_stack_directory: str system file path to output stack directory
-        :param output_rgb_directory: str system file path to output thumbnail directory
-        :param irradiance: List returned from Capture.dls_irradiance() or Capture.panel_irradiance() <-- TODO: Write a better docstring for this
-        :param img_type: str 'radiance' or 'reflectance'. Desired image output type.
-        :param multiprocess: boolean True to use multiprocessing module
-        :param overwrite: boolean True to overwrite existing files
-        :param progress_callback: function to report progress to
-        :param use_tqdm: boolean True to use tqdm progress bar
-        """
-
-        if progress_callback is not None:
-            warnings.warn(message='The progress_callback parameter will be deprecated in favor of use_tqdm',
-                          category=PendingDeprecationWarning)
-
-        # ensure some output is requested
-        if output_stack_directory is None and output_rgb_directory is None:
-            raise RuntimeError('No output requested for the ImageSet.')
-
-        # make output dirs if not exist
-        if output_stack_directory is not None and not os.path.exists(output_stack_directory):
-            os.mkdir(output_stack_directory)
-        if output_rgb_directory is not None and not os.path.exists(output_rgb_directory):
-            os.mkdir(output_rgb_directory)
-
-        # processing parameters
-        params = {
-            'warp_matrices': warp_matrices,
-            'irradiance': irradiance,
-            'img_type': img_type,
-            'capture_len': len(self.captures[0].images),
-            'output_stack_dir': output_stack_directory,
-            'output_rgb_dir': output_rgb_directory,
-            'overwrite': overwrite,
-        }
+    def save_stacks(self, warp_matrices, stack_directory, thumbnail_directory=None, irradiance=None, multiprocess=True,
+                    overwrite=False, progress_callback=None):
+
+        if not os.path.exists(stack_directory):
+            os.makedirs(stack_directory)
+        if thumbnail_directory is not None and not os.path.exists(thumbnail_directory):
+            os.makedirs(thumbnail_directory)
+
+        save_params_list = []
+        for local_capture in self.captures:
+            save_params_list.append({
+                'output_path': stack_directory,
+                'thumbnail_path': thumbnail_directory,
+                'file_list': [img.path for img in local_capture.images],
+                'warp_matrices': warp_matrices,
+                'irradiance_list': irradiance,
+                'photometric': 'MINISBLACK',
+                'overwrite_existing': overwrite,
+            })

-        print('Processing {} Captures ...'.format(len(self.captures)))
-
-        # multiprocessing with concurrent futures
         if multiprocess:
-            parallel_process(function=save_capture, iterable=self.captures, parameters=params,
-                             progress_callback=progress_callback, use_tqdm=use_tqdm)
-
-        # else serial processing
+            pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
+            for i, _ in enumerate(pool.imap_unordered(save_capture, save_params_list)):
+                if progress_callback is not None:
+                    progress_callback(float(i) / float(len(save_params_list)))
+            pool.close()
+            pool.join()
         else:
-            if use_tqdm:
-                kwargs = {
-                    'total': len(self.captures),
-                    'unit': 'Capture',
-                    'unit_scale': False,
-                    'leave': True
-                }
-                for cap in tqdm(iterable=self.captures, desc='Processing ImageSet', **kwargs):
-                    save_capture(params, cap)
-            else:
-                for i, cap in enumerate(self.captures):
-                    save_capture(params, cap)
-                    if progress_callback is not None:
-                        progress_callback(float(i) / float(len(self.captures)))
-
-        print('Processing complete.')
+            for params in save_params_list:
+                save_capture(params)
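
A hedged usage sketch of the new save_stacks() API, with warp_matrices precomputed once via imageutils.align_capture() on a representative capture:

    import micasense.imageset as imageset

    imgset = imageset.ImageSet.from_directory('data/REDEDGE-P')
    imgset.save_stacks(warp_matrices,
                       stack_directory='output/stacks',
                       thumbnail_directory='output/thumbnails',
                       irradiance=None,      # pass a per-band irradiance list for reflectance
                       multiprocess=True)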
diff --git a/micasense/imageutils.py b/micasense/imageutils.py
index 643c365b..0c88d4d1 100644
--- a/micasense/imageutils.py
+++ b/micasense/imageutils.py
@@ -23,55 +23,142 @@
 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 """
-import cv2
+import multiprocessing
 import os
+
+import cv2
+import exiftool
 import numpy as np
-import multiprocessing
-from skimage import exposure
+from skimage.filters import gaussian, rank
+from skimage.morphology import binary_closing
 from skimage.morphology import disk
-from skimage.filters import rank, gaussian
+from skimage.transform import warp
 from skimage.util import img_as_ubyte

-def normalize(im, min=None, max=None):
+
+# start helper functions for finding a "hole"-free rectangle
+def get_longest_sequence(b):
+    if b.any():
+        d = np.diff(np.pad(b, 1))
+        xS = np.where(d > 0)[0]
+        xE = np.where(d < 0)[0]
+        ix = np.argmax(xE - xS)
+        return (xE[ix] - xS[ix]), xS[ix], xE[ix]
+    else:
+        return 0, 0, 0
+
+
+def max_hist_rect(h):
+    if h.sum() > 0:
+        maxArea = 0
+        xS = 0
+        xE = 0
+        vals = np.unique(np.sort(h))[::-1]
+        for v in vals:
+            b = (h >= v) * 1
+            if b.sum() * v > maxArea:
+                area, xs, xe = get_longest_sequence(b)
+                area *= v
+                if area > maxArea:
+                    # print("{:d}: {:d}, {:d}, {:d}".format(v,area,xs,xe))
+                    maxArea = area
+                    xS = xs
+                    xE = xe
+        return maxArea, xS, xE
+    else:
+        return 0, 0, 0
+
+
+def findoptimalrect(overlap, nbands=5):
+    omap = (overlap == nbands) * 1
+    w0 = 0
+    h0 = 0
+    x0 = 0
+    y0 = 0
+    a0 = 0
+    h = np.zeros(omap.shape[1])
+    hS = np.zeros_like(omap)
+    for y, o in enumerate(omap):
+        h += o
+        hS[y] = h
+        area, xS, xE = max_hist_rect(h)
+        if area > a0:
+            a0 = area
+            h0 = h[xS:xE].min()
+            w0 = xE - xS
+            y0 = y - h0
+            x0 = xS
+    ur = (int(x0), int(y0))
+    ll = (int(x0 + w0), int(y0 + h0))
+    return ur, ll
+
+
+def findoptimal_rect_noholes(overlap, nbands=5):
+    omap = (overlap == nbands) * 1
+    b = []
+    e = []
+    w = []
+    for o in omap:
+        ww, bb, ee = get_longest_sequence(o)
+        b.append(bb)
+        e.append(ee)
+        w.append(ww)
+    b = np.array(b)
+    e = np.array(e)
+    w = np.array(w)
+    area, y0, y1 = max_hist_rect(w)
+    x0 = b[y0:y1].max()
+    x1 = e[y0:y1].min()
+    return (x0, y0), (x1, y1)
+
+
+# end helper functions for finding a "hole"-free rectangle
+def normalize(im, local_min=None, local_max=None):
     width, height = im.shape
     norm = np.zeros((width, height), dtype=np.float32)
-    if min is not None and max is not None:
-        norm = (im - min) / (max-min)
+    if local_min is not None and local_max is not None:
+        norm = (im - local_min) / (local_max - local_min)
     else:
         cv2.normalize(im, dst=norm, alpha=0.0, beta=1.0,
                       norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
-    norm[norm<0.0] = 0.0
-    norm[norm>1.0] = 1.0
+    norm[norm < 0.0] = 0.0
+    norm[norm > 1.0] = 1.0
     return norm
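
A toy check of the hole-free-rectangle helpers above: mark where all five bands overlap and recover the corners of the inscribed, fully covered rectangle:

    import numpy as np

    overlap = np.zeros((8, 8), dtype=int)
    overlap[1:7, 2:8] = 5     # all 5 bands overlap here
    (x0, y0), (x1, y1) = findoptimal_rect_noholes(overlap, nbands=5)
    # -> (2, 1), (8, 7): the covered block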

 def local_normalize(im):
-    norm = img_as_ubyte(normalize(im)) # TODO: mainly using this as a type conversion, but it's expensive
+    norm = img_as_ubyte(normalize(im))  # TODO: mainly using this as a type conversion, but it's expensive
     width, _ = im.shape
-    disksize = int(width/5)
+    disksize = int(width / 5)
     if disksize % 2 == 0:
         disksize = disksize + 1
-    selem = disk(disksize)
-    norm2 = rank.equalize(norm, selem=selem)
+    footprint = disk(disksize)
+    norm2 = rank.equalize(norm, footprint=footprint)
     return norm2


 def gradient(im, ksize=5):
     im = local_normalize(im)
     # im = normalize(im)
-    grad_x = cv2.Sobel(im,cv2.CV_32F,1,0,ksize=ksize)
-    grad_y = cv2.Sobel(im,cv2.CV_32F,0,1,ksize=ksize)
+    grad_x = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=ksize)
+    grad_y = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=ksize)
     grad = cv2.addWeighted(np.absolute(grad_x), 0.5, np.absolute(grad_y), 0.5, 0)
     return grad


 def relatives_ref_band(capture):
     for img in capture.images:
-        if img.rig_xy_offset_in_px() == (0,0):
+        if img.rig_xy_offset_in_px() == (0, 0):
             return img.band_index()
-    return (0)
+    return 0


 def translation_from_ref(capture, band, ref=4):
-    x,y = capture.images[band].rig_xy_offset_in_px()
-    rx,ry = capture.images[ref].rig_xy_offset_in_px()
-    return
+    x, y = capture.images[band].rig_xy_offset_in_px()
+    rx, ry = capture.images[ref].rig_xy_offset_in_px()
+    # return this band's rig offset relative to the reference band
+    return x - rx, y - ry


 def align(pair):
     """ Determine an alignment matrix between two images
     @input:
@@ -109,20 +196,20 @@ def align(pair):
         warp_matrix = pair['warp_matrix_init']
     else:
         # warp_matrix = np.array([[1,0,0],[0,1,0]], dtype=np.float32)
-        warp_matrix = np.array([[1,0,translations[1]],[0,1,translations[0]]], dtype=np.float32)
+        warp_matrix = np.array([[1, 0, translations[1]], [0, 1, translations[0]]], dtype=np.float32)

     w = pair['ref_image'].shape[1]

     if pair['pyramid_levels'] is None:
-        nol = int(w / (1280/3)) - 1
+        nol = int(w / (1280 / 3)) - 1
     else:
         nol = pair['pyramid_levels']

     if pair['debug']:
         print("number of pyramid levels: {}".format(nol))

-    warp_matrix[0][2] /= (2**nol)
-    warp_matrix[1][2] /= (2**nol)
+    warp_matrix[0][2] /= (2 ** nol)
+    warp_matrix[1][2] /= (2 ** nol)

     if ref_index != match_index:
@@ -131,23 +218,23 @@ def align(pair):
         gray1 = pair['ref_image']
         gray2 = pair['match_image']
         if gray2.shape[0] < gray1.shape[0]:
-            cv2.resize(gray2, None, fx=gray1.shape[0]/gray2.shape[0], fy=gray1.shape[0]/gray2.shape[0],
-                    interpolation=cv2.INTER_AREA)
+            cv2.resize(gray2, None, fx=gray1.shape[0] / gray2.shape[0], fy=gray1.shape[0] / gray2.shape[0],
+                       interpolation=cv2.INTER_AREA)
         gray1_pyr = [gray1]
         gray2_pyr = [gray2]

         for level in range(nol):
             gray1_pyr[0] = gaussian(normalize(gray1_pyr[0]))
-            gray1_pyr.insert(0, cv2.resize(gray1_pyr[0], None, fx=1/2, fy=1/2,
-                            interpolation=cv2.INTER_AREA))
+            gray1_pyr.insert(0, cv2.resize(gray1_pyr[0], None, fx=1 / 2, fy=1 / 2,
+                                           interpolation=cv2.INTER_AREA))
             gray2_pyr[0] = gaussian(normalize(gray2_pyr[0]))
-            gray2_pyr.insert(0, cv2.resize(gray2_pyr[0], None, fx=1/2, fy=1/2,
-                            interpolation=cv2.INTER_AREA))
+            gray2_pyr.insert(0, cv2.resize(gray2_pyr[0], None, fx=1 / 2, fy=1 / 2,
+                                           interpolation=cv2.INTER_AREA))

         # Terminate the optimizer if either the max iterations or the threshold are reached
         criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iterations, epsilon_threshold)
         # run pyramid ECC
-        for level in range(nol+1):
+        for level in range(nol + 1):
             grad1 = gradient(gray1_pyr[level])
             grad2 = gradient(gray2_pyr[level])

@@ -157,82 +244,89 @@ def align(pair):
                 plotutils.plotwithcolorbar(gray2_pyr[level], "match level {}".format(level))
                 plotutils.plotwithcolorbar(grad1, "ref grad level {}".format(level))
                 plotutils.plotwithcolorbar(grad2, "match grad level {}".format(level))
-                print("Starting warp for level {} is:\n {}".format(level,warp_matrix))
+                print("Starting warp for level {} is:\n {}".format(level, warp_matrix))

             try:
-                cc, warp_matrix = cv2.findTransformECC(grad1, grad2, warp_matrix, warp_mode, criteria, inputMask=None, gaussFiltSize=1)
+                cc, warp_matrix = cv2.findTransformECC(grad1, grad2, warp_matrix, warp_mode, criteria,
+                                                       inputMask=None, gaussFiltSize=1)
             except TypeError:
                 cc, warp_matrix = cv2.findTransformECC(grad1, grad2, warp_matrix, warp_mode, criteria)
-
+
             if show_debug_images:
-                print("Warp after alignment level {} is \n{}".format(level,warp_matrix))
+                print("Warp after alignment level {} is \n{}".format(level, warp_matrix))

             if level != nol:
                 # scale up only the offset by a factor of 2 for the next (larger image) pyramid level
                 if warp_mode == cv2.MOTION_HOMOGRAPHY:
-                    warp_matrix = warp_matrix * np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32)
+                    warp_matrix = warp_matrix * np.array([[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32)
                 else:
-                    warp_matrix = warp_matrix * np.array([[1,1,2],[1,1,2]], dtype=np.float32)
-
-
+                    warp_matrix = warp_matrix * np.array([[1, 1, 2], [1, 1, 2]], dtype=np.float32)

     return {'ref_index': pair['ref_index'],
             'match_index': pair['match_index'],
-            'warp_matrix': warp_matrix }
+            'warp_matrix': warp_matrix}


 def default_warp_matrix(warp_mode):
     if warp_mode == cv2.MOTION_HOMOGRAPHY:
-        return np.array([[1,0,0],[0,1,0],[0,0,1]], dtype=np.float32)
+        return np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
     else:
-        return np.array([[1,0,0],[0,1,0]], dtype=np.float32)
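
Moving one pyramid level up doubles pixel coordinates, so only the translation terms (and, for a homography, the projective row) are rescaled between levels, exactly as in align() above:

    import numpy as np

    def scale_warp_up(warp_matrix):
        if warp_matrix.shape == (3, 3):   # homography
            return warp_matrix * np.array([[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32)
        return warp_matrix * np.array([[1, 1, 2], [1, 1, 2]], dtype=np.float32)   # affine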

-def align_capture(capture, ref_index=1, warp_mode=cv2.MOTION_HOMOGRAPHY, max_iterations=2500, epsilon_threshold=1e-9, multithreaded=True, debug=False, pyramid_levels = None):
-    '''Align images in a capture using openCV
+
+def align_capture(capture, ref_index=None, warp_mode=cv2.MOTION_HOMOGRAPHY, max_iterations=2500,
+                  epsilon_threshold=1e-9, multithreaded=True, debug=False, pyramid_levels=None):
+    """Align images in a capture using openCV
     MOTION_TRANSLATION sets a translational motion model; warpMatrix is 2x3 with the first 2x2 part being the unity matrix and the rest two parameters being estimated.
     MOTION_EUCLIDEAN sets a Euclidean (rigid) transformation as motion model; three parameters are estimated; warpMatrix is 2x3.
     MOTION_AFFINE sets an affine motion model (DEFAULT); six parameters are estimated; warpMatrix is 2x3.
     MOTION_HOMOGRAPHY sets a homography as a motion model; eight parameters are estimated; `warpMatrix` is 3x3.
     best results will be AFFINE and HOMOGRAPHY, at the expense of speed
-    '''
+    """
+    if ref_index is None:
+        if capture.camera_model == 'Altum' or capture.camera_model == 'RedEdge-M' or capture.camera_model == 'RedEdge':
+            ref_index = 1
+        if capture.camera_model == 'RedEdge-P' or capture.camera_model == 'Altum-PT':
+            ref_index = 5
     # Match other bands to this reference image (index into capture.images[])
     ref_img = capture.images[ref_index].undistorted(capture.images[ref_index].radiance()).astype('float32')

     if capture.has_rig_relatives():
         warp_matrices_init = capture.get_warp_matrices(ref_index=ref_index)
     else:
-        warp_matrices_init = [default_warp_matrix(warp_mode)]*len(capture.images)
+        warp_matrices_init = [default_warp_matrix(warp_mode)] * len(capture.images)

     alignment_pairs = []
-    for i,img in enumerate(capture.images):
+    for i, img in enumerate(capture.images):
         if img.rig_relatives is not None:
             translations = img.rig_xy_offset_in_px()
         else:
-            translations = (0,0)
+            translations = (0, 0)
         if img.band_name != 'LWIR':
             alignment_pairs.append({'warp_mode': warp_mode,
                                     'max_iterations': max_iterations,
                                     'epsilon_threshold': epsilon_threshold,
-                                    'ref_index':ref_index,
+                                    'ref_index': ref_index,
                                     'ref_image': ref_img,
-                                    'match_index':i,
-                                    'match_image':img.undistorted(img.radiance()).astype('float32'),
+                                    'match_index': img.band_index,
+                                    'match_image': img.undistorted(img.radiance()).astype('float32'),
                                     'translations': translations,
                                     'warp_matrix_init': np.array(warp_matrices_init[i], dtype=np.float32),
                                     'debug': debug,
                                     'pyramid_levels': pyramid_levels})
-    warp_matrices = [None]*len(alignment_pairs)
+    warp_matrices = [None] * len(alignment_pairs)

-    #required to work across linux/mac/windows, see https://stackoverflow.com/questions/47852237
+    # required to work across linux/mac/windows, see https://stackoverflow.com/questions/47852237
     if multithreaded and multiprocessing.get_start_method() != 'spawn':
         try:
-            multiprocessing.set_start_method('spawn',force=True)
+            multiprocessing.set_start_method('spawn', force=True)
         except ValueError:
             multithreaded = False

-    if(multithreaded):
+    if multithreaded:
         pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
-        for _,mat in enumerate(pool.imap_unordered(align, alignment_pairs)):
+        for _, mat in enumerate(pool.imap_unordered(align, alignment_pairs)):
             warp_matrices[mat['match_index']] = mat['warp_matrix']
             print("Finished aligning band {}".format(mat['match_index']))
         pool.close()
         pool.join()
     else:
@@ -240,49 +334,52 @@ def align_capture(capture, ref_index=1, warp_mode=cv2.MOTION_HOMOGRAPHY, max_ite
         for pair in alignment_pairs:
             mat = align(pair)
             warp_matrices[mat['match_index']] = mat['warp_matrix']
             print("Finished aligning band {}".format(mat['match_index']))

     if capture.images[-1].band_name == 'LWIR':
         img = capture.images[-1]
         alignment_pairs.append({'warp_mode': warp_mode,
                                 'max_iterations': max_iterations,
                                 'epsilon_threshold': epsilon_threshold,
-                                'ref_index':ref_index,
+                                'ref_index': ref_index,
                                 'ref_image': ref_img,
-                                'match_index':img.band_index,
-                                'match_image':img.undistorted(img.radiance()).astype('float32'),
+                                'match_index': img.band_index,
+                                'match_image': img.undistorted(img.radiance()).astype('float32'),
                                 'translations': translations,
                                 'debug': debug})
         warp_matrices.append(capture.get_warp_matrices(ref_index)[-1])
     return warp_matrices, alignment_pairs
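
End to end, the alignment entry points compose as follows; a hedged sketch with cap as a loaded Capture from a RedEdge-P (panchro reference at index 5):

    import cv2
    import micasense.imageutils as imageutils

    warp_matrices, alignment_pairs = imageutils.align_capture(cap, ref_index=5)
    cropped_dimensions, edges = imageutils.find_crop_bounds(cap, warp_matrices)
    stack = imageutils.aligned_capture(cap, warp_matrices, cv2.MOTION_HOMOGRAPHY,
                                       cropped_dimensions, match_index=5, img_type='radiance')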

-# apply homography to create an aligned stack
-def aligned_capture(capture, warp_matrices, warp_mode, cropped_dimensions, match_index, img_type = 'reflectance',interpolation_mode=cv2.INTER_LANCZOS4):
-    width, height = capture.images[0].size()
-    im_aligned = np.zeros((height,width,len(warp_matrices)), dtype=np.float32 )
+# apply homography to create an aligned stack
+def aligned_capture(capture, warp_matrices, warp_mode, cropped_dimensions, match_index, img_type='reflectance',
+                    interpolation_mode=cv2.INTER_LANCZOS4):
+    width, height = capture.images[match_index].size()
+
+    im_aligned = np.zeros((height, width, len(warp_matrices)), dtype=np.float32)

-    for i in range(0,len(warp_matrices)):
+    for i in range(0, len(warp_matrices)):
         if img_type == 'reflectance':
             img = capture.images[i].undistorted_reflectance()
         else:
             img = capture.images[i].undistorted_radiance()
         if warp_mode != cv2.MOTION_HOMOGRAPHY:
-            im_aligned[:,:,i] = cv2.warpAffine(img,
-                                            warp_matrices[i],
-                                            (width,height),
-                                            flags=interpolation_mode + cv2.WARP_INVERSE_MAP)
+            im_aligned[:, :, i] = cv2.warpAffine(img,
+                                                 warp_matrices[i],
+                                                 (width, height),
+                                                 flags=interpolation_mode + cv2.WARP_INVERSE_MAP)
         else:
-            im_aligned[:,:,i] = cv2.warpPerspective(img,
-                                                warp_matrices[i],
-                                                (width,height),
-                                                flags=interpolation_mode + cv2.WARP_INVERSE_MAP)
+            im_aligned[:, :, i] = cv2.warpPerspective(img,
+                                                      warp_matrices[i],
+                                                      (width, height),
+                                                      flags=interpolation_mode + cv2.WARP_INVERSE_MAP)
     (left, top, w, h) = tuple(int(i) for i in cropped_dimensions)
-    im_cropped = im_aligned[top:top+h, left:left+w][:]
+    im_cropped = im_aligned[top:top + h, left:left + w][:]

     return im_cropped


 class BoundPoint(object):
     def __init__(self, x=0, y=0):
         self.x = x
@@ -294,6 +391,7 @@ def __str__(self):
     def __repr__(self):
         return self.__str__()


 class Bounds(object):
     def __init__(self):
         arbitrary_large_value = 100000000
@@ -306,7 +404,8 @@ def __str__(self):
     def __repr__(self):
         return self.__str__()

-def find_crop_bounds(capture,registration_transforms,warp_mode=cv2.MOTION_HOMOGRAPHY):
+
+def find_crop_bounds(capture, registration_transforms, warp_mode=cv2.MOTION_HOMOGRAPHY, reference_band=0):
     """Compute the crop rectangle to be applied to a set of images after
     registration such that no pixel in the resulting stack of images will
     include a blank value for any of the bands
@@ -323,34 +422,36 @@ def find_crop_bounds(capture,registration_transforms,warp_mode=cv2.MOTION_HOMOGR
     """
     image_sizes = [image.size() for image in capture.images]
     lens_distortions = [image.cv2_distortion_coeff() for image in capture.images]
-    camera_matrices = [image.cv2_camera_matrix() for image in capture.images]
-
-    bounds = [get_inner_rect(s, a, d, c,warp_mode=warp_mode)[0] for s, a, d, c in zip(image_sizes,registration_transforms, lens_distortions, camera_matrices)]
-    edges = [get_inner_rect(s, a, d, c,warp_mode=warp_mode)[1] for s, a, d, c in zip(image_sizes,registration_transforms, lens_distortions, camera_matrices)]
-    combined_bounds = get_combined_bounds(bounds, image_sizes[0])
+    camera_matrices = [image.cv2_camera_matrix() for image in capture.images]
+    bounds = [get_inner_rect(s, a, d, c, warp_mode=warp_mode)[0] for s, a, d, c in
+              zip(image_sizes, registration_transforms, lens_distortions, camera_matrices)]
+    edges = [get_inner_rect(s, a, d, c, warp_mode=warp_mode)[1] for s, a, d, c in
+             zip(image_sizes, registration_transforms, lens_distortions, camera_matrices)]
+    combined_bounds = get_combined_bounds(bounds, image_sizes[reference_band])
     left = np.ceil(combined_bounds.min.x)
     top = np.ceil(combined_bounds.min.y)
     width = np.floor(combined_bounds.max.x - combined_bounds.min.x)
     height = np.floor(combined_bounds.max.y - combined_bounds.min.y)
-    return (left, top, width, height),edges
+    return (left, top, width, height), edges


-def get_inner_rect(image_size, affine, distortion_coeffs, camera_matrix,warp_mode=cv2.MOTION_HOMOGRAPHY):
+def get_inner_rect(image_size, affine, distortion_coeffs, camera_matrix, warp_mode=cv2.MOTION_HOMOGRAPHY):
     w = image_size[0]
     h = image_size[1]

-    left_edge = np.array([np.ones(h)*0, np.arange(0, h)]).T
-    right_edge = np.array([np.ones(h)*(w-1), np.arange(0, h)]).T
-    top_edge = np.array([np.arange(0, w), np.ones(w)*0]).T
-    bottom_edge = np.array([np.arange(0, w), np.ones(w)*(h-1)]).T
+    left_edge = np.array([np.ones(h) * 0, np.arange(0, h)]).T
+    right_edge = np.array([np.ones(h) * (w - 1), np.arange(0, h)]).T
+    top_edge = np.array([np.arange(0, w), np.ones(w) * 0]).T
+    bottom_edge = np.array([np.arange(0, w), np.ones(w) * (h - 1)]).T

-    left_map = map_points(left_edge, image_size, affine, distortion_coeffs, camera_matrix,warp_mode=warp_mode)
+    left_map = map_points(left_edge, image_size, affine, distortion_coeffs, camera_matrix, warp_mode=warp_mode)
     left_bounds = min_max(left_map)
-    right_map = map_points(right_edge, image_size, affine, distortion_coeffs, camera_matrix,warp_mode=warp_mode)
+    right_map = map_points(right_edge, image_size, affine, distortion_coeffs, camera_matrix, warp_mode=warp_mode)
     right_bounds = min_max(right_map)
-    top_map = map_points(top_edge, image_size, affine, distortion_coeffs, camera_matrix,warp_mode=warp_mode)
+    top_map = map_points(top_edge, image_size, affine, distortion_coeffs, camera_matrix, warp_mode=warp_mode)
     top_bounds = min_max(top_map)
-    bottom_map = map_points(bottom_edge, image_size, affine, distortion_coeffs, camera_matrix,warp_mode=warp_mode)
+    bottom_map = map_points(bottom_edge, image_size, affine, distortion_coeffs, camera_matrix, warp_mode=warp_mode)
     bottom_bounds = min_max(bottom_map)

     bounds = Bounds()
@@ -358,8 +459,9 @@ def get_inner_rect(image_size, affine, distortion_coeffs, camera_matrix,warp_mod
     bounds.max.y = bottom_bounds.min.y
     bounds.min.x = left_bounds.max.x
     bounds.min.y = top_bounds.max.y
-    edges = (left_map,right_map,top_map,bottom_map)
-    return bounds,edges
+    edges = (left_map, right_map, top_map, bottom_map)
+    return bounds, edges


 def get_combined_bounds(bounds, image_size):
     w = image_size[0]
@@ -380,8 +482,8 @@ def get_combined_bounds(bounds, image_size):
     # limit to image size
     final.min.x = max(final.min.x, 0)
     final.min.y = max(final.min.y, 0)
-    final.max.x = min(final.max.x, w-1)
-    final.max.y = min(final.max.y, h-1)
+    final.max.x = min(final.max.x, w - 1)
+    final.max.y = min(final.max.y, h - 1)
     # Add 1 px of margin (remove one pixel on all sides)
     final.min.x += 1
     final.min.y += 1
@@ -390,6 +492,7 @@ def get_combined_bounds(bounds, image_size):
     return final


 def min_max(pts):
     bounds = Bounds()
     for p in pts:
@@ -403,7 +506,8 @@ def min_max(pts):
             bounds.min.y = p[1]
     return bounds

-def map_points(pts, image_size, warpMatrix, distortion_coeffs, camera_matrix,warp_mode=cv2.MOTION_HOMOGRAPHY):
+
+def map_points(pts, image_size, warpMatrix, distortion_coeffs, camera_matrix, warp_mode=cv2.MOTION_HOMOGRAPHY):
     # extra dimension makes opencv happy
-    pts = np.array([pts], dtype=np.float)
+    pts = np.array([pts], dtype=np.float32)
     new_cam_mat, _ = cv2.getOptimalNewCameraMatrix(camera_matrix, distortion_coeffs, image_size, 1)
@@ -411,10 +515,187 @@ def map_points(pts, image_size, warpMatrix, distortion_coeffs, camera_matrix,war
     if warp_mode == cv2.MOTION_AFFINE:
        new_pts = cv2.transform(new_pts, cv2.invertAffineTransform(warpMatrix))
     if warp_mode == cv2.MOTION_HOMOGRAPHY:
-        new_pts =cv2.perspectiveTransform(new_pts,np.linalg.inv(warpMatrix).astype(np.float32))
-    #apparently the output order has changed in 4.1.1 (possibly earlier from 3.4.3)
-    if cv2.__version__<='3.4.4':
+        new_pts = cv2.perspectiveTransform(new_pts, np.linalg.inv(warpMatrix).astype(np.float32))
+    # apparently the output order has changed in 4.1.1 (possibly earlier from 3.4.3)
+    if cv2.__version__ <= '3.4.4':
         return new_pts[0]
     else:
-        return new_pts[:,0,:]
-
+        return new_pts[:, 0, :]
+
+
+def save_capture(params):
+    import micasense.capture as capture
+    cap = capture.Capture.from_filelist(params['file_list'])
+    output_filename = cap.uuid + '.tif'
+    full_output_path = os.path.join(params['output_path'], output_filename)
+    # check the full output path, not just the bare filename, before skipping
+    if os.path.exists(full_output_path) and not params['overwrite_existing']:
+        return output_filename
+
+    cap.create_aligned_capture(irradiance_list=params['irradiance_list'], warp_matrices=params['warp_matrices'])
+    cap.save_capture_as_stack(full_output_path, sort_by_wavelength=True, photometric=params['photometric'])
+
+    if params['thumbnail_path'] is not None:
+        thumbnailFilename = cap.uuid
+        fullThumbnailPath = os.path.join(params['thumbnail_path'], thumbnailFilename)
+        rgb_band_indices = [cap.band_names_lower().index('red'), cap.band_names_lower().index('green'),
+                            cap.band_names_lower().index('blue')]
+        cap.save_capture_as_rgb(fullThumbnailPath + '_rgb.jpg', rgb_band_indices=rgb_band_indices,
+                                gamma=1.8)  # original indices, not sorted
+
+    return output_filename
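
For reference, a hedged sketch of the parameter dict save_capture() expects; these are the same keys ImageSet.save_stacks() assembles per capture, with file_list and warp_matrices assumed to be in scope:

    params = {
        'file_list': file_list,                  # one band TIFF path per image in the capture
        'output_path': 'output/stacks',
        'thumbnail_path': 'output/thumbnails',   # or None to skip thumbnails
        'warp_matrices': warp_matrices,          # from align_capture()
        'irradiance_list': None,                 # None -> radiance stacks
        'photometric': 'MINISBLACK',
        'overwrite_existing': False,
    }
    stack_filename = save_capture(params)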


+def brovey_pan_sharpen(thecapture, weights=None):
+    # https://desktop.arcgis.com/en/arcmap/10.3/manage-data/raster-and-images/fundamentals-of-panchromatic-sharpening.htm
+    # note that this algorithm is the simplest possible pansharpening algorithm and NOT radiometrically correct
+    if weights is None:
+        weights = [0.0725, 0.0745, 0.353, 0.070, 0.0296]
+    cam_model = thecapture.camera_model
+    if cam_model not in ('RedEdge-P', 'Altum-PT'):
+        raise Exception("Pan-sharpening only works with RedEdge-P and Altum-PT")
+    # the aligned band stack for this capture, with the panchromatic band at index 5
+    # (assumption: create_aligned_capture() defaults are appropriate here)
+    input_stack = thecapture.create_aligned_capture()
+    blue = input_stack[:, :, 0]
+    green = input_stack[:, :, 1]
+    red = input_stack[:, :, 2]
+    rededge = input_stack[:, :, 4]
+    nir = input_stack[:, :, 3]
+    panchro = input_stack[:, :, 5]
+    output_band_count = input_stack.shape[2] - 1
+    output_stack = np.zeros((input_stack.shape[0], input_stack.shape[1], output_band_count), dtype=np.float32)
+    spectral_weight = np.zeros_like(panchro)
+    for band in range(0, 5):
+        spectral_weight += input_stack[:, :, band] * weights[band]
+    # avoid division by zero for some pixels
+    spectral_weight[spectral_weight < 1e-9] = 1.
+
+    PW = input_stack[:, :, 5] / spectral_weight
+    for band in range(0, 5):
+        output_stack[:, :, band] = input_stack[:, :, band] * PW
+    if cam_model == 'Altum-PT':
+        # carry the Altum-PT thermal band through unchanged
+        output_stack[:, :, 5] = input_stack[:, :, 6]
+    return output_stack
+
+
+def radiometric_pan_sharpen(capture, warp_matrices=None, panchro_band=5, irradiance_list=None):
+    # this function performs a radiometrically accurate pansharpening on the input capture
+    # if no warp matrices are supplied to align the multispec images to the pan band,
+    # the camera calibration is used (which produces reasonably well aligned images in most cases)
+    # in addition to the pan sharpened stack, the equivalent upsampled stack is also produced
+    # for comparison
+    # use the warp matrices we have for the stack, if not user supplied
+    if warp_matrices is None:
+        print("No SIFT warp matrices provided.")
+        warp_matrices = capture.get_warp_matrices(ref_index=panchro_band)
+    h, w = capture.images[panchro_band].raw().shape
+    if irradiance_list is None:
+        pan = capture.images[panchro_band].undistorted_radiance()
+    else:
+        capture.compute_undistorted_reflectance(irradiance_list=irradiance_list)
+        pan = capture.images[panchro_band].undistorted_reflectance()
+
+    # we need this, because the panchro band doesn't necessarily fully cover the multispec bands
+    pan_mask = binary_closing(pan > 1e-4)
+    if 'LWIR' in capture.band_names():
+        sigma = 12 / 3008 * h
+        panf = gaussian(pan, sigma)
+    pan[pan < 1e-4] = 1.0
+    pansharpened_stack = []
+    upsampled_stack = []
+    overlap = np.zeros_like(pan)
+    for i, wm in zip(capture.images, warp_matrices):
+        if irradiance_list is None:
+            img = i.undistorted_radiance()
+        else:
+            img = i.undistorted_reflectance()
+        hLow, wLow = img.shape
+        if i.band_index != panchro_band:
+            if i.band_name == 'LWIR':
+                pLow = warp(panf, np.linalg.inv(wm), output_shape=(hLow, wLow), preserve_range=True)
+            else:
+                pLow = warp(pan, np.linalg.inv(wm), output_shape=(hLow, wLow), preserve_range=True)
+            panMaskLow = warp(pan_mask, np.linalg.inv(wm), output_shape=(hLow, wLow))
+            pLow[pLow <= 1e-4] = 1.0
+            if irradiance_list is None:
+                mask = binary_closing(i.undistorted_radiance() > 1e-4)
+                r = i.undistorted_radiance() / pLow
+            else:
+                mask = binary_closing(i.undistorted_reflectance() > 1e-4)
+                r = i.undistorted_reflectance() / pLow
+            if i.band_name == 'LWIR':
+                H = warp(r, wm, output_shape=(h, w)) * panf
+            else:
+                H = warp(r, wm, output_shape=(h, w)) * pan
+            if irradiance_list is None:
+                U = warp(i.undistorted_radiance(), wm, output_shape=(h, w), preserve_range=True)
+            else:
+                U = warp(i.undistorted_reflectance(), wm, output_shape=(h, w), preserve_range=True)
+            overlap += warp(mask * panMaskLow, wm, output_shape=(h, w))
+            pansharpened_stack.append(H)
+            upsampled_stack.append(U)
+        else:
+            overlap += (pan_mask > 0)
+            pansharpened_stack.append(pan)
+            upsampled_stack.append(pan)
+
+    N = len(capture.images)
+    (x0, y0), (x1, y1) = findoptimal_rect_noholes(overlap, nbands=N)
+    pansharpened_stack = np.moveaxis(np.array(pansharpened_stack), 0, 2)[y0 + 1:y1 - 1, x0 + 1:x1 - 1, :]
+    upsampled_stack = np.moveaxis(np.array(upsampled_stack), 0, 2)[y0 + 1:y1 - 1, x0 + 1:x1 - 1, :]
+
+    return pansharpened_stack, upsampled_stack
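
A hedged usage sketch of radiometric_pan_sharpen(), with cap a RedEdge-P or Altum-PT Capture, wm optional SIFT-refined warp matrices, and panel_irradiance an optional per-band irradiance list:

    pansharpened, upsampled = radiometric_pan_sharpen(cap)
    # or, with refined alignment and reflectance output:
    pansharpened, upsampled = radiometric_pan_sharpen(cap, warp_matrices=wm,
                                                      irradiance_list=panel_irradiance)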
+
+
+# if writing exif for multiple capture sets, it is faster to do it all at the end of the
+# tiff creation instead of writing the exif data as each tiff is created
+def prepare_exif_for_stacks(thecapture, thefilename):
+    lat, lon, alt = thecapture.location()
+    resolution = thecapture.images[0].focal_plane_resolution_px_per_mm
+    # eventually it would be nice to add the capture ID, flight ID,
+    # and yaw/pitch/roll, but these are non-standard exif tags,
+    # so it is more difficult.
+    attitude = thecapture.dls_pose()
+    theid = thecapture.uuid
+    flightid = thecapture.flightid
+    focallength = thecapture.focal_length()
+    latdir = 'N'
+    if lat < 0:
+        latdir = 'S'
+    londir = 'E'
+    if lon < 0:
+        londir = 'W'
+    exif_data = [{"Capture ID": theid}, {"Filename": thefilename}, {"Model": str(thecapture.camera_model)},
+                 {"GPSDateStamp": thecapture.utc_time().strftime("%Y:%m:%d")},
+                 {"GPSTimeStamp": thecapture.utc_time().strftime("%H:%M:%S.%f")}, {"GPSLatitude": str(lat)},
+                 {"GPSLatitudeRef": latdir}, {"GPSLongitude": str(lon)}, {"GPSLongitudeRef": londir},
+                 {"GPSAltitude": str(alt) + " m Above Sea Level"}, {"GPSAltitudeRef": "0"},
+                 {"FocalLength": str(focallength)}, {"XResolution": str(resolution[0])},
+                 {"YResolution": str(resolution[1])}, {"ResolutionUnits": "mm"}]
+    return exif_data
+
+
+def write_exif_to_stack(thecapture=None, thefilename=None, existing_exif_list=None):
+    # without the overwrite_original flag, twice as many stacks will be created
+    overwrite = str.encode("-overwrite_original")
+    if thecapture and thefilename:
+        exif_data = prepare_exif_for_stacks(thecapture, thefilename)
+    elif existing_exif_list:
+        exif_data = existing_exif_list
+    else:
+        raise Exception(
+            "Please provide an existing capture object and filename or a list of existing exif data for batch processing")
+    exif_bytes_list = []
+    for exif in exif_data:
+        for key, val in exif.items():
+            if key != 'Capture ID' and key != 'Filename':
+                new_value = str.encode("-" + key + "=" + val)
+                exif_bytes_list.append(new_value)
+            if key == 'Filename':
+                thefilename = val
+    thefilename = str.encode(thefilename)
+
+    with exiftool.ExifTool() as et:
+        et.execute(*exif_bytes_list, overwrite, thefilename)
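
A sketch of the batch pattern the comment above describes: collect per-stack exif lists while writing TIFFs, then write all tags at the end; captures and stack_names are assumed to be in scope:

    exif_lists = []
    for cap, stack_name in zip(captures, stack_names):
        exif_lists.append(prepare_exif_for_stacks(cap, stack_name))
    for exif_list in exif_lists:
        write_exif_to_stack(existing_exif_list=exif_list)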
diff --git a/micasense/metadata.py b/micasense/metadata.py
index fbffb249..051b8350 100644
--- a/micasense/metadata.py
+++ b/micasense/metadata.py
@@ -25,20 +25,23 @@
 # Support strings in Python 2 and 3
 from __future__ import unicode_literals

-import exiftool
+import math
+import os
 from datetime import datetime, timedelta
+
+import exiftool
 import pytz
-import os
-import math
+

 class Metadata(object):
-    ''' Container for Micasense image metadata'''
-    def __init__(self, filename, exiftoolPath=None, exiftool_obj=None):
+    """ Container for Micasense image metadata"""
+
+    def __init__(self, filename, exiftool_path=None, exiftool_obj=None):
         if exiftool_obj is not None:
             self.exif = exiftool_obj.get_metadata(filename)
             return
-        if exiftoolPath is not None:
-            self.exiftoolPath = exiftoolPath
+        if exiftool_path is not None:
+            self.exiftoolPath = exiftool_path
         elif os.environ.get('exiftoolpath') is not None:
             self.exiftoolPath = os.path.normpath(os.environ.get('exiftoolpath'))
         else:
@@ -49,26 +52,25 @@ def __init__(self, filename, exiftoolPath=None, exiftool_obj=None):
             self.exif = exift.get_metadata(filename)

     def get_all(self):
-        ''' Get all extracted metadata items '''
+        """ Get all extracted metadata items """
         return self.exif

     def get_item(self, item, index=None):
-        ''' Get metadata item by Namespace:Parameter'''
+        """ Get metadata item by Namespace:Parameter"""
         val = None
         try:
             val = self.exif[item]
             if index is not None:
                 try:
                     if isinstance(val, unicode):
-                        val = val.encode('ascii','ignore')
+                        val = val.encode('ascii', 'ignore')
                 except NameError:
-                    #throws on python 3 where unicode is undefined
+                    # throws on python 3 where unicode is undefined
                     pass
-                if isinstance(val,str) and len(val.split(',')) > 1:
+                if isinstance(val, str) and len(val.split(',')) > 1:
                     val = val.split(',')
                 val = val[index]
         except KeyError:
-            #print ("Item "+item+" not found")
             pass
         except IndexError:
             print("Item {0} is length {1}, index {2} is outside this range.".format(
@@ -78,15 +80,15 @@ def get_item(self, item, index=None):
         return val

     def size(self, item):
-        '''get the size (length) of a metadata item'''
+        """get the size (length) of a metadata item"""
         val = self.get_item(item)
         try:
             if isinstance(val, unicode):
-                val = val.encode('ascii','ignore')
+                val = val.encode('ascii', 'ignore')
         except NameError:
-            #throws on python 3 where unicode is undefined
+            # throws on python 3 where unicode is undefined
             pass
-        if isinstance(val,str) and len(val.split(',')) > 1:
+        if isinstance(val, str) and len(val.split(',')) > 1:
             val = val.split(',')
         if val is not None:
             return len(val)
@@ -99,33 +101,33 @@ def print_all(self):

     def dls_present(self):
         return self.get_item("XMP:Irradiance") is not None \
-            or self.get_item("XMP:HorizontalIrradiance") is not None \
-            or self.get_item("XMP:DirectIrradiance") is not None
+               or self.get_item("XMP:HorizontalIrradiance") is not None \
+               or self.get_item("XMP:DirectIrradiance") is not None

     def supports_radiometric_calibration(self):
-        if(self.get_item('XMP:RadiometricCalibration')) is None:
+        if self.get_item('XMP:RadiometricCalibration') is None:
             return False
         return True

     def position(self):
-        '''get the WGS-84 latitude, longitude tuple as signed decimal degrees'''
+        """get the WGS-84 latitude, longitude tuple as signed decimal degrees"""
         lat = self.get_item('EXIF:GPSLatitude')
         latref = self.get_item('EXIF:GPSLatitudeRef')
-        if latref=='S':
+        if latref == 'S':
             lat *= -1.0
         lon = self.get_item('EXIF:GPSLongitude')
         lonref = self.get_item('EXIF:GPSLongitudeRef')
-        if lonref=='W':
+        if lonref == 'W':
             lon *= -1.0
         alt = self.get_item('EXIF:GPSAltitude')
         return lat, lon, alt

     def utc_time(self):
-        ''' Get the timezone-aware datetime of the image capture '''
+        """ Get the timezone-aware datetime of the image capture """
         str_time = self.get_item('EXIF:DateTimeOriginal')
-        if str_time is not None:
+        if str_time:
             utc_time = datetime.strptime(str_time, "%Y:%m:%d %H:%M:%S")
-            subsec = int(self.get_item('EXIF:SubSecTime'))
-            negative = 1.0
-            if subsec < 0:
-                negative = -1.0
+            # parse SubSecTime as a decimal fraction so leading zeros are preserved
+            # (e.g. "04" -> 0.04 s, where int() would give 0.4 s)
+            subsec = float(f"0.{self.get_item('EXIF:SubSecTime')}")
@@ -133,7 +135,7 @@ def utc_time(self):
-            subsec = float('0.{}'.format(int(subsec)))
-            subsec *= negative
             ms = subsec * 1e3
-            utc_time += timedelta(milliseconds = ms)
+            utc_time += timedelta(milliseconds=ms)
             timezone = pytz.timezone('UTC')
             utc_time = timezone.localize(utc_time)
         else:
@@ -141,9 +143,9 @@ def utc_time(self):
         return utc_time

     def dls_pose(self):
-        ''' get DLS pose as local earth-fixed yaw, pitch, roll in radians '''
+        """ get DLS pose as local earth-fixed yaw, pitch, roll in radians """
         if self.get_item('XMP:Yaw') is not None:
-            yaw = float(self.get_item('XMP:Yaw')) # should be XMP.DLS.Yaw, but exiftool doesn't expose it that way
+            yaw = float(self.get_item('XMP:Yaw'))  # should be XMP.DLS.Yaw, but exiftool doesn't expose it that way
             pitch = float(self.get_item('XMP:Pitch'))
             roll = float(self.get_item('XMP:Roll'))
         else:
@@ -157,6 +159,13 @@ def rig_relatives(self):
         else:
             return None

+    def rig_translations(self):
+        if self.get_item('XMP:RigTranslations') is not None:
+            nelem = self.size('XMP:RigTranslations')
+            return [float(self.get_item('XMP:RigTranslations', i)) for i in range(nelem)]
+        else:
+            return None
+
     def capture_id(self):
         return self.get_item('XMP:CaptureId')

@@ -169,6 +178,9 @@ def camera_make(self):
     def camera_model(self):
         return self.get_item('EXIF:Model')

+    def camera_serial(self):
+        return self.get_item('EXIF:SerialNumber')
+
     def firmware_version(self):
         return self.get_item('EXIF:Software')

@@ -182,12 +194,12 @@ def exposure(self):
         exp = self.get_item('EXIF:ExposureTime')
         # correct for incorrect exposure in some legacy RedEdge firmware versions
         if self.camera_model() != "Altum":
-            if math.fabs(exp-(1.0/6329.0)) < 1e-6:
+            if math.fabs(exp - (1.0 / 6329.0)) < 1e-6:
                 exp = 0.000274
         return exp

     def gain(self):
-        return self.get_item('EXIF:ISOSpeed')/100.0
+        return self.get_item('EXIF:ISOSpeed') / 100.0

     def image_size(self):
         return self.get_item('EXIF:ImageWidth'), self.get_item('EXIF:ImageHeight')

@@ -210,85 +222,103 @@ def black_level(self):
         num = len(black_lvl)
         for pixel in black_lvl:
             total += float(pixel)
-        return total/float(num)
+        return total / float(num)

     def dark_pixels(self):
-        ''' get the average of the optically covered pixel values
+        """ get the average of the optically covered pixel values
         Note: these pixels are raw, and have not been radiometrically
-        corrected. Use the black_level() method for all
-        radiomentric calibrations '''
+        corrected. Use the black_level() method for all
+        radiometric calibrations """
         dark_pixels = self.get_item('XMP:DarkRowValue')
         total = 0.0
         num = len(dark_pixels)
         for pixel in dark_pixels:
             total += float(pixel)
-        return total/float(num)
+        return total / float(num)

     def bits_per_pixel(self):
-        ''' get the number of bits per pixel, which defines the maximum digital number value in the image '''
+        """ get the number of bits per pixel, which defines the maximum digital number value in the image """
         return self.get_item('EXIF:BitsPerSample')

     def vignette_center(self):
-        ''' get the vignette center in X and Y image coordinates'''
+        """ get the vignette center in X and Y image coordinates"""
         nelem = self.size('XMP:VignettingCenter')
         return [float(self.get_item('XMP:VignettingCenter', i)) for i in range(nelem)]

     def vignette_polynomial(self):
-        ''' get the radial vignette polynomial in the order it's defined in the metadata'''
+        """ get the radial vignette polynomial in the order it's defined in the metadata"""
         nelem = self.size('XMP:VignettingPolynomial')
         return [float(self.get_item('XMP:VignettingPolynomial', i)) for i in range(nelem)]

+    def vignette_polynomial2Dexponents(self):
+        """ get exponents of the 2D polynomial """
+        nelem = self.size('XMP:VignettingPolynomial2DName')
+        return [float(self.get_item('XMP:VignettingPolynomial2DName', i)) for i in range(nelem)]
+
+    def vignette_polynomial2D(self):
+        """ get the 2D polynomial coefficients in the order it's defined in the metadata"""
+        nelem = self.size('XMP:VignettingPolynomial2D')
+        return [float(self.get_item('XMP:VignettingPolynomial2D', i)) for i in range(nelem)]
+
     def distortion_parameters(self):
         nelem = self.size('XMP:PerspectiveDistortion')
         return [float(self.get_item('XMP:PerspectiveDistortion', i)) for i in range(nelem)]

     def principal_point(self):
-        return [float(item) for item in self.get_item('XMP:PrincipalPoint').split(',')]
+        if self.get_item('XMP:PrincipalPoint') is not None:
+            return [float(item) for item in self.get_item('XMP:PrincipalPoint').split(',')]
+        else:
+            return [0, 0]

     def focal_plane_resolution_px_per_mm(self):
-        fp_x_resolution = float(self.get_item('EXIF:FocalPlaneXResolution'))
-        fp_y_resolution = float(self.get_item('EXIF:FocalPlaneYResolution'))
float(self.get_item('EXIF:FocalPlaneYResolution')) + if self.get_item('EXIF:FocalPlaneXResolution') is not None: + fp_x_resolution = float(self.get_item('EXIF:FocalPlaneXResolution')) + fp_y_resolution = float(self.get_item('EXIF:FocalPlaneYResolution')) + else: + fp_x_resolution, fp_y_resolution = 0, 0 return fp_x_resolution, fp_y_resolution def focal_length_mm(self): units = self.get_item('XMP:PerspectiveFocalLengthUnits') focal_length_mm = 0.0 - if units == 'mm': - focal_length_mm = float(self.get_item('XMP:PerspectiveFocalLength')) - else: - focal_length_px = float(self.get_item('XMP:PerspectiveFocalLength')) - focal_length_mm = focal_length_px / self.focal_plane_resolution_px_per_mm()[0] + if units is not None: + if units == 'mm': + focal_length_mm = float(self.get_item('XMP:PerspectiveFocalLength')) + else: + focal_length_px = float(self.get_item('XMP:PerspectiveFocalLength')) + focal_length_mm = focal_length_px / self.focal_plane_resolution_px_per_mm()[0] return focal_length_mm def focal_length_35_mm_eq(self): return float(self.get_item('Composite:FocalLength35efl')) - def __float_or_zero(self, str): - if str is not None: - return float(str) + @staticmethod + def __float_or_zero(val): + if val is not None: + return float(val) else: return 0.0 def irradiance_scale_factor(self): - ''' Get the calibration scale factor for the irradiance measurements in this image metadata. + """ Get the calibration scale factor for the irradiance measurements in this image metadata. Due to calibration differences between DLS1 and DLS2, we need to account for a scale factor change in their respective units. This scale factor is pulled from the image metadata, or, if - the metadata doesn't give us the scale, we assume one based on a known combination of tags''' + the metadata doesn't give us the scale, we assume one based on a known combination of tags""" if self.get_item('XMP:IrradianceScaleToSIUnits') is not None: - # the metadata contains the scale + # the metadata contains the scale scale_factor = self.__float_or_zero(self.get_item('XMP:IrradianceScaleToSIUnits')) - elif self.get_item('XMP:HorizontalIrradiance') is not None: + elif self.get_item('XMP:HorizontalIrradiance') is not None: # DLS2 but the metadata is missing the scale, assume 0.01 scale_factor = 0.01 else: # DLS1, so we use a scale of 1 scale_factor = 1.0 return scale_factor - + def horizontal_irradiance_valid(self): - ''' Defines if horizontal irradiance tag contains a value that can be trusted + """ Defines if horizontal irradiance tag contains a value that can be trusted some firmware versions had a bug whereby the direct and scattered irradiance were correct, - but the horizontal irradiance was calculated incorrectly ''' + but the horizontal irradiance was calculated incorrectly """ if self.get_item('XMP:HorizontalIrradiance') is None: return False from packaging import version @@ -297,69 +327,73 @@ def horizontal_irradiance_valid(self): good_version = "1.2.3" elif self.camera_model() == 'RedEdge' or self.camera_model() == 'RedEdge-M': good_version = "5.1.7" + elif self.camera_model() == 'RedEdge-P': + return True + elif self.camera_model() == 'Altum-PT': + return True else: raise ValueError("Camera model is required to be RedEdge or Altum, not {} ".format(self.camera_model())) return version.parse(version_string) >= version.parse(good_version) def spectral_irradiance(self): - ''' Raw spectral irradiance measured by an irradiance sensor. 
- Calibrated to W/m^2/nm using irradiance_scale_factor, but not corrected for angles ''' - return self.__float_or_zero(self.get_item('XMP:SpectralIrradiance'))*self.irradiance_scale_factor() + """ Raw spectral irradiance measured by an irradiance sensor. + Calibrated to W/m^2/nm using irradiance_scale_factor, but not corrected for angles """ + return self.__float_or_zero(self.get_item('XMP:SpectralIrradiance')) * self.irradiance_scale_factor() def horizontal_irradiance(self): - ''' Horizontal irradiance at the earth's surface below the DLS on the plane normal to the gravity - vector at the location (local flat plane spectral irradiance) ''' - return self.__float_or_zero(self.get_item('XMP:HorizontalIrradiance'))*self.irradiance_scale_factor() + """ Horizontal irradiance at the earth's surface below the DLS on the plane normal to the gravity + vector at the location (local flat plane spectral irradiance) """ + return self.__float_or_zero(self.get_item('XMP:HorizontalIrradiance')) * self.irradiance_scale_factor() def scattered_irradiance(self): - ''' scattered component of the spectral irradiance ''' - return self.__float_or_zero(self.get_item('XMP:ScatteredIrradiance'))*self.irradiance_scale_factor() + """ scattered component of the spectral irradiance """ + return self.__float_or_zero(self.get_item('XMP:ScatteredIrradiance')) * self.irradiance_scale_factor() def direct_irradiance(self): - ''' direct component of the spectral irradiance on a ploane normal to the vector towards the sun ''' - return self.__float_or_zero(self.get_item('XMP:DirectIrradiance'))*self.irradiance_scale_factor() + """ direct component of the spectral irradiance on a plane normal to the vector towards the sun """ + return self.__float_or_zero(self.get_item('XMP:DirectIrradiance')) * self.irradiance_scale_factor() def solar_azimuth(self): - ''' solar azimuth at the time of capture, as calculated by the camera system ''' + """ solar azimuth at the time of capture, as calculated by the camera system """ return self.__float_or_zero(self.get_item('XMP:SolarAzimuth')) def solar_elevation(self): - ''' solar elevation at the time of capture, as calculated by the camera system ''' + """ solar elevation at the time of capture, as calculated by the camera system """ return self.__float_or_zero(self.get_item('XMP:SolarElevation')) def estimated_direct_vector(self): - ''' estimated direct light vector relative to the DLS2 reference frame''' + """ estimated direct light vector relative to the DLS2 reference frame""" if self.get_item('XMP:EstimatedDirectLightVector') is not None: return [self.__float_or_zero(item) for item in self.get_item('XMP:EstimatedDirectLightVector')] else: return None def auto_calibration_image(self): - ''' True if this image is an auto-calibration image, where the camera has found and idetified - a calibration panel ''' + """ True if this image is an auto-calibration image, where the camera has found and identified + a calibration panel """ cal_tag = self.get_item('XMP:CalibrationPicture') return cal_tag is not None and \ - cal_tag == 2 and \ - self.panel_albedo() is not None and \ - self.panel_region() is not None and \ - self.panel_serial() is not None + cal_tag == 2 and \ + self.panel_albedo() is not None and \ + self.panel_region() is not None and \ + self.panel_serial() is not None def panel_albedo(self): - ''' Surface albedo of the active portion of the reflectance panel as calculated by the camera - (usually from the informatoin in the panel QR code) ''' + """ Surface albedo of the active portion of
the reflectance panel as calculated by the camera + (usually from the information in the panel QR code) """ albedo = self.get_item('XMP:Albedo') if albedo is not None: return self.__float_or_zero(albedo) return albedo def panel_region(self): - ''' A 4-tuple containing image x,y coordinates of the panel active area ''' + """ A 4-tuple containing image x,y coordinates of the panel active area """ if self.get_item('XMP:ReflectArea') is not None: coords = [int(item) for item in self.get_item('XMP:ReflectArea').split(',')] return list(zip(coords[0::2], coords[1::2])) else: return None - + def panel_serial(self): - ''' The panel serial number as extracted from the image by the camera ''' + """ The panel serial number as extracted from the image by the camera """ return self.get_item('XMP:PanelSerial') diff --git a/micasense/panel.py b/micasense/panel.py index 2c314afe..007a2068 100644 --- a/micasense/panel.py +++ b/micasense/panel.py @@ -24,18 +24,18 @@ """ import math -import numpy as np -import cv2 import re -import pyzbar.pyzbar as pyzbar -from skimage import measure +import cv2 import matplotlib.pyplot as plt -import micasense.imageutils as imageutils +import numpy as np +import pyzbar.pyzbar as pyzbar +from skimage import measure + class Panel(object): - def __init__(self, img,panelCorners=None,ignore_autocalibration=False): + def __init__(self, img, panel_corners=None, ignore_autocalibration=False): # if we have panel images with QR metadata, panel detection is not called, # so this can be forced here if img is None: @@ -45,12 +45,12 @@ def __init__(self, img,panelCorners=None,ignore_autocalibration=False): bias = img.radiance().min() scale = (img.radiance().max() - bias) self.gray8b = np.zeros(img.radiance().shape, dtype='uint8') - cv2.convertScaleAbs(img.undistorted(img.radiance()), self.gray8b, 256.0/scale, -1.0*scale*bias) - - if (self.image.auto_calibration_image) and ~ignore_autocalibration: - self.__panel_type = "auto" ## panels the camera found we call auto - if panelCorners is not None: - self.__panel_bounds = np.array(panelCorners) + cv2.convertScaleAbs(img.undistorted(img.radiance()), self.gray8b, 256.0 / scale, -1.0 * scale * bias) + + if self.image.auto_calibration_image and not ignore_autocalibration: + self.__panel_type = "auto" # panels the camera found we call auto + if panel_corners is not None: + self.__panel_bounds = np.array(panel_corners) else: self.__panel_bounds = np.array(self.image.panel_region) self.panel_albedo = self.image.panel_albedo @@ -65,7 +65,7 @@ def __init__(self, img,panelCorners=None,ignore_autocalibration=False): self.serial = self.image.panel_serial self.panel_version = int(self.image.panel_serial[2:4]) else: - self.__panel_type = "search" ## panels we search for we call search + self.__panel_type = "search" # panels we search for we call search self.serial = None self.qr_area = None self.qr_bounds = None @@ -73,8 +73,8 @@ def __init__(self, img,panelCorners=None,ignore_autocalibration=False): self.saturated_panel_pixels_pct = None self.panel_pixels_mean = None self.panel_version = None - if panelCorners is not None: - self.__panel_bounds = np.array(panelCorners) + if panel_corners is not None: + self.__panel_bounds = np.array(panel_corners) else: self.__panel_bounds = None @@ -91,11 +91,9 @@ def __find_qr(self): self.panel_version = int(self.serial[2:4]) self.qr_bounds = [] for point in symbol.polygon: - self.qr_bounds.append([point.x,point.y]) + self.qr_bounds.append([point.x, point.y]) self.qr_bounds = np.asarray(self.qr_bounds, np.int32)
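+ # the contour area of the QR polygon gives a rough pixel scale for the detected code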
self.qr_area = cv2.contourArea(self.qr_bounds) - # print (symbol.polygon) - # print (self.qr_bounds) break def __pt_in_image_bounds(self, pt): @@ -105,118 +103,111 @@ def __pt_in_image_bounds(self, pt): if pt[1] >= height or pt[1] < 0: return False return True - + def reflectance_from_panel_serial(self): if self.__panel_type == 'auto': return self.panel_albedo - + if self.serial is None: self.__find_qr() if self.serial is None: raise ValueError("Panel serial number not found") if self.panel_version >= 4: min_wl = float(self.serial[-14:-10]) - min_rf = float(self.serial[-10:-7])/1000.0 + min_rf = float(self.serial[-10:-7]) / 1000.0 max_wl = float(self.serial[-7:-3]) - max_rf = float(self.serial[-3:])/1000.0 - c = np.polyfit([min_wl,max_wl], [min_rf,max_rf], 1) + max_rf = float(self.serial[-3:]) / 1000.0 + c = np.polyfit([min_wl, max_wl], [min_rf, max_rf], 1) p = np.poly1d(c) return p(self.image.center_wavelength) else: return None + def get_panel_type(self): + print(self.__panel_type) + def qr_corners(self): if self.__panel_type == 'auto': return None - + if self.qr_bounds is None: self.__find_qr() return self.qr_bounds def panel_detected(self): - if self.__expect_panel() == False: + if not self.__expect_panel(): return False - + if self.__panel_type == 'auto': return True - + if self.serial is None: self.__find_qr() return self.qr_bounds is not None def panel_corners(self): """ get the corners of a panel region based on the qr code location - Our algorithm to do this uses a 'reference' qr code location and + Our algorithm to do this uses a 'reference' qr code location, and it's associate panel region. We find the affine transform between the reference qr and our qr, and apply that same transform to the reference panel region to find our panel region. Because of a limitation of the pyzbar library, the rotation of the absolute QR code isn't known, so we then try all 4 rotations and test against a cost function which is the - minimum of the standard devation divided by the mean value for the panel region""" + minimum of the standard deviation divided by the mean value for the panel region""" if self.__panel_bounds is not None: return self.__panel_bounds if self.serial is None: self.__find_qr() - if self.serial is None: # didn't find a panel in this image + if self.serial is None: # didn't find a panel in this image return None - + if self.panel_version < 3: - # reference_panel_pts = np.asarray([[894, 469], [868, 232], [630, 258], [656, 496]], - # dtype=np.int32) - # reference_qr_pts = np.asarray([[898, 748], [880, 567], [701, 584], [718, 762]], - # dtype=np.int32) - # use the actual panel measures here - we use units of [mm] # the panel is 154.4 x 152.4 mm , vs. the 84 x 84 mm for the QR code # it is left 143.20 mm from the QR code # use the inner 50% square of the panel s = 76.2 p = 42 - T = np.array([-143.2,0]) - - elif (self.panel_version >= 3) and (self.panel_version<6): + T = np.array([-143.2, 0]) + + elif (self.panel_version >= 3) and (self.panel_version < 6): s = 50 p = 45 - T = np.array([-145.8,0]) - # reference_panel_pts = np.asarray([[557, 350], [550, 480], [695, 480], [700, 350]], dtype=np.int32) - # reference_qr_pts = np.asarray([[821, 324], [819, 506], [996, 509], [999, 330]], dtype=np.int32) - elif self.panel_version >= 6 : + T = np.array([-145.8, 0]) + elif self.panel_version >= 6: # use the actual panel measures here - we use units of [mm] # the panel is 100 x 100 mm , vs. 
the 91 x 91 mm for the QR code # it is down 125.94 mm from the QR code # use the inner 50% square of the panel p = 41 s = 50 - T = np.array([0,-130.84]) - - - reference_panel_pts = np.asarray([[-s, s], [s, s], [s, -s], [-s, -s]], dtype=np.float32)*.5+T + T = np.array([0, -130.84]) + + reference_panel_pts = np.asarray([[-s, s], [s, s], [s, -s], [-s, -s]], dtype=np.float32) * .5 + T reference_qr_pts = np.asarray([[-p, p], [p, p], [p, -p], [-p, -p]], dtype=np.float32) bounds = [] costs = [] - for rotation in range(0,4): + for rotation in range(0, 4): qr_points = np.roll(reference_qr_pts, rotation, axis=0) src = np.asarray([tuple(row) for row in qr_points[:]], np.float32) dst = np.asarray([tuple(row) for row in self.qr_corners()[:]], np.float32) - + # we determine the homography from the 4 corner points - warp_matrix = cv2.getPerspectiveTransform(src,dst) - - #warp_matrix = cv2.getAffineTransform(src, dst) + warp_matrix = cv2.getPerspectiveTransform(src, dst) pts = np.asarray([reference_panel_pts], 'float32') panel_bounds = cv2.convexHull(cv2.perspectiveTransform(pts, warp_matrix), clockwise=False) - panel_bounds = np.squeeze(panel_bounds) # remove nested lists - + panel_bounds = np.squeeze(panel_bounds) # remove nested lists + bounds_in_image = True for i, point in enumerate(panel_bounds): if not self.__pt_in_image_bounds(point): bounds_in_image = False if bounds_in_image: - mean, std, _, _ = self.region_stats(self.image.raw(),panel_bounds, sat_threshold=65000) + mean, std, _, _ = self.region_stats(self.image.raw(), panel_bounds, sat_threshold=65000) bounds.append(panel_bounds.astype(np.int32)) - costs.append(std/mean) + costs.append(std / mean) idx = costs.index(min(costs)) self.__panel_bounds = bounds[idx] @@ -242,12 +233,12 @@ def ordered_panel_coordinates(self): return [tuple(right_coords[1]), tuple(left_coords[1]), tuple(left_coords[0]), tuple(right_coords[0])] def region_stats(self, img, region, sat_threshold=None): - """Provide regional statistics for a image over a region + """Provide regional statistics for an image over a region Inputs: img is any image ndarray, region is a skimage shape Outputs: mean, std, count, and saturated count tuple for the region""" - rev_panel_pts = np.fliplr(region) #skimage and opencv coords are reversed + rev_panel_pts = np.fliplr(region) # skimage and opencv coords are reversed w, h = img.shape - mask = measure.grid_points_in_poly((w,h),rev_panel_pts) + mask = measure.grid_points_in_poly((w, h), rev_panel_pts) num_pixels = mask.sum() panel_pixels = img[mask] stdev = panel_pixels.std() @@ -255,30 +246,34 @@ def region_stats(self, img, region, sat_threshold=None): saturated_count = 0 if sat_threshold is not None: saturated_count = (panel_pixels > sat_threshold).sum() - #set saturated pixels here - if num_pixels>0: - self.saturated_panel_pixels_pct = (100.0*saturated_count)/num_pixels + # set saturated pixels here + if num_pixels > 0: + self.saturated_panel_pixels_pct = (100.0 * saturated_count) / num_pixels return mean_value, stdev, num_pixels, saturated_count - + def raw(self): raw_img = self.image.undistorted(self.image.raw()) return self.region_stats(raw_img, self.panel_corners(), sat_threshold=65000) + def intensity(self): intensity_img = self.image.undistorted(self.image.intensity()) return self.region_stats(intensity_img, self.panel_corners(), sat_threshold=65000) + def radiance(self): radiance_img = self.image.undistorted(self.image.radiance()) return self.region_stats(radiance_img, self.panel_corners()) - + def reflectance_mean(self): 
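+ # mean reflectance over the panel region; the caller must first compute image.reflectance(irradiance)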
reflectance_image = self.image.reflectance() if reflectance_image is None: - print("First calculate the reflectance image by providing a\n band specific irradiance to the calling image.reflectance(irradiance)") + print( + "First calculate the reflectance image by providing a\n band specific irradiance to the calling " + "image.reflectance(irradiance)") mean, _, _, _ = self.region_stats(reflectance_image, self.panel_corners()) return mean @@ -288,24 +283,24 @@ def irradiance_mean(self, reflectance): return radiance_mean * math.pi / reflectance def plot_image(self): - display_img = cv2.cvtColor(self.gray8b,cv2.COLOR_GRAY2RGB) + display_img = cv2.cvtColor(self.gray8b, cv2.COLOR_GRAY2RGB) if self.panel_detected(): if self.qr_corners() is not None: - cv2.drawContours(display_img,[self.qr_corners()], 0, (255, 0, 0), 3) - cv2.drawContours(display_img,[self.panel_corners()], 0, (0,0, 255), 3) + cv2.drawContours(display_img, [self.qr_corners()], 0, (255, 0, 0), 3) + cv2.drawContours(display_img, [self.panel_corners()], 0, (0, 0, 255), 3) font = cv2.FONT_HERSHEY_DUPLEX if self.panel_detected(): if self.qr_corners() is not None: - xloc = self.qr_corners()[0][0]-100 - yloc = self.qr_corners()[0][1]+100 + xloc = self.qr_corners()[0][0] - 100 + yloc = self.qr_corners()[0][1] + 100 else: - xloc = self.panel_corners()[0][0]-100 - yloc = self.panel_corners()[0][1]+100 - cv2.putText(display_img, str(self.serial).split('_')[0], (xloc,yloc), font, 1, 255, 2) + xloc = self.panel_corners()[0][0] - 100 + yloc = self.panel_corners()[0][1] + 100 + cv2.putText(display_img, str(self.serial).split('_')[0], (xloc, yloc), font, 1, 255, 2) return display_img - def plot(self, figsize=(14,14)): + def plot(self, figsize=(14, 14)): display_img = self.plot_image() fig, ax = plt.subplots(figsize=figsize) ax.imshow(display_img) diff --git a/micasense/plotutils.py b/micasense/plotutils.py index 6b93c33e..62994e6b 100644 --- a/micasense/plotutils.py +++ b/micasense/plotutils.py @@ -24,12 +24,12 @@ """ import matplotlib.pyplot as plt -from mpl_toolkits.axes_grid1 import make_axes_locatable -from mpl_toolkits.mplot3d import Axes3D from matplotlib.pylab import cm +from mpl_toolkits.axes_grid1 import make_axes_locatable + def plotwithcolorbar(img, title=None, figsize=None, vmin=None, vmax=None): - ''' Plot an image with a colorbar ''' + """ Plot an image with a colorbar """ fig, axis = plt.subplots(1, 1, figsize=figsize) rad2 = axis.imshow(img, vmin=vmin, vmax=vmax) axis.set_title(title) @@ -40,12 +40,13 @@ def plotwithcolorbar(img, title=None, figsize=None, vmin=None, vmax=None): plt.show() return fig, axis + def subplotwithcolorbar(rows, cols, images, titles=None, figsize=None): - ''' Plot a set of images in subplots ''' - fig, axes = plt.subplots(rows, cols, figsize=figsize,squeeze=False) - for i in range(cols*rows): - column = int(i%cols) - row = int(i/cols) + """ Plot a set of images in subplots """ + fig, axes = plt.subplots(rows, cols, figsize=figsize, squeeze=False) + for i in range(cols * rows): + column = int(i % cols) + row = int(i / cols) if i < len(images): rad = axes[row][column].imshow(images[i]) if titles is not None: @@ -59,23 +60,26 @@ def subplotwithcolorbar(rows, cols, images, titles=None, figsize=None): plt.show() return fig, axes -def plot_overlay_withcolorbar(imgbase, imgcolor, title=None, figsize=None, vmin=None, vmax=None, overlay_alpha=1.0, overlay_colormap='viridis', overlay_steps=None, display_contours=False, contour_fmt=None, contour_steps=None, contour_alpha=None, show=True): - ''' Plot an image with a 
colorbar ''' + +def plot_overlay_withcolorbar(imgbase, imgcolor, title=None, figsize=None, vmin=None, vmax=None, overlay_alpha=1.0, + overlay_colormap='viridis', overlay_steps=None, display_contours=False, contour_fmt=None, + contour_steps=None, contour_alpha=None, show=True): + """ Plot an image with a colorbar """ fig, axis = plt.subplots(1, 1, figsize=figsize, squeeze=False) base = axis[0][0].imshow(imgbase) if overlay_steps is not None: - overlay_colormap = cm.get_cmap(overlay_colormap,overlay_steps) + overlay_colormap = cm.get_cmap(overlay_colormap, overlay_steps) rad2 = axis[0][0].imshow(imgcolor, vmin=vmin, vmax=vmax, alpha=overlay_alpha, cmap=overlay_colormap) if display_contours: if contour_steps is None: contour_steps = overlay_steps if contour_alpha is None: contour_alpha = overlay_alpha - contour_cmap = cm.get_cmap(overlay_colormap,contour_steps) - contour_list = np.arange(vmin, vmax, (vmax-vmin)/contour_steps) + contour_cmap = cm.get_cmap(overlay_colormap, contour_steps) + contour_list = np.arange(vmin, vmax, (vmax - vmin) / contour_steps) rad3 = axis[0][0].contour(imgcolor, contour_list, cmap=contour_cmap, alpha=contour_alpha) - fontsize=8+(max(figsize)/10)*2 - axis[0][0].clabel(rad3,rad3.levels,inline=True,fontsize=fontsize,fmt=contour_fmt) + fontsize = 8 + (max(figsize) / 10) * 2 + axis[0][0].clabel(rad3, rad3.levels, inline=True, fontsize=fontsize, fmt=contour_fmt) axis[0][0].set_title(title) divider = make_axes_locatable(axis[0][0]) cax = divider.append_axes("right", size="3%", pad=0.05) @@ -85,12 +89,13 @@ def plot_overlay_withcolorbar(imgbase, imgcolor, title=None, figsize=None, vmin= plt.show() return fig, axis[0][0] + def subplot(rows, cols, images, titles=None, figsize=None): - ''' Plot a set of images in subplots ''' + """ Plot a set of images in subplots """ fig, axes = plt.subplots(rows, cols, figsize=figsize, squeeze=False) - for i in range(cols*rows): - column = int(i%cols) - row = int(i/cols) + for i in range(cols * rows): + column = int(i % cols) + row = int(i / cols) if i < len(images): rad = axes[row][column].imshow(images[i]) if titles is not None: @@ -101,30 +106,34 @@ def subplot(rows, cols, images, titles=None, figsize=None): plt.show() return fig, axes + def colormap(cmap): - ''' Set the defalut plotting colormap + """ Set the defalut plotting colormap Could be one of 'gray, viridis, plasma, inferno, magma, nipy_spectral' - ''' + """ plt.set_cmap(cmap) + import numpy as np -def plot_ned_vector3d(x,y,z, u=0,v=0,w=0, title=None, figsize=(8,5)): - '''Create a 3d plot of a North-East-Down vector. XYZ is the (tip of the) vector, - uvw is the base location of the vector ''' + + +def plot_ned_vector3d(x, y, z, u=0, v=0, w=0, title=None, figsize=(8, 5)): + """Create a 3d plot of a North-East-Down vector. 
XYZ is the (tip of the) vector, + uvw is the base location of the vector """ fig = plt.figure(figsize=figsize) ax = fig.gca(projection='3d') ax.quiver(u, v, w, x, y, z, color='r') ax.quiver(u, v, w, x, y, 0, color='b') ax.quiver(x, y, 0, 0, 0, z, color='g') - + ax.legend() - ax.set_xlim([-1,1]) - ax.set_ylim([-1,1]) - ax.set_zlim([0,1]) + ax.set_xlim([-1, 1]) + ax.set_ylim([-1, 1]) + ax.set_zlim([0, 1]) ax.set_xlabel("West - East") ax.set_ylabel("South - North") if title is not None: plt.title(title) plt.tight_layout() plt.show() - return fig,ax \ No newline at end of file + return fig, ax diff --git a/micasense/utils.py b/micasense/utils.py old mode 100644 new mode 100755 index 93006b8b..b95421b6 --- a/micasense/utils.py +++ b/micasense/utils.py @@ -23,11 +23,11 @@ import numpy as np -def raw_image_to_radiance(meta, imageRaw): +def raw_image_to_radiance(meta, image_raw): # get image dimensions - imageRaw = imageRaw.T - xDim = imageRaw.shape[0] - yDim = imageRaw.shape[1] + image_raw = image_raw.T + xDim = image_raw.shape[0] + yDim = image_raw.shape[1] # get radiometric calibration factors @@ -40,11 +40,11 @@ def raw_image_to_radiance(meta, imageRaw): # get dark current pixel values # get number of stored values black_levels = [float(val) for val in meta.get_item('EXIF:BlackLevel').split(' ')] - blackLevel = np.array(black_levels) - darkLevel = blackLevel.mean() + black_level = np.array(black_levels) + dark_level = black_level.mean() # get exposure time & gain (gain = ISO/100) - exposureTime = float(meta.get_item('EXIF:ExposureTime')) + exposure_time = float(meta.get_item('EXIF:ExposureTime')) gain = float(meta.get_item('EXIF:ISOSpeed')) / 100.0 # apply image correction methods to raw image @@ -53,12 +53,12 @@ def raw_image_to_radiance(meta, imageRaw): V, x, y = vignette_map(meta, xDim, yDim) # row gradient correction - R = 1.0 / (1.0 + a2 * y / exposureTime - a3 * y) + R = 1.0 / (1.0 + a2 * y / exposure_time - a3 * y) # subtract the dark level and adjust for vignette and row gradient - L = V * R * (imageRaw - darkLevel) + L = V * R * (image_raw - dark_level) - # Floor any negative radiances to zero (can happend due to noise around blackLevel) + # Floor any negative radiances to zero (can happen due to noise around black_level) L[L < 0] = 0 # L = np.round(L).astype(np.uint16) @@ -67,43 +67,43 @@ def raw_image_to_radiance(meta, imageRaw): # multiply with the radiometric calibration coefficient # need to normalize by 2^16 for 16 bit images # because coefficients are scaled to work with input values of max 1.0 - bitsPerPixel = meta.get_item('EXIF:BitsPerSample') - bitDepthMax = float(2 ** bitsPerPixel) - radianceImage = L.astype(float) / (gain * exposureTime) * a1 / bitDepthMax + bits_per_pixel = meta.get_item('EXIF:BitsPerSample') + bit_depth_max = float(2 ** bits_per_pixel) + radiance_image = L.astype(float) / (gain * exposure_time) * a1 / bit_depth_max # return both the radiance compensated image and the DN corrected image, for the # sake of the tutorial and visualization - return radianceImage.T, L.T, V.T, R.T + return radiance_image.T, L.T, V.T, R.T -def vignette_map(meta, xDim, yDim): +def vignette_map(meta, x_dim, y_dim): # get vignette center - xVignette = float(meta.get_item('XMP:VignettingCenter', 0)) - yVignette = float(meta.get_item('XMP:VignettingCenter', 1)) + x_vignette = float(meta.get_item('XMP:VignettingCenter', 0)) + y_vignette = float(meta.get_item('XMP:VignettingCenter', 1)) # get vignette polynomial - NvignettePoly = meta.size('XMP:VignettingPolynomial') -
vignettePolyList = [float(meta.get_item('XMP:VignettingPolynomial', i)) for i in range(NvignettePoly)] + nvignette_poly = meta.size('XMP:VignettingPolynomial') + vignette_poly_list = [float(meta.get_item('XMP:VignettingPolynomial', i)) for i in range(nvignette_poly)] # reverse list and append 1., so that we can call with numpy polyval - vignettePolyList.reverse() - vignettePolyList.append(1.) - vignettePoly = np.array(vignettePolyList) + vignette_poly_list.reverse() + vignette_poly_list.append(1.) + vignette_poly = np.array(vignette_poly_list) # perform vignette correction # get coordinate grid across image - x, y = np.meshgrid(np.arange(xDim), np.arange(yDim)) + x, y = np.meshgrid(np.arange(x_dim), np.arange(y_dim)) # meshgrid returns transposed arrays x = x.T y = y.T # compute matrix of distances from image center - r = np.hypot((x - xVignette), (y - yVignette)) + r = np.hypot((x - x_vignette), (y - y_vignette)) # compute the vignette polynomial for each distance - we divide by the polynomial so that the # corrected image is image_corrected = image_original * vignetteCorrection - vignette = 1. / np.polyval(vignettePoly, r) + vignette = 1. / np.polyval(vignette_poly, r) return vignette, x, y @@ -115,34 +115,29 @@ def focal_plane_resolution_px_per_mm(meta): def focal_length_mm(meta): units = meta.get_item('XMP:PerspectiveFocalLengthUnits') - focal_length_mm = 0.0 if units == 'mm': - focal_length_mm = float(meta.get_item('XMP:PerspectiveFocalLength')) + local_focal_length_mm = float(meta.get_item('XMP:PerspectiveFocalLength')) else: focal_length_px = float(meta.get_item('XMP:PerspectiveFocalLength')) - focal_length_mm = focal_length_px / focal_plane_resolution_px_per_mm(meta)[0] - return focal_length_mm + local_focal_length_mm = focal_length_px / focal_plane_resolution_px_per_mm(meta)[0] + return local_focal_length_mm def correct_lens_distortion(meta, image): # get lens distortion parameters - Ndistortion = meta.size('XMP:PerspectiveDistortion') - distortionParameters = np.array([float(meta.get_item('XMP:PerspectiveDistortion', i)) for i in range(Ndistortion)]) + ndistortion = meta.size('XMP:PerspectiveDistortion') + distortion_parameters = np.array([float(meta.get_item('XMP:PerspectiveDistortion', i)) for i in range(ndistortion)]) # get the two principal points pp = np.array(meta.get_item('XMP:PrincipalPoint').split(',')).astype(np.float) # values in pp are in [mm] and need to be rescaled to pixels - FocalPlaneXResolution = float(meta.get_item('EXIF:FocalPlaneXResolution')) - FocalPlaneYResolution = float(meta.get_item('EXIF:FocalPlaneYResolution')) + focal_plane_x_resolution = float(meta.get_item('EXIF:FocalPlaneXResolution')) + focal_plane_y_resolution = float(meta.get_item('EXIF:FocalPlaneYResolution')) - cX = pp[0] * FocalPlaneXResolution - cY = pp[1] * FocalPlaneYResolution - # k = distortionParameters[0:3] # seperate out k -parameters - # p = distortionParameters[3::] # separate out p - parameters - # fx = fy = float(meta.get_item('XMP:PerspectiveFocalLength')) - fx = fy = focal_length_mm(meta) * FocalPlaneXResolution + cX = pp[0] * focal_plane_x_resolution + cY = pp[1] * focal_plane_y_resolution + fx = fy = focal_length_mm(meta) * focal_plane_x_resolution # apply perspective distortion - h, w = image.shape # set up camera matrix for cv2 @@ -154,8 +149,7 @@ def correct_lens_distortion(meta, image): cam_mat[1, 2] = cY # set up distortion coefficients for cv2 - # dist_coeffs = np.array(k[0],k[1],p[0],p[1],k[2]]) - dist_coeffs = distortionParameters[[0, 1, 3, 4, 2]] + dist_coeffs
= distortion_parameters[[0, 1, 3, 4, 2]] new_cam_mat, _ = cv2.getOptimalNewCameraMatrix(cam_mat, dist_coeffs, (w, h), 1) map1, map2 = cv2.initUndistortRectifyMap(cam_mat, @@ -165,5 +159,4 @@ def correct_lens_distortion(meta, image): (w, h), cv2.CV_32F) # cv2.CV_32F for 32 bit floats # compute the undistorted 16 bit image - undistortedImage = cv2.remap(image, map1, map2, cv2.INTER_LINEAR) - return undistortedImage + return cv2.remap(image, map1, map2, cv2.INTER_LINEAR) diff --git a/micasense_conda_env.yml b/micasense_conda_env.yml index 5e403bc2..2ce10af0 100644 --- a/micasense_conda_env.yml +++ b/micasense_conda_env.yml @@ -3,26 +3,26 @@ channels: - conda-forge dependencies: - python=3.7 - - opencv - - gdal + - opencv=4.1 + - numpy - jupyter - matplotlib + - pysolar + - gdal - scikit-image - - imageio + - pytest - git - git-lfs - pandas + - imageio - nb_conda - - pytest - - pytest-xdist - - pip - requests - packaging - - tqdm + - pip + - pytest-xdist - pip: - - pysolar - - pyzbar - - mapboxgl - - pyexiftool<=0.4.13 - - jenkspy - - rawpy + - pyzbar + - mapboxgl + - git+https://github.com/smarnach/pyexiftool.git#egg=pyexiftoolpy + - jenkspy + - rawpy diff --git a/setup.py b/setup.py index a664c5be..04decc1c 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,9 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +import codecs from setuptools import setup, find_packages +import os # Parse the version from the main __init__.py with open('micasense/__init__.py') as f: @@ -23,7 +25,7 @@ install_requires=[ 'requests', 'numpy', - 'opencv-python', + 'opencv-python-headless', 'gdal', 'pysolar', 'matplotlib', @@ -31,6 +33,6 @@ 'packaging', 'pyexiftool<=0.4.13', 'pytz', - 'pyzbar', - 'tqdm' + 'pyzbar' ]) + diff --git a/tests/conftest.py b/tests/conftest.py index c5f297a9..4b76a3e4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -23,168 +23,192 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" +import glob +import os + import pytest -import os, glob import micasense.capture as capture import micasense.image as image import micasense.metadata as metadata + @pytest.fixture() def files_dir(): - return os.path.join('data', '0000SET', '000') + return os.path.join('data', 'REDEDGE-MX') + @pytest.fixture() def altum_files_dir(): - return os.path.join('data', 'ALTUM1SET', '000') + return os.path.join('data', 'ALTUM') + @pytest.fixture() def ten_band_files_dir(): - return os.path.join('data', '10BANDSET', '000') + return os.path.join('data', 'REDEDGE-MX-DUAL') + @pytest.fixture() def panel_rededge_file_list(files_dir): - return sorted(glob.glob(os.path.join(files_dir, 'IMG_0000_*.tif'))) + return glob.glob(os.path.join(files_dir, 'IMG_0001_*.tif')) + @pytest.fixture() def non_panel_rededge_file_list(files_dir): - return sorted(glob.glob(os.path.join(files_dir, 'IMG_0001_*.tif'))) + return glob.glob(os.path.join(files_dir, 'IMG_0020_*.tif')) + @pytest.fixture() def bad_file_list(files_dir): - file1 = os.path.join(files_dir, 'IMG_0000_1.tif') + file1 = os.path.join(files_dir, 'IMG_0020_1.tif') file2 = os.path.join(files_dir, 'IMG_0001_1.tif') return [file1, file2] + @pytest.fixture() def panel_altum_file_list(altum_files_dir): - return sorted(glob.glob(os.path.join(altum_files_dir, 'IMG_0000_*.tif'))) + return glob.glob(os.path.join(altum_files_dir, 'IMG_0000_*.tif')) + @pytest.fixture() def panel_rededge_capture(panel_rededge_file_list): return capture.Capture.from_filelist(panel_rededge_file_list) + @pytest.fixture() def non_panel_rededge_capture(non_panel_rededge_file_list): return capture.Capture.from_filelist(non_panel_rededge_file_list) + @pytest.fixture() def panel_10band_rededge_file_list(ten_band_files_dir): - return sorted(glob.glob(os.path.join(ten_band_files_dir, 'IMG_0000_*.tif'))) + return glob.glob(os.path.join(ten_band_files_dir, 'IMG_0001_*.tif')) + @pytest.fixture() def flight_10band_rededge_file_list(ten_band_files_dir): - return sorted(glob.glob(os.path.join(ten_band_files_dir, 'IMG_0431_*.tif'))) + return glob.glob(os.path.join(ten_band_files_dir, 'IMG_0007_*.tif')) + @pytest.fixture() def panel_altum_capture(panel_altum_file_list): imgs = [image.Image(fle) for fle in panel_altum_file_list] return capture.Capture(imgs) + @pytest.fixture() def non_panel_altum_file_list(altum_files_dir): - return sorted(glob.glob(os.path.join(altum_files_dir, 'IMG_0008_*.tif'))) + return glob.glob(os.path.join(altum_files_dir, 'IMG_0021_*.tif')) + @pytest.fixture() def non_panel_altum_capture(non_panel_altum_file_list): imgs = [image.Image(fle) for fle in non_panel_altum_file_list] return capture.Capture(imgs) -@pytest.fixture() -def panel_image_name(): - image_path = os.path.join('data', '0000SET', '000') - return os.path.join(image_path, 'IMG_0000_1.tif') - @pytest.fixture() -def panel_image_name_red(): - image_path = os.path.join('data', '0000SET', '000') - return os.path.join(image_path, 'IMG_0000_2.tif') +def panel_image_name(): + image_path = os.path.join('data', 'REDEDGE-MX') + return os.path.join(image_path, 'IMG_0001_1.tif') -@pytest.fixture() -def panel_image_name_RP06_blue(): - image_path = os.path.join('data', '0002SET', '000') - return os.path.join(image_path, 'IMG_0000_1.tif') @pytest.fixture() -def panel_images_RP06(): - image_path = os.path.join('data', '0002SET', '000') - return sorted(glob.glob(os.path.join(image_path, 'IMG*.tif'))) +def panel_image_name_red(): + image_path = os.path.join('data', 'REDEDGE-MX') + return os.path.join(image_path, 'IMG_0001_2.tif') 
@pytest.fixture() def flight_image_name(): - image_path = os.path.join('data', '0000SET', '000') - return os.path.join(image_path, 'IMG_0001_1.tif') + image_path = os.path.join('data', 'REDEDGE-MX') + return os.path.join(image_path, 'IMG_0020_1.tif') + @pytest.fixture() def altum_panel_image_name(altum_files_dir): return os.path.join(altum_files_dir, 'IMG_0000_1.tif') + @pytest.fixture() def altum_lwir_image_name(altum_files_dir): return os.path.join(altum_files_dir, 'IMG_0000_6.tif') + @pytest.fixture() def altum_flight_image_name(altum_files_dir): - return os.path.join(altum_files_dir, 'IMG_0008_1.tif') + return os.path.join(altum_files_dir, 'IMG_0021_1.tif') + @pytest.fixture() def img(files_dir): - return image.Image(os.path.join(files_dir,'IMG_0000_1.tif')) + return image.Image(os.path.join(files_dir, 'IMG_0001_1.tif')) + @pytest.fixture() def img2(files_dir): - return image.Image(os.path.join(files_dir,'IMG_0000_2.tif')) + return image.Image(os.path.join(files_dir, 'IMG_0001_2.tif')) + @pytest.fixture() def panel_altum_file_name(altum_files_dir): return os.path.join(altum_files_dir, 'IMG_0000_1.tif') + @pytest.fixture() def panel_altum_image(panel_altum_file_name): return image.Image(panel_altum_file_name) + @pytest.fixture() def altum_flight_image(altum_flight_image_name): return image.Image(altum_flight_image_name) + @pytest.fixture() def non_existant_file_name(altum_files_dir): return os.path.join(altum_files_dir, 'NOFILE.tif') + @pytest.fixture() def altum_lwir_image(altum_files_dir): return image.Image(os.path.join(altum_files_dir, 'IMG_0000_6.tif')) + @pytest.fixture() def meta(): - image_path = os.path.join('data', '0000SET', '000') - return metadata.Metadata(os.path.join(image_path, 'IMG_0000_1.tif')) + image_path = os.path.join('data', 'REDEDGE-MX') + return metadata.Metadata(os.path.join(image_path, 'IMG_0001_1.tif')) + @pytest.fixture() def meta_v3(): - image_path = os.path.join('data', '0001SET', '000') - return metadata.Metadata(os.path.join(image_path, 'IMG_0002_4.tif')) + image_path = os.path.join('data', 'REDEDGE-MX') + return metadata.Metadata(os.path.join(image_path, 'IMG_0020_4.tif')) + @pytest.fixture() def meta_bad_exposure(): - image_path = os.path.join('data', '0001SET', '000') - return metadata.Metadata(os.path.join(image_path, 'IMG_0003_1.tif')) + image_path = os.path.join('data', 'REDEDGE-MX') + return metadata.Metadata(os.path.join(image_path, 'IMG_0020_1.tif')) + @pytest.fixture() def meta_altum_dls2(altum_flight_image_name): return metadata.Metadata(altum_flight_image_name) + @pytest.fixture() def bad_dls2_horiz_irr_image(): - image_path = os.path.join('data', 'ALTUM0SET', '000') - return image.Image(os.path.join(image_path, 'IMG_0000_1.tif')) + image_path = os.path.join('data', 'ALTUM') + return image.Image(os.path.join(image_path, 'IMG_0021_1.tif')) + @pytest.fixture() def panel_10band_rededge_capture(panel_10band_rededge_file_list): return capture.Capture.from_filelist(panel_10band_rededge_file_list) + @pytest.fixture() def flight_10band_rededge_capture(flight_10band_rededge_file_list): return capture.Capture.from_filelist(flight_10band_rededge_file_list) diff --git a/tests/test_capture.py b/tests/test_capture.py index a08a6a4b..6a65a967 100644 --- a/tests/test_capture.py +++ b/tests/test_capture.py @@ -23,38 +23,45 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" -import pytest -import os, glob +import os + import numpy as np +import pytest import micasense.capture as capture import micasense.image as image + def test_from_images(panel_rededge_file_list): imgs = [image.Image(fle) for fle in panel_rededge_file_list] cap = capture.Capture(imgs) assert cap is not None assert len(cap.images) == len(panel_rededge_file_list) + def test_from_filelist(panel_rededge_file_list): cap = capture.Capture.from_filelist(panel_rededge_file_list) assert cap is not None assert len(cap.images) == len(panel_rededge_file_list) + def test_from_single_file(panel_image_name): cap = capture.Capture.from_file(panel_image_name) assert cap is not None + def test_append_single_file(panel_rededge_file_list): cap = capture.Capture.from_file(panel_rededge_file_list[0]) assert len(cap.images) == 1 cap.append_file(panel_rededge_file_list[1]) assert len(cap.images) == 2 + def test_from_different_ids(bad_file_list): with pytest.raises(RuntimeError): cap = capture.Capture.from_filelist(bad_file_list) + def test_append_single(panel_rededge_file_list): imgs = [image.Image(fle) for fle in panel_rededge_file_list] cap = capture.Capture(imgs[0]) @@ -63,6 +70,7 @@ def test_append_single(panel_rededge_file_list): cap.append_image(img) assert len(cap.images) == 5 + def test_append_list(panel_rededge_file_list): imgs = [image.Image(fle) for fle in panel_rededge_file_list] cap = capture.Capture(imgs[0]) @@ -70,141 +78,170 @@ def test_append_list(panel_rededge_file_list): cap.append_images(imgs[1:]) assert len(cap.images) == 5 + def test_less_than(panel_image_name, flight_image_name): cap1 = capture.Capture.from_file(panel_image_name) cap2 = capture.Capture.from_file(flight_image_name) assert cap1 < cap2 + def test_greater_than(panel_image_name, flight_image_name): cap1 = capture.Capture.from_file(panel_image_name) cap2 = capture.Capture.from_file(flight_image_name) assert cap2 > cap1 + def test_equal(panel_image_name, panel_image_name_red): cap1 = capture.Capture.from_file(panel_image_name) cap2 = capture.Capture.from_file(panel_image_name_red) assert cap2 == cap1 + def test_uct_time(panel_image_name): cap1 = capture.Capture.from_file(panel_image_name) - assert cap1.utc_time().isoformat() == '2017-10-19T20:40:39.200174+00:00' + assert cap1.utc_time().isoformat() == '2022-04-06T18:50:25.983430+00:00' + def test_location(panel_image_name): cap1 = capture.Capture.from_file(panel_image_name) loc = cap1.location() + print(loc) assert len(loc) == 3 - assert loc == (36.576096, -119.4352689, 101.861) + assert loc == (47.7036143, -122.1414373, 6.728) + def test_dls_single_file(panel_image_name): cap1 = capture.Capture.from_file(panel_image_name) assert cap1.dls_present() - assert cap1.dls_irradiance()[0] == pytest.approx(1.0101948, 1e-4) + print(cap1.dls_irradiance()[0]) + print(cap1.dls_pose()) + assert cap1.dls_irradiance()[0] == pytest.approx(0.77810447, 1e-4) pose = cap1.dls_pose() + print(pose) assert len(pose) == 3 - assert pose[0] == pytest.approx(-3.070222992336269) - assert pose[1] == pytest.approx(-0.18812839845718335) - assert pose[2] == pytest.approx(-0.013387829297356699) + assert pose[0] == pytest.approx(-2.0091497634122724) + assert pose[1] == pytest.approx(0.018554597483870183) + assert pose[2] == pytest.approx(0.031269217556393974) + def test_dls_group(panel_rededge_capture): assert panel_rededge_capture.dls_present() irradiance = panel_rededge_capture.dls_irradiance() assert len(irradiance) == 5 - assert irradiance[0] == pytest.approx(1.0101948, 1e-4) + print(irradiance[0]) + 
print(panel_rededge_capture.dls_pose()) + assert irradiance[0] == pytest.approx(0.77810447, 1e-4) pose = panel_rededge_capture.dls_pose() + print(pose) assert len(pose) == 3 - assert pose[0] == pytest.approx(-3.070222992336269) - assert pose[1] == pytest.approx(-0.18812839845718335) - assert pose[2] == pytest.approx(-0.013387829297356699) + assert pose[0] == pytest.approx(-2.0091497634122724) + assert pose[1] == pytest.approx(0.018554597483870183) + assert pose[2] == pytest.approx(0.031269217556393974) + def test_panel_radiance(panel_rededge_capture): rad = panel_rededge_capture.panel_radiance() - expected_rad = [0.17028382320603955, - 0.17940027272297152, - 0.1622172746785481, - 0.10647021248769974, - 0.13081077851565506] + print(rad) + expected_rad = [0.17909220357022979, + 0.1805512169681595, + 0.1577359026273932, + 0.10556279304323357, + 0.13221390933733143] assert len(rad) == len(expected_rad) - for i,_ in enumerate(expected_rad): - assert rad[i] == pytest.approx(expected_rad[i], rel=0.01) + for i, _ in enumerate(expected_rad): + assert rad[i] == pytest.approx(expected_rad[i], rel=0.001) + def test_panel_raw(panel_rededge_capture): raw = panel_rededge_capture.panel_raw() print(raw) - expected_raw = [45406.753893482026, - 46924.919447148139, - 53240.810340812051, - 56187.417482757308, - 54479.170371812339] + expected_raw = [28585.13736620175, + 24613.08368472267, + 33435.07799487508, + 31492.43415504379, + 30858.20096] assert len(raw) == len(expected_raw) - for i,_ in enumerate(expected_raw): - assert raw[i] == pytest.approx(expected_raw[i], rel=0.01) + for i, _ in enumerate(expected_raw): + assert raw[i] == pytest.approx(expected_raw[i], rel=0.001) + def test_panel_irradiance(panel_rededge_capture): - panel_reflectance_by_band = [0.67, 0.69, 0.68, 0.61, 0.67] + panel_reflectance_by_band = [0.49, 0.49, 0.49, 0.49, 0.49] rad = panel_rededge_capture.panel_irradiance(panel_reflectance_by_band) - expected_rad = [0.79845135523772681, 0.81681533164998943, 0.74944205649335915, 0.54833776619262586, 0.61336444894797537] + expected_rad = [1.1482341858192686, 1.15758852413034, 1.0113101079623947, 0.6768067247286578, 0.8476780536256069] assert len(rad) == len(expected_rad) - for i,_ in enumerate(expected_rad): - assert rad[i] == pytest.approx(expected_rad[i], rel=0.01) + for i, _ in enumerate(expected_rad): + assert rad[i] == pytest.approx(expected_rad[i], rel=0.001) -def test_panel_albedo_not_preset(panel_rededge_capture): - assert panel_rededge_capture.panels_in_all_expected_images() - assert panel_rededge_capture.panel_albedo() == None def test_panel_albedo_preset(panel_altum_capture): assert panel_altum_capture.panels_in_all_expected_images() - assert panel_altum_capture.panel_albedo() == pytest.approx(5*[0.52],abs=0.01) + assert panel_altum_capture.panel_albedo() == pytest.approx(5 * [0.49], abs=0.01) + def test_detect_panels_in_panel_image(panel_rededge_capture): assert panel_rededge_capture.detect_panels() == 5 assert panel_rededge_capture.panels_in_all_expected_images() == True + def test_no_detect_panels_in_flight_image(non_panel_rededge_capture): assert non_panel_rededge_capture.detect_panels() == 0 assert non_panel_rededge_capture.panels_in_all_expected_images() == False + def test_band_names(panel_rededge_capture): assert panel_rededge_capture.band_names() == ['Blue', 'Green', 'Red', 'NIR', 'Red edge'] + def test_band_names_lower(panel_rededge_capture): assert panel_rededge_capture.band_names_lower() == ['blue', 'green', 'red', 'nir', 'red edge'] + def 
test_altum_eo_lw_indices(panel_altum_capture): - assert panel_altum_capture.eo_indices() == [0,1,2,3,4] + assert panel_altum_capture.eo_indices() == [0, 1, 2, 3, 4] assert panel_altum_capture.lw_indices() == [5] + def test_rededge_eo_lw_indices(panel_rededge_capture): - assert panel_rededge_capture.eo_indices() == [0,1,2,3,4] + assert panel_rededge_capture.eo_indices() == [0, 1, 2, 3, 4] assert panel_rededge_capture.lw_indices() == [] + def test_altum_images(non_panel_altum_file_list): imgs = [image.Image(fle) for fle in non_panel_altum_file_list] cap = capture.Capture(imgs) assert cap is not None assert len(cap.images) == len(non_panel_altum_file_list) + def test_altum_from_filelist(non_panel_altum_file_list): cap = capture.Capture.from_filelist(non_panel_altum_file_list) assert cap is not None assert len(cap.images) == len(non_panel_altum_file_list) + def test_altum_from_single_file(altum_flight_image_name): cap = capture.Capture.from_file(altum_flight_image_name) assert cap is not None + def test_altum_horizontal_irradiance(non_panel_altum_capture): assert non_panel_altum_capture.dls_present() - good_irradiance = [1.222, 1.079, 0.914, 0.587, 0.715, 0.0] + good_irradiance = [1.1215395307329492, 1.0496371984570443, 0.9390573858775944, 0.5691781852317392, + 0.6800663660628287, 0] assert non_panel_altum_capture.dls_irradiance() == pytest.approx(good_irradiance, 1e-3) + def test_altum_panels(panel_altum_capture): assert panel_altum_capture.panels_in_all_expected_images() == True + @pytest.fixture() def aligned_altum_capture(non_panel_altum_capture): non_panel_altum_capture.create_aligned_capture(img_type='radiance') return non_panel_altum_capture + def test_stack_export(aligned_altum_capture, tmpdir): pathstr = str(tmpdir.join('test_bgrent.tiff')) aligned_altum_capture.save_capture_as_stack(pathstr) @@ -212,6 +249,7 @@ def test_stack_export(aligned_altum_capture, tmpdir): if tmpdir.check(): tmpdir.remove() + def test_rgb_jpg(aligned_altum_capture, tmpdir): pathstr = str(tmpdir.join('test_rgb.jpg')) aligned_altum_capture.save_capture_as_rgb(pathstr) @@ -219,6 +257,7 @@ def test_rgb_jpg(aligned_altum_capture, tmpdir): if tmpdir.check(): tmpdir.remove() + def test_rgb_png(aligned_altum_capture, tmpdir): pathstr = str(tmpdir.join('test_rgb.png')) aligned_altum_capture.save_capture_as_rgb(pathstr) @@ -226,20 +265,22 @@ def test_rgb_png(aligned_altum_capture, tmpdir): if tmpdir.check(): tmpdir.remove() + def test_rgb_jpg_decimation(aligned_altum_capture, tmpdir): import imageio - decimations = [2,5,8] + decimations = [2, 5, 8] for decimation in decimations: pathstr = str(tmpdir.join('test_rgb_{}x.jpg'.format(decimation))) aligned_altum_capture.save_capture_as_rgb(pathstr, downsample=decimation) assert os.path.exists(pathstr) img = imageio.imread(pathstr) - assert img.shape[0] == round(float(aligned_altum_capture.aligned_shape()[0])/float(decimation)) - assert img.shape[1] == round(float(aligned_altum_capture.aligned_shape()[1])/float(decimation)) + assert img.shape[0] == round(float(aligned_altum_capture.aligned_shape()[0]) / float(decimation)) + assert img.shape[1] == round(float(aligned_altum_capture.aligned_shape()[1]) / float(decimation)) if tmpdir.check(): tmpdir.remove() + def test_save_thermal_over_rgb(aligned_altum_capture, tmpdir): pathstr = str(tmpdir.join('test_thermal_rgb.png')) aligned_altum_capture.save_thermal_over_rgb(pathstr) @@ -247,41 +288,52 @@ def test_save_thermal_over_rgb(aligned_altum_capture, tmpdir): if tmpdir.check(): tmpdir.remove() + def 
test_has_rig_relatives(non_panel_altum_capture): assert non_panel_altum_capture.has_rig_relatives() == True -def test_no_rig_relatives(non_panel_rededge_file_list): - cap = capture.Capture.from_filelist(non_panel_rededge_file_list) - assert cap.has_rig_relatives() == False def test_panel_albedo(panel_altum_capture): assert panel_altum_capture.detect_panels() == 5 assert panel_altum_capture.panels_in_all_expected_images() - good_panel_albedo = [0.5282, 0.5274, 0.5263, 0.5246, 0.5258] + good_panel_albedo = [0.488375, 0.4876666666666666, 0.4867666666666667, 0.48533333333333334, 0.48635833333333334] assert panel_altum_capture.panel_albedo() == pytest.approx(good_panel_albedo, 1e-4) + def test_panel_albedo_no_detect(panel_altum_capture): - good_panel_albedo = [0.5282, 0.5274, 0.5263, 0.5246, 0.5258] + good_panel_albedo = [0.488375, 0.4876666666666666, 0.4867666666666667, 0.48533333333333334, 0.48635833333333334] assert panel_altum_capture.panel_albedo() == pytest.approx(good_panel_albedo, 1e-4) + def test_10_band_capture_loads(panel_10band_rededge_file_list): print(panel_10band_rededge_file_list) cap = capture.Capture.from_filelist(panel_10band_rededge_file_list) assert cap.num_bands == 10 + def test_10_band_panel(panel_10band_rededge_file_list): cap = capture.Capture.from_filelist(panel_10band_rededge_file_list) assert cap.detect_panels() == 10 assert cap.panels_in_all_expected_images() == True + def test_10_band_irradiance(flight_10band_rededge_capture): assert flight_10band_rededge_capture.dls_present() test_irradiance = flight_10band_rededge_capture.dls_irradiance() - good_irradiance = [0.67305, 0.62855, 0.55658, 0.34257, 0.41591, 0.57470, 0.64203, 0.53739, 0.48215, 0.44563] - assert test_irradiance == pytest.approx(good_irradiance, abs = 1e-5) + good_irradiance = [0.8891651301138496, + 0.8108716108077543, + 0.706543006961791, + 0.4295603539307114, + 0.5300914981313334, + 0.7601517486572266, + 0.8333349609375, + 0.6829991149902344, + 0.6153291320800781, + 0.5720899200439453] + assert test_irradiance == pytest.approx(good_irradiance, abs=1e-5) + def test_get_warp_matrices(panel_altum_capture): for i in range(len(panel_altum_capture.images)): w = panel_altum_capture.get_warp_matrices(i) - np.testing.assert_allclose(np.eye(3),w[i],atol=1e-6) - \ No newline at end of file + np.testing.assert_allclose(np.eye(3), w[i], atol=1e-6) diff --git a/tests/test_dls.py b/tests/test_dls.py index d439d65d..27138ec1 100644 --- a/tests/test_dls.py +++ b/tests/test_dls.py @@ -23,94 +23,104 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" -import pytest -import os, glob -import numpy as np -import math import datetime +import math + +import numpy as np +import pytest import micasense.dls as dls -import micasense.image as image + # pysolar changed their coordinate system from South-based to north-based between 0.6 and 0.8 # we add some tests here to help ensure we captured that change properly def test_ned_from_pysolar_north(): - assert dls.ned_from_pysolar(0,np.radians(45)) == pytest.approx([0.707, 0, -0.707], 0.01) + assert dls.ned_from_pysolar(0, np.radians(45)) == pytest.approx([0.707, 0, -0.707], 0.01) + def test_ned_from_pysolar_northeast(): - assert dls.ned_from_pysolar(np.radians(45),np.radians(45)) == pytest.approx([0.50, 0.5, -0.707], 0.01) + assert dls.ned_from_pysolar(np.radians(45), np.radians(45)) == pytest.approx([0.50, 0.5, -0.707], 0.01) + def test_ned_from_pysolar_east(): - assert dls.ned_from_pysolar(np.radians(90),np.radians(45)) == pytest.approx([0, 0.707, -0.707], 0.01) + assert dls.ned_from_pysolar(np.radians(90), np.radians(45)) == pytest.approx([0, 0.707, -0.707], 0.01) + def test_ned_from_pysolar_southeast(): - assert dls.ned_from_pysolar(np.radians(135),np.radians(45)) == pytest.approx([-0.50, 0.5, -0.707], 0.01) + assert dls.ned_from_pysolar(np.radians(135), np.radians(45)) == pytest.approx([-0.50, 0.5, -0.707], 0.01) + def test_ned_from_pysolar_south(): - assert dls.ned_from_pysolar(np.radians(180),np.radians(45)) == pytest.approx([-0.707, 0, -0.707], 0.01) + assert dls.ned_from_pysolar(np.radians(180), np.radians(45)) == pytest.approx([-0.707, 0, -0.707], 0.01) + def test_ned_from_pysolar_southwest(): - assert dls.ned_from_pysolar(np.radians(225),np.radians(45)) == pytest.approx([-0.50, -0.5, -0.707], 0.01) + assert dls.ned_from_pysolar(np.radians(225), np.radians(45)) == pytest.approx([-0.50, -0.5, -0.707], 0.01) + def test_ned_from_pysolar_west(): - assert dls.ned_from_pysolar(np.radians(270),np.radians(45)) == pytest.approx([0,-0.707,-0.707], 0.01) + assert dls.ned_from_pysolar(np.radians(270), np.radians(45)) == pytest.approx([0, -0.707, -0.707], 0.01) + def test_ned_from_pysolar_northwest(): - assert dls.ned_from_pysolar(np.radians(315),np.radians(45)) == pytest.approx([0.50, -0.5, -0.707], 0.01) + assert dls.ned_from_pysolar(np.radians(315), np.radians(45)) == pytest.approx([0.50, -0.5, -0.707], 0.01) + def test_pysolar_az_el_vs_usno(): # for simplicity's sake, let's pick a couple places on the prime meridian on the vernal equinox, # and test those vs the USNO # https://aa.usno.navy.mil/rstt/onedaytable?ID=AA&year=2019&month=3&day=20&place=&lon_sign=-1&lon_deg=0&lon_min=0&lat_sign=1&lat_deg=51&lat_min=28&tz=0&tz_sign=-1 - lat = 51.4769 #greenwich observatory + lat = 51.4769 # greenwich observatory lon = 0 - dt = datetime.datetime(2019,3,21,12,8,0,tzinfo=datetime.timezone.utc) - _,_,angle,sunAltitude,sunAzimuth = dls.compute_sun_angle((lat, lon, 0), - (0,0,0), - dt, - np.array([0,0,-1])) + dt = datetime.datetime(2019, 3, 21, 12, 8, 0, tzinfo=datetime.timezone.utc) + _, _, angle, sunAltitude, sunAzimuth = dls.compute_sun_angle((lat, lon, 0), + (0, 0, 0), + dt, + np.array([0, 0, -1])) assert angle == pytest.approx(math.radians(lat), abs=0.01) - assert sunAltitude == pytest.approx(math.radians(90-lat), abs=0.01) + assert sunAltitude == pytest.approx(math.radians(90 - lat), abs=0.01) assert sunAzimuth == pytest.approx(math.pi, abs=0.01) - #for simplicity's sake, let's pick a couple places on the prime meridian on the vernal equinox + # for simplicity's sake, let's pick a couple places on 
the prime meridian on the vernal equinox lat = 0 lon = 0 - dt = datetime.datetime(2019,3,20,12,8,0,tzinfo=datetime.timezone.utc) - _,_,angle,sunAltitude,sunAzimuth = dls.compute_sun_angle((lat, lon, 0), - (0,0,0), - dt, - np.array([0,0,-1])) + dt = datetime.datetime(2019, 3, 20, 12, 8, 0, tzinfo=datetime.timezone.utc) + _, _, angle, sunAltitude, sunAzimuth = dls.compute_sun_angle((lat, lon, 0), + (0, 0, 0), + dt, + np.array([0, 0, -1])) assert angle == pytest.approx(math.radians(lat), abs=0.01) - assert sunAltitude == pytest.approx(math.radians(90-lat), abs=0.01) - #assert sunAzimuth == pytest.approx(math.pi, abs=0.01) # should be straight up, at the equator, don't test elevation + assert sunAltitude == pytest.approx(math.radians(90 - lat), abs=0.01) + # assert sunAzimuth == pytest.approx(math.pi, abs=0.01) # should be straight up, at the equator, don't test elevation # middle of the ocean at 45deg sout latitutde lat = -45 lon = 0 - dt = datetime.datetime(2019,3,20,12,8,0,tzinfo=datetime.timezone.utc) - _,_,angle,sunAltitude,sunAzimuth = dls.compute_sun_angle((lat, lon, 0), - (0,0,0), - dt, - np.array([0,0,-1])) + dt = datetime.datetime(2019, 3, 20, 12, 8, 0, tzinfo=datetime.timezone.utc) + _, _, angle, sunAltitude, sunAzimuth = dls.compute_sun_angle((lat, lon, 0), + (0, 0, 0), + dt, + np.array([0, 0, -1])) assert angle == pytest.approx(math.radians(math.fabs(lat)), abs=0.01) - assert sunAltitude == pytest.approx(math.radians(90+lat), abs=0.01) - assert sunAzimuth == pytest.approx(2*math.pi, abs=0.01) # should be due north + assert sunAltitude == pytest.approx(math.radians(90 + lat), abs=0.01) + assert sunAzimuth == pytest.approx(2 * math.pi, abs=0.01) # should be due north + def test_sun_angle_image(img): if dls.havePysolar: sun_angle = dls.compute_sun_angle((img.latitude, img.longitude, img.altitude), (img.dls_yaw, img.dls_pitch, img.dls_roll), img.utc_time, - np.array([0,0,-1])) - assert sun_angle[0] == pytest.approx([-0.711, -0.247, -0.659], abs=0.001) - assert sun_angle[1] == pytest.approx([-1.87482468e-01, 1.82720334e-05, -9.82267949e-01], abs=0.001) - assert sun_angle[2] == pytest.approx(0.6754, abs=0.001) - assert sun_angle[3] == pytest.approx(0.7193, abs=0.001) - assert sun_angle[4] == pytest.approx(3.4756, abs=0.001) + np.array([0, 0, -1])) + assert sun_angle[0] == pytest.approx([-0.61171272, 0.34136377, -0.71363739], abs=0.001) + assert sun_angle[1] == pytest.approx([0.03617934, 0.00352109, -0.99933911], abs=0.001) + assert sun_angle[2] == pytest.approx(0.8062129905713518, abs=0.001) + assert sun_angle[3] == pytest.approx(0.7946770189357074, abs=0.001) + assert sun_angle[4] == pytest.approx(2.6325931620626153, abs=0.001) else: assert True + def test_fresnel(): assert dls.fresnel(0.00) == pytest.approx(0.9416, abs=0.001) assert dls.fresnel(0.01) == pytest.approx(0.9416, abs=0.001) @@ -118,38 +128,44 @@ def test_fresnel(): assert dls.fresnel(0.99) == pytest.approx(0.903, abs=0.001) assert dls.fresnel(1.00) == pytest.approx(0.901, abs=0.001) + def test_get_orientation_zenith(): - pose = (math.radians(0),math.radians(0), math.radians(0)) - orientation = [0,0,-1] + pose = (math.radians(0), math.radians(0), math.radians(0)) + orientation = [0, 0, -1] ned = dls.get_orientation(pose, orientation) - assert ned == pytest.approx([0,0,-1]) + assert ned == pytest.approx([0, 0, -1]) + def test_get_orientation_north(): - pose = (math.radians(0),math.radians(-90), math.radians(0)) - orientation = [0,0,-1] + pose = (math.radians(0), math.radians(-90), math.radians(0)) + orientation = [0, 0, -1] 
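+ # a -90 degree pitch should swing the down-pointing sensor normal to due north, i.e. NED [1, 0, 0]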
     ned = dls.get_orientation(pose, orientation)
-    assert ned == pytest.approx([1,0,0])
+    assert ned == pytest.approx([1, 0, 0])
+
 
 def test_get_orientation_east():
-    pose = (math.radians(90),math.radians(-90), math.radians(0))
-    orientation = [0,0,-1]
+    pose = (math.radians(90), math.radians(-90), math.radians(0))
+    orientation = [0, 0, -1]
     ned = dls.get_orientation(pose, orientation)
-    assert ned == pytest.approx([0,1,0])
+    assert ned == pytest.approx([0, 1, 0])
+
 
 def test_get_orientation_south():
-    pose = (math.radians(0),math.radians(90), math.radians(0))
-    orientation = [0,0,-1]
+    pose = (math.radians(0), math.radians(90), math.radians(0))
+    orientation = [0, 0, -1]
     ned = dls.get_orientation(pose, orientation)
-    assert ned == pytest.approx([-1,0,0])
+    assert ned == pytest.approx([-1, 0, 0])
+
 
 def test_get_orientation_south2():
-    pose = (math.radians(180),math.radians(-90), math.radians(0))
-    orientation = [0,0,-1]
+    pose = (math.radians(180), math.radians(-90), math.radians(0))
+    orientation = [0, 0, -1]
     ned = dls.get_orientation(pose, orientation)
-    assert ned == pytest.approx([-1,0,0])
+    assert ned == pytest.approx([-1, 0, 0])
+
 
 def test_get_orientation_west():
-    pose = (math.radians(-90),math.radians(-90), math.radians(0))
-    orientation = [0,0,-1]
+    pose = (math.radians(-90), math.radians(-90), math.radians(0))
+    orientation = [0, 0, -1]
     ned = dls.get_orientation(pose, orientation)
-    assert ned == pytest.approx([0,-1,0])
\ No newline at end of file
+    assert ned == pytest.approx([0, -1, 0])
diff --git a/tests/test_image.py b/tests/test_image.py
index 7a95187b..8040ae3b 100644
--- a/tests/test_image.py
+++ b/tests/test_image.py
@@ -23,35 +23,40 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 """
 
-import pytest
-import os, glob
-import math
 import numpy as np
+import pytest
 
 import micasense.image as image
 import micasense.panel as panel
+
 
 def test_load_image_metadata(img):
     assert img.meta is not None
     assert img.meta.band_index() == 0
     assert img.meta.camera_make() == 'MicaSense'
-    assert img.meta.camera_model() == 'RedEdge'
+    assert img.meta.camera_model() == 'RedEdge-M'
 
-def test_less_than(img,img2):
+
+def test_less_than(img, img2):
     assert img < img2
 
-def test_greater_than(img,img2):
+
+def test_greater_than(img, img2):
     assert img2 > img
 
-def test_equal(img,img2):
+
+def test_equal(img, img2):
     assert img == img
 
-def test_not_equal(img,img2):
+
+def test_not_equal(img, img2):
    assert img != img2
+
 
 def test_load_image_raw(img):
     assert img.raw() is not None
+
 
 def test_clear_image_data(img):
     assert img.undistorted(img.radiance()) is not None
     img.clear_image_data()
@@ -63,48 +68,57 @@ def test_clear_image_data(img):
     assert img._Image__undistorted_source is None
     assert img._Image__undistorted_image is None
+
 
 def test_reflectance(img):
     pan = panel.Panel(img)
-    panel_reflectance = 0.5
+    panel_reflectance = 0.50
     panel_irradiance = pan.irradiance_mean(panel_reflectance)
     reflectance_img = img.reflectance(panel_irradiance)
     ref_mean, _, _, _ = pan.region_stats(reflectance_img, pan.panel_corners())
-    assert ref_mean == pytest.approx(panel_reflectance, 1e-4)
+    assert ref_mean == pytest.approx(panel_reflectance, 1e-2)
+
 
 def test_size(img):
-    assert img.size() == (1280,960)
+    assert img.size() == (1280, 960)
+
 
 def test_pp_px(img):
-    assert img.principal_point_px() == pytest.approx((627.6, 479.9), abs=0.1)
+    assert img.principal_point_px() == pytest.approx((657.402, 478.056), abs=0.1)
+
 
 def test_cv2_camera_matrix(img):
-    test_mat = [[1449.4, 0.0, 627.6],
-                [ 0.0, 1449.4, 479.9],
-                [ 0.0, 0.0, 1.0]]
+    test_mat = [[1441.60555, 0, 657.402667],
+                [0, 1441.60555, 478.056001],
+                [0, 0, 1]]
 
     for idx, row in enumerate(img.cv2_camera_matrix()):
         assert row == pytest.approx(test_mat[idx], abs=0.1)
+
 
 def test_altum_panel_image(panel_altum_image):
     assert panel_altum_image.size() == (2064, 1544)
     assert panel_altum_image.meta.camera_make() == "MicaSense"
     assert panel_altum_image.meta.camera_model() == "Altum"
     assert panel_altum_image.auto_calibration_image == True
+
 
 def test_altum_flight_image(altum_flight_image):
     assert altum_flight_image.meta.camera_make() == "MicaSense"
     assert altum_flight_image.meta.camera_model() == "Altum"
     assert altum_flight_image.auto_calibration_image == False
+
 
 def test_image_not_file(non_existant_file_name):
     with pytest.raises(OSError):
         image.Image(non_existant_file_name)
+
 
 def test_altum_lwir_image(altum_lwir_image):
     assert altum_lwir_image.meta.band_name() == 'LWIR'
-    assert altum_lwir_image.size() == (160,120)
+    assert altum_lwir_image.size() == (160, 120)
     assert altum_lwir_image.auto_calibration_image == False
+
 
 def test_altum_image_horizontal_irradiance(altum_flight_image):
     assert altum_flight_image.dls_present
     solar_el = altum_flight_image.solar_elevation
@@ -112,13 +126,3 @@ def test_altum_image_horizontal_irradiance(altum_flight_image):
     scattered_irr = altum_flight_image.scattered_irradiance
     good_horiz_irradiance = direct_irr * np.sin(solar_el) + scattered_irr
     assert altum_flight_image.horizontal_irradiance == pytest.approx(good_horiz_irradiance, 1e-3)
-
-def test_altum_bad_dls2_horizontal_irradiance(bad_dls2_horiz_irr_image):
-    assert bad_dls2_horiz_irr_image.dls_present
-    assert bad_dls2_horiz_irr_image.meta.horizontal_irradiance_valid() == False
-    solar_el = bad_dls2_horiz_irr_image.solar_elevation
-    direct_irr = bad_dls2_horiz_irr_image.direct_irradiance
-    scattered_irr = bad_dls2_horiz_irr_image.scattered_irradiance
-    good_horiz_irradiance = direct_irr * np.sin(solar_el) + scattered_irr
-    assert bad_dls2_horiz_irr_image.horizontal_irradiance == pytest.approx(good_horiz_irradiance, 1e-3)
-
diff --git a/tests/test_imageset.py b/tests/test_imageset.py
index 3f3c5b49..fd002670 100644
--- a/tests/test_imageset.py
+++ b/tests/test_imageset.py
@@ -23,31 +23,36 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""" +import os import pytest -import os, glob -import micasense.imageset as imageset import micasense.capture as capture -import micasense.image as image +import micasense.imageset as imageset + @pytest.fixture() def files_dir(): - return os.path.join('data', '0000SET', '000') + return os.path.join('data', 'REDEDGE-MX') + progress_val = 0.0 + + def progress(p): global progress_val progress_val = p - + + def test_from_captures(files_dir): - file1 = os.path.join(files_dir, 'IMG_0000_1.tif') - file2 = os.path.join(files_dir, 'IMG_0001_1.tif') + file1 = os.path.join(files_dir, 'IMG_0001_1.tif') + file2 = os.path.join(files_dir, 'IMG_0020_1.tif') cap1 = capture.Capture.from_file(file1) cap2 = capture.Capture.from_file(file2) - imgset = imageset.ImageSet([cap1,cap2]) + imgset = imageset.ImageSet([cap1, cap2]) assert imgset.captures is not None + def test_from_directory(files_dir): global progress_val progress(0.0) @@ -56,15 +61,17 @@ def test_from_directory(files_dir): assert progress_val == 1.0 assert len(imgset.captures) == 2 + def test_as_nested_lists(files_dir): imgset = imageset.ImageSet.from_directory(files_dir) assert imgset is not None data, columns = imgset.as_nested_lists() - assert data[0][1] == 36.576096 + assert data[0][1] == 47.7036143 assert columns[0] == 'timestamp' + def test_10_band_from_dir(ten_band_files_dir): imgset = imageset.ImageSet.from_directory(ten_band_files_dir, progress) assert imgset is not None assert progress_val == 1.0 - assert len(imgset.captures) == 2 \ No newline at end of file + assert len(imgset.captures) == 2 diff --git a/tests/test_imageutils.py b/tests/test_imageutils.py index cb864a33..1b320a3d 100644 --- a/tests/test_imageutils.py +++ b/tests/test_imageutils.py @@ -23,71 +23,94 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" - import pytest -import os, glob -import cv2 +from numpy import array -import micasense.imageset as imageset -import micasense.capture as capture -import micasense.image as image import micasense.imageutils as imageutils -from numpy import array -from numpy import float32 - -truth_warp_matrices = [array([[ 1.00523243e+00, -3.95214025e-03, -1.02620616e+01], - [ 2.48925470e-03, 1.00346483e+00, 4.17114294e+01], - [ 7.86653480e-07, -2.04642746e-06, 1.00000000e+00]]), array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]), array([[ 9.98681616e-01, -4.56952788e-03, -2.08530561e+00], - [ 4.63740524e-03, 9.96737324e-01, 3.19011722e+01], - [ 1.48930038e-06, -1.05003201e-06, 1.00000000e+00]]), array([[ 1.00149509e+00, -1.56960584e-03, -3.80940807e+00], - [ 2.11523967e-03, 1.00222122e+00, 4.78563536e+01], - [ 5.63914024e-07, 1.03391312e-07, 1.00000000e+00]]), array([[ 1.00305493e+00, -2.82497954e-03, -1.02199199e+01], - [ 3.23661267e-03, 1.00139925e+00, 1.50062440e+01], - [ 1.63746543e-06, -8.01922991e-07, 1.00000000e+00]]), array([[ 6.35209892e-02, 1.17877689e-05, 1.40322785e+01], - [-4.56733969e-04, 6.35520044e-02, 1.15592432e+01], - [-4.15804231e-06, -2.63551964e-06, 1.00000000e+00]])] +truth_warp_matrices = [array([[1.00970826e+00, 1.77994467e-03, -1.56924379e+01], + [-8.81481370e-04, 1.00902183e+00, -2.00348790e+01], + [9.37048882e-07, 1.81088623e-07, 1.00000000e+00]]), array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]), + array([[1.00178888e+00, 5.20346163e-03, 3.69187005e+01], + [-3.96537869e-03, 1.00147557e+00, 1.30881661e+01], + [9.43072540e-07, 4.60918812e-07, 1.00000000e+00]]), + array([[1.00392234e+00, 3.71917056e-04, 5.63565776e-01], + [-1.01700706e-03, 1.00376351e+00, -1.93992179e+01], + [7.62785884e-08, -6.79246435e-07, 1.00000000e+00]]), + array([[1.00017929e+00, -1.68397918e-03, -6.94129471e+00], + [-4.79481808e-04, 9.99392449e-01, -1.86069216e+01], + [-7.16703525e-07, -1.58956796e-06, 1.00000000e+00]]), + array([[6.40923927e-02, -9.44614560e-04, 1.44295833e+01], + [1.15806613e-03, 6.36425220e-02, 7.48762382e+00], + [2.72813889e-06, 7.70422215e-07, 1.00000000e+00]])] truth_image_sizes = [(2064, 1544), (2064, 1544), (2064, 1544), (2064, 1544), (2064, 1544), (160, 120)] -truth_lens_distortions = [array([-1.360334e-01, 2.374279e-01, 1.761687e-04, 2.373747e-04, - -1.304408e-01]), array([-1.458199e-01, 2.681765e-01, 2.403470e-04, -6.698399e-04, - -2.014740e-01]), array([-1.482020e-01, 2.494987e-01, 4.884159e-04, -1.989958e-04, - -1.674770e-01]), array([-1.516688e-01, 2.483217e-01, 9.426709e-04, 1.109110e-04, - -1.619578e-01]), array([-1.487282e-01, 2.477914e-01, 1.381469e-04, 5.226758e-04, - -1.687072e-01]), array([-3.869621e-01, 4.784228e-01, 3.671945e-03, 4.130745e-04, - -4.892879e-01])] - -truth_camera_matrices = [array([[2.24343510e+03, 0.00000000e+00, 1.01295942e+03], - [0.00000000e+00, 2.24343510e+03, 7.67547825e+02], - [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), array([[2.23882015e+03, 0.00000000e+00, 1.01802029e+03], - [0.00000000e+00, 2.23882015e+03, 7.30214492e+02], - [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), array([[2.23464845e+03, 0.00000000e+00, 1.01673333e+03], - [0.00000000e+00, 2.23464845e+03, 7.58373912e+02], - [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), array([[2.24700170e+03, 0.00000000e+00, 1.01524638e+03], - [0.00000000e+00, 2.24700170e+03, 7.80426086e+02], - [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), array([[2.24418040e+03, 0.00000000e+00, 1.01343188e+03], - [0.00000000e+00, 2.24418040e+03, 7.45014492e+02], - [0.00000000e+00, 
0.00000000e+00, 1.00000000e+00]]), array([[162.63769993, 0. , 78.2788333 ], - [ 0. , 162.63769993, 56.92766664], - [ 0. , 0. , 1. ]])] - -expected_dimensions = (21.0, 12.0, 2035.0, 1467.0) +truth_lens_distortions = [array([-1.229523e-01, 2.344970e-01, 1.695486e-05, -4.560869e-04, -1.624159e-01]), + array([-0.1279964, 0.2289589, 0.00027409, -0.00074023, -0.1536382]), + array([-0.1281345, 0.2031752, 0.00043306, -0.00098965, -0.09603056]), + array([-1.309457e-01, 2.203982e-01, -1.260115e-05, -4.066518e-04, -1.594542e-01]), + array([-0.128634, 0.2207445, 0.0003157, -0.00077475, -0.1538338]), + array([-0.3571166, 0.2329389, 0.00148511, -0.00306555, -0.02952669])] + +truth_camera_matrices_old = [array([[2.24343510e+03, 0.00000000e+00, 1.01295942e+03], + [0.00000000e+00, 2.24343510e+03, 7.67547825e+02], + [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + array([[2.23882015e+03, 0.00000000e+00, 1.01802029e+03], + [0.00000000e+00, 2.23882015e+03, 7.30214492e+02], + [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + array([[2.23464845e+03, 0.00000000e+00, 1.01673333e+03], + [0.00000000e+00, 2.23464845e+03, 7.58373912e+02], + [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + array([[2.24700170e+03, 0.00000000e+00, 1.01524638e+03], + [0.00000000e+00, 2.24700170e+03, 7.80426086e+02], + [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + array([[2.24418040e+03, 0.00000000e+00, 1.01343188e+03], + [0.00000000e+00, 2.24418040e+03, 7.45014492e+02], + [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + array([[162.63769993, 0., 78.2788333], + [0., 162.63769993, 56.92766664], + [0., 0., 1.]])] + +truth_camera_matrices = [array([[2.26561955e+03, 0.00000000e+00, 1.03073913e+03], + [0.00000000e+00, 2.26561955e+03, 7.59208694e+02], + [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + array([[2.25366650e+03, 0.00000000e+00, 1.03212464e+03], + [0.00000000e+00, 2.25366650e+03, 7.72739129e+02], + [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + array([[2.25435270e+03, 0.00000000e+00, 1.07806666e+03], + [0.00000000e+00, 2.25435270e+03, 7.83782607e+02], + [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + array([[2.26837520e+03, 0.00000000e+00, 1.03717681e+03], + [0.00000000e+00, 2.26837520e+03, 7.52794202e+02], + [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + array([[2.26240075e+03, 0.00000000e+00, 1.02280000e+03], + [0.00000000e+00, 2.26240075e+03, 7.47063767e+02], + [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + array([[160.98314994, 0., 81.30491663], + [0., 160.98314994, 58.28508331], + [0., 0., 1.]])] + +expected_dimensions = (20.0, 32.0, 1995.0, 1491.0) + def test_image_properties(non_panel_altum_capture): - for i,image in enumerate(non_panel_altum_capture.images): - assert(image.size() == pytest.approx(truth_image_sizes[i])) - assert(image.cv2_distortion_coeff() == pytest.approx(truth_lens_distortions[i])) - assert(image.cv2_camera_matrix() == pytest.approx(truth_camera_matrices[i])) + for i, image in enumerate(non_panel_altum_capture.images): + assert (image.size() == pytest.approx(truth_image_sizes[i])) + assert (image.cv2_distortion_coeff() == pytest.approx(truth_lens_distortions[i], abs=0.001)) + assert (image.cv2_camera_matrix() == pytest.approx(truth_camera_matrices[i], abs=0.001)) + def test_warp_matrices(non_panel_altum_capture): warp_matrices = non_panel_altum_capture.get_warp_matrices() - for index,warp_matrix in enumerate(warp_matrices): - assert(warp_matrix == pytest.approx(truth_warp_matrices[index],rel=1e-2)) + print(warp_matrices) + for index, warp_matrix in 
+        assert (warp_matrix == pytest.approx(truth_warp_matrices[index], rel=1e-2))
+
 
 def test_cropping(non_panel_altum_capture):
     warp_matrices = non_panel_altum_capture.get_warp_matrices()
-    cropped_dimensions,_ = imageutils.find_crop_bounds(non_panel_altum_capture,warp_matrices)
-    assert(cropped_dimensions == pytest.approx(expected_dimensions,abs=1))
+    cropped_dimensions, _ = imageutils.find_crop_bounds(non_panel_altum_capture, warp_matrices)
+    assert (cropped_dimensions == pytest.approx(expected_dimensions, abs=1))
diff --git a/tests/test_metadata.py b/tests/test_metadata.py
index e70239b4..2fc3f5a3 100644
--- a/tests/test_metadata.py
+++ b/tests/test_metadata.py
@@ -24,120 +24,125 @@
 """
 
 import pytest
-import os, glob
-import micasense.metadata as metadata
 
 
 def test_load_image_metadata(meta):
     assert meta is not None
+
 
 def test_band_index(meta):
     assert meta.band_index() == 0
+
 
 def test_camera_make(meta):
     assert meta.camera_make() == 'MicaSense'
+
 
 def test_camera_model(meta):
-    assert meta.camera_model() == 'RedEdge'
+    assert meta.camera_model() == 'RedEdge-M'
+
 
 def test_flight_id(meta):
-    assert meta.flight_id() == 'NtLNbVIdowuCaWYbg3ck'
+    assert meta.flight_id() == 'ePdxBBmSitgkTdpwZiM9'
+
 
 def test_capture_id(meta):
-    assert meta.capture_id() == '5v25BtsZg3BQBhVH7Iaz'
+    assert meta.capture_id() == 'Rb0pibHa08uHJwrTjf8Y'
+
 
 def test_black_level(meta):
     assert meta.black_level() == 4800.0
+
 
 def test_focal_length_mm(meta):
-    assert meta.focal_length_mm() == pytest.approx(5.43509341)
+    assert meta.focal_length_mm() == pytest.approx(5.40602081)
 
-def test_focal_length_mm_v3(meta_v3):
-    assert meta_v3.focal_length_mm() == pytest.approx(5.45221099)
 
 def test_fp_resolution(meta):
-    assert meta.focal_plane_resolution_px_per_mm() == pytest.approx([266.666667,266.666667])
+    assert meta.focal_plane_resolution_px_per_mm() == pytest.approx([266.666667, 266.666667])
 
-def test_fp_resolution_v3(meta_v3):
-    assert meta_v3.focal_plane_resolution_px_per_mm() == pytest.approx([266.666667,266.666667])
 
 def test_utc_time(meta):
-    utc_time = meta.utc_time() 
+    utc_time = meta.utc_time()
     assert utc_time is not None
-    assert utc_time.strftime('%Y-%m-%d %H:%M:%S.%f') == '2017-10-19 20:40:39.200174'
+    assert utc_time.strftime('%Y-%m-%d %H:%M:%S.%f') == '2022-04-06 18:50:25.983430'
 
-def test_utc_time_v3(meta_v3):
-    utc_time = meta_v3.utc_time()
-    assert utc_time is not None
-    assert utc_time.strftime('%Y-%m-%d %H:%M:%S.%f')[:] == '2018-04-10 10:52:30.866550'
 
 def test_position(meta):
-    assert meta.position() == pytest.approx((36.576096, -119.4352689, 101.861))
+    assert meta.position() == pytest.approx((47.7036143, -122.1414373, 6.728))
+
 
 def test_dls_present(meta):
     assert meta.dls_present() == True
+
 
 def test_metadata_size(meta):
     assert meta.size('XMP:RadiometricCalibration') == 3
+
 
 def test_center_wavelength(meta):
     assert meta.center_wavelength() == 475
+
 
 def test_vignette_center(meta):
-    assert meta.vignette_center() == pytest.approx([676.703, 480.445], abs=0.001)
+    assert meta.vignette_center() == pytest.approx([623.6301, 470.2927], abs=0.001)
+
 
 def test_vignette_polynomial(meta):
-    expected_poly = [-3.188190987533484e-05, 1.1380741452056501e-07, -2.7776829778142425e-09, 9.981184981301047e-12, -1.4703936738578638e-14, 7.334097230810222e-18]
+    expected_poly = [1.001285e-06, 5.61421e-07, -5.962064e-09, 1.862037e-11, -1.4703936738578638e-14,
+                     7.334097230810222e-18]
     assert meta.vignette_polynomial() == pytest.approx(expected_poly, rel=0.001)
+
 
 def test_principal_point_mm(meta):
-    assert meta.principal_point() == pytest.approx([2.35363, 1.79947])
+    assert meta.principal_point() == pytest.approx([2.46526, 1.79271])
+
 
 def test_distortion_parameters(meta):
-    expected_params = [-0.09679655532374383, 0.14041893470790068, -0.022980842634993275, 0.0002758383774216635, 0.0006600729536460939]
+    expected_params = [-0.1058375, 0.2199191, -0.2010044, 0.0007368542, -0.0004963633]
     assert meta.distortion_parameters() == pytest.approx(expected_params, rel=0.001)
+
 
 def test_bits_per_pixel(meta):
     assert meta.bits_per_pixel() == 16
+
 
 def test_dark_pixels(meta):
-    assert meta.dark_pixels() == pytest.approx(5071.5)
+    assert meta.dark_pixels() == pytest.approx(5045.25)
+
 
 def test_gain(meta):
     assert meta.gain() == 1
+
 
 def test_firmware_version(meta):
-    assert meta.firmware_version() == "v2.1.2-34-g05e37eb-local"
+    assert meta.firmware_version() == "v7.5.0-beta6"
 
-def test_firmware_version_v3(meta_v3):
-    assert meta_v3.firmware_version() == "v3.3.0"
 
 def test_dls_irradiance(meta):
-    assert meta.spectral_irradiance() == pytest.approx(1.0848, abs=0.0001)
+    assert meta.spectral_irradiance() == pytest.approx(0.8821, abs=0.0001)
+
 
 def test_dls_pose(meta):
-    assert meta.dls_pose() == pytest.approx((-3.070, -0.188, -0.013), abs=0.001)
+    assert meta.dls_pose() == pytest.approx((-2.0091497634122724, 0.018554597483870183, 0.031269217556393974),
+                                            abs=0.001)
+
 
 def test_good_exposure(meta):
-    assert meta.exposure() == pytest.approx(0.0004725)
+    assert meta.exposure() == pytest.approx(0.000135)
 
-def test_good_exposure_v3(meta_v3):
-    assert meta_v3.exposure() == pytest.approx(0.00171)
 
 def test_bad_exposure_time(meta_bad_exposure):
     assert meta_bad_exposure.exposure() == pytest.approx(247e-6, abs=1e-3)
 
-def test_dls1_scale_factor(meta):
-    assert meta.irradiance_scale_factor() == pytest.approx(1.0)
 
 def test_dls_present_dls2(meta_altum_dls2):
     assert meta_altum_dls2.dls_present() == True
-    
+
+
 def test_dls2_scale_factor(meta_altum_dls2):
     assert meta_altum_dls2.irradiance_scale_factor() == pytest.approx(0.01)
 
-def test_horizontal_irradiance_valid(meta):
-    assert meta.horizontal_irradiance_valid() == False
 
 def test_horizontal_irradiance_valid_altum(meta_altum_dls2):
     assert meta_altum_dls2.horizontal_irradiance_valid() == True
diff --git a/tests/test_panel.py b/tests/test_panel.py
index ca2d34fa..8b9b9759 100644
--- a/tests/test_panel.py
+++ b/tests/test_panel.py
@@ -23,170 +23,136 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""" +import operator + import pytest -import os, glob -import math + import micasense.image as image import micasense.panel as panel -import operator -def test_RP06_panel_ID(panel_image_name_RP06_blue): - img = image.Image(panel_image_name_RP06_blue) - pan = panel.Panel(img) - qr_corners = pan.qr_corners() - assert pan.panel_version == 6 - -def test_RP06_panel_ID_autodetect(panel_image_name_RP06_blue): - img = image.Image(panel_image_name_RP06_blue) - pan = panel.Panel(img,ignore_autocalibration=True) - qr_corners = pan.qr_corners() - - assert pan.panel_version == 6 -def test_RP06_panel_raw(panel_images_RP06): - test_mean = [33082,34347,33971,34186,33371] - test_std = [474.7,582.6,476.3,464,658.9] - test_num = [3616,3552,3669,3612,3729] - test_sat = [0,0,0,0,0] - for i,m,s,n,sa in zip(panel_images_RP06,test_mean,test_std,test_num,test_sat): - img = image.Image(i) - pan = panel.Panel(img) - mean, std, num, sat = pan.raw() - assert pan.panel_detected() - print('mean {:f} std {:f} num {:f} sat {:f}'.format(mean,std,num,sat)) - print('m {:f} s {:f} n {:f} sa {:f}'.format(m,s,n,sa)) - assert mean == pytest.approx(m,rel=0.1) - assert std == pytest.approx(s,rel=0.1) - assert num == pytest.approx(n,rel=0.1) - assert sat == pytest.approx(sa,rel=0.1) - - - -def test_qr_corners(panel_image_name): - img = image.Image(panel_image_name) - pan = panel.Panel(img) - qr_corners = pan.qr_corners() - good_qr_corners = [[460, 599], [583, 599], [584, 478], [462, 477]] - assert qr_corners is not None - assert len(qr_corners) == len(good_qr_corners) - assert pan.serial == 'RP02-1603036-SC' - for i, pt in enumerate(qr_corners): - # different opencv/zbar versions round differently it seems - assert pt[0] == pytest.approx(good_qr_corners[i][0], abs=3) - assert pt[1] == pytest.approx(good_qr_corners[i][1], abs=3) def test_panel_corners(panel_image_name): img = image.Image(panel_image_name) pan = panel.Panel(img) panel_pts = pan.panel_corners() - good_pts = [[785,594],[674,593],[673,483],[783,484]] - + good_pts = [(672, 676), (673, 753), (753, 751), (752, 674)] assert panel_pts is not None assert len(panel_pts) == len(good_pts) - assert pan.serial == 'RP02-1603036-SC' + assert pan.serial == 'RP06-2051037-OB' # the particular order of the points is not relevant # so sort by coordinates - panel_pts = sorted(panel_pts,key=operator.itemgetter(0,1)) - good_pts = sorted(good_pts,key=operator.itemgetter(0,1)) + panel_pts = sorted(panel_pts, key=operator.itemgetter(0, 1)) + good_pts = sorted(good_pts, key=operator.itemgetter(0, 1)) for i, pt in enumerate(panel_pts): # different opencv/zbar versions round differently it seems assert pt[0] == pytest.approx(good_pts[i][0], abs=3) assert pt[1] == pytest.approx(good_pts[i][1], abs=3) + # test manually providing bad corners - in this case the corners of the qr code itself def test_raw_panel_bad_corners(panel_image_name): img = image.Image(panel_image_name) - pan = panel.Panel(img,panelCorners=[[460, 599], [583, 599], [584, 478], [462, 477]]) + pan = panel.Panel(img, panel_corners=[[460, 599], [583, 599], [584, 478], [462, 477]]) mean, std, num, sat = pan.raw() - assert mean == pytest.approx(26965, rel=0.01) - assert std == pytest.approx(15396.0, rel=0.05) - assert num == pytest.approx(14824, rel=0.01) + assert mean == pytest.approx(10111, rel=0.01) + assert std == pytest.approx(7207.0, rel=0.05) + assert num == pytest.approx(14949, rel=0.01) assert sat == pytest.approx(0, abs=2) + # test manually providing good corners def test_raw_panel_manual(panel_image_name): img = 
image.Image(panel_image_name) - pan = panel.Panel(img,panelCorners=[[809, 613], [648, 615], [646, 454], [808, 452]]) + pan = panel.Panel(img, panel_corners=[[809, 613], [648, 615], [646, 454], [808, 452]]) mean, std, num, sat = pan.raw() - assert mean == pytest.approx(45406, rel=0.01) - assert std == pytest.approx(738.0, rel=0.05) - assert num == pytest.approx(26005, rel=0.001) + assert mean == pytest.approx(24066, rel=0.01) + assert std == pytest.approx(14266.0, rel=0.05) + assert num == pytest.approx(26008, rel=0.001) assert sat == pytest.approx(0, abs=2) + # test saturated pixels with modified panel picture def test_raw_panel_saturatedl(panel_image_name): img = image.Image(panel_image_name) - pan = panel.Panel(img,panelCorners=[[809, 613], [648, 615], [646, 454], [808, 452]]) - - #saturate 2500 pixels in the raw image - note that on the undistorted image this - #will result in 2329 saturated pixels + pan = panel.Panel(img, panel_corners=[[809, 613], [648, 615], [646, 454], [808, 452]]) + + # saturate 2500 pixels in the raw image - note that on the undistorted image this + # will result in 2329 saturated pixels i0 = img.undistorted(img.raw()) - i0[500:550,700:750]=4095*16+1 + i0[500:550, 700:750] = 4095 * 16 + 1 img.set_undistorted(i0) - + mean, std, num, sat = pan.raw() - assert mean == pytest.approx(47245, rel=0.01) - assert std == pytest.approx(5846.1, rel=0.05) - assert num == pytest.approx(26005, rel=0.001) + assert mean == pytest.approx(28110, rel=0.01) + assert std == pytest.approx(18271.9, rel=0.05) + assert num == pytest.approx(26008, rel=0.001) assert sat == pytest.approx(2500, abs=0) + def test_raw_panel(panel_image_name): img = image.Image(panel_image_name) pan = panel.Panel(img) mean, std, num, sat = pan.raw() - assert mean == pytest.approx(45406.0, rel=0.01) - assert std == pytest.approx(689.0, rel=0.05) - assert num == pytest.approx(12154, rel=0.02) + assert mean == pytest.approx(28585.0, rel=0.01) + assert std == pytest.approx(551.0, rel=0.05) + assert num == pytest.approx(6166, rel=0.02) assert sat == pytest.approx(0, abs=2) + def test_intensity_panel(panel_image_name): img = image.Image(panel_image_name) pan = panel.Panel(img) mean, std, num, sat = pan.intensity() - assert mean == pytest.approx(1162, rel=0.01) - assert std == pytest.approx(20, rel=0.03) - assert num == pytest.approx(12154, rel=0.02) + assert mean == pytest.approx(1857, rel=0.01) + assert std == pytest.approx(40, rel=0.03) + assert num == pytest.approx(6166, rel=0.02) assert sat == pytest.approx(0, abs=2) + def test_radiance_panel(panel_image_name): img = image.Image(panel_image_name) pan = panel.Panel(img) mean, std, num, sat = pan.radiance() - assert mean == pytest.approx(0.170284, rel=0.01) - assert std == pytest.approx(0.0029387953691472554, rel=0.02) - assert num == pytest.approx(12154, rel=0.02) + assert mean == pytest.approx(0.179092, rel=0.01) + assert std == pytest.approx(0.003904967710223279, rel=0.02) + assert num == pytest.approx(6166, rel=0.02) assert sat == pytest.approx(0, abs=2) + def test_irradiance_mean(panel_image_name): img = image.Image(panel_image_name) pan = panel.Panel(img) - panel_reflectance = 0.67 + panel_reflectance = 0.49 mean = pan.irradiance_mean(panel_reflectance) - assert mean == pytest.approx(0.7984, rel=0.01) - + assert mean == pytest.approx(1.1482, rel=0.001) + + def test_panel_detected(panel_image_name): img = image.Image(panel_image_name) pan = panel.Panel(img) assert pan.panel_detected() == True + def test_panel_not_detected(flight_image_name): img = 
image.Image(flight_image_name) pan = panel.Panel(img) assert pan.panel_detected() == False + def test_altum_panel(altum_panel_image_name): img = image.Image(altum_panel_image_name) assert img.auto_calibration_image == True pan = panel.Panel(img) panel_pts = pan.panel_corners() - good_pts = [[1278, 483], [1176, 491], [1184, 591], [1286, 583]] + good_pts = [[1199, 676], [1191, 798], [1315, 804], [1323, 682]] assert panel_pts is not None assert len(panel_pts) == len(good_pts) - assert pan.serial == 'RP04-1901231-SC' - + assert pan.serial == 'RP06-2051037-OB' + # the particular order of the points is not relevant # so sort by coordinates - panel_pts = sorted(panel_pts,key=operator.itemgetter(0,1)) - good_pts = sorted(good_pts,key=operator.itemgetter(0,1)) + panel_pts = sorted(panel_pts, key=operator.itemgetter(0, 1)) + good_pts = sorted(good_pts, key=operator.itemgetter(0, 1)) for i, pt in enumerate(panel_pts): # different opencv/zbar versions round differently it seems @@ -194,6 +160,7 @@ def test_altum_panel(altum_panel_image_name): assert pt[1] == pytest.approx(good_pts[i][1], abs=3) assert pan.qr_corners() == None + def test_altum_lwir(altum_lwir_image_name): img = image.Image(altum_lwir_image_name) assert img.auto_calibration_image == False @@ -201,19 +168,20 @@ def test_altum_lwir(altum_lwir_image_name): assert pan.panel_detected() == False -def test_ordered_coordinates(panel_image_name): - img = image.Image(panel_image_name) - if img.panel_region is not None: - ordered_corners = img.panel_region - else: - ordered_corners = [(809, 613), (648, 615), (646, 454), (808, 452)] - pan = panel.Panel(img, panelCorners=ordered_corners) - assert pan.ordered_panel_coordinates() == ordered_corners +# def test_ordered_coordinates(panel_image_name): +# img = image.Image(panel_image_name) +# print(img.panel_region) +# if img.panel_region is not None: +# ordered_corners = img.panel_region +# else: +# ordered_corners = [(753, 751), (673, 753), (672, 676), (752, 674)] +# pan = panel.Panel(img, panelCorners=ordered_corners) +# assert pan.ordered_panel_coordinates() == ordered_corners def test_unordered_coordinates(panel_image_name): img = image.Image(panel_image_name) - ordered_corners = [(809, 613), (648, 615), (646, 454), (808, 452)] - unordered_corners = [(648, 615), (809, 613), (808, 452), (646, 454)] - pan = panel.Panel(img, panelCorners=unordered_corners) + ordered_corners = [(753, 751), (673, 753), (672, 676), (752, 674)] + unordered_corners = [(673, 753), (672, 676), (752, 674), (753, 751)] + pan = panel.Panel(img, panel_corners=unordered_corners) assert pan.ordered_panel_coordinates() == ordered_corners
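
Reviewer note (commentary, not part of the patch): two radiometric relations recur in the updated truth values above. test_altum_image_horizontal_irradiance asserts horizontal = direct * sin(solar_elevation) + scattered, and the new test_radiance_panel / test_irradiance_mean numbers (mean radiance 0.179092, reflectance 0.49, irradiance 1.1482) are consistent with the ideal-Lambertian panel relation E = pi * L / rho. The sketch below replays both as a standalone example; the helper names and all numeric inputs except those three test values are hypothetical, and nothing here is a MicaSense library function.

import math

def horizontal_irradiance(direct_irr, scattered_irr, solar_elevation):
    # Direct component projected onto a level surface plus the
    # direction-independent scattered (diffuse) component.
    return direct_irr * math.sin(solar_elevation) + scattered_irr

def panel_irradiance(radiance_mean, panel_reflectance):
    # For an ideal Lambertian panel, irradiance E relates to observed
    # radiance L and reflectance rho by E = pi * L / rho.
    return math.pi * radiance_mean / panel_reflectance

# Hypothetical DLS readings (W/m^2/nm) at 45 degrees solar elevation.
direct_irr, scattered_irr = 0.8, 0.1
solar_el = math.radians(45)
assert math.isclose(horizontal_irradiance(direct_irr, scattered_irr, solar_el),
                    direct_irr * math.sin(solar_el) + scattered_irr, rel_tol=1e-9)

# Plugging the patch's panel values into the Lambertian relation reproduces
# the updated irradiance truth value to within 0.01%:
assert math.isclose(panel_irradiance(0.179092, 0.49), 1.1482, rel_tol=1e-2)

That the panel numbers round-trip through E = pi * L / rho is a useful sanity check when reviewing the new RP06-2051037-OB truth values, since any transcription error in mean radiance or reflectance would break the identity.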