diff --git a/CHANGELOG.md b/CHANGELOG.md index 22b2172..ca4452e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 0.1.4 - 2023-10-11 + ++ Update - Markdown explanations within notebooks `01` and `02` + ## 0.1.3 - 2023-09-14 + Add - GitHub Actions to build Docker image and push to DockerHub diff --git a/completed_tutorials/03-Calcium Imaging Computed Tables.ipynb b/completed_tutorials/03-Calcium Imaging Computed Tables.ipynb index 26a181a..0fd34f6 100644 --- a/completed_tutorials/03-Calcium Imaging Computed Tables.ipynb +++ b/completed_tutorials/03-Calcium Imaging Computed Tables.ipynb @@ -811,7 +811,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The outcome is different across different threholds we set. Therefore, this threshold is a parameter we could potentially tweak." + "The outcome is different across different thresholds we set. Therefore, this threshold is a parameter we could potentially tweak." ] }, { @@ -1064,7 +1064,99 @@ "outputs": [ { "data": { - "image/svg+xml": "\n\n%3\n\n\n\nMouse\n\n\nMouse\n\n\n\n\n\nSession\n\n\nSession\n\n\n\n\n\nMouse->Session\n\n\n\n\nScan\n\n\nScan\n\n\n\n\n\nSession->Scan\n\n\n\n\nSegmentationParam\n\n\nSegmentationParam\n\n\n\n\n\nAverageFrame\n\n\nAverageFrame\n\n\n\n\n\nScan->AverageFrame\n\n\n\n", + "image/svg+xml": [ + "\n", + "\n", + "%3\n", + "\n", + "\n", + "\n", + "Mouse\n", + "\n", + "\n", + "Mouse\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Session\n", + "\n", + "\n", + "Session\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Mouse->Session\n", + "\n", + "\n", + "\n", + "\n", + "Scan\n", + "\n", + "\n", + "Scan\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Session->Scan\n", + "\n", + "\n", + "\n", + "\n", + "SegmentationParam\n", + "\n", + "\n", + "SegmentationParam\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "AverageFrame\n", + "\n", + "\n", + "AverageFrame\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Scan->AverageFrame\n", + "\n", + "\n", + "\n", + "" + ], 
"text/plain": [ "" ] @@ -1142,7 +1234,140 @@ "outputs": [ { "data": { - "image/svg+xml": "\n\n%3\n\n\n\nMouse\n\n\nMouse\n\n\n\n\n\nSession\n\n\nSession\n\n\n\n\n\nMouse->Session\n\n\n\n\nScan\n\n\nScan\n\n\n\n\n\nSession->Scan\n\n\n\n\nSegmentation\n\n\nSegmentation\n\n\n\n\n\nSegmentation.Roi\n\n\nSegmentation.Roi\n\n\n\n\n\nSegmentation->Segmentation.Roi\n\n\n\n\nSegmentationParam\n\n\nSegmentationParam\n\n\n\n\n\nSegmentationParam->Segmentation\n\n\n\n\nAverageFrame\n\n\nAverageFrame\n\n\n\n\n\nAverageFrame->Segmentation\n\n\n\n\nScan->AverageFrame\n\n\n\n", + "image/svg+xml": [ + "\n", + "\n", + "%3\n", + "\n", + "\n", + "\n", + "Mouse\n", + "\n", + "\n", + "Mouse\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Session\n", + "\n", + "\n", + "Session\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Mouse->Session\n", + "\n", + "\n", + "\n", + "\n", + "Scan\n", + "\n", + "\n", + "Scan\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Session->Scan\n", + "\n", + "\n", + "\n", + "\n", + "Segmentation\n", + "\n", + "\n", + "Segmentation\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Segmentation.Roi\n", + "\n", + "\n", + "Segmentation.Roi\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Segmentation->Segmentation.Roi\n", + "\n", + "\n", + "\n", + "\n", + "SegmentationParam\n", + "\n", + "\n", + "SegmentationParam\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "SegmentationParam->Segmentation\n", + "\n", + "\n", + "\n", + "\n", + "AverageFrame\n", + "\n", + "\n", + "AverageFrame\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "AverageFrame->Segmentation\n", + "\n", + "\n", + "\n", + "\n", + "Scan->AverageFrame\n", + "\n", + "\n", + "\n", + "" + ], "text/plain": [ "" ] @@ -2640,7 +2865,180 @@ "outputs": [ { "data": { - "image/svg+xml": 
"\n\n%3\n\n\n\nFluorescence.Trace\n\n\nFluorescence.Trace\n\n\n\n\n\nFluorescence\n\n\nFluorescence\n\n\n\n\n\nFluorescence->Fluorescence.Trace\n\n\n\n\nMouse\n\n\nMouse\n\n\n\n\n\nSession\n\n\nSession\n\n\n\n\n\nMouse->Session\n\n\n\n\nScan\n\n\nScan\n\n\n\n\n\nSession->Scan\n\n\n\n\nSegmentation\n\n\nSegmentation\n\n\n\n\n\nSegmentation->Fluorescence\n\n\n\n\nSegmentation.Roi\n\n\nSegmentation.Roi\n\n\n\n\n\nSegmentation->Segmentation.Roi\n\n\n\n\nSegmentationParam\n\n\nSegmentationParam\n\n\n\n\n\nSegmentationParam->Segmentation\n\n\n\n\nSegmentation.Roi->Fluorescence.Trace\n\n\n\n\nAverageFrame\n\n\nAverageFrame\n\n\n\n\n\nAverageFrame->Segmentation\n\n\n\n\nScan->AverageFrame\n\n\n\n", + "image/svg+xml": [ + "\n", + "\n", + "%3\n", + "\n", + "\n", + "\n", + "Fluorescence.Trace\n", + "\n", + "\n", + "Fluorescence.Trace\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fluorescence\n", + "\n", + "\n", + "Fluorescence\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fluorescence->Fluorescence.Trace\n", + "\n", + "\n", + "\n", + "\n", + "Mouse\n", + "\n", + "\n", + "Mouse\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Session\n", + "\n", + "\n", + "Session\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Mouse->Session\n", + "\n", + "\n", + "\n", + "\n", + "Scan\n", + "\n", + "\n", + "Scan\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Session->Scan\n", + "\n", + "\n", + "\n", + "\n", + "Segmentation\n", + "\n", + "\n", + "Segmentation\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Segmentation->Fluorescence\n", + "\n", + "\n", + "\n", + "\n", + "Segmentation.Roi\n", + "\n", + "\n", + "Segmentation.Roi\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Segmentation->Segmentation.Roi\n", + "\n", + "\n", + "\n", + "\n", + "SegmentationParam\n", + "\n", + "\n", + "SegmentationParam\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "SegmentationParam->Segmentation\n", + "\n", + "\n", + "\n", + "\n", + "Segmentation.Roi->Fluorescence.Trace\n", + "\n", + "\n", + "\n", + "\n", + 
"AverageFrame\n", + "\n", + "\n", + "AverageFrame\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "AverageFrame->Segmentation\n", + "\n", + "\n", + "\n", + "\n", + "Scan->AverageFrame\n", + "\n", + "\n", + "\n", + "" + ], "text/plain": [ "" ] diff --git a/tutorials/01-DataJoint Basics.ipynb b/tutorials/01-DataJoint Basics.ipynb index a88ebe8..8523a9f 100644 --- a/tutorials/01-DataJoint Basics.ipynb +++ b/tutorials/01-DataJoint Basics.ipynb @@ -51,14 +51,17 @@ "If you visit the [documentation for DataJoint](https://docs.datajoint.io/introduction/Data-pipelines.html), we define a data pipeline as follows:\n", "> A data pipeline is a sequence of steps (more generally a directed acyclic graph) with integrated storage at each step. These steps may be thought of as nodes in a graph.\n", "\n", - ">* Nodes in this graph are represented as database **tables**. Examples of such tables include \"Subject\", \"Session\", \"Implantation\", \"Experimenter\", \"Equipment\", but also \"OptoWaveform\", \"OptoStimParams\", or \"Neuronal spikes\". \n", + ">* Nodes in this graph are represented as database **tables**. Examples of such tables include `Subject`, `Session`, `Implantation`, `Experimenter`, `Equipment`, but also `OptoWaveform`, `OptoStimParams`, or `NeuronalSpikes`. \n", "\n", ">* The data pipeline is formed by making these tables interdependent (as the nodes are connected in a network). A **dependency** is a situation where a step of the data pipeline is dependent on a result from a sequentially previous step before it can complete its execution. A dependency graph forms an entire cohesive data pipeline. \n", "\n", - "1. define these \"things\" as tables in which you can store the information about them\n", - "2. define the relationships (in particular the dependencies) between the \"things\"\n", + "In order to create a data pipeline, you need to know the \"things\" in your experiments\n", + "and the relationship between them. 
Within the pipeline you will then:\n", "\n", - "A data pipeline can then serve as a map that describes everything that goes on in your experiment, capturing what is collected, what is processed, and what is analyzed/computed. A well designed data pipeline not only let's you organize your data well, but can bring out logical clarity to your experiment, and may even bring about new insights by making how everything in your experiment relates together obvious.\n", + "1. define these \"things\" as tables in which you can store the information about them.\n", + "2. define the relationships (in particular the dependencies) between the \"things\".\n", + "\n", + "The data pipeline can then serve as a map that describes everything that goes on in your experiment, capturing what is collected, what is processed, and what is analyzed/computed. A well designed data pipeline not only lets you organize your data well, but can bring out logical clarity to your experiment, and may even bring about new insights by making how everything in your experiment relates together obvious.\n", "\n", "Let's go ahead and build together a pipeline from scratch to better understand what a data pipeline is all about."
] @@ -67,7 +70,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "##### Practical examples" + "#### Practical examples" ] }, { @@ -131,21 +134,21 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Just by going through the description, we can start to identify **entities** that needs to be stored and represented in our data pipeline:\n", - "\n", - "* mouse\n", - "* experimental session\n", - "\n", - "For ephys:\n", - "\n", - ">* neuron\n", - ">* spikes\n", - "\n", - "For calcium imaging:\n", - "\n", - ">* scan\n", - ">* regions of interest\n", - ">* trace" + "Just by going through the description, we can start to identify **entities** that need to be stored and represented in our data pipeline:\n", + "\n", + "* mouse\n", + "* experimental session\n", + "\n", + "For ephys:\n", + "\n", + ">* neuron\n", + ">* spikes\n", + "\n", + "For calcium imaging:\n", + "\n", + ">* scan\n", + ">* regions of interest\n", + ">* trace" ] }, { @@ -159,7 +162,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Schemas and tables" + "### Schemas and tables" ] }, { @@ -173,18 +176,21 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In a data pipeline, we represent these **entities** as **tables**. Different *kinds* of entities become distinct tables, and each table row is a single example (instance) of the entity's category. \n", - "\n", - "For example, if we have a `Mouse` table, each row in the mouse table represents a single mouse. \n", - "\n", - "It is essential to think about what information will **uniquely identify** each entry. \n", - "\n", - "In this case, the information that uniquely identifies the `Mouse` table is their **mouse IDs** - a unique ID number assigned to each animal in the lab. This attribute is named the **primary key** of the table.\n", - "\n", - "| Mouse_ID (*Primary key attribute*)|\n", - "|:--------: | \n", - "| 11234 |\n", - "| 11432 |" + "In a data pipeline, we represent these **entities** as **tables**. 
Different *kinds* of entities become distinct tables, and each table row is a single example (instance) of the entity's category. \n", + "\n", + "For example, if we have a `Mouse` table, each row in the mouse table represents a single mouse. \n", + "\n", + "It is essential to think about what information will **uniquely identify** each entry. \n", + "\n", + "In this case, the information that uniquely identifies the `Mouse` table is their\n", + "**mouse ID** - a unique ID number assigned to each animal in the lab. This attribute is\n", + "named the **primary key** of the table. By convention, table attributes are lower case\n", + "and do not contain spaces.\n", + "\n", + "| `mouse_id*` (*Primary key attribute*)|\n", + "|:--------: | \n", + "| 11234 |\n", + "| 11432 |" ] }, { @@ -197,7 +203,7 @@ "\n", "Such an attribute is called the **primary key** of the table: the subset of table attributes uniquely identifying each entity in the table. The **secondary attribute** refers to any field in a table, not in the primary key.\n", "\n", - "| Mouse_ID (*Primary key attribute*) \n", + "| `mouse_id*` (*Primary key attribute*) \n", "|:--------:| \n", "| 11234 (*Secondary attribute*)\n", "| 11432 (*Secondary attribute*)" @@ -207,7 +213,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Once we have successfully identified the table's primary key, we can now think about what other columns, or **non-primary key attributes** - additional information **about each entry in the table that need to be stored as well**." + "Once we have successfully identified the table's primary key, we can now think about what other columns, or **non-primary key attributes** - additional information **about each entry in the table that need to be stored as well**." 
] }, { @@ -221,7 +227,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "| Mouse_ID | DOB | sex |\n", + "| `mouse_id*` | `dob` | `sex` |\n", "|:--------:|------------|--------|\n", "| 11234 | 2017-11-17 | M |\n", "| 11432 | 2018-03-04 | F |" @@ -231,7 +237,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now that we have an idea of how to represent information about the mouse, let's create the table using **DataJoint**!" + "Now that we have an idea of how to represent information about the mouse, let's create the table using **DataJoint**!" ] }, { @@ -245,7 +251,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "##### Schema" + "##### Schema" ] }, { @@ -283,14 +289,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "##### Table" + "##### Table" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In DataJoint, you define each table as a `class`, and provide the table definition (e.g., attribute definitions) as the `definition` static string property. The class will inherit from the `dj.Manual` class provided by DataJoint (more on this later)." + "In DataJoint, you define each table as a `class`, and provide the table definition (e.g. attribute definitions) as the `definition` static string property. The class will inherit from the `dj.Manual` class provided by DataJoint (more on this later)." ] }, { @@ -330,7 +336,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Basic relational operators" + "### Basic relational operators" ] }, { @@ -477,7 +483,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "##### Data integrity" + "##### Data integrity" ] }, { @@ -563,31 +569,29 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "As with `mouse`, we should consider **what information (i.e., attributes) is needed to identify an `experimental session`** uniquely. 
Here is the relevant section of the project description:\n", - "\n", - "> * As a hard working neuroscientist, you perform experiments every day, sometimes working with **more than one mouse in a day**! However, on an any given day, **a mouse undergoes at most one recording session**.\n", - "> * For each experimental session, you would like to record **what mouse you worked with** and **when you performed the experiment**. You would also like to keep track of other helpful information such as the **experimental setup** you worked on." + "As with `mouse`, we should consider **what information (i.e. attributes) is needed to identify an experimental `session`** uniquely. Here is the relevant section of the project description:\n", + "\n", + "> * As a hard working neuroscientist, you perform experiments every day, sometimes working with **more than one mouse in a day**! However, on any given day, **a mouse undergoes at most one recording session**.\n", + "> * For each experimental session, you would like to record **what mouse you worked with** and **when you performed the experiment**. You would also like to keep track of other helpful information such as the **experimental setup** you worked on." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Based on the above, it seems that you need to know these two data to uniquely identify a single experimental session:\n", - "\n", - "* the date of the session\n", - "* the mouse you recorded from in that session\n", - "\n", - "to uniquely identify a single experimental session." + "Based on the above, it seems that you need to know the following data to uniquely identify a single experimental session:\n", + "\n", + "* the date of the session\n", + "* the mouse you recorded from in that session" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Note that, to uniquely identify an experimental session (or simply a `Session`), we need to know the mouse that the session was about. 
In other words, a session cannot existing without a corresponding mouse! \n", - "\n", - "With **mouse** already represented as a table in our pipeline, we say that the session **depends on** the mouse! We could graphically represent this in an **entity relationship diagram (ERD)** by drawing the line between two tables, with the one below (**session**) depending on the one above (**mouse**)." + "Note that, to uniquely identify an experimental session (or simply a `Session`), we need to know the mouse that the session was about. In other words, a session cannot exist without a corresponding mouse! \n", + "\n", + "With **mouse** already represented as a table in our pipeline, we say that the session **depends on** the mouse! We could graphically represent this in an **entity relationship diagram (ERD)** by drawing the line between two tables, with the one below (**session**) depending on the one above (**mouse**)." ] }, { @@ -778,13 +782,17 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We will introduce significant types of queries used in DataJoint:\n", - "* 1. Restriction (`&`) and negative restriction (`-`): filter the data with certain conditions\n", - "* 2. Join (`*`): bring fields from different tables together\n", - "* 3. Projection (`.proj()`): focus on a subset of attributes\n", - "* 4. Fetch (`.fetch()`): pull the data from the database\n", - "* 5. Deletion (`.delete()`): delete entries and their dependencies\n", - "* 6. Drop (`.drop()`): drop the table from the schema" + "We will introduce the major types of queries used in DataJoint:\n", + "1. Restriction (`&`) and negative restriction (`-`): filter the data with certain conditions\n", + "2. Join (`*`): bring fields from different tables together\n", + "3. Projection (`.proj()`): focus on a subset of attributes\n", + "\n", + "Following the query operations, you might work with one or more of the following\n", + "data manipulation operations supported by DataJoint:\n", + " \n", + "1. 
Fetch (`.fetch()`): pull the data from the database\n", + "2. Deletion (`.delete()`): delete entries and their dependencies\n", + "3. Drop (`.drop()`): drop the table from the schema" ] }, { @@ -805,7 +813,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "##### Exact match" + "#### Exact match" ] }, { @@ -876,7 +884,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Inequality" + "#### Inequality" ] }, { @@ -1010,7 +1018,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Restriction one table with another" + "#### Restrict one table with another" ] }, { @@ -1033,7 +1041,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Combining restrictions" + "#### Combine restrictions" ] }, { @@ -1079,7 +1087,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Negative restriction - with the `-` operator" + "#### Negative restriction: with the `-` operator" ] }, { @@ -1134,10 +1142,10 @@ "source": [ "Behavior of join:\n", "\n", - "1. match the common field(s) of the primary keys in the two tables\n", - "2. do a combination of the non-matched part of the primary key\n", - "3. listing out the secondary attributes for each combination\n", - "4. if two tables have secondary attributes that share a same name, it will throw an error. To join, we need to rename that attribute for at least one of the tables." + "1. Match the common field(s) of the primary keys in the two tables.\n", + "2. Do a combination of the non-matched part of the primary key.\n", + "3. List out the secondary attributes for each combination.\n", + "4. If two tables have secondary attributes that share the same name, it will throw an error. To join, we need to rename that attribute for at least one of the tables." 
] }, { @@ -1646,8 +1654,10 @@ "In the next session, we are going to extend our data pipeline with tables to represent **imported data** and define new tables to **compute and hold analysis results**.\n", "\n", "We will use both ephys and calcium imaging as example pipelines:\n", - "+ [02-electrophysiology](../02-Electrophysiology/02-Imported%20Tables%20-%20Interactive.ipynb)\n", - "+ [02-calcium imaging](../01-Calcium_Imaging/02-Imported%20Tables%20-%20Interactive.ipynb)" + "+ [02-Calcium Imaging Imported Tables](./02-Calcium%20Imaging%20Imported%20Tables.ipynb)\n", + "+ [03-Calcium Imaging Computed Tables](./03-Calcium%20Imaging%20Computed%20Tables.ipynb)\n", + "+ [04-Electrophysiology Imported Tables](./04-Electrophysiology%20Imported%20Tables.ipynb)\n", + "+ [05-Electrophysiology Computed Tables](./05-Electrophysiology%20Computed%20Tables.ipynb)" ] }, { diff --git a/tutorials/02-Calcium Imaging Imported Tables.ipynb b/tutorials/02-Calcium Imaging Imported Tables.ipynb index 98b5484..d5004c1 100644 --- a/tutorials/02-Calcium Imaging Imported Tables.ipynb +++ b/tutorials/02-Calcium Imaging Imported Tables.ipynb @@ -18,7 +18,7 @@ "During this session you will learn:\n", "\n", "* To import neuron imaging data from data files into an `Imported` table\n", - "* To automatically trigger data importing and computations for all the missing entries with `Populate`" + "* To automatically trigger data importing and computations for all the missing entries with `populate`" ] }, { @@ -32,7 +32,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "First thing first, let's import `DataJoint` again." + "First thing first, let's import `datajoint` again." 
] }, { @@ -73,25 +73,34 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In the `data` folder in this `DataJoint-Tutorials`, you can find a small dataset of three different cases of calcium imaging scans: `example_scan_01.tif`, `example_scan_02.tif`and `example_scan_03.tif`.\n", + "The `data` folder in this repository contains a small dataset of three different calcium imaging scans: `example_scan_01.tif`, `example_scan_02.tif`and `example_scan_03.tif`.\n", "\n", "As you might know, calcium imaging scans (raw data) are stored as *.tif* files. \n", "\n", - "*NOTE: For this tutorial there is no need to deeper explore this small dataset. Nevertheless, if you are curious about visualizing these example scans, we recommend you to open the TIFF with [ImageJ](https://imagej.nih.gov/ij/download.html).*" + "*NOTE: For this tutorial you do not need to explore this dataset thoroughly. It simply\n", + "serves as an example to populate our data pipeline with example data.*" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## First steps of the pipeline design: Schema, Mouse & Session" + "## Pipeline design: `Mouse` & `Session`" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The DataJoint pipeline commonly starts with a `schema` and the following classes for each table: `Mouse` and `Session`. Let's quickly create this pipeline's first steps as we learned it in the previous session:" + "We can continue working with the tables we defined in the previous notebook in one of\n", + "two ways such that the classes for each table, `Mouse` and `Session`, are declared here: \n", + "* We can redefine them here. \n", + "* Import them from an existing file containing their table definitions.\n", + "\n", + "Here, for your convenience, we have included the schema and table\n", + "class definitions in a package called `tutorial_pipeline.mouse_session`, from which you\n", + "can import the classes as well as the schema object. 
We will use the schema object again\n", + "to define more tables." ] }, { @@ -285,9 +294,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Particularly, this example contains 100 frames. \n", - "\n", - "Let's calculate the average of the images over the frames and plot the result.\n" + "This example contains 100 frames. Let's calculate the average of the images over the frames and plot the result." ] }, { @@ -314,7 +321,7 @@ "source": [ "Now let's create a table `AverageFrame` to compute and save the average fluorescence. \n", "\n", - "For each scan, we have one average frame. Therefore, the table shares the exact same primary key as the table `Scan`" + "For each scan, we have one average frame. Therefore, the table shares the exact same primary key as the table `Scan`." ] }, { @@ -359,7 +366,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We defined `average_frame` as a `longblob`, which allow us to store a NumPy array. This NumPy array will be imported and computed from the file corresponding to each scan." + "We defined `average_frame` as a `longblob`, which allows us to store a NumPy array. This NumPy array will be imported and computed from the file corresponding to each scan." ] }, { @@ -407,7 +414,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Rather than filling out the content of the table manually using `insert1` or `insert` methods, we are going to make use of the `make` and `populate` logic that comes with `Imported` tables. These two methods automatically figure it out what needs to be imported, and perform the import." + "Rather than filling out the content of the table manually using `insert1` or `insert` methods, we are going to make use of the `make` and `populate` logic that comes with `Imported` tables. These two methods automatically figure out what needs to be imported, and perform the import." 
] }, { @@ -736,8 +743,13 @@ "source": [ "At this point, our pipeline contains the core elements with data populated, ready for further downstream analysis.\n", "\n", - "In the next [session](./03-Computed%20Table,%20Lookup%20Table,%20and%20Part%20Table%20-%20Interactive.ipynb), we are going to introduce the concept of `Computed` table, and `Lookup` table, as well as learning to set up a automated computation routine." + "In the next [session](./03-Calcium%20Imaging%20Computed%20Tables.ipynb), we are going to introduce the concept of `Computed` table, and `Lookup` table, as well as learning to set up an automated computation routine." ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } ], "metadata": { diff --git a/tutorials/03-Calcium Imaging Computed Tables.ipynb b/tutorials/03-Calcium Imaging Computed Tables.ipynb index f90f78a..5bd952d 100644 --- a/tutorials/03-Calcium Imaging Computed Tables.ipynb +++ b/tutorials/03-Calcium Imaging Computed Tables.ipynb @@ -268,7 +268,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The outcome is different across different threholds we set. Therefore, this threshold is a parameter we could potentially tweak." + "The outcome is different across different thresholds we set. Therefore, this threshold is a parameter we could potentially tweak." ] }, {