diff --git a/.gradient/notebook-tests.yaml b/.gradient/notebook-tests.yaml
index b0ef4b7..db5a6a6 100644
--- a/.gradient/notebook-tests.yaml
+++ b/.gradient/notebook-tests.yaml
@@ -247,3 +247,11 @@ packed-bert-single-label-classification:
   notebook:
     file: packedBERT_single_label_text_classification.ipynb
     timeout: 10000
+
+# MolFeat Transformer
+molfeat:
+  location: ../molfeat
+  generated: true
+  notebook:
+    file: transformers_molfeat_finetune.ipynb
+    timeout: 1000
diff --git a/dolly2-instruction-following/api/pipeline.py b/dolly2-instruction-following/api/pipeline.py
index 340d277..083c4cf 100644
--- a/dolly2-instruction-following/api/pipeline.py
+++ b/dolly2-instruction-following/api/pipeline.py
@@ -95,13 +95,14 @@ def __init__(
         tokenizer: Optional[AutoTokenizer] = None,
         prompt_format: Optional[str] = None,
         end_key: Optional[str] = None,
+        task=None,
         **kwargs,
     ) -> None:
         if sequence_length is not None:
             config.model.sequence_length = sequence_length
         if micro_batch_size is not None:
             config.execution.micro_batch_size = micro_batch_size
-
+        self.task = "text-generation"
         logging.info(f"Creating session")
         session: popxl.Session = inference(config)
         if isinstance(hf_dolly_checkpoint, str):
diff --git a/molfeat/requirements.txt b/molfeat/requirements.txt
new file mode 100644
index 0000000..fe1a6e8
--- /dev/null
+++ b/molfeat/requirements.txt
@@ -0,0 +1,13 @@
+ipywidgets==8.0.6
+awscli==1.27.76
+matplotlib==3.7.1
+molfeat==0.8.8
+transformers==4.28.1
+tabulate==0.9.0
+seaborn==0.12.2
+stmol
+rdkit==2023.3.1
+py3Dmol==2.0.1.post1
+torchinfo==1.7.2
+numpy
+examples-utils[common] @ git+https://github.com/graphcore/examples-utils@latest_stable
diff --git a/molfeat/transformers_molfeat_finetune.ipynb b/molfeat/transformers_molfeat_finetune.ipynb
new file mode 100644
index 0000000..bd587e5
--- /dev/null
+++ b/molfeat/transformers_molfeat_finetune.ipynb
@@ -0,0 +1,711 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Copyright (c) 2023 Graphcore Ltd. All rights reserved."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Molecular Classification on IPUs using MolFeat\n",
+    "## Fine-tune pre-trained Transformer models for Blood-Brain Barrier Permeability and QM9 prediction\n",
+    "\n",
+    "\n",
+    "[![Run on Gradient](https://assets.paperspace.io/img/gradient-badge.svg)](https://ipu.dev/yoyy6N)\n",
+    " [![Join our Slack Community](https://img.shields.io/badge/Slack-Join%20Graphcore's%20Community-blue?style=flat-square&logo=slack)](https://www.graphcore.ai/join-community)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Introduction: MolFeat on the IPU\n",
+    "\n",
+    "The popular [MolFeat library](https://molfeat.datamol.io) offers a comprehensive open-source collection of pre-trained featurizers for molecules, designed for seamless integration into ML workflows.\n",
+    "The pre-trained featurizers in the MolFeat library have been trained on large quantities of data from a variety of domains, making them ideally suited to providing an initial featurization for new molecular datasets. This makes it possible to use larger and more sophisticated models even when only very small datasets are available, as the fundamental physical properties of the molecules are already represented in the featurizer.\n",
+    "\n",
+    "In this notebook, we present a step-by-step guide on how to employ the Graphcore IPU for fine-tuning a pre-trained Transformer model on the `Blood-Brain Barrier Permeability (BBBP)` dataset from MoleculeNet.\n",
+    "The goal is to predict which molecules can cross the blood-brain barrier, an essential property for drug development. Additionally, we demonstrate the versatility of this method for other regression tasks using new datasets.\n",
+    "\n",
+    "### Summary Table\n",
+    "| Domain | Tasks | Model | Datasets | Workflow | Number of IPUs | Execution Time |\n",
+    "|:----------:|:----------------:|:---------------------:|:----------:|:-----------------------:|:-------------------:|:--------------:|\n",
+    "| Molecules | Classification / Regression | ChemBERTa-77M-MLM / ChemGPT | BBBP / QM9 | Training, evaluation, inference | Recommended: 4x (Min: 1x) | 5 min |\n",
+    "\n",
+    "### Learning Outcomes\n",
+    "Through this demo, you will acquire the skills to:\n",
+    "- Classify molecules by leveraging a MolFeat featurizer and fine-tuning a Hugging Face Transformer on the IPU.\n",
+    "- Construct an inference workflow for individual molecule predictions.\n",
+    "- Understand how to transition between classification and regression tasks.\n",
+    "\n",
+    "### Links to Other Resources\n",
+    "For additional information on MolFeat, consult their [documentation](https://molfeat.datamol.io), and for more details about the datasets employed in this notebook, explore [MoleculeNet](https://moleculenet.org/datasets-1). This notebook presumes prior knowledge of the Transformer architecture and of PyTorch on the IPU. To review these topics, refer to the relevant tutorials on using PyTorch on the IPU, available [here](https://console.paperspace.com/github/graphcore/Gradient-HuggingFace?machine=Free-IPU-POD4&container=graphcore%2Fpytorch-jupyter%3A3.2.0-ubuntu-20.04-20230331&file=%2Fnatural-language-processing%2Fintroduction_to_optimum_graphcore.ipynb).\n",
+    "\n",
+    "[![Join our Slack Community](https://img.shields.io/badge/Slack-Join%20Graphcore's%20Community-blue?style=flat-square&logo=slack)](https://www.graphcore.ai/join-community)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Running on Paperspace\n",
+    "\n",
+    "The Paperspace environment lets you run this notebook with almost no setup. To improve your experience, we preload datasets and pre-install packages; this can take a few minutes. If you experience errors immediately after starting a session, please try restarting the kernel before contacting support. If a problem persists or you want to give us feedback on the content of this notebook, please reach out to our community of developers through our [Slack channel](https://www.graphcore.ai/join-community) or raise a [GitHub issue](https://github.com/graphcore/examples).\n",
+    "\n",
+    "Requirements:\n",
+    "\n",
+    "* Python packages installed with `pip install -r ./requirements.txt`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Make imported Python modules automatically reload when the files are changed.\n",
+    "# This needs to be before the first import.\n",
+    "%load_ext autoreload\n",
+    "%autoreload 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install -q -r ./requirements.txt\n",
+    "from examples_utils import notebook_logging\n",
+    "\n",
+    "%load_ext gc_logger"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## The Problem...\n",
+    "\n",
+    "Let's start by posing a toy problem.\n",
+    "Imagine we have a dataset of molecules that we know something about - in this case, the ability of various compounds to penetrate the blood-brain barrier - but it could be any property we wish. This dataset has been built from experimental results, which are ultimately very expensive and time-consuming to collect and extend.\n",
+    "\n",
+    "So, like any good scientist, we wonder: can we take the data we have and build a model that describes the physical results, one we can use to make predictions about new molecules for which reliable experiments haven't been carried out?\n",
+    "\n",
+    "Below we can see an example molecule from the dataset, Propanolol, represented as a SMILES string and visualised in 3D. Looking at the table we can see the expected result. This is the target we are aiming to produce, and by the end of this notebook we will have a model to fill in the rest of the table."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from utils import report_molecule_classification\n",
+    "\n",
+    "report_molecule_classification(\n",
+    "    \"Propanolol\", True, None, \"[Cl].CC(C)NCC(O)COc1cccc2ccccc12\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "import copy\n",
+    "import pandas as pd\n",
+    "import numpy as np\n",
+    "from tqdm.notebook import tqdm\n",
+    "\n",
+    "from molfeat.trans.pretrained import PretrainedHFTransformer\n",
+    "\n",
+    "import poptorch\n",
+    "from torch.utils.data import Dataset\n",
+    "from IPython.display import clear_output\n",
+    "import ipywidgets as widgets\n",
+    "\n",
+    "from utils import (\n",
+    "    plot_smoothed_loss,\n",
+    "    report_molecule_classification,\n",
+    "    report_molecule_regression,\n",
+    ")\n",
+    "from utils import Emoji"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First we need to select the featurizer and the dataset. In this example we'll use the `ChemBERTa-77M-MLM` pre-trained featurizer and the `BBBP` (Blood-Brain Barrier Permeability) dataset. We load the featurizer and read the dataset from the URL provided; then we can look at the dataframe and plot the first molecule from the dataset.\n",
+    "\n",
+    "We can see the dataframe includes the name of the molecule, the label (does the molecule pass through the BBB), and the SMILES string for the molecule. We can then use the utility function `report_molecule_classification` to explore the dataset - look at different molecules in the dataset by changing the `mol_id`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "featurizer = PretrainedHFTransformer(\n",
+    "    kind=\"ChemBERTa-77M-MLM\", pooling=\"bert\", preload=True\n",
+    ")\n",
+    "\n",
+    "df = pd.read_csv(\"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/BBBP.csv\")\n",
+    "print(df.head())\n",
+    "print(f\"Length of dataset: {len(df)}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Here the molecule report shows the basic information - name and target. Through this notebook we'll show how to fine-tune the Transformer model so that it can classify individual molecules in this format."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "mol_id = 10  # SET THIS VALUE AND RE-RUN THIS CELL TO EXPLORE THE DATASET.\n",
+    "\n",
+    "view = report_molecule_classification(\n",
+    "    df.name.values[mol_id], df.p_np.values[mol_id], None, df.smiles.values[mol_id]\n",
+    ")\n",
+    "view.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "**Exercise:** When you've finished running the notebook, come back here and uncomment this block to run the `ChemGPT-4.7M` model with a regression task. There are a few small changes to make throughout the notebook for this, but we'll point those out when we get to them."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# featurizer = PretrainedHFTransformer(kind=\"ChemGPT-4.7M\", notation=\"selfies\")\n",
+    "\n",
+    "# df = pd.read_csv(\"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/qm9.csv\")\n",
+    "# print(df.head())\n",
+    "\n",
+    "# report_molecule_regression(df.mol_id.values[0], df.gap.values[0], None, df.smiles.values[0])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## MolFeat Featurizer\n",
+    "\n",
+    "The main advantage of using the MolFeat library is its pre-trained featurizers: models that provide a tuned way to take a molecule in SMILES string format and generate an embedding that is ready to be used for downstream tasks.\n",
+    "We can think of this like the pre-trained word embeddings in an NLP model.\n",
+    "\n",
+    "In this case we need to use the dataset wrapper from `MolFeat` - more details of this can be found in the tutorials in the docs [here](https://molfeat-docs.datamol.io/stable/tutorials/transformer_finetuning.html).\n",
+    "\n",
+    "**Exercise:** To see how this dataset class should be changed depending on the requirements of the dataset, look at the PyTorch Geometric MolFeat tutorial [here](https://console.paperspace.com/github/graphcore/Gradient-Pytorch-Geometric?machine=Free-IPU-POD4&container=graphcore%2Fpytorch-geometric-jupyter%3A3.2.0-ubuntu-20.04-20230314&file=%2F%2FREADME_first.ipynb)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class DTset(Dataset):\n",
+    "    def __init__(self, smiles, y, mf_featurizer):\n",
+    "        super().__init__()\n",
+    "        self.smiles = smiles\n",
+    "        self.mf_featurizer = mf_featurizer\n",
+    "        self.y = torch.tensor(y).float()\n",
+    "        # here we use the molfeat mf_featurizer to convert the smiles to\n",
+    "        # corresponding tokens based on the internal tokenizer\n",
+    "        # we just want the data from the batch encoding object\n",
+    "        self.transformed_mols = self.mf_featurizer._convert(smiles)\n",
+    "\n",
+    "    @property\n",
+    "    def embedding_dim(self):\n",
+    "        return len(self.mf_featurizer)\n",
+    "\n",
+    "    @property\n",
+    "    def max_length(self):\n",
+    "        return self.transformed_mols.shape[-1]\n",
+    "\n",
+    "    def __len__(self):\n",
+    "        return self.y.shape[0]\n",
+    "\n",
+    "    def collate_fn(self, **kwargs):\n",
+    "        # the default collate fn self.mf_featurizer.get_collate_fn(**kwargs)\n",
+    "        # returns None, which should just concatenate the inputs\n",
+    "        # You could also use `transformers.default_data_collator` instead\n",
+    "        return self.mf_featurizer.get_collate_fn(**kwargs)\n",
+    "\n",
+    "    def __getitem__(self, index):\n",
+    "        datapoint = dict(\n",
+    "            (name, val[index]) for name, val in self.transformed_mols.items()\n",
+    "        )\n",
+    "        datapoint[\"y\"] = self.y[index]\n",
+    "        return datapoint"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Build the PopTorch DataLoader for the IPU\n",
+    "\n",
+    "We need to build a PopTorch DataLoader with our dataset. We can start by processing the dataframe with the `DTset` class above - this takes the featurizer, processes all the SMILES strings and returns a PyTorch dataset.\n",
+    "Then, after splitting the dataset into a train and test set, we can set up the IPU-specific dataloader.\n",
+    "\n",
+    "This takes the train split of the dataset and the IPU options (`ipu_opts`) to build the dataloader.\n",
+    "\n",
+    "Some key aspects of the IPU options are given below:\n",
+    "* `deviceIterations` - the number of training steps taken before the IPU communicates with the host.\n",
+    "* `gradientAccumulation` - we can accumulate gradient updates for a number of steps before updating the weights. This can be useful for larger training pipelines; here we set this value to 2.\n",
+    "* `replicationFactor` - we can replicate the model over a number of IPUs to take advantage of data parallelism, which can speed up training; here 1x IPU is sufficient.\n",
+    "* `BATCH_SIZE` - the micro-batch size. We also have the concept of a total batch size, given by `BATCH_SIZE * NUM_REPLICAS * GRADIENT_ACCUMULATION`.\n",
+    "\n",
+    "More details on these can be found in the introductory tutorials; for the models used here we can safely leave the values as set below. An optional cell after the DataLoader is built shows how these values combine into an effective batch size."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dataset = DTset(df.smiles.values, df.p_np.values, featurizer)\n",
+    "\n",
+    "# NOTE: If you want to use the QM9 regression dataset, you need to change the args for the dataset here.\n",
+    "# Comment out the above line, and un-comment the below line.\n",
+    "# dataset = DTset(df.smiles.values, df.gap.values, featurizer)\n",
+    "\n",
+    "\n",
+    "generator = torch.Generator().manual_seed(42)\n",
+    "train_dt, test_dt = torch.utils.data.random_split(\n",
+    "    dataset, [0.8, 0.2], generator=generator\n",
+    ")\n",
+    "\n",
+    "BATCH_SIZE = 32\n",
+    "# Set up the PyTorch DataLoader to load that much data at each iteration\n",
+    "train_opts = poptorch.Options()\n",
+    "train_opts.deviceIterations(1)\n",
+    "train_opts.Training.gradientAccumulation(2)\n",
+    "train_opts.replicationFactor(1)\n",
+    "\n",
+    "\n",
+    "train_loader = poptorch.DataLoader(\n",
+    "    options=train_opts,\n",
+    "    dataset=train_dt,\n",
+    "    batch_size=BATCH_SIZE,\n",
+    "    shuffle=False,\n",
+    "    collate_fn=dataset.collate_fn(),\n",
+    ")"
+   ]
+  },
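+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick, optional illustration of how the options above combine, the next cell computes the effective batch size they imply. The constants are not used anywhere else in the notebook; they simply mirror the values passed to `poptorch.Options()` above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative sketch: how the PopTorch options combine into an effective batch size.\n",
+    "# These constants mirror the options set in the cell above.\n",
+    "DEVICE_ITERATIONS = 1\n",
+    "GRADIENT_ACCUMULATION = 2\n",
+    "REPLICATION_FACTOR = 1\n",
+    "\n",
+    "# Samples contributing to each weight update:\n",
+    "update_batch_size = BATCH_SIZE * GRADIENT_ACCUMULATION * REPLICATION_FACTOR\n",
+    "# Samples pulled from the host for each call into the training loop:\n",
+    "samples_per_step = update_batch_size * DEVICE_ITERATIONS\n",
+    "print(f\"Batch size per weight update: {update_batch_size}\")\n",
+    "print(f\"Samples consumed per step: {samples_per_step}\")"
+   ]
+  },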
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Model Structure\n",
+    "\n",
+    "The AwesomeNet model is a custom PyTorch neural network architecture that utilizes a pre-trained transformer model from the MolFeat library as the base for molecule featurization. The architecture is composed of several layers and components:\n",
+    "\n",
+    "1. **Embedding Layer**: A copy of the base pre-trained transformer model from the MolFeat featurizer, which serves as the embedding layer for the input data.\n",
+    "2. **Embedding Dimension**: The size of the hidden layer in the base pre-trained transformer model.\n",
+    "3. **Pooling Layer**: Obtained from the MolFeat featurizer, this layer performs pooling operations on the embeddings.\n",
+    "4. **Hidden Layer**: A sequential layer consisting of dropout, linear, and ReLU activation layers. The input size is the length of the MolFeat featurizer, while the output size is a user-defined `hidden_size`.\n",
+    "5. **Output Layer**: A linear layer that maps the hidden layer's output to the final output size, which is set to 1 here (a single logit for binary classification, or a single value for a regression task).\n",
+    "6. **Loss Function**: The model uses the Binary Cross Entropy with Logits Loss (BCEWithLogitsLoss) function to compute the loss during training.\n",
+    "\n",
+    "The `forward` function of the model takes input arguments and optional target labels (y) and performs the following steps:\n",
+    "\n",
+    "1. Pass the input data through the embedding layer to obtain the embeddings.\n",
+    "2. Extract the last hidden state from the embeddings.\n",
+    "3. Apply the pooling layer on the last hidden state using the input_ids and attention_mask.\n",
+    "4. Pass the pooled embeddings through the custom hidden layer.\n",
+    "5. Obtain the final output by passing the hidden layer's output through the output layer.\n",
+    "\n",
+    "If target labels are provided, the model computes the loss using the BCEWithLogitsLoss function and returns the output along with the computed loss. Otherwise, only the output is returned. An optional cell after the model definition below runs a few samples through the un-wrapped model to illustrate these steps.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class AwesomeNet(torch.nn.Module):\n",
+    "    def __init__(self, mf_featurizer, hidden_size=128, dropout=0.1, output_size=1):\n",
+    "        super().__init__()\n",
+    "        self.hidden_size = hidden_size\n",
+    "        self.output_size = output_size\n",
+    "        # we get the underlying model from the molfeat featurizer\n",
+    "        # here we fetch the \"base\" huggingface transformer model\n",
+    "        # and not the wrapper around for MLM\n",
+    "        # this is principally to get a smaller model and better training efficiency\n",
+    "        base_pretrained_model = getattr(\n",
+    "            mf_featurizer.featurizer.model,\n",
+    "            mf_featurizer.featurizer.model.base_model_prefix,\n",
+    "        )\n",
+    "        self.embedding_layer = copy.deepcopy(base_pretrained_model)\n",
+    "        self.embedding_dim = mf_featurizer.featurizer.model.config.hidden_size\n",
+    "        # we get the pooling layer from the molfeat featurizer\n",
+    "        self.pooling_layer = mf_featurizer._pooling_obj\n",
+    "        self.hidden_layer = torch.nn.Sequential(\n",
+    "            torch.nn.Dropout(p=dropout),\n",
+    "            torch.nn.Linear(len(mf_featurizer), self.hidden_size),\n",
+    "            torch.nn.ReLU(),\n",
+    "        )\n",
+    "        self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)\n",
+    "        self.loss_fn = torch.nn.BCEWithLogitsLoss()\n",
+    "\n",
+    "        # Swap the loss function here for a regression task\n",
+    "        # self.loss_fn = torch.nn.L1Loss()\n",
+    "\n",
+    "    def forward(self, y=None, **kwargs):\n",
+    "        # get embeddings\n",
+    "        x = self.embedding_layer(**kwargs)\n",
+    "        # we take the last hidden state\n",
+    "        emb = x[\"last_hidden_state\"]\n",
+    "        # run pooling\n",
+    "        h = self.pooling_layer(\n",
+    "            emb,\n",
+    "            kwargs[\"input_ids\"],\n",
+    "            mask=kwargs.get(\"attention_mask\"),\n",
+    "        )\n",
+    "        # run through our custom and optional hidden layer\n",
+    "        h = self.hidden_layer(h)\n",
+    "        # run through output layers to get logits\n",
+    "        if y is not None:\n",
+    "            out = self.output_layer(h)\n",
+    "            loss = self.loss_fn(out.squeeze(), y)\n",
+    "            return out, loss\n",
+    "        return self.output_layer(h)"
+   ]
+  },
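+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The next cell is an optional, illustrative check (not required for the rest of the notebook): it runs a handful of samples from the training DataLoader through a fresh, un-wrapped model on the host, so you can see the forward pass described above produce one logit per molecule plus a loss. The temporary `check_model` and the 4-sample slice are only for demonstration; the model that is actually trained is compiled for the IPU in the next section."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sketch: run a few samples through an un-wrapped AwesomeNet on the host\n",
+    "# to illustrate the forward pass (embedding -> pooling -> hidden -> output + loss).\n",
+    "check_model = AwesomeNet(featurizer, hidden_size=64, dropout=0.1, output_size=1)\n",
+    "sample_batch = next(iter(train_loader))\n",
+    "small_batch = {name: tensor[:4] for name, tensor in sample_batch.items()}\n",
+    "logits, loss = check_model(**small_batch)\n",
+    "print(f\"Logits shape: {tuple(logits.shape)}, loss: {loss.item():.4f}\")"
+   ]
+  },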
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we can define the training model and set some key hyperparameters.\n",
+    "* `NUM_EPOCHS` - how long to train the model for\n",
+    "* `LEARNING_RATE` - set the learning rate to tune the training\n",
+    "* `HIDDEN_SIZE` - set the size of the final hidden layer in the model\n",
+    "\n",
+    "Then we call the model and define the optimizer.\n",
+    "In this case we're going to keep it simple and use Adam.\n",
+    "\n",
+    "Then the `poptorch` training model is created from our `AwesomeNet` - which we can look at in the summary below."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Key Hyperparameters\n",
+    "NUM_EPOCHS = 10\n",
+    "LEARNING_RATE = 1e-3\n",
+    "HIDDEN_SIZE = 64\n",
+    "\n",
+    "\n",
+    "model = AwesomeNet(featurizer, hidden_size=HIDDEN_SIZE, dropout=0.1, output_size=1)\n",
+    "optimizer = poptorch.optim.Adam(model.parameters(), lr=LEARNING_RATE)\n",
+    "\n",
+    "# Wrap the model in a PopTorch training wrapper\n",
+    "train_model = poptorch.trainingModel(model, options=train_opts, optimizer=optimizer)\n",
+    "\n",
+    "\n",
+    "from torchinfo import summary\n",
+    "\n",
+    "summary(train_model)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Training the model\n",
+    "\n",
+    "Now we can train the model.\n",
+    "This block will compile the model and loop through the dataset for the number of epochs specified.\n",
+    "Finally, the model is detached from the IPU to free up resources for the next step."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Train\n",
+    "epoch_losses = []\n",
+    "from tqdm.notebook import tqdm\n",
+    "\n",
+    "pbar = tqdm(range(NUM_EPOCHS), colour=\"#FF6F79\")\n",
+    "for epoch in pbar:\n",
+    "    losses = []\n",
+    "    for data in train_loader:\n",
+    "        out, loss = train_model(**data)\n",
+    "        losses.append(torch.mean(loss).item())\n",
+    "        epoch_losses.append(torch.mean(loss).item())\n",
+    "    pbar.set_description(f\"Epoch {epoch} - Loss {np.mean(losses):.5f}\")\n",
+    "train_model.detachFromDevice()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can then plot the smoothed loss history to check that the training looks sensible.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from utils import plot_smoothed_loss\n",
+    "\n",
+    "plot_smoothed_loss(epoch_losses)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Evaluation\n",
+    "\n",
+    "To run the evaluation we need a few key steps; these are very similar to the steps for training. We need to define the IPU options for the test model, and we need to create a PopTorch dataloader.\n",
+    "1. Build the `test_loader` for the test dataset.\n",
+    "2. Build the PopTorch inference model.\n",
+    "3. Loop through the test dataset to get predictions.\n",
+    "    NB: Be careful with the processing of the outputs if you try the QM9 regression dataset: remove the sigmoid function applied to the outputs.\n",
+    "4. Evaluate with the accuracy and ROC AUC score.\n",
+    "\n",
+    "This will then print out the performance on the test dataset."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from sklearn.metrics import accuracy_score, roc_auc_score\n",
+    "from matplotlib import pyplot as plt\n",
+    "\n",
+    "test_opts = poptorch.Options()\n",
+    "test_opts.deviceIterations(1)\n",
+    "test_opts.replicationFactor(1)\n",
+    "\n",
+    "test_loader = poptorch.DataLoader(\n",
+    "    options=test_opts,\n",
+    "    dataset=test_dt,\n",
+    "    batch_size=BATCH_SIZE,\n",
+    "    shuffle=False,\n",
+    "    collate_fn=dataset.collate_fn(),\n",
+    ")\n",
+    "\n",
+    "inference_model = poptorch.inferenceModel(model, options=test_opts)\n",
+    "test_y_hat = []\n",
+    "test_y_true = []\n",
+    "\n",
+    "for data in test_loader:\n",
+    "    out, _ = inference_model(**data)\n",
+    "    # we apply sigmoid for classification - don't need this for regression\n",
+    "    out = torch.sigmoid(out)\n",
+    "    test_y_hat.append(out.detach().cpu().squeeze())\n",
+    "    test_y_true.append(data[\"y\"])\n",
+    "test_y_hat = torch.cat(test_y_hat).squeeze().numpy()\n",
+    "test_y_true = torch.cat(test_y_true).squeeze().numpy()\n",
+    "assert len(test_y_true) == len(test_y_hat)\n",
+    "roc_auc = roc_auc_score(test_y_true, test_y_hat)\n",
+    "acc = accuracy_score(test_y_true, test_y_hat >= 0.5)\n",
+    "print(f\"Test ROC AUC: {roc_auc:.3f}\\nTest Accuracy: {acc:.3f}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Demo Molecules\n",
+    "\n",
+    "We can see the accuracy and ROC AUC score above; they seem pretty good, but it's hard to put that in the context of a real use case.\n",
+    "\n",
+    "We have a fine-tuned model, so let's use it to make predictions on individual molecules as we loop through the dataset.\n",
+    "\n",
+    "For demonstration purposes we'll build a new data loader for inference with a batch size of 1 molecule, and for simplicity we'll just take the original dataset - in reality this might be a different dataset, or new molecules as they need to be evaluated, but this gives an idea of the workflow.\n",
+    "\n",
+    "As previously, we set up the DataLoader and IPU options in the same manner, and build the inference version of our model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "test_opts = poptorch.Options()\n",
+    "test_opts.deviceIterations(1)\n",
+    "test_opts.replicationFactor(1)\n",
+    "# Dataframe of \"new molecules\"\n",
+    "inf_df = df\n",
+    "dataset = DTset(inf_df.smiles.values, inf_df.p_np.values, featurizer)\n",
+    "# Set the batch size to 1 as we intend to process each molecule one at a time.\n",
+    "inf_loader = poptorch.DataLoader(\n",
+    "    options=test_opts,\n",
+    "    dataset=dataset,\n",
+    "    batch_size=1,\n",
+    "    shuffle=False,\n",
+    "    collate_fn=dataset.collate_fn(),\n",
+    ")\n",
+    "# And we define the inference model here.\n",
+    "model.eval()\n",
+    "inference_model = poptorch.inferenceModel(model, options=test_opts)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then, if we turn the loader and dataframe into iterables, we can loop through one molecule at a time on demand."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sampler = iter(inf_loader)\n",
+    "smiles = iter(df.smiles.values)\n",
+    "names = iter(df.name.values)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The following block is the inference call. The first time it is run, the inference model will compile; subsequent calls run directly, and it can be re-run until the dataloader is exhausted.\n",
+    "See how the molecule report has been extended to include the predicted result.\n",
+    "\n",
+    "You can re-run the next molecule cell multiple times to get a feeling for how the model performs, and what a case-by-case application feels like.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def next_molecule():\n",
+    "\n",
+    "    # =============================================================\n",
+    "    # |                                                           |\n",
+    "    # |                 DEMO MOLECULE PREDICTION                  |\n",
+    "    # |                                                           |\n",
+    "    # =============================================================\n",
+    "    clear_output(True)\n",
+    "    sample = next(sampler)\n",
+    "    name = next(names)\n",
+    "    smile = next(smiles)\n",
+    "\n",
+    "    # Grab the true value for the report\n",
+    "    y_truth = sample[\"y\"]\n",
+    "    sample[\"y\"] = None\n",
+    "\n",
+    "    # Run inference on the model with the single sampled molecule from the sampler\n",
+    "    out = inference_model(**sample)\n",
+    "    out = torch.sigmoid(out)\n",
+    "\n",
+    "    # Report the result and plot the molecule with utility function\n",
+    "    view = report_molecule_classification(name, bool(y_truth), out, smile)\n",
+    "    view.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# RE-RUN THIS CELL TO SEE INFERENCE RESULTS ON INDIVIDUAL MOLECULES\n",
+    "\n",
+    "next_molecule()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Conclusion\n",
+    "\n",
+    "We've seen how to combine a MolFeat featurizer and a Hugging Face Transformer to classify molecules in the BBBP dataset, and how to run the final model in a single-molecule inference mode.\n",
+    "\n",
+    "**Next Steps:**\n",
+    "* Try tuning the hyperparameters - the length of training, the size of the hidden layer and the learning rate will all impact the final performance.\n",
+    "* Try changing the dataset - the BBBP dataset is a classification dataset, but code is provided in commented blocks to use the QM9 dataset for regression tasks. Start here as an extension to see how to adapt the data loading and the final layer of the model (and loss function) for the new task. A consolidated sketch of these changes is gathered in the commented cell below.\n",
+    "* Alternatively, head over to [MoleculeNet](https://moleculenet.org/datasets-1) and explore the datasets there - they are labelled by task, so you could pick an alternative classification dataset to fine-tune on, or a regression task.\n",
+    "* Provided in the same block as the QM9 dataset is an alternative model - `ChemGPT-4.7M`. You can try swapping in this featurizer/model to fine-tune, see how the performance compares, and tune the fine-tuning to achieve better downstream results.\n"
+   ]
+  },
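+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For reference, the commented-out cell below gathers the QM9 regression changes mentioned throughout the notebook into one place. It is an illustrative sketch only - it assumes the QM9 CSV and `gap` column used in the commented examples above, and it is left commented out so it is not run by accident."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative summary of the changes for the QM9 regression variant\n",
+    "# (uncomment the relevant lines and re-run the notebook from the top):\n",
+    "#\n",
+    "# featurizer = PretrainedHFTransformer(kind=\"ChemGPT-4.7M\", notation=\"selfies\")\n",
+    "# df = pd.read_csv(\"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/qm9.csv\")\n",
+    "# dataset = DTset(df.smiles.values, df.gap.values, featurizer)  # regression target column\n",
+    "#\n",
+    "# In AwesomeNet.__init__, swap the loss function:\n",
+    "#     self.loss_fn = torch.nn.L1Loss()\n",
+    "#\n",
+    "# In the evaluation and demo loops, do not apply torch.sigmoid to the outputs,\n",
+    "# and report a regression metric (for example MAE) instead of accuracy / ROC AUC."
+   ]
+  },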
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Finally detach the inference model - this line is at the end to make sure it's not run accidentally before finishing the notebook.\n",
+    "inference_model.detachFromDevice()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.8.10 ('3.2.0+1277_poptorch': venv)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  },
+  "vscode": {
+   "interpreter": {
+    "hash": "dbfc712368f9f170a0635e201b20e8278cfe4185a9064a095e68346a2c2a6bf6"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/molfeat/utils.py b/molfeat/utils.py
new file mode 100644
index 0000000..a538d9d
--- /dev/null
+++ b/molfeat/utils.py
@@ -0,0 +1,115 @@
+import numpy as np
+import matplotlib.pyplot as plt
+from typing import Optional
+import seaborn as sns
+
+from stmol import showmol
+import py3Dmol
+
+from rdkit import Chem
+from rdkit.Chem import AllChem
+import tabulate
+
+
+class Emoji:
+    microscope = '\U0001F52C'
+    test_tube = '\U0001F9EA'
+    yes = '\u2705'
+    no = '\u274C'
+    ruler = '\U0001F4CF'
+    warning = '\u26A0'
+    rocket = '\U0001F680'
+
+
+def make_block(smi: str) -> str:
+    mol = Chem.MolFromSmiles(smi)
+    mol = Chem.AddHs(mol)
+    AllChem.EmbedMolecule(mol)
+    mblock = Chem.MolToMolBlock(mol)
+    return mblock
+
+
+def render_mol(xyz: str):
+    xyzview = py3Dmol.view()
+    xyzview.addModel(xyz, 'mol')
+    xyzview.setStyle({'stick': {}})
+    xyzview.setBackgroundColor('white')
+    xyzview.zoomTo()
+    showmol(xyzview, height=500, width=500)
+    return xyzview
+
+
+def plot_3d_mol(smile: str):
+    blk = make_block(smile)
+    view = render_mol(blk)
+    return view
+
+
+def report_molecule_classification(name: str, y_truth: bool, out: Optional[float], smile: str):
+    table = [
+        ["Molecule:", name],
+        ["BBBP:", f"{y_truth} (target) {Emoji.microscope}"],
+    ]
+    if out is not None:
+        table.append(["Prediction:", f"{bool(out > 0.5)} {Emoji.test_tube}"])
+        table.append(["Correct:", f"{Emoji.yes}" if bool(out > 0.5) == y_truth else f"{Emoji.no}"])
+    print(tabulate.tabulate(table, tablefmt="heavy_grid"))
+
+    return plot_3d_mol(smile)
+
+
+def report_molecule_regression(name: str, y_truth: float, out: Optional[float], smile: str, mask=None):
+    table = [
+        ["Molecule:", name],
+        ["exp:", f"{y_truth:.4f} (target) {Emoji.microscope}"],
+    ]
+    if out is not None:
+        err = abs(y_truth - out)
+        table.append(["Prediction:", f"{out:.4f} {Emoji.test_tube}"])
+        table.append(["|err|:", f"{err:.4f} " + (f"{Emoji.ruler}" if err < 1.5 else f"{Emoji.warning}")])
+    print(tabulate.tabulate(table, tablefmt="heavy_grid"))
+    return plot_3d_mol(smile)
+
+
+def plot_smoothed_loss(epoch_losses: np.ndarray, window_size: int = 10):
+    moving_avg = np.convolve(epoch_losses, np.ones(window_size) / window_size, mode='valid')
+    moving_avg = np.clip(moving_avg, 0, None)
+
+    q1, q3 = np.percentile(epoch_losses, [25, 75])
+    iqr = q3 - q1
+
+    fig, ax = plt.subplots(figsize=(10, 5))
+    ax.plot(moving_avg, color='#FF6F79')
+    ax.fill_between(
+        range(len(moving_avg)), np.clip(moving_avg - iqr, 0, None), moving_avg + iqr, alpha=0.3, color='#FF6F79'
+    )
+
+    ax.set_title('Smoothed Loss with IQR')
+    ax.set_xlabel('Steps')
+    ax.set_ylabel('Loss')
+
+    plt.show()
+
+
+def plot_contours(test_y_true: np.ndarray, test_y_hat: np.ndarray, r2: float, mae: float):
+    plt.style.use('seaborn-v0_8')
+
+    hist, xedges, yedges = np.histogram2d(test_y_true, test_y_hat, bins=10)
+    X, Y = np.meshgrid(xedges[:-1], yedges[:-1])
+    Z = hist.T
+    plt.contour(X, Y, Z, colors=None, levels=5, linewidths=1.5, alpha=0.7, cmap='viridis')
+
+    plt.scatter(test_y_true, test_y_hat, alpha=0.7, edgecolors='k', linewidths=0.5)
+
+    plt.gca().annotate(
+        "$R^2 = {:.2f}$\nMAE = {:.2f}".format(r2, mae),
+        xy=(0.05, 0.9),
+        xycoords='axes fraction',
+        size=10,
+        bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec="none"),
+    )
+
+    plt.xlabel("y true")
+    plt.ylabel("y pred")
+
+    plt.show()