Review #6

Open · wants to merge 8 commits into master
382 changes: 382 additions & 0 deletions .ipynb_checkpoints/SeBRe_detection-checkpoint.ipynb
@@ -0,0 +1,382 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"id": "bba90324",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import sys\n",
"import random\n",
"import math\n",
"import re\n",
"import time\n",
"import numpy as np\n",
"import cv2\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"from config import Config\n",
"import utils\n",
"import glob #for selecting png files in the training images folder\n",
"from natsort import natsorted, ns #for sorting filenames in a directory\n",
"import skimage.io #imread is used below; 'import skimage' alone does not expose skimage.io\n",
"import pandas\n",
"\n",
"import model as modellib\n",
"import visualize\n",
"from model import log"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "e92401ef",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"C:\\Users\\dal4019\\Documents\\Bst_Reg\n"
]
}
],
"source": [
"def resetDataDir():\n",
"    while os.getcwd() != \"C:\\\\\":\n",
"        os.chdir('..')\n",
"\n",
"    # Replace the following with the entire path to your data\n",
"    os.chdir('C:\\\\Users\\\\dal4019\\\\Documents\\\\Bst_Reg')\n",
"\n",
"# Root directory of the project\n",
"resetDataDir()\n",
"ROOT_DIR = os.getcwd()\n",
"print(ROOT_DIR)\n",
"\n",
"# Directory to save logs and trained model\n",
"MODEL_DIR = os.path.join(ROOT_DIR, \"weights\")\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "db89c922",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"25\n"
]
}
],
"source": [
"RGB_MAPPINGS_DIR = 'rgb_mappings_medulla_v2.csv'\n",
"\n",
"resetDataDir()\n",
"\n",
"RGB_MAPPINGS = pandas.read_csv(RGB_MAPPINGS_DIR, usecols=['Label', 'R', 'G', 'B']).dropna()\n",
"RGB_MAPPINGS_MAP = {}\n",
"for index, row in RGB_MAPPINGS.iterrows():\n",
"    r = row[\"R\"]\n",
"    g = row[\"G\"]\n",
"    b = row[\"B\"]\n",
"    label = row[\"Label\"]\n",
"    RGB_MAPPINGS_MAP[label] = (r, g, b)\n",
"\n",
"RGB_MAPPINGS_LABELS = list(RGB_MAPPINGS_MAP.keys())\n",
"RGB_MAPPINGS_INDEX = RGB_MAPPINGS.index.values\n",
"NUM_LABELS = len(RGB_MAPPINGS_INDEX)\n",
"print(NUM_LABELS)"
]
},
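{
"cell_type": "code",
"execution_count": null,
"id": "0c0ffee1",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check (illustrative, not part of the pipeline): spot-check a\n",
"# few label -> (R, G, B) mappings to confirm the CSV parsed as expected\n",
"for label in RGB_MAPPINGS_LABELS[:3]:\n",
"    print(label, RGB_MAPPINGS_MAP[label])"
]
},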
{
"cell_type": "markdown",
"id": "21f1c41b",
"metadata": {},
"source": [
"## Load data to run detection"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "d4493dfa",
"metadata": {},
"outputs": [],
"source": [
"########### Create detection dataset:\n",
"\n",
"class BrainDataset(utils.Dataset):\n",
"    \"\"\"Generates the brain section dataset. The dataset consists of locally stored\n",
"    brain section images, to which file access is required.\n",
"    \"\"\"\n",
"\n",
"    #see utils.py for the default load_image() function; modify according to your dataset\n",
"\n",
"    def load_brain(self):\n",
"        \"\"\"\n",
"        Image filenames should follow the convention '*_(image_id).png';\n",
"        the glob below selects the .png files in the images folder.\n",
"        \"\"\"\n",
"        for index, label in enumerate(RGB_MAPPINGS_LABELS):\n",
"            self.add_class('brain', index+1, label)\n",
"\n",
"        training_images_folder = 'images/VALIDATION'\n",
"        resetDataDir()\n",
"        os.chdir(training_images_folder)\n",
"        cwd = os.getcwd()\n",
"        img_list = glob.glob('*.png')\n",
"        img_list = natsorted(img_list, key=lambda y: y.lower())\n",
"        im_id = 0\n",
"        for i in img_list:\n",
"            img = skimage.io.imread(i)\n",
"            im_dims = np.shape(img) #(height, width) or (height, width, channels)\n",
"            self.add_image(\"brain\", image_id=im_id, path=cwd+'/'+i, height=im_dims[0], width=im_dims[1])\n",
"            im_id += 1"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "22874be2",
"metadata": {},
"outputs": [],
"source": [
"# Detection dataset\n",
"resetDataDir()\n",
"dataset = BrainDataset()\n",
"dataset.load_brain()\n",
"dataset.prepare()\n",
"print(\"Done processing data.\")"
]
},
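{
"cell_type": "code",
"execution_count": null,
"id": "1a2b3c4d",
"metadata": {},
"outputs": [],
"source": [
"# Quick check (illustrative): prepare() built the internal image/class indices,\n",
"# so image_ids and class_names are now populated\n",
"print(\"Images:\", len(dataset.image_ids))\n",
"print(\"Classes:\", len(dataset.class_names))"
]
},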
{
"cell_type": "markdown",
"id": "24bf9ee6",
"metadata": {},
"source": [
"## Load inference config and run detection"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "218fd787",
"metadata": {},
"outputs": [],
"source": [
"class BrainConfig(Config):\n",
"    \"\"\"Configuration for training on the brain dataset.\n",
"    Derives from the base Config class and overrides values specific\n",
"    to the brain dataset.\n",
"    \"\"\"\n",
"    # Give the configuration a recognizable name\n",
"    NAME = \"brain\"\n",
"\n",
"    # Train on 1 GPU and 1 image per GPU (reduced from 8 to avoid running out\n",
"    # of memory after the image size was increased). Batch size is\n",
"    # GPUs * images/GPU.\n",
"    GPU_COUNT = 1\n",
"    IMAGES_PER_GPU = 1 #8\n",
"\n",
"    # Number of classes (including background)\n",
"    NUM_CLASSES = 1 + NUM_LABELS # background + NUM_LABELS brain regions\n",
"\n",
"    # Use small images for faster training. Set the limits of the small side\n",
"    # and the large side, and that determines the image shape.\n",
"    IMAGE_MIN_DIM = 128*3 #128\n",
"    IMAGE_MAX_DIM = 128*3 #128\n",
"\n",
"    # Use smaller anchors because our images and objects are small\n",
"    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels\n",
"\n",
"    # Reduce training ROIs per image because the images are small and have\n",
"    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\n",
"    TRAIN_ROIS_PER_IMAGE = 32\n",
"\n",
"    # steps_per_epoch: total number of steps (batches of samples) before one\n",
"    # epoch is declared finished and the next one starts; typically\n",
"    # steps_per_epoch = TotalTrainingSamples / TrainingBatchSize\n",
"    # (i.e. use the entire training set per epoch; modify if required)\n",
"    STEPS_PER_EPOCH = 2000 #100\n",
"\n",
"    # validation_steps = TotalValidationSamples / ValidationBatchSize\n",
"    # Ideally, all validation data is used each epoch. If only part of it is\n",
"    # used, each epoch measures a different validation subset, which may make\n",
"    # the model look better or worse when it actually isn't.\n",
"    # https://stackoverflow.com/questions/45943675/meaning-of-validation-steps-in-keras-sequential-fit-generator-parameter-list\n",
"    VALIDATION_STEPS = 100 #5\n",
"\n",
"    ###### Further changes (experimentation):\n",
"\n",
"    # Maximum number of ground truth instances to use in one image;\n",
"    # decreased to avoid duplicate instances of each brain region\n",
"    MAX_GT_INSTANCES = 8 #100\n",
"\n",
"    # Max number of final detections;\n",
"    # decreased to avoid duplicate instances of each brain region\n",
"    DETECTION_MAX_INSTANCES = 8 #100\n",
"\n",
"    # Minimum probability value to accept a detected instance;\n",
"    # ROIs below this threshold are skipped\n",
"    DETECTION_MIN_CONFIDENCE = 0.1 #0.7\n",
"\n",
"    # Non-maximum suppression threshold for detection: if the overlap ratio is\n",
"    # greater than this threshold, suppress the object\n",
"    # (https://www.pyimagesearch.com/2014/11/17/non-maximum-suppression-object-detection-python)\n",
"    DETECTION_NMS_THRESHOLD = 0.9\n",
"\n",
"config = BrainConfig()\n",
"config.display()"
]
},
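{
"cell_type": "code",
"execution_count": null,
"id": "2b3c4d5e",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative arithmetic (hypothetical sample counts): with a batch size of\n",
"# GPU_COUNT * IMAGES_PER_GPU = 1, a training set of e.g. 2000 images takes\n",
"# 2000 / 1 = 2000 steps per epoch, matching STEPS_PER_EPOCH above; likewise\n",
"# 100 validation images would give 100 / 1 = 100 VALIDATION_STEPS.\n",
"batch_size = config.GPU_COUNT * config.IMAGES_PER_GPU\n",
"print(\"batch size:\", batch_size)\n",
"print(\"steps/epoch for 2000 training images:\", 2000 // batch_size)\n",
"print(\"validation steps for 100 validation images:\", 100 // batch_size)"
]
},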
{
"cell_type": "code",
"execution_count": null,
"id": "f3d04824",
"metadata": {},
"outputs": [],
"source": [
"class InferenceConfig(BrainConfig):\n",
"    GPU_COUNT = 1\n",
"    IMAGES_PER_GPU = 1\n",
"\n",
"inference_config = InferenceConfig()\n",
"\n",
"# Recreate the model in inference mode\n",
"model = modellib.MaskRCNN(mode=\"inference\",\n",
"                          config=inference_config,\n",
"                          model_dir=MODEL_DIR)\n",
"\n",
"# Get path to saved weights\n",
"# Either set a specific path or find the last trained weights\n",
"resetDataDir()\n",
"model_path = os.path.join(\"weights\", \"mask_rcnn_shapes.h5\")\n",
"# model_path = model.find_last()[1]\n",
"\n",
"# Load trained weights (fill in path to trained weights here)\n",
"assert model_path != \"DANA_WEIGHTS\", \"Provide path to trained weights\"\n",
"print(\"Loading weights from \", model_path)\n",
"model.load_weights(model_path, by_name=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "57418e89",
"metadata": {},
"outputs": [],
"source": [
"# visualize.display_instances draws colors with matplotlib, which expects RGB\n",
"# floats in [0, 1], so scale the 0-255 values from the CSV\n",
"colors = []\n",
"for color in RGB_MAPPINGS_MAP.values():\n",
"    colors.append((color[0]/255, color[1]/255, color[2]/255))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4cdfbf53",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Run detection on one example image (index 95 is an arbitrary choice) and\n",
"# visualize the predicted masks with the per-class colors defined above\n",
"image = dataset.load_image(95)\n",
"results = model.detect([image], verbose=1)\n",
"plt.figure(figsize=(20,20))\n",
"\n",
"r = results[0]\n",
"print(r['rois'])\n",
"print(r['class_ids'])\n",
"print(dataset.class_names)\n",
"\n",
"visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n",
"                            dataset.class_names, r['scores'], figsize=(15, 15), colors=colors) #ax=get_ax()"
]
},
{
"cell_type": "markdown",
"id": "5e249df7",
"metadata": {},
"source": [
"## Save detection masks to disk"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "929072b8",
"metadata": {},
"outputs": [],
"source": [
"from PIL import Image\n",
"\n",
"# Create dir for detection masks\n",
"resetDataDir()\n",
"detection_mask_dir = 'detection_masks'\n",
"if not os.path.isdir(detection_mask_dir):\n",
"    os.mkdir(detection_mask_dir)\n",
"\n",
"# Map class id -> class name (skipping the background entry)\n",
"class_map = {}\n",
"for info in dataset.class_info[1:]:\n",
"    class_map[int(info['id'])] = info['name']\n",
"\n",
"# Save masks to disk\n",
"for image_id in dataset.image_ids:\n",
"    original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n",
"        modellib.load_image_gt(dataset, inference_config,\n",
"                               image_id, use_mini_mask=False)\n",
"    results = model.detect([original_image], verbose=1)\n",
"    r = results[0]\n",
"\n",
"    # Reset data directory to the directory with detection masks\n",
"    resetDataDir()\n",
"    os.chdir('detection_masks')\n",
"\n",
"    # Save one binary mask image per detected instance\n",
"    section_detection_mask_dir = \"section_masks_\" + str(image_id)\n",
"    if not os.path.isdir(section_detection_mask_dir):\n",
"        os.mkdir(section_detection_mask_dir)\n",
"    os.chdir(section_detection_mask_dir)\n",
"    # r['masks'] has shape (H, W, num_instances); r['class_ids'][i] gives the\n",
"    # class of instance i, so use it (not the channel index) to label the file\n",
"    num_instances = np.shape(r['masks'])[2]\n",
"    for i in range(num_instances):\n",
"        class_id = int(r['class_ids'][i])\n",
"        im = Image.fromarray(np.uint8(r['masks'][:, :, i] * 255), 'L')\n",
"        im.save(\"section_masks_\" + str(image_id) + \"_\" + class_map[class_id] + \"_m_\" + str(class_id) + \".png\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python [conda env:bstreg] *",
"language": "python",
"name": "conda-env-bstreg-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}